diff --git a/.melli.py.swp b/.melli.py.swp new file mode 100644 index 0000000..124def3 Binary files /dev/null and b/.melli.py.swp differ diff --git a/.test_melli.py.swp b/.test_melli.py.swp new file mode 100644 index 0000000..9f250cf Binary files /dev/null and b/.test_melli.py.swp differ diff --git a/melli.py b/melli.py index d6854d2..7fb0346 100644 --- a/melli.py +++ b/melli.py @@ -1,4 +1,5 @@ from fastapi import FastAPI +from typing import Any, Dict, List app = FastAPI( title="Melli Hiring Challenge 👩‍💻", @@ -12,25 +13,35 @@ @app.get("/task1/greet/{name}", tags=["Task 1"], summary="👋🇩🇪🇬🇧🇪🇸") -async def task1_greet(name: str) -> str: +async def task1_greet(name: str, language: str = "de") -> str: """Greet somebody in German, English or Spanish!""" # Write your code below - ... - return f"Hello {name}, I am Melli." - + greetings: Dict[str, str] = { + "en": f"Hello {name}, I am Melli.", + "de": f"Hallo {name}, ich bin Melli.", + "es": f"Hola {name}, soy Melli.", + } + try: + return greetings[language] + except: + return f"Hallo {name}, leider spreche ich nicht '{language}'!" """ Task 2 - snake_case to cameCase """ -from typing import Any - - -def camelize(key: str): +def camelize(key: str) -> str: """Takes string in snake_case format returns camelCase formatted version.""" # Write your code below - ... - return key + temp: List[str] = [c for c in key[::-1]] + ret: str = "" + while temp: + c: str = temp.pop() + if c != '_': + ret += c + else: + temp[-1] = temp[-1].upper() + return ret @app.post("/task2/camelize", tags=["Task 2"], summary="🐍➡️🐪") @@ -45,7 +56,7 @@ async def task2_camelize(data: dict[str, Any]) -> dict[str, Any]: from pydantic import BaseModel -friends = { +friends: Dict[str, List[str]] = { "Matthias": ["Sahar", "Franziska", "Hans"], "Stefan": ["Felix", "Ben", "Philip"], } @@ -60,28 +71,27 @@ class ActionResponse(BaseModel): message: str -def handle_call_action(action: str): +def handle_call_action(action: str, user: str = None) -> str: # Write your code below - ... - return "🤙 Why don't you call them yourself!" + for f in friends[user]: + if f in action: + return {"message": f"🤙 Calling {f} ..."} + return {"message": f"{user}, I can't find this person in your contacts."} -def handle_reminder_action(action: str): +def handle_reminder_action(action: str, user: str = None) -> str: # Write your code below - ... - return "🔔 I can't even remember my own stuff!" + return {"message": "🔔 Alright, I will remind you!"} -def handle_timer_action(action: str): +def handle_timer_action(action: str, user: str = None) -> str: # Write your code below - ... - return "⏰ I don't know how to read the clock!" + return {"message": "⏰ Alright, the timer is set!"} -def handle_unknown_action(action: str): +def handle_unknown_action(action: str, user: str = None) -> str: # Write your code below - ... - return "🤬 #$!@" + return {"message": "👀 Sorry , but I can't help with that!"} @app.post("/task3/action", tags=["Task 3"], summary="🤌") @@ -90,19 +100,18 @@ def task3_action(request: ActionRequest): # tip: you have to use the response model above and also might change the signature # of the action handlers # Write your code below - ... - from random import choice - - # There must be a better way! - handler = choice( - [ - handle_call_action, - handle_reminder_action, - handle_timer_action, - handle_unknown_action, - ] - ) - return handler(request.action) + if request.username not in friends: + return {"message": f"Hi {request.username}, I don't know you yet. 
But I would love to meet you!"} + if "call" in request.action.lower(): + handler = handle_call_action + elif "remind" in request.action.lower(): + handler = handle_reminder_action + elif "timer" in request.action.lower(): + handler = handle_timer_action + else: + handler = handle_unknown_action + + return handler(request.action, request.username) """ @@ -167,6 +176,17 @@ async def login(form_data: OAuth2PasswordRequestForm = Depends()): # tip: check the verify_password above # Write your code below ... + user = fake_users_db.get(form_data.username) + + if user is None or not verify_password(form_data.password, user["hashed_password"]): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Incorrect username or password", + headers={"WWW-Authenticate": "Bearer"}, + ) + + # If the user is found and the password is correct, create a JWT token + payload = { "sub": form_data.username, "exp": datetime.utcnow() + timedelta(minutes=30), @@ -193,18 +213,41 @@ async def get_current_user(token: str = Depends(oauth2_scheme)) -> User: # otherwise raise the credentials_exception above # Write your code below ... + try: + payload = decode_jwt(token) + username = payload["sub"] + user = fake_users_db.get(username) + + if user is None: + raise credentials_exception + return User(**user) + except JWTError: + raise credentials_exception + @app.get("/task4/users/{username}/secret", summary="🤫", tags=["Task 4"]) async def read_user_secret( username: str, current_user: User = Depends(get_current_user) -): +) -> str: """Read a user's secret.""" # uppps 🤭 maybe we should check if the requested secret actually belongs to the user # Write your code below ... - if user := get_user(username): - return user.secret + user: User = get_user(username) + + if user is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found", + ) + if user.username != current_user.username: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Don't spy on other user!", + ) + + return user.secret """ diff --git a/myenv/bin/Activate.ps1 b/myenv/bin/Activate.ps1 new file mode 100644 index 0000000..2fb3852 --- /dev/null +++ b/myenv/bin/Activate.ps1 @@ -0,0 +1,241 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. 
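Referring back to the melli.py changes above: a quick way to exercise the new Task 1 and Task 2 behaviour is FastAPI's TestClient. This is only an illustrative sketch, not part of the diff — the test names and expected payloads are assumptions, and it presumes the /task2/camelize endpoint applies camelize() to every key, as the challenge intends:

```
# illustrative smoke tests for the melli.py changes above (not part of this diff)
from fastapi.testclient import TestClient

from melli import app

client = TestClient(app)


def test_greet_spanish():
    # `language` is a query parameter and defaults to "de"
    response = client.get("/task1/greet/Ana", params={"language": "es"})
    assert response.status_code == 200
    assert response.json() == "Hola Ana, soy Melli."


def test_camelize_keys():
    # snake_case keys become camelCase; values are assumed to pass through untouched
    response = client.post("/task2/camelize", json={"some_key": "some_value"})
    assert response.status_code == 200
    assert response.json() == {"someKey": "some_value"}


def test_secret_requires_token():
    # without a Bearer token the OAuth2 scheme rejects the request outright
    response = client.get("/task4/users/whoever/secret")
    assert response.status_code == 401
```

Run with `pytest` from the project root while the virtual environment is active.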
+ +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. 
+ $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. + if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. 
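For readers more comfortable in Python than PowerShell, the `key = value` parsing that Get-PyVenvConfig performs above corresponds roughly to the sketch below; the function name and return type are illustrative and not part of the activation script:

```
# rough Python equivalent of Get-PyVenvConfig's pyvenv.cfg parsing (illustrative only)
from pathlib import Path


def read_pyvenv_cfg(config_dir: str) -> dict:
    values = {}
    cfg = Path(config_dir) / "pyvenv.cfg"
    if not cfg.exists():
        # an empty map is returned if no config file is found
        return values
    for line in cfg.read_text().splitlines():
        key, sep, value = line.partition("=")
        if not sep:
            continue  # not a `key = value` line
        key, value = key.strip(), value.strip()
        if value[:1] in ("'", '"'):
            # strip the surrounding quote characters, as the script does
            value = value[1:-1]
        values[key] = value
    return values
```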
+$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/myenv/bin/activate b/myenv/bin/activate new file mode 100644 index 0000000..251f6b5 --- /dev/null +++ b/myenv/bin/activate @@ -0,0 +1,66 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/Users/kerianyousfi/fun/python-challenge/myenv" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(myenv) ${PS1:-}" + export PS1 +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/myenv/bin/activate.csh b/myenv/bin/activate.csh new file mode 100644 index 0000000..4a73a1a --- /dev/null +++ b/myenv/bin/activate.csh @@ -0,0 +1,25 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . 
+# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/Users/kerianyousfi/fun/python-challenge/myenv" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(myenv) $prompt" +endif + +alias pydoc python -m pydoc + +rehash diff --git a/myenv/bin/activate.fish b/myenv/bin/activate.fish new file mode 100644 index 0000000..9f39418 --- /dev/null +++ b/myenv/bin/activate.fish @@ -0,0 +1,64 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. + +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + functions -e fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + + set -e VIRTUAL_ENV + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/Users/kerianyousfi/fun/python-challenge/myenv" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(myenv) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/myenv/bin/black b/myenv/bin/black new file mode 100755 index 0000000..581e566 --- /dev/null +++ b/myenv/bin/black @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from black import patched_main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(patched_main()) diff --git a/myenv/bin/black-primer b/myenv/bin/black-primer new file mode 100755 index 0000000..f8f3bb0 --- /dev/null +++ b/myenv/bin/black-primer @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from black_primer.cli import main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/blackd b/myenv/bin/blackd new file mode 100755 index 0000000..c81e4e0 --- /dev/null +++ b/myenv/bin/blackd @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from blackd import patched_main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(patched_main()) diff --git a/myenv/bin/dotenv b/myenv/bin/dotenv new file mode 100755 index 0000000..3d1efb7 --- /dev/null +++ b/myenv/bin/dotenv @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from dotenv.cli import cli +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(cli()) diff --git a/myenv/bin/easy_install b/myenv/bin/easy_install new file mode 100755 index 0000000..28a43f0 --- /dev/null +++ b/myenv/bin/easy_install @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python3.9 +# -*- coding: utf-8 -*- +import re +import sys +from setuptools.command.easy_install import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/easy_install-3.9 b/myenv/bin/easy_install-3.9 new file mode 100755 index 0000000..28a43f0 --- /dev/null +++ b/myenv/bin/easy_install-3.9 @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python3.9 +# -*- coding: utf-8 -*- +import re +import sys +from setuptools.command.easy_install import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/epylint b/myenv/bin/epylint new file mode 100755 index 0000000..a0d8d7c --- /dev/null +++ b/myenv/bin/epylint @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pylint import run_epylint +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(run_epylint()) diff --git a/myenv/bin/future-fstrings-show b/myenv/bin/future-fstrings-show new file mode 100755 index 0000000..9c35bed --- /dev/null +++ b/myenv/bin/future-fstrings-show @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from future_fstrings import main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/get_objgraph b/myenv/bin/get_objgraph new file mode 100755 index 
0000000..9f437d9 --- /dev/null +++ b/myenv/bin/get_objgraph @@ -0,0 +1,54 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +display the reference paths for objects in ``dill.types`` or a .pkl file + +Notes: + the generated image is useful in showing the pointer references in + objects that are or can be pickled. Any object in ``dill.objects`` + listed in ``dill.load_types(picklable=True, unpicklable=True)`` works. + +Examples:: + + $ get_objgraph FrameType + Image generated as FrameType.png +""" + +import dill as pickle +#pickle.debug.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types +load_types(pickleable=True,unpickleable=True) +from dill import objects + +if __name__ == "__main__": + import sys + if len(sys.argv) != 2: + print ("Please provide exactly one file or type name (e.g. 'IntType')") + msg = "\n" + for objtype in list(objects.keys())[:40]: + msg += objtype + ', ' + print (msg + "...") + else: + objtype = str(sys.argv[-1]) + try: + obj = objects[objtype] + except KeyError: + obj = pickle.load(open(objtype,'rb')) + import os + objtype = os.path.splitext(objtype)[0] + try: + import objgraph + objgraph.show_refs(obj, filename=objtype+'.png') + except ImportError: + print ("Please install 'objgraph' to view object graphs") + + +# EOF diff --git a/myenv/bin/httpx b/myenv/bin/httpx new file mode 100755 index 0000000..4650911 --- /dev/null +++ b/myenv/bin/httpx @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from httpx import main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/isort b/myenv/bin/isort new file mode 100755 index 0000000..133a8c2 --- /dev/null +++ b/myenv/bin/isort @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from isort.main import main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/isort-identify-imports b/myenv/bin/isort-identify-imports new file mode 100755 index 0000000..9e7e113 --- /dev/null +++ b/myenv/bin/isort-identify-imports @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from isort.main import identify_imports_main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(identify_imports_main()) diff --git a/myenv/bin/pip b/myenv/bin/pip new file mode 100755 index 0000000..7a69eac --- /dev/null +++ b/myenv/bin/pip @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python3.9 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/pip3 b/myenv/bin/pip3 new file mode 100755 index 0000000..7a69eac --- /dev/null +++ b/myenv/bin/pip3 @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python3.9 +# -*- coding: utf-8 -*- +import re 
+import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/pip3.9 b/myenv/bin/pip3.9 new file mode 100755 index 0000000..7a69eac --- /dev/null +++ b/myenv/bin/pip3.9 @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python3.9 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/py.test b/myenv/bin/py.test new file mode 100755 index 0000000..bff0c09 --- /dev/null +++ b/myenv/bin/py.test @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(console_main()) diff --git a/myenv/bin/pylint b/myenv/bin/pylint new file mode 100755 index 0000000..25dd42d --- /dev/null +++ b/myenv/bin/pylint @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pylint import run_pylint +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(run_pylint()) diff --git a/myenv/bin/pyreverse b/myenv/bin/pyreverse new file mode 100755 index 0000000..ab48604 --- /dev/null +++ b/myenv/bin/pyreverse @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pylint import run_pyreverse +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(run_pyreverse()) diff --git a/myenv/bin/pyrsa-decrypt b/myenv/bin/pyrsa-decrypt new file mode 100755 index 0000000..e32a47e --- /dev/null +++ b/myenv/bin/pyrsa-decrypt @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import decrypt +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(decrypt()) diff --git a/myenv/bin/pyrsa-encrypt b/myenv/bin/pyrsa-encrypt new file mode 100755 index 0000000..e8dced7 --- /dev/null +++ b/myenv/bin/pyrsa-encrypt @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import encrypt +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(encrypt()) diff --git a/myenv/bin/pyrsa-keygen b/myenv/bin/pyrsa-keygen new file mode 100755 index 0000000..d8cf5c0 --- /dev/null +++ b/myenv/bin/pyrsa-keygen @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import keygen +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(keygen()) diff --git a/myenv/bin/pyrsa-priv2pub b/myenv/bin/pyrsa-priv2pub new file mode 100755 index 0000000..68cf697 --- /dev/null +++ b/myenv/bin/pyrsa-priv2pub @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.util import private_to_public +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(private_to_public()) diff --git 
a/myenv/bin/pyrsa-sign b/myenv/bin/pyrsa-sign new file mode 100755 index 0000000..46756be --- /dev/null +++ b/myenv/bin/pyrsa-sign @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import sign +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(sign()) diff --git a/myenv/bin/pyrsa-verify b/myenv/bin/pyrsa-verify new file mode 100755 index 0000000..4afcbd2 --- /dev/null +++ b/myenv/bin/pyrsa-verify @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import verify +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(verify()) diff --git a/myenv/bin/pytest b/myenv/bin/pytest new file mode 100755 index 0000000..bff0c09 --- /dev/null +++ b/myenv/bin/pytest @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(console_main()) diff --git a/myenv/bin/python b/myenv/bin/python new file mode 120000 index 0000000..e616d26 --- /dev/null +++ b/myenv/bin/python @@ -0,0 +1 @@ +python3.9 \ No newline at end of file diff --git a/myenv/bin/python3 b/myenv/bin/python3 new file mode 120000 index 0000000..e616d26 --- /dev/null +++ b/myenv/bin/python3 @@ -0,0 +1 @@ +python3.9 \ No newline at end of file diff --git a/myenv/bin/python3.9 b/myenv/bin/python3.9 new file mode 120000 index 0000000..c03713e --- /dev/null +++ b/myenv/bin/python3.9 @@ -0,0 +1 @@ +/Library/Frameworks/Python.framework/Versions/3.9/bin/python3.9 \ No newline at end of file diff --git a/myenv/bin/symilar b/myenv/bin/symilar new file mode 100755 index 0000000..5a5f2ac --- /dev/null +++ b/myenv/bin/symilar @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pylint import run_symilar +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(run_symilar()) diff --git a/myenv/bin/undill b/myenv/bin/undill new file mode 100755 index 0000000..da19feb --- /dev/null +++ b/myenv/bin/undill @@ -0,0 +1,22 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +unpickle the contents of a pickled object file + +Examples:: + + $ undill hello.pkl + ['hello', 'world'] +""" + +if __name__ == '__main__': + import sys + import dill + for file in sys.argv[1:]: + print (dill.load(open(file,'rb'))) + diff --git a/myenv/bin/uvicorn b/myenv/bin/uvicorn new file mode 100755 index 0000000..a7b06ce --- /dev/null +++ b/myenv/bin/uvicorn @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from uvicorn.main import main +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(main()) diff --git a/myenv/bin/watchgod b/myenv/bin/watchgod new file mode 100755 index 0000000..4a948f9 --- /dev/null +++ b/myenv/bin/watchgod @@ -0,0 +1,8 @@ +#!/Users/kerianyousfi/fun/python-challenge/myenv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from watchgod.cli import cli +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) + sys.exit(cli()) diff --git a/myenv/lib/python3.9/site-packages/CHANGELOG.md b/myenv/lib/python3.9/site-packages/CHANGELOG.md new file mode 100644 index 0000000..3781be9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/CHANGELOG.md @@ -0,0 +1,235 @@ +# Python-RSA changelog + +## Version 4.8 - in development + +- Switch to [Poetry](https://python-poetry.org/) for dependency and release management. +- Compatibility with Python 3.10. +- Chain exceptions using `raise new_exception from old_exception` + ([#157](https://github.com/sybrenstuvel/python-rsa/pull/157)) +- Added marker file for PEP 561. This will allow type checking tools in dependent projects + to use type annotations from Python-RSA + ([#136](https://github.com/sybrenstuvel/python-rsa/pull/136)). +- Use the Chinese Remainder Theorem when decrypting with a private key. This + makes decryption 2-4x faster + ([#163](https://github.com/sybrenstuvel/python-rsa/pull/163)). + +## Version 4.7.2 - released 2021-02-24 + +- Fix picking/unpickling issue introduced in 4.7 + ([#173](https://github.com/sybrenstuvel/python-rsa/issues/173)) + +## Version 4.7.1 - released 2021-02-15 + +- Fix threading issue introduced in 4.7 + ([#173](https://github.com/sybrenstuvel/python-rsa/issues/173)) + +## Version 4.7 - released 2021-01-10 + +- Fix [#165](https://github.com/sybrenstuvel/python-rsa/issues/165): + CVE-2020-25658 - Bleichenbacher-style timing oracle in PKCS#1 v1.5 decryption + code +- Add padding length check as described by PKCS#1 v1.5 (Fixes + [#164](https://github.com/sybrenstuvel/python-rsa/issues/164)) +- Reuse of blinding factors to speed up blinding operations. + Fixes [#162](https://github.com/sybrenstuvel/python-rsa/issues/162). +- Declare & test support for Python 3.9 + + +## Version 4.4 & 4.6 - released 2020-06-12 + +Version 4.4 and 4.6 are almost a re-tagged release of version 4.2. It requires +Python 3.5+. To avoid older Python installations from trying to upgrade to RSA +4.4, this is now made explicit in the `python_requires` argument in `setup.py`. +There was a mistake releasing 4.4 as "3.5+ only", which made it necessary to +retag 4.4 as 4.6 as well. + +No functional changes compared to version 4.2. + + +## Version 4.3 & 4.5 - released 2020-06-12 + +Version 4.3 and 4.5 are almost a re-tagged release of version 4.0. It is the +last to support Python 2.7. 
This is now made explicit in the `python_requires` +argument in `setup.py`. Python 3.4 is not supported by this release. There was a +mistake releasing 4.4 as "3.5+ only", which made it necessary to retag 4.3 as +4.5 as well. + +Two security fixes have also been backported, so 4.3 = 4.0 + these two fixes. + +- Choose blinding factor relatively prime to N. Thanks Christian Heimes for pointing this out. +- Reject cyphertexts (when decrypting) and signatures (when verifying) that have + been modified by prepending zero bytes. This resolves CVE-2020-13757. Thanks + Carnil for pointing this out. + + +## Version 4.2 - released 2020-06-10 + +- Rolled back the switch to Poetry, and reverted back to using Pipenv + setup.py + for dependency management. There apparently is an issue no-binary installs of + packages build with Poetry. This fixes + [#148](https://github.com/sybrenstuvel/python-rsa/issues/148) +- Limited SHA3 support to those Python versions (3.6+) that support it natively. + The third-party library that adds support for this to Python 3.5 is a binary + package, and thus breaks the pure-Python nature of Python-RSA. + This should fix [#147](https://github.com/sybrenstuvel/python-rsa/issues/147). + + +## Version 4.1 - released 2020-06-10 + +- Added support for Python 3.8. +- Dropped support for Python 2 and 3.4. +- Added type annotations to the source code. This will make Python-RSA easier to use in + your IDE, and allows better type checking. +- Added static type checking via [MyPy](http://mypy-lang.org/). +- Fix [#129](https://github.com/sybrenstuvel/python-rsa/issues/129) Installing from source + gives UnicodeDecodeError. +- Switched to using [Poetry](https://poetry.eustace.io/) for package + management. +- Added support for SHA3 hashing: SHA3-256, SHA3-384, SHA3-512. This + is natively supported by Python 3.6+ and supported via a third-party + library on Python 3.5. +- Choose blinding factor relatively prime to N. Thanks Christian Heimes for pointing this out. +- Reject cyphertexts (when decrypting) and signatures (when verifying) that have + been modified by prepending zero bytes. This resolves CVE-2020-13757. Thanks + Adelapie for pointing this out. + + +## Version 4.0 - released 2018-09-16 + +- Removed deprecated modules: + - rsa.varblock + - rsa.bigfile + - rsa._version133 + - rsa._version200 +- Removed CLI commands that use the VARBLOCK/bigfile format. +- Ensured that PublicKey.save_pkcs1() and PrivateKey.save_pkcs1() always return bytes. +- Dropped support for Python 2.6 and 3.3. +- Dropped support for Psyco. +- Miller-Rabin iterations determined by bitsize of key. + [#58](https://github.com/sybrenstuvel/python-rsa/pull/58) +- Added function `rsa.find_signature_hash()` to return the name of the hashing + algorithm used to sign a message. `rsa.verify()` now also returns that name, + instead of always returning `True`. + [#78](https://github.com/sybrenstuvel/python-rsa/issues/13) +- Add support for SHA-224 for PKCS1 signatures. + [#104](https://github.com/sybrenstuvel/python-rsa/pull/104) +- Transitioned from `requirements.txt` to Pipenv for package management. 
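As a hedged illustration of the Version 4.0 behaviour listed above — `rsa.verify()` returning the name of the hash used for the signature instead of always returning `True` — a minimal sign/verify round trip looks roughly like this (key size and message are arbitrary):

```
# sketch of the 4.0+ sign/verify behaviour described in the changelog entries above
import rsa

pub_key, priv_key = rsa.newkeys(1024)  # small key, for illustration only
message = b"hello"

signature = rsa.sign(message, priv_key, "SHA-256")  # hash name selects the PKCS#1 v1.5 digest
assert rsa.verify(message, signature, pub_key) == "SHA-256"  # 4.0+ returns the hash name

# the helper added in 4.0 reports the hash used for an existing signature
print(rsa.find_signature_hash(message, signature, pub_key))  # "SHA-256"
```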
+ + +## Version 3.4.2 - released 2016-03-29 + +- Fixed dates in CHANGELOG.txt + + +## Version 3.4.1 - released 2016-03-26 + +- Included tests/private.pem in MANIFEST.in +- Included README.md and CHANGELOG.txt in MANIFEST.in + + +## Version 3.4 - released 2016-03-17 + +- Moved development to GitHub: https://github.com/sybrenstuvel/python-rsa +- Solved side-channel vulnerability by implementing blinding, fixes #19 +- Deprecated the VARBLOCK format and rsa.bigfile module due to security issues, see + https://github.com/sybrenstuvel/python-rsa/issues/13 +- Integration with Travis-CI [1], Coveralls [2] and Code Climate [3] +- Deprecated the old rsa._version133 and rsa._version200 submodules, they will be + completely removed in version 4.0. +- Add an 'exponent' argument to key.newkeys() +- Switched from Solovay-Strassen to Miller-Rabin primality testing, to + comply with NIST FIPS 186-4 [4] as probabilistic primality test + (Appendix C, subsection C.3): +- Fixed bugs #12, #14, #27, #30, #49 + +[1] https://travis-ci.org/sybrenstuvel/python-rsa +[2] https://coveralls.io/github/sybrenstuvel/python-rsa +[3] https://codeclimate.com/github/sybrenstuvel/python-rsa +[4] http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf + + +## Version 3.3 - released 2016-01-13 + +- Thanks to Filippo Valsorda: Fix BB'06 attack in verify() by + switching from parsing to comparison. See [1] for more information. +- Simplified Tox configuration and dropped Python 3.2 support. The + coverage package uses a u'' prefix, which was reintroduced in 3.3 + for ease of porting. + +[1] https://blog.filippo.io/bleichenbacher-06-signature-forgery-in-python-rsa/ + + +## Version 3.2.3 - released 2015-11-05 + +- Added character encoding markers for Python 2.x + + +## Version 3.2.1 - released 2015-11-05 + +- Added per-file licenses +- Added support for wheel packages +- Made example code more consistent and up to date with Python 3.4 + + +## Version 3.2 - released 2015-07-29 + +- Mentioned support for Python 3 in setup.py + + +## Version 3.1.4 - released 2014-02-22 + +- Fixed some bugs + + +## Version 3.1.3 - released 2014-02-02 + +- Dropped support for Python 2.5 + + +## Version 3.1.2 - released 2013-09-15 + +- Added Python 3.3 to the test environment. +- Removed dependency on Distribute +- Added support for loading public keys from OpenSSL + + +## Version 3.1.1 - released 2012-06-18 + +- Fixed doctests for Python 2.7 +- Removed obsolete unittest so all tests run fine on Python 3.2 + +## Version 3.1 - released 2012-06-17 + +- Big, big credits to Yesudeep Mangalapilly for all the changes listed + below! +- Added ability to generate keys on multiple cores simultaneously. +- Massive speedup +- Partial Python 3.2 compatibility (core functionality works, but + saving or loading keys doesn't, for that the pyasn1 package needs to + be ported to Python 3 first) +- Lots of bug fixes + + + +## Version 3.0.1 - released 2011-08-07 + +- Removed unused import of abc module + + +## Version 3.0 - released 2011-08-05 + +- Changed the meaning of the keysize to mean the size of ``n`` rather than + the size of both ``p`` and ``q``. This is the common interpretation of + RSA keysize. To get the old behaviour, double the keysize when generating a + new key. +- Added a lot of doctests +- Added random-padded encryption and decryption using PKCS#1 version 1.5 +- Added hash-based signatures and verification using PKCS#1v1.5 +- Modeling private and public key as real objects rather than dicts. +- Support for saving and loading keys as PEM and DER files. 
+- Ability to extract a public key from a private key (PEM+DER) + + +## Version 2.0 + +- Security improvements by Barry Mead. diff --git a/myenv/lib/python3.9/site-packages/LICENSE b/myenv/lib/python3.9/site-packages/LICENSE new file mode 100644 index 0000000..67589cb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011 Sybren A. Stüvel + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/LICENSE new file mode 100644 index 0000000..2f1b8e1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/METADATA new file mode 100644 index 0000000..9a91076 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 6.0 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Download-URL: https://pypi.org/project/PyYAML/ +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=3.6 +License-File: LICENSE + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
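To make the description above concrete, a minimal PyYAML load/dump round trip (the document contents below are illustrative):

```
# small round trip with the PyYAML API summarised in the metadata above
import yaml

text = """
database:
  host: localhost
  port: 5432
"""

config = yaml.safe_load(text)
print(config["database"]["port"])  # 5432

# serialise the structure back to YAML text
print(yaml.safe_dump(config, sort_keys=False))
```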
+ diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/RECORD new file mode 100644 index 0000000..ac2b633 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/RECORD @@ -0,0 +1,25 @@ +PyYAML-6.0.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.dist-info/WHEEL,sha256=JIE30nfOWUuazI4Vcfiuv_cYm-SkZCh6YOqQQjhm90A,109 +PyYAML-6.0.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +PyYAML-6.0.dist-info/METADATA,sha256=QmHx9kGp_0yezQCXYaft4eEFeJ6W4oyFfYwHDLP1kdg,2006 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/_yaml.cpython-39-darwin.so,sha256=MwASErZMw1iveBOlLW1RJRrd4yYWYf2bWYq_nhpnFD0,462160 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/__init__.py,sha256=NDS7S8XgA72-hY6LRmGzUWTPvzGzjWVrWk-OGA-77AA,12309 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +PyYAML-6.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +PyYAML-6.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/WHEEL new file mode 100644 index 0000000..9c3644e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_10_9_x86_64 + diff --git a/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/top_level.txt new file mode 100644 index 0000000..e6475e9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/PyYAML-6.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/myenv/lib/python3.9/site-packages/README.md b/myenv/lib/python3.9/site-packages/README.md new file mode 100644 index 0000000..02761da --- /dev/null +++ b/myenv/lib/python3.9/site-packages/README.md @@ -0,0 +1,43 @@ +# Pure Python RSA implementation + +[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/) +[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa) +[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master) +[![Code 
Climate](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability) + +[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports +encryption and decryption, signing and verifying signatures, and key +generation according to PKCS#1 version 1.5. It can be used as a Python +library as well as on the commandline. The code was mostly written by +Sybren A. Stüvel. + +Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa). For all changes, check [the changelog](https://github.com/sybrenstuvel/python-rsa/blob/master/CHANGELOG.md). + +Download and install using: + + pip install rsa + +or download it from the [Python Package Index](https://pypi.org/project/rsa/). + +The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is +licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0) + +## Security + +Because of how Python internally stores numbers, it is very hard (if not impossible) to make a pure-Python program secure against timing attacks. This library is no exception, so use it with care. See https://securitypitfalls.wordpress.com/2018/08/03/constant-time-compare-in-python/ for more info. + +## Setup of Development Environment + +``` +python3 -m venv .venv +. ./.venv/bin/activate +pip install poetry +poetry install +``` + +## Publishing a New Release + +``` +. ./.venv/bin/activate +poetry publish --build +``` diff --git a/myenv/lib/python3.9/site-packages/_black_version.py b/myenv/lib/python3.9/site-packages/_black_version.py new file mode 100644 index 0000000..8c76549 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_black_version.py @@ -0,0 +1 @@ +version = "21.12b0" diff --git a/myenv/lib/python3.9/site-packages/_cffi_backend.cpython-39-darwin.so b/myenv/lib/python3.9/site-packages/_cffi_backend.cpython-39-darwin.so new file mode 100755 index 0000000..bdcbe38 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/_cffi_backend.cpython-39-darwin.so differ diff --git a/myenv/lib/python3.9/site-packages/_pytest/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/__init__.py new file mode 100644 index 0000000..46c7827 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/__init__.py @@ -0,0 +1,8 @@ +__all__ = ["__version__"] + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" diff --git a/myenv/lib/python3.9/site-packages/_pytest/_argcomplete.py b/myenv/lib/python3.9/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000..41d9d94 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,117 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK. + +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" +import argparse +import os +import sys +from glob import glob +from typing import Any +from typing import List +from typing import Optional + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> List[str]: + # Only called on non option completions. + if os.path.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.path.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen). 
+ completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/myenv/lib/python3.9/site-packages/_pytest/_code/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000..511d0dd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,22 @@ +"""Python inspection/code generation API.""" +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + +__all__ = [ + "Code", + "ExceptionInfo", + "filter_traceback", + "Frame", + "getfslineno", + "getrawcode", + "Traceback", + "TracebackEntry", + "Source", +] diff --git a/myenv/lib/python3.9/site-packages/_pytest/_code/code.py b/myenv/lib/python3.9/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000..4230693 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_code/code.py @@ -0,0 +1,1259 @@ +import inspect +import re +import sys +import traceback +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +from pathlib import Path +from traceback import format_exception_only +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +from weakref import ref + +import attr +import pluggy +import py + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import final +from _pytest.compat import get_real_func + +if TYPE_CHECKING: + from typing_extensions import Literal + from weakref import ReferenceType + + _TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> "Code": + return cls(getrawcode(obj)) + + def __eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. 
+ __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Union[py.path.local, str]: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + raise OSError("py.path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Optional["Source"]: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> "Source": + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> Tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> Dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> Dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> "Source": + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. 
+ """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_excinfo", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None, + ) -> None: + self._rawentry = rawentry + self._excinfo = excinfo + self._repr_style: Optional['Literal["short", "long"]'] = None + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + def set_repr_style(self, mode: "Literal['short', 'long']") -> None: + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self) -> "Source": + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Union[py.path.local, str]: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> Dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource(self, astcache=None) -> Optional["Source"]: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: Union[bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]] = ( + False + ) + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). + # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. 
+ return " File %r:%d in %s\n %s\n" % ( + str(self.path), + self.lineno + 1, + name, + line, + ) + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(List[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: Union[TracebackType, Iterable[TracebackEntry]], + excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None, + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + self._excinfo = excinfo + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: Optional[TracebackType] = cur + while cur_ is not None: + yield TracebackEntry(cur_, excinfo=excinfo) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path=None, + lineno: Optional[int] = None, + firstlineno: Optional[int] = None, + excludepath: Optional[py.path.local] = None, + ) -> "Traceback": + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + for x in self: + code = x.frame.code + codepath = code.path + if ( + (path is None or codepath == path) + and ( + excludepath is None + or not isinstance(codepath, py.path.local) + or not codepath.relto(excludepath) + ) + and (lineno is None or x.lineno == lineno) + and (firstlineno is None or x.frame.code.firstlineno == firstlineno) + ): + return Traceback(x._rawentry, self._excinfo) + return self + + @overload + def __getitem__(self, key: int) -> TracebackEntry: + ... + + @overload + def __getitem__(self, key: slice) -> "Traceback": + ... + + def __getitem__(self, key: Union[int, slice]) -> Union[TracebackEntry, "Traceback"]: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, fn: Callable[[TracebackEntry], bool] = lambda x: not x.ishidden() + ) -> "Traceback": + """Return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not. + + By default this removes all the TracebackEntries which are hidden + (see ishidden() above). 
+ """ + return Traceback(filter(fn, self), self._excinfo) + + def getcrashentry(self) -> TracebackEntry: + """Return last non-hidden traceback entry that lead to the exception of a traceback.""" + for i in range(-1, -len(self) - 1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self) -> Optional[int]: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: + f = entry.frame + loc = f.f_locals + for otherloc in values: + if f.eval( + co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc, + ): + return i + values.append(entry.frame.f_locals) + return None + + +co_equal = compile( + "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" +) + + +_E = TypeVar("_E", bound=BaseException, covariant=True) + + +@final +@attr.s(repr=False) +class ExceptionInfo(Generic[_E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr = "AssertionError('assert " + + _excinfo = attr.ib(type=Optional[Tuple[Type["_E"], "_E", TracebackType]]) + _striptext = attr.ib(type=str, default="") + _traceback = attr.ib(type=Optional[Traceback], default=None) + + @classmethod + def from_exc_info( + cls, + exc_info: Tuple[Type[_E], _E, TracebackType], + exprinfo: Optional[str] = None, + ) -> "ExceptionInfo[_E]": + """Return an ExceptionInfo for an existing exc_info tuple. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext) + + @classmethod + def from_current( + cls, exprinfo: Optional[str] = None + ) -> "ExceptionInfo[BaseException]": + """Return an ExceptionInfo matching the current traceback. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. 
+ """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> "ExceptionInfo[_E]": + """Return an unfilled ExceptionInfo.""" + return cls(None) + + def fill_unfilled(self, exc_info: Tuple[Type[_E], _E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> Type[_E]: + """The exception class.""" + assert ( + self._excinfo is not None + ), ".type can only be used after the context manager exits" + return self._excinfo[0] + + @property + def value(self) -> _E: + """The exception value.""" + assert ( + self._excinfo is not None + ), ".value can only be used after the context manager exits" + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert ( + self._excinfo is not None + ), ".tb can only be used after the context manager exits" + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert ( + self._excinfo is not None + ), ".typename can only be used after the context manager exits" + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb, excinfo=ref(self)) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "" + return "<{} {} tblen={}>".format( + self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback) + ) + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is a + _pytest._code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning). + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance( + self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]] + ) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> "ReprFileLocation": + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno + 1, exconly) + + def getrepr( + self, + showlocals: bool = False, + style: "_TracebackStyle" = "long", + abspath: bool = False, + tbfilter: bool = True, + funcargs: bool = False, + truncate_locals: bool = True, + chain: bool = True, + ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|no|native|value traceback style. 
+ + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param bool tbfilter: + Hide entries that contain a local variable ``__tracebackhide__==True``. + Ignored if ``style=="native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + ReprTracebackNative( + traceback.format_exception( + self.type, self.value, self.traceback[0]._rawentry + ) + ), + self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]": + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + msg = "Regex pattern {!r} does not match {!r}." + if regexp == str(self.value): + msg += " Did you mean to `re.escape()` the regex?" + assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value)) + # Return True to allow for "assert excinfo.match()". + return True + + +@attr.s +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + showlocals = attr.ib(type=bool, default=False) + style = attr.ib(type="_TracebackStyle", default="long") + abspath = attr.ib(type=bool, default=True) + tbfilter = attr.ib(type=bool, default=True) + funcargs = attr.ib(type=bool, default=False) + truncate_locals = attr.ib(type=bool, default=True) + chain = attr.ib(type=bool, default=True) + astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) + + def _getindent(self, source: "Source") -> int: + # Figure out indent for the given source. 
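A sketch of how an `ExceptionInfo` is usually obtained and used from test code: `pytest.raises()` returns one, and `.typename`, `.match()`, and `.getrepr()` are the pieces described above.

```
import pytest

def test_divide_by_zero():
    with pytest.raises(ZeroDivisionError) as excinfo:
        1 / 0
    assert excinfo.typename == "ZeroDivisionError"
    excinfo.match(r"division")             # raises AssertionError if no match
    print(excinfo.getrepr(style="short"))  # str()-able traceback representation
```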
+ try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except BaseException: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except BaseException: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]: + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]: + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, saferepr(argvalue))) + return ReprFuncArgs(args) + return None + + def get_source( + self, + source: Optional["Source"], + line_index: int = -1, + excinfo: Optional[ExceptionInfo[BaseException]] = None, + short: bool = False, + ) -> List[str]: + """Return formatted and marked up source lines.""" + lines = [] + if source is None or line_index >= len(source.lines): + source = Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly( + self, + excinfo: ExceptionInfo[BaseException], + indent: int = 4, + markall: bool = False, + ) -> List[str]: + lines = [] + indentstr = " " * indent + # Get the real exception information out. + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indentstr[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indentstr + return lines + + def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]: + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry, + excinfo: Optional[ExceptionInfo[BaseException]] = None, + ) -> "ReprEntry": + lines: List[str] = [] + style = entry._repr_style if entry._repr_style is not None else self.style + if style in ("short", "long"): + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" % (entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback": + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + last = traceback[-1] + entries = [] + if self.style == "value": + reprentry = self.repr_traceback_entry(last, excinfo) + entries.append(reprentry) + return ReprTraceback(entries, None, style=self.style) + + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> Tuple[Traceback, Optional[str]]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: Optional[str] = ( + "!!! 
Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=str(e), + max_frames=max_frames, + total=len(traceback), + ) + # Type ignored because adding two instaces of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo( + self, excinfo: ExceptionInfo[BaseException] + ) -> "ExceptionChainRepr": + repr_chain: List[ + Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]] + ] = [] + e: Optional[BaseException] = excinfo.value + excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo + descr = None + seen: Set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + if excinfo_: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash: Optional[ReprFileLocation] = ( + excinfo_._getreprcrash() if self.style != "value" else None + ) + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@attr.s(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return "<{} instance at {:0x}>".format(self.__class__, id(self)) + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@attr.s(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. 
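A plain-Python sketch of the two chaining cases `repr_excinfo()` walks above: `raise ... from err` sets `__cause__` (the "direct cause" message), while raising inside an `except` block sets `__context__` (the "during handling" message).

```
def explicit_chain():
    try:
        {}["missing"]
    except KeyError as err:
        # "The above exception was the direct cause of the following exception:"
        raise ValueError("bad config") from err

def implicit_chain():
    try:
        {}["missing"]
    except KeyError:
        # "During handling of the above exception, another exception occurred:"
        raise ValueError("bad config")
```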
+ reprcrash: Optional["ReprFileLocation"] + reprtraceback: "ReprTraceback" + + def __attrs_post_init__(self) -> None: + self.sections: List[Tuple[str, str, str]] = [] + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@attr.s(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain = attr.ib( + type=Sequence[ + Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]] + ] + ) + + def __attrs_post_init__(self) -> None: + super().__attrs_post_init__() + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. + self.reprtraceback = self.chain[-1][0] + self.reprcrash = self.chain[-1][1] + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@attr.s(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback = attr.ib(type="ReprTraceback") + reprcrash = attr.ib(type="ReprFileLocation") + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@attr.s(eq=False) +class ReprTraceback(TerminalRepr): + reprentries = attr.ib(type=Sequence[Union["ReprEntry", "ReprEntryNative"]]) + extraline = attr.ib(type=Optional[str]) + style = attr.ib(type="_TracebackStyle") + + entrysep = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + + +@attr.s(eq=False) +class ReprEntryNative(TerminalRepr): + lines = attr.ib(type=Sequence[str]) + style: "_TracebackStyle" = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@attr.s(eq=False) +class ReprEntry(TerminalRepr): + lines = attr.ib(type=Sequence[str]) + reprfuncargs = attr.ib(type=Optional["ReprFuncArgs"]) + reprlocals = attr.ib(type=Optional["ReprLocals"]) + reprfileloc = attr.ib(type=Optional["ReprFileLocation"]) + style = attr.ib(type="_TracebackStyle") + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" + character, as doing so might break line continuations. 
+ """ + + if not self.lines: + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: List[str] = [] + source_lines: List[str] = [] + failure_lines: List[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + if self.style == "value": + source_lines.append(line) + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + assert self.reprfileloc is not None + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@attr.s(eq=False) +class ReprFileLocation(TerminalRepr): + path = attr.ib(type=str, converter=str) + lineno = attr.ib(type=int) + message = attr.ib(type=str) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@attr.s(eq=False) +class ReprLocals(TerminalRepr): + lines = attr.ib(type=Sequence[str]) + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@attr.s(eq=False) +class ReprFuncArgs(TerminalRepr): + args = attr.ib(type=Sequence[Tuple[str, object]]) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> Tuple[Union[str, py.path.local], int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as # type: ignore[attr-defined] + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent +_PY_DIR = Path(py.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + if _PY_DIR in parents: + return False + + return True diff --git a/myenv/lib/python3.9/site-packages/_pytest/_code/source.py b/myenv/lib/python3.9/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..6f54057 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_code/source.py @@ -0,0 +1,212 @@ +import ast +import inspect +import textwrap +import tokenize +import types +import warnings +from bisect import bisect_right +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Union + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: List[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + elif isinstance(obj, (tuple, list)): + self.lines = deindent(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: + ... 
+ + @overload + def __getitem__(self, key: slice) -> "Source": + ... + + def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> "Source": + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> "Source": + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> "Source": + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> Tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> "Source": + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> Tuple[Optional[Source], int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> List[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: List[int] = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: Optional[List[ast.stmt]] = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. 
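A small sketch of the `Source` behaviour described above, using the internal `_pytest._code.Source` helper: construction deindents the fragment, and `getstatement()` takes a 0-based line number.

```
from _pytest._code import Source

s = Source("    if x:\n        y = 1\n")       # deindented on construction
assert s.lines == ["if x:", "    y = 1"]
assert str(s.getstatement(1)) == "    y = 1"   # statement containing line 1
```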
+ values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: Optional[ast.AST] = None, +) -> Tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/myenv/lib/python3.9/site-packages/_pytest/_io/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..db001e9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,8 @@ +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/myenv/lib/python3.9/site-packages/_pytest/_io/saferepr.py b/myenv/lib/python3.9/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000..5eb1e08 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,129 @@ +import pprint +import reprlib +from typing import Any +from typing import Dict +from typing import IO +from typing import Optional + + +def _try_repr_or_str(obj: object) -> str: + try: + return repr(obj) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + return '{}("{}")'.format(type(obj).__name__, obj) + + +def _format_repr_exception(exc: BaseException, obj: object) -> str: + try: + exc_info = _try_repr_or_str(exc) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + exc_info = "unpresentable exception ({})".format(_try_repr_or_str(exc)) + return "<[{} raised in repr()] {} object at 0x{:x}>".format( + exc_info, type(obj).__name__, id(obj) + ) + + +def _ellipsize(s: str, maxsize: int) -> str: + if len(s) > maxsize: + i = max(0, (maxsize - 3) // 2) + j = max(0, maxsize - 3 - i) + return s[:i] + "..." 
+ s[len(s) - j :] + return s + + +class SafeRepr(reprlib.Repr): + """repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call.""" + + def __init__(self, maxsize: int) -> None: + super().__init__() + self.maxstring = maxsize + self.maxsize = maxsize + + def repr(self, x: object) -> str: + try: + s = super().repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + return _ellipsize(s, self.maxsize) + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + return _ellipsize(s, self.maxsize) + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info. + """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +def saferepr(obj: object, maxsize: int = 240) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + standard 2.6 lib. + """ + return SafeRepr(maxsize).repr(obj) + + +class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter): + """PrettyPrinter that always dispatches (regardless of width).""" + + def _format( + self, + object: object, + stream: IO[str], + indent: int, + allowance: int, + context: Dict[int, Any], + level: int, + ) -> None: + # Type ignored because _dispatch is private. + p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined] + + objid = id(object) + if objid in context or p is None: + # Type ignored because _format is private. + super()._format( # type: ignore[misc] + object, stream, indent, allowance, context, level, + ) + return + + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + + +def _pformat_dispatch( + object: object, + indent: int = 1, + width: int = 80, + depth: Optional[int] = None, + *, + compact: bool = False, +) -> str: + return AlwaysDispatchingPrettyPrinter( + indent=indent, width=width, depth=depth, compact=compact + ).pformat(object) diff --git a/myenv/lib/python3.9/site-packages/_pytest/_io/terminalwriter.py b/myenv/lib/python3.9/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 0000000..8edf4cd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,210 @@ +"""Helper functions for writing to terminals and files.""" +import os +import shutil +import sys +from typing import Optional +from typing import Sequence +from typing import TextIO + +from .wcwidth import wcswidth +from _pytest.compat import final + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. 
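A sketch of what `saferepr()` above guards against: an object whose `__repr__` raises still yields a printable, size-limited description instead of propagating the error.

```
from _pytest._io.saferepr import saferepr

class Broken:
    def __repr__(self):
        raise RuntimeError("boom")

print(saferepr(Broken()))
# e.g. <[RuntimeError('boom') raised in repr()] Broken object at 0x...>
print(saferepr("x" * 1000, maxsize=40))  # long reprs are ellipsized
```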
+ if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if "NO_COLOR" in os.environ: + return False + if "FORCE_COLOR" in os.environ: + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: Optional[TextIO] = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: Optional[int] = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. + if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. 
+ if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + "indents size ({}) should have same size as lines ({})".format( + len(indents), len(lines) + ) + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _highlight(self, source: str) -> str: + """Highlight the given source code if we have markup support.""" + if not self.hasmarkup or not self.code_highlight: + return source + try: + from pygments.formatters.terminal import TerminalFormatter + from pygments.lexers.python import PythonLexer + from pygments import highlight + except ImportError: + return source + else: + highlighted: str = highlight( + source, PythonLexer(), TerminalFormatter(bg="dark") + ) + return highlighted diff --git a/myenv/lib/python3.9/site-packages/_pytest/_io/wcwidth.py b/myenv/lib/python3.9/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000..e5c7bf4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,55 @@ +import unicodedata +from functools import lru_cache + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. 
+ """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/myenv/lib/python3.9/site-packages/_pytest/_version.py b/myenv/lib/python3.9/site-packages/_pytest/_version.py new file mode 100644 index 0000000..8351858 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '6.2.5' +version_tuple = (6, 2, 5) diff --git a/myenv/lib/python3.9/site-packages/_pytest/assertion/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..a18cf19 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,179 @@ +"""Support for presenting detailed information in failing assertions.""" +import sys +from typing import Any +from typing import Generator +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook." + "Make sure to delete any previously generated pyc cache files.", + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raises TypeError: If the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + # TODO(typing): Add a protocol for mark_rewrite() and use it + # for importhook and for PytestPluginManager.rewrite_hook. 
+ importhook = DummyRewriteHook() # type: ignore + importhook.mark_rewrite(*names) + + +class DummyRewriteHook: + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names: str) -> None: + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config: Config, mode) -> None: + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook: Optional[rewrite.AssertionRewritingHook] = None + + +def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: + """Try to install the rewrite hook, raise SystemError if it fails.""" + config._store[assertstate_key] = AssertionState(config, "rewrite") + config._store[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._store[assertstate_key].trace("installed rewrite import hook") + + def undo() -> None: + hook = config._store[assertstate_key].hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session: "Session") -> None: + # This hook is only called when test modules are collected + # so for example not in the master process of pytest-xdist + # (which does not collect test modules). + assertstate = session.config._store.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +@hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. + + The rewrite module will use util._reprcompare if it exists to use custom + reporting via the pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + + ihook = item.ihook + + def callbinrepr(op, left: object, right: object) -> Optional[str]: + """Call the pytest_assertrepr_compare hook and prepare the result. + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. 
+        """
+        hook_result = ihook.pytest_assertrepr_compare(
+            config=item.config, op=op, left=left, right=right
+        )
+        for new_expl in hook_result:
+            if new_expl:
+                new_expl = truncate.truncate_if_required(new_expl, item)
+                new_expl = [line.replace("\n", "\\n") for line in new_expl]
+                res = "\n~".join(new_expl)
+                if item.config.getvalue("assertmode") == "rewrite":
+                    res = res.replace("%", "%%")
+                return res
+        return None
+
+    saved_assert_hooks = util._reprcompare, util._assertion_pass
+    util._reprcompare = callbinrepr
+
+    if ihook.pytest_assertion_pass.get_hookimpls():
+
+        def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
+            ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)
+
+        util._assertion_pass = call_assertion_pass_hook
+
+    yield
+
+    util._reprcompare, util._assertion_pass = saved_assert_hooks
+
+
+def pytest_sessionfinish(session: "Session") -> None:
+    assertstate = session.config._store.get(assertstate_key, None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(None)
+
+
+def pytest_assertrepr_compare(
+    config: Config, op: str, left: Any, right: Any
+) -> Optional[List[str]]:
+    return util.assertrepr_compare(config=config, op=op, left=left, right=right)
diff --git a/myenv/lib/python3.9/site-packages/_pytest/assertion/rewrite.py b/myenv/lib/python3.9/site-packages/_pytest/assertion/rewrite.py
new file mode 100644
index 0000000..37ff076
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/_pytest/assertion/rewrite.py
@@ -0,0 +1,1125 @@
+"""Rewrite assertion AST to produce nice error messages."""
+import ast
+import errno
+import functools
+import importlib.abc
+import importlib.machinery
+import importlib.util
+import io
+import itertools
+import marshal
+import os
+import struct
+import sys
+import tokenize
+import types
+from pathlib import Path
+from pathlib import PurePath
+from typing import Callable
+from typing import Dict
+from typing import IO
+from typing import Iterable
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+import py
+
+from _pytest._io.saferepr import saferepr
+from _pytest._version import version
+from _pytest.assertion import util
+from _pytest.assertion.util import (  # noqa: F401
+    format_explanation as _format_explanation,
+)
+from _pytest.config import Config
+from _pytest.main import Session
+from _pytest.pathlib import fnmatch_ex
+from _pytest.store import StoreKey
+
+if TYPE_CHECKING:
+    from _pytest.assertion import AssertionState
+
+
+assertstate_key = StoreKey["AssertionState"]()
+
+
+# pytest caches rewritten pycs in pycache dirs
+PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+
+class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader):
+    """PEP302/PEP451 import hook which rewrites asserts."""
+
+    def __init__(self, config: Config) -> None:
+        self.config = config
+        try:
+            self.fnpats = config.getini("python_files")
+        except ValueError:
+            self.fnpats = ["test_*.py", "*_test.py"]
+        self.session: Optional[Session] = None
+        self._rewritten_names: Set[str] = set()
+        self._must_rewrite: Set[str] = set()
+        # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
+        # which might result in infinite recursion (#3506)
+        self._writing_pyc = False
+        self._basenames_to_check_rewrite = {"conftest"}
+        self._marked_for_rewrite_cache: Dict[str, bool] = {}
+        self._session_paths_checked = False
+
+    def set_session(self, session: Optional[Session]) -> None:
+        self.session = session
+        self._session_paths_checked = False
+
+    # Indirection so we can mock calls to find_spec originated from the hook during testing
+    _find_spec = importlib.machinery.PathFinder.find_spec
+
+    def find_spec(
+        self,
+        name: str,
+        path: Optional[Sequence[Union[str, bytes]]] = None,
+        target: Optional[types.ModuleType] = None,
+    ) -> Optional[importlib.machinery.ModuleSpec]:
+        if self._writing_pyc:
+            return None
+        state = self.config._store[assertstate_key]
+        if self._early_rewrite_bailout(name, state):
+            return None
+        state.trace("find_module called for: %s" % name)
+
+        # Type ignored because mypy is confused about the `self` binding here.
+        spec = self._find_spec(name, path)  # type: ignore
+        if (
+            # the import machinery could not find a file to import
+            spec is None
+            # this is a namespace package (without `__init__.py`)
+            # there's nothing to rewrite there
+            # python3.6: `namespace`
+            # python3.7+: `None`
+            or spec.origin == "namespace"
+            or spec.origin is None
+            # we can only rewrite source files
+            or not isinstance(spec.loader, importlib.machinery.SourceFileLoader)
+            # if the file doesn't exist, we can't rewrite it
+            or not os.path.exists(spec.origin)
+        ):
+            return None
+        else:
+            fn = spec.origin
+
+        if not self._should_rewrite(name, fn, state):
+            return None
+
+        return importlib.util.spec_from_file_location(
+            name,
+            fn,
+            loader=self,
+            submodule_search_locations=spec.submodule_search_locations,
+        )
+
+    def create_module(
+        self, spec: importlib.machinery.ModuleSpec
+    ) -> Optional[types.ModuleType]:
+        return None  # default behaviour is fine
+
+    def exec_module(self, module: types.ModuleType) -> None:
+        assert module.__spec__ is not None
+        assert module.__spec__.origin is not None
+        fn = Path(module.__spec__.origin)
+        state = self.config._store[assertstate_key]
+
+        self._rewritten_names.add(module.__name__)
+
+        # The requested module looks like a test file, so rewrite it. This is
+        # the most magical part of the process: load the source, rewrite the
+        # asserts, and load the rewritten source. We also cache the rewritten
+        # module code in a special pyc. We must be aware of the possibility of
+        # concurrent pytest processes rewriting and loading pycs. To avoid
+        # tricky race conditions, we maintain the following invariant: The
+        # cached pyc is always a complete, valid pyc. Operations on it must be
+        # atomic. POSIX's atomic rename comes in handy.
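# --- NOTE (editor): a hedged illustration of the pyc cache layout implied by
# PYTEST_TAG and PYC_TAIL above; the module name "test_app" is hypothetical.
import sys

tag = f"{sys.implementation.cache_tag}-pytest-6.2.5"
print(f"__pycache__/test_app.{tag}.pyc")
# on CPython 3.9 this prints: __pycache__/test_app.cpython-39-pytest-6.2.5.pyc
# --- end note ---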
+ write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.path.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. + path = PurePath(os.path.sep.join(parts) + ".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(py.path.local(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: "AssertionState") -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. 
+
+        The named module or package as well as any nested modules will
+        be rewritten on import.
+        """
+        already_imported = (
+            set(names).intersection(sys.modules).difference(self._rewritten_names)
+        )
+        for name in already_imported:
+            mod = sys.modules[name]
+            if not AssertionRewriter.is_rewrite_disabled(
+                mod.__doc__ or ""
+            ) and not isinstance(mod.__loader__, type(self)):
+                self._warn_already_imported(name)
+        self._must_rewrite.update(names)
+        self._marked_for_rewrite_cache.clear()
+
+    def _warn_already_imported(self, name: str) -> None:
+        from _pytest.warning_types import PytestAssertRewriteWarning
+
+        self.config.issue_config_time_warning(
+            PytestAssertRewriteWarning(
+                "Module already imported so cannot be rewritten: %s" % name
+            ),
+            stacklevel=5,
+        )
+
+    def get_data(self, pathname: Union[str, bytes]) -> bytes:
+        """Optional PEP302 get_data API."""
+        with open(pathname, "rb") as f:
+            return f.read()
+
+
+def _write_pyc_fp(
+    fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType
+) -> None:
+    # Technically, we don't have to have the same pyc format as
+    # (C)Python, since these "pycs" should never be seen by builtin
+    # import. However, there's little reason to deviate.
+    fp.write(importlib.util.MAGIC_NUMBER)
+    # https://www.python.org/dev/peps/pep-0552/
+    if sys.version_info >= (3, 7):
+        flags = b"\x00\x00\x00\x00"
+        fp.write(flags)
+    # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
+    mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
+    size = source_stat.st_size & 0xFFFFFFFF
+    # "<LL" stands for 2 unsigned longs, little-endian.
+    fp.write(struct.pack("<LL", mtime, size))
+    fp.write(marshal.dumps(co))
+
+
+if sys.platform == "win32":
+    from atomicwrites import atomic_write
+
+    def _write_pyc(
+        state: "AssertionState",
+        co: types.CodeType,
+        source_stat: os.stat_result,
+        pyc: Path,
+    ) -> bool:
+        try:
+            with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
+                _write_pyc_fp(fp, source_stat, co)
+        except OSError as e:
+            state.trace(f"error writing pyc file at {pyc}: {e}")
+            # we ignore any failure to write the cache file
+            # there are many reasons, permission-denied, pycache dir being a
+            # file etc.
+            return False
+        return True
+
+
+else:
+
+    def _write_pyc(
+        state: "AssertionState",
+        co: types.CodeType,
+        source_stat: os.stat_result,
+        pyc: Path,
+    ) -> bool:
+        proc_pyc = f"{pyc}.{os.getpid()}"
+        try:
+            fp = open(proc_pyc, "wb")
+        except OSError as e:
+            state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+            return False
+
+        try:
+            _write_pyc_fp(fp, source_stat, co)
+            os.rename(proc_pyc, os.fspath(pyc))
+        except OSError as e:
+            state.trace(f"error writing pyc file at {pyc}: {e}")
+            # we ignore any failure to write the cache file
+            # there are many reasons, permission-denied, pycache dir being a
+            # file etc.
+            return False
+        finally:
+            fp.close()
+        return True
+
+
+def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
+    """Read and rewrite *fn* and return the code object."""
+    fn_ = os.fspath(fn)
+    stat = os.stat(fn_)
+    with open(fn_, "rb") as f:
+        source = f.read()
+    tree = ast.parse(source, filename=fn_)
+    rewrite_asserts(tree, source, fn_, config)
+    co = compile(tree, fn_, "exec", dont_inherit=True)
+    return stat, co
+
+
+def _read_pyc(
+    source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
+) -> Optional[types.CodeType]:
+    """Possibly read a pytest pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+ """ + try: + fp = open(os.fspath(pyc), "rb") + except OSError: + return None + with fp: + # https://www.python.org/dev/peps/pep-0552/ + has_flags = sys.version_info >= (3, 7) + try: + stat_result = os.stat(os.fspath(source)) + mtime = int(stat_result.st_mtime) + size = stat_result.st_size + data = fp.read(16 if has_flags else 12) + except OSError as e: + trace(f"_read_pyc({source}): OSError {e}") + return None + # Check for invalid or out of date pyc file. + if len(data) != (16 if has_flags else 12): + trace("_read_pyc(%s): invalid pyc (too short)" % source) + return None + if data[:4] != importlib.util.MAGIC_NUMBER: + trace("_read_pyc(%s): invalid pyc (bad magic number)" % source) + return None + if has_flags and data[4:8] != b"\x00\x00\x00\x00": + trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source) + return None + mtime_data = data[8 if has_flags else 4 : 12 if has_flags else 8] + if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF: + trace("_read_pyc(%s): out of date" % source) + return None + size_data = data[12 if has_flags else 8 : 16 if has_flags else 12] + if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF: + trace("_read_pyc(%s): invalid pyc (incorrect size)" % source) + return None + try: + co = marshal.load(fp) + except Exception as e: + trace(f"_read_pyc({source}): marshal.load error {e}") + return None + if not isinstance(co, types.CodeType): + trace("_read_pyc(%s): not a code object" % source) + return None + return co + + +def rewrite_asserts( + mod: ast.Module, + source: bytes, + module_path: Optional[str] = None, + config: Optional[Config] = None, +) -> None: + """Rewrite the assert statements in mod.""" + AssertionRewriter(module_path, config, source).run(mod) + + +def _saferepr(obj: object) -> str: + r"""Get a safe repr of an object for assertion error messages. + + The assertion formatting (util.format_explanation()) requires + newlines to be escaped since they are a special character for it. + Normally assertion.util.format_explanation() does this but for a + custom repr it is possible to contain one of the special escape + sequences, especially '\n{' and '\n}' are likely to be present in + JSON reprs. + """ + return saferepr(obj).replace("\n", "\\n") + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. 
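# --- NOTE (editor): a small check of the escaping rules described above; my
# example, using the two helpers defined in this file. _saferepr() escapes the
# newline for the explanation formatter, while _format_assertmsg() preserves
# it via the "\n~" continuation marker and escapes "%" for later formatting.
from _pytest.assertion.rewrite import _format_assertmsg, _saferepr

assert _saferepr("a\nb") == "'a\\nb'"
assert _format_assertmsg("oops\nfailed") == "oops\n~failed"
assert _format_assertmsg("100%") == "100%%"
# --- end note ---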
+    replaces = [("\n", "\n~"), ("%", "%%")]
+    if not isinstance(obj, str):
+        obj = saferepr(obj)
+        replaces.append(("\\n", "\n~"))
+
+    for r1, r2 in replaces:
+        obj = obj.replace(r1, r2)
+
+    return obj
+
+
+def _should_repr_global_name(obj: object) -> bool:
+    if callable(obj):
+        return False
+
+    try:
+        return not hasattr(obj, "__name__")
+    except Exception:
+        return True
+
+
+def _format_boolop(explanations: Iterable[str], is_or: bool) -> str:
+    explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
+    return explanation.replace("%", "%%")
+
+
+def _call_reprcompare(
+    ops: Sequence[str],
+    results: Sequence[bool],
+    expls: Sequence[str],
+    each_obj: Sequence[object],
+) -> str:
+    for i, res, expl in zip(range(len(ops)), results, expls):
+        try:
+            done = not res
+        except Exception:
+            done = True
+        if done:
+            break
+    if util._reprcompare is not None:
+        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+        if custom is not None:
+            return custom
+    return expl
+
+
+def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None:
+    if util._assertion_pass is not None:
+        util._assertion_pass(lineno, orig, expl)
+
+
+def _check_if_assertion_pass_impl() -> bool:
+    """Check if any plugins implement the pytest_assertion_pass hook
+    in order not to generate explanation unnecessarily (might be expensive)."""
+    return True if util._assertion_pass else False
+
+
+UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
+
+BINOP_MAP = {
+    ast.BitOr: "|",
+    ast.BitXor: "^",
+    ast.BitAnd: "&",
+    ast.LShift: "<<",
+    ast.RShift: ">>",
+    ast.Add: "+",
+    ast.Sub: "-",
+    ast.Mult: "*",
+    ast.Div: "/",
+    ast.FloorDiv: "//",
+    ast.Mod: "%%",  # escaped for string formatting
+    ast.Eq: "==",
+    ast.NotEq: "!=",
+    ast.Lt: "<",
+    ast.LtE: "<=",
+    ast.Gt: ">",
+    ast.GtE: ">=",
+    ast.Pow: "**",
+    ast.Is: "is",
+    ast.IsNot: "is not",
+    ast.In: "in",
+    ast.NotIn: "not in",
+    ast.MatMult: "@",
+}
+
+
+def set_location(node, lineno, col_offset):
+    """Set node location information recursively."""
+
+    def _fix(node, lineno, col_offset):
+        if "lineno" in node._attributes:
+            node.lineno = lineno
+        if "col_offset" in node._attributes:
+            node.col_offset = col_offset
+        for child in ast.iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+
+    _fix(node, lineno, col_offset)
+    return node
+
+
+def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
+    """Return a mapping from {lineno: "assertion test expression"}."""
+    ret: Dict[int, str] = {}
+
+    depth = 0
+    lines: List[str] = []
+    assert_lineno: Optional[int] = None
+    seen_lines: Set[int] = set()
+
+    def _write_and_reset() -> None:
+        nonlocal depth, lines, assert_lineno, seen_lines
+        assert assert_lineno is not None
+        ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\")
+        depth = 0
+        lines = []
+        assert_lineno = None
+        seen_lines = set()
+
+    tokens = tokenize.tokenize(io.BytesIO(src).readline)
+    for tp, source, (lineno, offset), _, line in tokens:
+        if tp == tokenize.NAME and source == "assert":
+            assert_lineno = lineno
+        elif assert_lineno is not None:
+            # keep track of depth for the assert-message `,` lookup
+            if tp == tokenize.OP and source in "([{":
+                depth += 1
+            elif tp == tokenize.OP and source in ")]}":
+                depth -= 1
+
+            if not lines:
+                lines.append(line[offset:])
+                seen_lines.add(lineno)
+            # a non-nested comma separates the expression from the message
+            elif depth == 0 and tp == tokenize.OP and source == ",":
+                # one line assert with message
+                if lineno in seen_lines and len(lines) == 1:
+                    offset_in_trimmed = offset + len(lines[-1]) - len(line)
+                    lines[-1] = lines[-1][:offset_in_trimmed]
+                # multi-line assert with message
+                elif lineno in seen_lines:
+                    lines[-1] = lines[-1][:offset]
+                # multi line assert with escaped newline before message
+                else:
+                    lines.append(line[:offset])
+                _write_and_reset()
+            elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}:
+                _write_and_reset()
+            elif lines and lineno not in seen_lines:
+                lines.append(line)
+                seen_lines.add(lineno)
+
+    return ret
+
+
+class AssertionRewriter(ast.NodeVisitor):
+    """Assertion rewriting implementation.
+
+    The main entrypoint is to call .run() with an ast.Module instance,
+    this will then find all the assert statements and rewrite them to
+    provide intermediate values and a detailed assertion error. See
+    http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
+    for an overview of how this works.
+
+    The entry point here is .run() which will iterate over all the
+    statements in an ast.Module and for each ast.Assert statement it
+    finds call .visit() with it. Then .visit_Assert() takes over and
+    is responsible for creating new ast statements to replace the
+    original assert statement: it rewrites the test of an assertion
+    to provide intermediate values and replace it with an if statement
+    which raises an assertion error with a detailed explanation in
+    case the expression is false and calls pytest_assertion_pass hook
+    if expression is true.
+
+    For this .visit_Assert() uses the visitor pattern to visit all the
+    AST nodes of the ast.Assert.test field, each visit call returning
+    an AST node and the corresponding explanation string. During this
+    state is kept in several instance attributes:
+
+    :statements: All the AST statements which will replace the assert
+       statement.
+
+    :variables: This is populated by .variable() with each variable
+       used by the statements so that they can all be set to None at
+       the end of the statements.
+
+    :variable_counter: Counter to create new unique variables needed
+       by statements. Variables are created using .variable() and
+       have the form of "@py_assert0".
+
+    :expl_stmts: The AST statements which will be executed to get
+       data from the assertion. This is the code which will construct
+       the detailed assertion message that is used in the AssertionError
+       or for the pytest_assertion_pass hook.
+
+    :explanation_specifiers: A dict filled by .explanation_param()
+       with %-formatting placeholders and their corresponding
+       expressions to use in the building of an assertion message.
+       This is used by .pop_format_context() to build a message.
+
+    :stack: A stack of the explanation_specifiers dicts maintained by
+       .push_format_context() and .pop_format_context() which allows
+       to build another %-formatted string while already building one.
+
+    This state is reset on every new assert statement visited and used
+    by the other visitors.
+    """
+
+    def __init__(
+        self, module_path: Optional[str], config: Optional[Config], source: bytes
+    ) -> None:
+        super().__init__()
+        self.module_path = module_path
+        self.config = config
+        if config is not None:
+            self.enable_assertion_pass_hook = config.getini(
+                "enable_assertion_pass_hook"
+            )
+        else:
+            self.enable_assertion_pass_hook = False
+        self.source = source
+
+    @functools.lru_cache(maxsize=1)
+    def _assert_expr_to_lineno(self) -> Dict[int, str]:
+        return _get_assertion_exprs(self.source)
+
+    def run(self, mod: ast.Module) -> None:
+        """Find all assert statements in *mod* and rewrite them."""
+        if not mod.body:
+            # Nothing to do.
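# --- NOTE (editor, illustrative): _get_assertion_exprs() above maps each
# assert's line number to the raw test expression, with any trailing message
# stripped. For the source b"assert x == 1, 'boom'\n" it returns {1: "x == 1"}:
# the comma at paren depth 0 cuts off the message, and the "assert " prefix is
# excluded because collection starts at the token after the keyword.
# --- end note ---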
+ return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + lineno = 1 + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Str) + ): + doc = item.value.s + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. + nodes: List[ast.AST] = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: List[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). 
+ """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: Dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> Tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> List[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestAssertRewriteWarning + import warnings + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=os.fspath(self.module_path), + lineno=assert_.lineno, + ) + + self.statements: List[ast.stmt] = [] + self.variables: List[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: List[str] = [] + + self.stack: List[Dict[str, ast.expr]] = [] + self.expl_stmts: List[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. 
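# --- NOTE (editor): roughly, the statements assembled below turn
# `assert x == y` into something like the following; a hedged paraphrase,
# since the real rewrite uses the "@py_assert*"/"@py_format*" names created
# by .variable(), which are deliberately invalid as Python identifiers:
#
#     _cond = x == y
#     if not _cond:
#         raise AssertionError(_format_explanation(...))
#     _cond = None    # temporaries are cleared at the end
# --- end note ---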
+ top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Str(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Str("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = self._assert_expr_to_lineno()[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Num(assert_.lineno), + ast.Str(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + self.expl_stmts + [hook_call_pass], + [], + ) + statements_pass = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.NameConstant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.NameConstant(None)) + self.statements.append(clear) + # Fix line numbers. + for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
+ locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> Tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: List[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa + self.expl_stmts = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: List[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> Tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> Tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call: ast.Call) -> Tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]: + # A Starred node can appear in a function call. 
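# --- NOTE (editor): to see the explanations these visitors build end to end,
# a one-file demo is enough (file name hypothetical):
#
#     $ cat test_demo.py
#     def test_demo():
#         a, b, c = 1, 2, 4
#         assert a + b == c
#
#     $ pytest test_demo.py
#     ...
#     E       assert (1 + 2) == 4
#
# visit_BinOp() contributes the "(1 + 2)" grouping; visit_Compare() adds the
# comparison and its intermediate values.
# --- end note ---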
+ res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> Tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]: + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(ast.Str(sym)) + expl = f"{left_expl} {sym} {next_expl}" + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "_call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res: ast.expr = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) + + +def try_makedirs(cache_dir: Path) -> bool: + """Attempt to create the given directory and sub-directories exist. + + Returns True if successful or if it already exists. + """ + try: + os.makedirs(os.fspath(cache_dir), exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + if e.errno == errno.EROFS: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.version_info >= (3, 8) and sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/myenv/lib/python3.9/site-packages/_pytest/assertion/truncate.py b/myenv/lib/python3.9/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..5ba9ddc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,100 @@ +"""Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. 
+""" +import os +from typing import List +from typing import Optional + +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required( + explanation: List[str], item: Item, max_length: Optional[int] = None +) -> List[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item: Item) -> bool: + """Whether or not this test item is eligible for truncation.""" + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation( + input_lines: List[str], + max_lines: Optional[int] = None, + max_chars: Optional[int] = None, +) -> List[str]: + """Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." 
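# --- NOTE (editor): a quick sketch of the behaviour implemented above; my
# example. Twenty short lines exceed the eight-line limit, so eight lines are
# kept, the last one is marked, and a usage message reports what was hidden.
from _pytest.assertion.truncate import _truncate_explanation

out = _truncate_explanation([f"line {i}" for i in range(20)])
assert len(out) == 10        # 8 kept lines + "" + the usage message
assert out[7] == "line 7..."
assert out[-1] == "...Full output truncated (13 lines hidden), use '-vv' to show"
# --- end note ---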
+ + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = "...Full output truncated" + if truncated_line_count == 1: + msg += f" ({truncated_line_count} line hidden)" + else: + msg += f" ({truncated_line_count} lines hidden)" + msg += f", {USAGE_MSG}" + truncated_explanation.extend(["", str(msg)]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines: List[str], max_chars: int) -> List[str]: + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/myenv/lib/python3.9/site-packages/_pytest/assertion/util.py b/myenv/lib/python3.9/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..da1ffd1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/assertion/util.py @@ -0,0 +1,477 @@ +"""Utilities for assertion debugging.""" +import collections.abc +import pprint +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import Iterable +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence + +import _pytest._code +from _pytest import outcomes +from _pytest._io.saferepr import _pformat_dispatch +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None + +# Works similarly as _reprcompare attribute. Is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Optional[Callable[[int, str, str], None]] = None + + +def format_explanation(explanation: str) -> str: + r"""Format an explanation. + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + lines = _split_explanation(explanation) + result = _format_lines(lines) + return "\n".join(result) + + +def _split_explanation(explanation: str) -> List[str]: + r"""Return a list of individual lines in the explanation. + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+    """
+    raw_lines = (explanation or "").split("\n")
+    lines = [raw_lines[0]]
+    for values in raw_lines[1:]:
+        if values and values[0] in ["{", "}", "~", ">"]:
+            lines.append(values)
+        else:
+            lines[-1] += "\\n" + values
+    return lines
+
+
+def _format_lines(lines: Sequence[str]) -> List[str]:
+    """Format the individual lines.
+
+    This will replace the '{', '}' and '~' characters of our mini formatting
+    language with the proper 'where ...', 'and ...' and ' + ...' text, taking
+    care of indentation along the way.
+
+    Return a list of formatted lines.
+    """
+    result = list(lines[:1])
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith("{"):
+            if stackcnt[-1]:
+                s = "and   "
+            else:
+                s = "where "
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(" +" + "  " * (len(stack) - 1) + s + line[1:])
+        elif line.startswith("}"):
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line[0] in ["~", ">"]
+            stack[-1] += 1
+            indent = len(stack) if line.startswith("~") else len(stack) - 1
+            result.append("  " * indent + line[1:])
+    assert len(stack) == 1
+    return result
+
+
+def issequence(x: Any) -> bool:
+    return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
+
+
+def istext(x: Any) -> bool:
+    return isinstance(x, str)
+
+
+def isdict(x: Any) -> bool:
+    return isinstance(x, dict)
+
+
+def isset(x: Any) -> bool:
+    return isinstance(x, (set, frozenset))
+
+
+def isnamedtuple(obj: Any) -> bool:
+    return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
+def isdatacls(obj: Any) -> bool:
+    return getattr(obj, "__dataclass_fields__", None) is not None
+
+
+def isattrs(obj: Any) -> bool:
+    return getattr(obj, "__attrs_attrs__", None) is not None
+
+
+def isiterable(obj: Any) -> bool:
+    try:
+        iter(obj)
+        return not istext(obj)
+    except TypeError:
+        return False
+
+
+def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[str]]:
+    """Return specialised explanations for some operators/operands."""
+    verbose = config.getoption("verbose")
+    if verbose > 1:
+        left_repr = safeformat(left)
+        right_repr = safeformat(right)
+    else:
+        # XXX: "15 chars indentation" is wrong
+        #      ("E       AssertionError: assert "); should use term width.
+        maxsize = (
+            80 - 15 - len(op) - 2
+        ) // 2  # 15 chars indentation, 1 space around op
+        left_repr = saferepr(left, maxsize=maxsize)
+        right_repr = saferepr(right, maxsize=maxsize)
+
+    summary = f"{left_repr} {op} {right_repr}"
+
+    explanation = None
+    try:
+        if op == "==":
+            explanation = _compare_eq_any(left, right, verbose)
+        elif op == "not in":
+            if istext(left) and istext(right):
+                explanation = _notin_text(left, right, verbose)
+    except outcomes.Exit:
+        raise
+    except Exception:
+        explanation = [
+            "(pytest_assertion plugin: representation of details failed: {}.".format(
+                _pytest._code.ExceptionInfo.from_current()._getreprcrash()
+            ),
+            " Probably an object has a faulty __repr__.)",
+        ]
+
+    if not explanation:
+        return None
+
+    return [summary] + explanation
+
+
+def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
+    explanation = []
+    if istext(left) and istext(right):
+        explanation = _diff_text(left, right, verbose)
+    else:
+        if type(left) == type(right) and (
+            isdatacls(left) or isattrs(left) or isnamedtuple(left)
+        ):
+            # Note: unlike dataclasses/attrs, namedtuples compare only the
+            # field values, not the type or field names. But this branch
+            # intentionally only handles the same-type case, which was often
+            # used in older code bases before dataclasses/attrs were available.
+            explanation = _compare_eq_cls(left, right, verbose)
+        elif issequence(left) and issequence(right):
+            explanation = _compare_eq_sequence(left, right, verbose)
+        elif isset(left) and isset(right):
+            explanation = _compare_eq_set(left, right, verbose)
+        elif isdict(left) and isdict(right):
+            explanation = _compare_eq_dict(left, right, verbose)
+        elif verbose > 0:
+            explanation = _compare_eq_verbose(left, right)
+        if isiterable(left) and isiterable(right):
+            expl = _compare_eq_iterable(left, right, verbose)
+            explanation.extend(expl)
+    return explanation
+
+
+def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
+    """Return the explanation for the diff between text.
+
+    Unless --verbose is used this will skip leading and trailing
+    characters which are identical to keep the diff minimal.
+    """
+    from difflib import ndiff
+
+    explanation: List[str] = []
+
+    if verbose < 1:
+        i = 0  # just in case left or right has zero length
+        for i in range(min(len(left), len(right))):
+            if left[i] != right[i]:
+                break
+        if i > 42:
+            i -= 10  # Provide some context
+            explanation = [
+                "Skipping %s identical leading characters in diff, use -v to show" % i
+            ]
+            left = left[i:]
+            right = right[i:]
+        if len(left) == len(right):
+            for i in range(len(left)):
+                if left[-i] != right[-i]:
+                    break
+            if i > 42:
+                i -= 10  # Provide some context
+                explanation += [
+                    "Skipping {} identical trailing "
+                    "characters in diff, use -v to show".format(i)
+                ]
+                left = left[:-i]
+                right = right[:-i]
+    keepends = True
+    if left.isspace() or right.isspace():
+        left = repr(str(left))
+        right = repr(str(right))
+        explanation += ["Strings contain only whitespace, escaping them using repr()"]
+    # "right" is the expected base against which we compare "left",
+    # see https://github.com/pytest-dev/pytest/issues/3333
+    explanation += [
+        line.strip("\n")
+        for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
+    ]
+    return explanation
+
+
+def _compare_eq_verbose(left: Any, right: Any) -> List[str]:
+    keepends = True
+    left_lines = repr(left).splitlines(keepends)
+    right_lines = repr(right).splitlines(keepends)
+
+    explanation: List[str] = []
+    explanation += ["+" + line for line in left_lines]
+    explanation += ["-" + line for line in right_lines]
+
+    return explanation
+
+
+def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
+    """Move opening/closing parenthesis/bracket to own lines."""
+    opening = lines[0][:1]
+    if opening in ["(", "[", "{"]:
+        lines[0] = " " + lines[0][1:]
+        lines[:] = [opening] + lines
+    closing = lines[-1][-1:]
+    if closing in [")", "]", "}"]:
+        lines[-1] = lines[-1][:-1] + ","
+        lines[:] = lines + [closing]
+
+
+def _compare_eq_iterable(
+    left: Iterable[Any], right: Iterable[Any], verbose: int = 0
+) -> List[str]:
+    if not verbose:
+        return ["Use -v to get the full diff"]
+    # dynamic import to speedup pytest
+    import difflib
+
+    left_formatting = pprint.pformat(left).splitlines()
+    right_formatting = pprint.pformat(right).splitlines()
+
+    # Re-format for different output lengths.
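# --- NOTE (editor): a check of the leading-skip logic in _diff_text() above;
# my example. The first difference sits at index 60, so after stepping back 10
# characters for context, 50 identical leading characters are skipped.
from _pytest.assertion.util import _diff_text

expl = _diff_text("x" * 60 + "a", "x" * 60 + "b")
assert expl[0] == "Skipping 50 identical leading characters in diff, use -v to show"
# --- end note ---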
+    lines_left = len(left_formatting)
+    lines_right = len(right_formatting)
+    if lines_left != lines_right:
+        left_formatting = _pformat_dispatch(left).splitlines()
+        right_formatting = _pformat_dispatch(right).splitlines()
+
+    if lines_left > 1 or lines_right > 1:
+        _surrounding_parens_on_own_lines(left_formatting)
+        _surrounding_parens_on_own_lines(right_formatting)
+
+    explanation = ["Full diff:"]
+    # "right" is the expected base against which we compare "left",
+    # see https://github.com/pytest-dev/pytest/issues/3333
+    explanation.extend(
+        line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting)
+    )
+    return explanation
+
+
+def _compare_eq_sequence(
+    left: Sequence[Any], right: Sequence[Any], verbose: int = 0
+) -> List[str]:
+    comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
+    explanation: List[str] = []
+    len_left = len(left)
+    len_right = len(right)
+    for i in range(min(len_left, len_right)):
+        if left[i] != right[i]:
+            if comparing_bytes:
+                # when comparing bytes, we want to see their ascii representation
+                # instead of their numeric values (#5260)
+                # using a slice gives us the ascii representation:
+                # >>> s = b'foo'
+                # >>> s[0]
+                # 102
+                # >>> s[0:1]
+                # b'f'
+                left_value = left[i : i + 1]
+                right_value = right[i : i + 1]
+            else:
+                left_value = left[i]
+                right_value = right[i]
+
+            explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
+            break
+
+    if comparing_bytes:
+        # when comparing bytes, it doesn't help to show the "sides contain one or more
+        # items" longer explanation, so skip it
+
+        return explanation
+
+    len_diff = len_left - len_right
+    if len_diff:
+        if len_diff > 0:
+            dir_with_more = "Left"
+            extra = saferepr(left[len_right])
+        else:
+            len_diff = 0 - len_diff
+            dir_with_more = "Right"
+            extra = saferepr(right[len_left])
+
+        if len_diff == 1:
+            explanation += [f"{dir_with_more} contains one more item: {extra}"]
+        else:
+            explanation += [
+                "%s contains %d more items, first extra item: %s"
+                % (dir_with_more, len_diff, extra)
+            ]
+    return explanation
+
+
+def _compare_eq_set(
+    left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
+) -> List[str]:
+    explanation = []
+    diff_left = left - right
+    diff_right = right - left
+    if diff_left:
+        explanation.append("Extra items in the left set:")
+        for item in diff_left:
+            explanation.append(saferepr(item))
+    if diff_right:
+        explanation.append("Extra items in the right set:")
+        for item in diff_right:
+            explanation.append(saferepr(item))
+    return explanation
+
+
+def _compare_eq_dict(
+    left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
+) -> List[str]:
+    explanation: List[str] = []
+    set_left = set(left)
+    set_right = set(right)
+    common = set_left.intersection(set_right)
+    same = {k: left[k] for k in common if left[k] == right[k]}
+    if same and verbose < 2:
+        explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
+    elif same:
+        explanation += ["Common items:"]
+        explanation += pprint.pformat(same).splitlines()
+    diff = {k for k in common if left[k] != right[k]}
+    if diff:
+        explanation += ["Differing items:"]
+        for k in diff:
+            explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
+    extra_left = set_left - set_right
+    len_extra_left = len(extra_left)
+    if len_extra_left:
+        explanation.append(
+            "Left contains %d more item%s:"
+            % (len_extra_left, "" if len_extra_left == 1 else "s")
+        )
+        explanation.extend(
+            pprint.pformat({k: left[k] for k in extra_left}).splitlines()
+        )
+    extra_right = set_right - set_left
+    len_extra_right = len(extra_right)
+    if len_extra_right:
+        explanation.append(
+            "Right contains %d more item%s:"
+            % (len_extra_right, "" if len_extra_right == 1 else "s")
+        )
+        explanation.extend(
+            pprint.pformat({k: right[k] for k in extra_right}).splitlines()
+        )
+    return explanation
+
+
+def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
+    if isdatacls(left):
+        all_fields = left.__dataclass_fields__
+        fields_to_check = [field for field, info in all_fields.items() if info.compare]
+    elif isattrs(left):
+        all_fields = left.__attrs_attrs__
+        fields_to_check = [field.name for field in all_fields if getattr(field, "eq")]
+    elif isnamedtuple(left):
+        fields_to_check = left._fields
+    else:
+        assert False
+
+    indent = "  "
+    same = []
+    diff = []
+    for field in fields_to_check:
+        if getattr(left, field) == getattr(right, field):
+            same.append(field)
+        else:
+            diff.append(field)
+
+    explanation = []
+    if same or diff:
+        explanation += [""]
+    if same and verbose < 2:
+        explanation.append("Omitting %s identical items, use -vv to show" % len(same))
+    elif same:
+        explanation += ["Matching attributes:"]
+        explanation += pprint.pformat(same).splitlines()
+    if diff:
+        explanation += ["Differing attributes:"]
+        explanation += pprint.pformat(diff).splitlines()
+        for field in diff:
+            field_left = getattr(left, field)
+            field_right = getattr(right, field)
+            explanation += [
+                "",
+                "Drill down into differing attribute %s:" % field,
+                ("%s%s: %r != %r") % (indent, field, field_left, field_right),
+            ]
+            explanation += [
+                indent + line
+                for line in _compare_eq_any(field_left, field_right, verbose)
+            ]
+    return explanation
+
+
+def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]:
+    index = text.find(term)
+    head = text[:index]
+    tail = text[index + len(term) :]
+    correct_text = head + tail
+    diff = _diff_text(text, correct_text, verbose)
+    newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)]
+    for line in diff:
+        if line.startswith("Skipping"):
+            continue
+        if line.startswith("- "):
+            continue
+        if line.startswith("+ "):
+            newdiff.append("  " + line[2:])
+        else:
+            newdiff.append(line)
+    return newdiff
diff --git a/myenv/lib/python3.9/site-packages/_pytest/cacheprovider.py b/myenv/lib/python3.9/site-packages/_pytest/cacheprovider.py
new file mode 100644
index 0000000..03acd03
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/_pytest/cacheprovider.py
@@ -0,0 +1,575 @@
+"""Implementation of the cache provider."""
+# This plugin was not named "cache" to avoid conflicts with the external
+# pytest-cache version.
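# --- NOTE (editor): tying the util.py helpers above together, comparing
# {"a": 0, "b": 1} == {"a": 0, "b": 2} without -v yields roughly (hedged
# sketch; _compare_eq_dict() produces the first three lines, and because
# dicts are iterable, _compare_eq_iterable() appends the last one):
#
#     Omitting 1 identical items, use -vv to show
#     Differing items:
#     {'b': 1} != {'b': 2}
#     Use -v to get the full diff
# --- end note ---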
+import json +import os +from pathlib import Path +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Set +from typing import Union + +import attr +import py + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.python import Module +from _pytest.python import Package +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# http://www.bford.info/cachedir/spec.html +""" + + +@final +@attr.s(init=False) +class Cache: + _cachedir = attr.ib(type=Path, repr=False) + _config = attr.ib(type=Config, repr=False) + + # sub-directory under cache-dir for directories created by "makedir" + _CACHE_PREFIX_DIRS = "d" + + # sub-directory under cache-dir for values created by "set" + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache": + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. + + :meta private: + """ + check_ispytest(_ispytest) + import warnings + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def makedir(self, name: str) -> py.path.local: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. 
You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + res.mkdir(exist_ok=True, parents=True) + return py.path.local(res) + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + if path.parent.is_dir(): + cache_dir_exists_already = True + else: + cache_dir_exists_already = self._cachedir.exists() + path.parent.mkdir(exist_ok=True, parents=True) + except OSError: + self.warn("could not create cache path {path}", path=path, _ispytest=True) + return + if not cache_dir_exists_already: + self._ensure_supporting_files() + data = json.dumps(value, indent=2, sort_keys=True) + try: + f = path.open("w") + except OSError: + self.warn("cache could not write path {path}", path=path, _ispytest=True) + else: + with f: + f.write(data) + + def _ensure_supporting_files(self) -> None: + """Create supporting files in the cache dir that are not really part of the cache.""" + readme_path = self._cachedir / "README.md" + readme_path.write_text(README_CONTENT) + + gitignore_path = self._cachedir.joinpath(".gitignore") + msg = "# Created by pytest automatically.\n*\n" + gitignore_path.write_text(msg, encoding="UTF-8") + + cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") + cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: "LFPlugin") -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector: nodes.Collector): + if isinstance(collector, Session): + out = yield + res: CollectReport = out.get_result() + + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + res.result = sorted( + res.result, key=lambda x: 0 if Path(str(x.fspath)) in lf_paths else 1, + ) + return + + elif isinstance(collector, Module): + if Path(str(collector.fspath)) in self.lfplugin._last_failed_paths: + out = yield + res = out.get_result() + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. 
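+                # (`lastfailed` maps nodeids of previously failing tests to
+                # True, e.g. {"tests/test_mod.py::test_a": True}; the filter
+                # below keeps only such items, initial paths, and
+                # sub-collectors.)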
+ if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.fspath) + # Keep all sub-collectors. + or isinstance(x, nodes.Collector) + ] + return + yield + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: "LFPlugin") -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Optional[CollectReport]: + # Packages are Modules, but _last_failed_paths only contains + # test-bearing paths and doesn't try to include the paths of their + # packages, so don't filter them. + if isinstance(collector, Module) and not isinstance(collector, Package): + if Path(str(collector.fspath)) not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: Optional[int] = None + self._report_status: Optional[str] = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> Set[Path]: + """Return a set with all Paths()s of the previously failed nodeids.""" + rootpath = self.config.rootpath + result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed} + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.active and self.config.getoption("verbose") >= 0: + return "run-last-failure: %s" % self._report_status + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> Generator[None, None, None]: + yield + + if not self.active: + return + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. 
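+                # (e.g. `pytest tests/test_new.py --lf` while all recorded
+                # failures live in files outside tests/test_new.py.)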
+ self._report_status = "%d known failures not in selected tests" % ( + len(self.lastfailed), + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += " (skipped {files} {files_noun})".format( + files=self._skipped_files, files_noun=files_noun + ) + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, items: List[nodes.Item] + ) -> Generator[None, None, None]: + yield + + if self.active: + new_items: Dict[str, nodes.Item] = {} + other_items: Dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]: + return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) # type: ignore[no-any-return] + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="run all tests, but run the last failures first.\n" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted by 
file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="which tests to run with no previously (known) failures.", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.cacheshow: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> Optional[str]: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", "cache values for %r" % glob) + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", "cache directories for %r" % glob) + for p in contents: + # if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size:d}") + return 0 diff --git a/myenv/lib/python3.9/site-packages/_pytest/capture.py b/myenv/lib/python3.9/site-packages/_pytest/capture.py new file mode 100644 index 0000000..0863026 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/capture.py @@ -0,0 +1,967 @@ +"""Per-test stdout/stderr capturing mechanism.""" +import contextlib +import functools +import io +import os +import sys +from io import UnsupportedOperation +from tempfile import TemporaryFile +from typing import Any +from typing import AnyStr +from typing import Generator +from typing import Generic +from typing import Iterator +from typing import Optional +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item + +if TYPE_CHECKING: + from typing_extensions import Literal + + _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="per-test capturing method: one of fd|sys|no|tee-sys.", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. 
+ """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _readline_workaround() -> None: + """Ensure readline is imported so that it attaches to the correct stdio + handles on Windows. + + Pdb uses readline support where available--when not running from the Python + prompt, the readline module is not imported until running the pdb REPL. If + running pytest with the --pdb option this means the readline module is not + imported until after I/O capture has been started. + + This is a problem for pyreadline, which is often used to implement readline + support on Windows, as it does not attach to the correct handles for stdout + and/or stdin if they have been redirected by the FDCapture mechanism. This + workaround ensures that readline is imported before I/O capture is setup so + that it can attach to the actual stdin/out for the console. + + See https://github.com/pytest-dev/pytest/pull/1281. + """ + if sys.platform.startswith("win32"): + try: + import readline # noqa: F401 + except ImportError: + pass + + +def _py36_windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling on Python>=3.6. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable] + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer # type: ignore[attr-defined] + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined] + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), # type: ignore[arg-type] + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config: Config): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. 
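+    # (cleanups registered here run when the Config is unconfigured at the
+    # very end of the test run.)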
+ early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. + return self.buffer.mode.replace("b", "") + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput: + encoding = None + + def read(self, *args): + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + readlines = read + __next__ = read + + def __iter__(self): + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + @property + def buffer(self): + return self + + +# Capture classes. 
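+#
+# Illustrative sketch (not part of pytest itself) of the division of labour
+# below: the SysCapture* classes swap out the `sys.stdout`/`sys.stderr`
+# objects, while the FDCapture* classes redirect the underlying OS file
+# descriptors, so output from C extensions and subprocesses is caught too.
+# Direct use, and the equivalent via the `capsys` fixture defined at the
+# bottom of this module:
+#
+#     cap = SysCapture(1)    # fd 1, i.e. sys.stdout
+#     cap.start()
+#     print("hidden")
+#     assert cap.snap() == "hidden\n"
+#     cap.done()             # restore the real sys.stdout
+#
+#     def test_greeting(capsys):
+#         print("hello")
+#         assert capsys.readouterr().out == "hello\n"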
+ + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture: + EMPTY_BUFFER = None + __init__ = start = done = suspend = resume = lambda *args: None + + +class SysCaptureBinary: + + EMPTY_BUFFER = b"" + + def __init__(self, fd: int, tmpfile=None, *, tee: bool = False) -> None: + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def snap(self): + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def writeorg(self, data) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBinary): + EMPTY_BUFFER = "" # type: ignore[assignment] + + def snap(self): + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data): + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBinary: + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. + """ + + EMPTY_BUFFER = b"" + + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this loveley scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. 
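+            # (point the invalid target FD at /dev/null while capturing is
+            # active; done() closes it again.)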
+ self.targetfd_invalid: Optional[int] = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull) + self.syscapture = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture() + + self._state = "initialized" + + def __repr__(self) -> str: + return "<{} {} oldfd={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.targetfd, + self.targetfd_save, + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def snap(self): + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + def writeorg(self, data): + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBinary): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces text. + """ + + # Ignore type because it doesn't match the type in the superclass (bytes). + EMPTY_BUFFER = "" # type: ignore + + def snap(self): + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data): + """Write to original file descriptor.""" + super().writeorg(data.encode("utf-8")) # XXX use encoding of original stream + + +# MultiCapture + + +# This class was a namedtuple, but due to mypy limitation[0] it could not be +# made generic, so was replaced by a regular class which tries to emulate the +# pertinent parts of a namedtuple. If the mypy limitation is ever lifted, can +# make it a namedtuple again. 
+# [0]: https://github.com/python/mypy/issues/685
+@final
+@functools.total_ordering
+class CaptureResult(Generic[AnyStr]):
+    """The result of :method:`CaptureFixture.readouterr`."""
+
+    __slots__ = ("out", "err")
+
+    def __init__(self, out: AnyStr, err: AnyStr) -> None:
+        self.out: AnyStr = out
+        self.err: AnyStr = err
+
+    def __len__(self) -> int:
+        return 2
+
+    def __iter__(self) -> Iterator[AnyStr]:
+        return iter((self.out, self.err))
+
+    def __getitem__(self, item: int) -> AnyStr:
+        return tuple(self)[item]
+
+    def _replace(
+        self, *, out: Optional[AnyStr] = None, err: Optional[AnyStr] = None
+    ) -> "CaptureResult[AnyStr]":
+        return CaptureResult(
+            out=self.out if out is None else out, err=self.err if err is None else err
+        )
+
+    def count(self, value: AnyStr) -> int:
+        return tuple(self).count(value)
+
+    def index(self, value) -> int:
+        return tuple(self).index(value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, (CaptureResult, tuple)):
+            return NotImplemented
+        return tuple(self) == tuple(other)
+
+    def __hash__(self) -> int:
+        return hash(tuple(self))
+
+    def __lt__(self, other: object) -> bool:
+        if not isinstance(other, (CaptureResult, tuple)):
+            return NotImplemented
+        return tuple(self) < tuple(other)
+
+    def __repr__(self) -> str:
+        return f"CaptureResult(out={self.out!r}, err={self.err!r})"
+
+
+class MultiCapture(Generic[AnyStr]):
+    _state = None
+    _in_suspended = False
+
+    def __init__(self, in_, out, err) -> None:
+        self.in_ = in_
+        self.out = out
+        self.err = err
+
+    def __repr__(self) -> str:
+        return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
+            self.out, self.err, self.in_, self._state, self._in_suspended,
+        )
+
+    def start_capturing(self) -> None:
+        self._state = "started"
+        if self.in_:
+            self.in_.start()
+        if self.out:
+            self.out.start()
+        if self.err:
+            self.err.start()
+
+    def pop_outerr_to_orig(self) -> Tuple[AnyStr, AnyStr]:
+        """Pop current snapshot out/err capture and flush to orig streams."""
+        out, err = self.readouterr()
+        if out:
+            self.out.writeorg(out)
+        if err:
+            self.err.writeorg(err)
+        return out, err
+
+    def suspend_capturing(self, in_: bool = False) -> None:
+        self._state = "suspended"
+        if self.out:
+            self.out.suspend()
+        if self.err:
+            self.err.suspend()
+        if in_ and self.in_:
+            self.in_.suspend()
+            self._in_suspended = True
+
+    def resume_capturing(self) -> None:
+        self._state = "started"
+        if self.out:
+            self.out.resume()
+        if self.err:
+            self.err.resume()
+        if self._in_suspended:
+            self.in_.resume()
+            self._in_suspended = False
+
+    def stop_capturing(self) -> None:
+        """Stop capturing and reset capturing streams."""
+        if self._state == "stopped":
+            raise ValueError("was already stopped")
+        self._state = "stopped"
+        if self.out:
+            self.out.done()
+        if self.err:
+            self.err.done()
+        if self.in_:
+            self.in_.done()
+
+    def is_started(self) -> bool:
+        """Whether actively capturing -- not suspended or stopped."""
+        return self._state == "started"
+
+    def readouterr(self) -> CaptureResult[AnyStr]:
+        if self.out:
+            out = self.out.snap()
+        else:
+            out = ""
+        if self.err:
+            err = self.err.snap()
+        else:
+            err = ""
+        return CaptureResult(out, err)
+
+
+def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]:
+    if method == "fd":
+        return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2))
+    elif method == "sys":
+        return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2))
+    elif method == "no":
+        return MultiCapture(in_=None, out=None, err=None)
+    elif method == "tee-sys":
+        return MultiCapture(
+            in_=None,
+            out=SysCapture(1, tee=True),
+            err=SysCapture(2, tee=True),
+        )
+    raise ValueError(f"unknown capturing method: {method!r}")
+
+
+# CaptureManager and CaptureFixture
+
+
+class CaptureManager:
+    """The capture plugin.
+
+    Ensures that the appropriate capture method is enabled/disabled during
+    collection and each test phase (setup, call, teardown). After each of
+    those points, the captured output is obtained and attached to the
+    collection/runtest report.
+
+    There are two levels of capture:
+
+    * global: enabled by default and can be suppressed by the ``-s``
+      option. This is always enabled/disabled during collection and each test
+      phase.
+
+    * fixture: when a test function or one of its fixtures depends on the
+      ``capsys`` or ``capfd`` fixtures. In this case special handling is
+      needed to ensure the fixtures take precedence over the global capture.
+    """
+
+    def __init__(self, method: "_CaptureMethod") -> None:
+        self._method = method
+        self._global_capturing: Optional[MultiCapture[str]] = None
+        self._capture_fixture: Optional[CaptureFixture[Any]] = None
+
+    def __repr__(self) -> str:
+        return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
+            self._method, self._global_capturing, self._capture_fixture
+        )
+
+    def is_capturing(self) -> Union[str, bool]:
+        if self.is_globally_capturing():
+            return "global"
+        if self._capture_fixture:
+            return "fixture %s" % self._capture_fixture.request.fixturename
+        return False
+
+    # Global capturing control
+
+    def is_globally_capturing(self) -> bool:
+        return self._method != "no"
+
+    def start_global_capturing(self) -> None:
+        assert self._global_capturing is None
+        self._global_capturing = _get_multicapture(self._method)
+        self._global_capturing.start_capturing()
+
+    def stop_global_capturing(self) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.pop_outerr_to_orig()
+            self._global_capturing.stop_capturing()
+            self._global_capturing = None
+
+    def resume_global_capture(self) -> None:
+        # During teardown of the python process, and on rare occasions, capture
+        # attributes can be `None` while trying to resume global capture.
+        if self._global_capturing is not None:
+            self._global_capturing.resume_capturing()
+
+    def suspend_global_capture(self, in_: bool = False) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.suspend_capturing(in_=in_)
+
+    def suspend(self, in_: bool = False) -> None:
+        # Need to undo local capsys-et-al if it exists before disabling global capture.
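+        # (fixture capture nests inside global capture, so it is suspended
+        # first here and resumed last in resume() below.)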
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: "CaptureFixture[Any]") -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + "cannot use {} and {} at the same time".format( + requested_fixture, current_fixture + ) + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None, None, None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None, None, None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector: Collector): + if isinstance(collector, File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("setup", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("call", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("teardown", item): + yield + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the 
:fixture:`capsys`, :fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, captureclass, request: SubRequest, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self.captureclass = captureclass + self.request = request + self._capture: Optional[MultiCapture[AnyStr]] = None + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, out=self.captureclass(1), err=self.captureclass(2), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None, None, None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: + """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]: + """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. 
+ """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: + """Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]: + """Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``byte`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() diff --git a/myenv/lib/python3.9/site-packages/_pytest/compat.py b/myenv/lib/python3.9/site-packages/_pytest/compat.py new file mode 100644 index 0000000..c7f86ea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/compat.py @@ -0,0 +1,400 @@ +"""Python version compatibility code.""" +import enum +import functools +import inspect +import re +import sys +from contextlib import contextmanager +from inspect import Parameter +from inspect import signature +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Generic +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Final + + +_T = TypeVar("_T") +_S = TypeVar("_S") + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: "Final" = NotSetType.token # noqa: E305 +# fmt: on + +if sys.version_info >= (3, 8): + from importlib import metadata as importlib_metadata +else: + import importlib_metadata # noqa: F401 + + +def _format_args(func: Callable[..., Any]) -> str: + return str(signature(func)) + + +# The type of re.compile objects is not exposed in Python. +REGEX_TYPE = type(re.compile("")) + + +def is_generator(func: object) -> bool: + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. 
+ + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def getlocation(function, curdir: Optional[str] = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return "%s:%d" % (relfn, lineno + 1) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., Any], + *, + name: str = "", + is_method: bool = False, + cls: Optional[type] = None, +) -> Tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + fail( + f"Could not determine arguments of {function!r}: {e}", pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if is_method or ( + cls and not isinstance(cls.__dict__.get(name, None), staticmethod) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. 
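+    # (arguments injected by @unittest.mock.patch decorators are not fixture
+    # names; num_mock_patch_args above counts how many to strip.)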
+ if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +if sys.version_info < (3, 7): + + @contextmanager + def nullcontext(): + yield + + +else: + from contextlib import nullcontext as nullcontext # noqa: F401 + + +def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def _translate_non_printable(s: str) -> str: + return s.translate(_non_printable_ascii_translate_table) + + +STRING_TYPES = bytes, str + + +def _bytes_to_ascii(val: bytes) -> str: + return val.decode("ascii", "backslashreplace") + + +def ascii_escaped(val: Union[bytes, str]) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = _bytes_to_ascii(val) + else: + ret = val.encode("unicode_escape").decode("ascii") + return _translate_non_printable(ret) + + +@attr.s +class _PytestWrapper: + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object when we are + creating fixtures, because we wrap the function object ourselves with a + decorator to issue warnings when the fixture function is called directly. 
+ """ + + obj = attr.ib() + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial.""" + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + from _pytest._io.saferepr import saferepr + + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=saferepr(start_obj), current=saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """Attempt to obtain the real function object that might be wrapping + ``obj``, while at the same time returning a bound method to ``holder`` if + the original object was a bound method.""" + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: # pragma: no cover + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +if TYPE_CHECKING: + if sys.version_info >= (3, 8): + from typing import final as final + else: + from typing_extensions import final as final +elif sys.version_info >= (3, 8): + from typing import final as final +else: + + def final(f): + return f + + +if sys.version_info >= (3, 8): + from functools import cached_property as cached_property +else: + from typing import overload + from typing import Type + + class cached_property(Generic[_S, _T]): + __slots__ = ("func", "__doc__") + + def __init__(self, func: Callable[[_S], _T]) -> None: + self.func = func + self.__doc__ = func.__doc__ + + @overload + def __get__( + self, instance: None, owner: Optional[Type[_S]] = ... + ) -> "cached_property[_S, _T]": + ... + + @overload + def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T: + ... + + def __get__(self, instance, owner=None): + if instance is None: + return self + value = instance.__dict__[self.func.__name__] = self.func(instance) + return value + + +# Perform exhaustiveness checking. 
+# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int { +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember ourselves to go and update the handle +# function to handle the new variant. +# +# With `assert_never` we can do better: +# +# // raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also work for Enums (if you use `is` to compare) and Literals. +def assert_never(value: "NoReturn") -> "NoReturn": + assert False, "Unhandled value: {} ({})".format(value, type(value).__name__) diff --git a/myenv/lib/python3.9/site-packages/_pytest/config/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..bd9e288 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1606 @@ +"""Command line options, ini-file and conftest.py processing.""" +import argparse +import collections.abc +import contextlib +import copy +import enum +import inspect +import os +import re +import shlex +import sys +import types +import warnings +from functools import lru_cache +from pathlib import Path +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import attr +import py +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager + +import _pytest._code +import _pytest.deprecated +import _pytest.hookspec +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.compat import importlib_metadata +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.store import Store +from _pytest.warning_types import PytestConfigWarning + +if TYPE_CHECKING: + + from _pytest._code.code import _TracebackStyle + from _pytest.terminal import TerminalReporter + from .argparsing import Argument + + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. 
+ INTERRUPTED = 2 + #: An internal error got in the way. + INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: py.path.local, + excinfo: Tuple[Type[Exception], Exception, TracebackType], + ) -> None: + super().__init__(path, excinfo) + self.path = path + self.excinfo = excinfo + + def __str__(self) -> str: + return "{}: {} (from {})".format( + self.excinfo[0].__name__, self.excinfo[1], self.path + ) + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: Optional[Union[List[str], py.path.local]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> Union[int, ExitCode]: + """Perform an in-process test run. + + :param args: List of command line arguments. + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo(e.excinfo) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( + config=config + ) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. + """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. 
+essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = essential_plugins + ( + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "nose", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", + *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), + "faulthandler", +) + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") +builtin_plugins.add("pytester_assertions") + + +def get_config( + args: Optional[List[str]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), plugins=plugins, dir=Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> "PytestPluginManager": + """Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: Optional[Union[py.path.local, List[str]]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, list): + msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + import _pytest.assertion + + super().__init__("pytest") + # The objects are module objects, only used generically. + self._conftest_plugins: Set[types.ModuleType] = set() + + # State related to local conftest plugins. 
+ self._dirpath2confmods: Dict[py.path.local, List[types.ModuleType]] = {} + self._conftestpath2mod: Dict[Path, types.ModuleType] = {} + self._confcutdir: Optional[py.path.local] = None + self._noconftest = False + self._duplicatepaths: Set[py.path.local] = set() + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: List[Tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return + # Ignore names which can not be hooks. + if name == "pytest_plugins": + return + + method = getattr(plugin, name) + opts = super().parse_hookimpl_opts(plugin, name) + + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return + + # Collect unmarked hooks as long as they have the `pytest_' prefix. + if opts is None and name.startswith("pytest_"): + opts = {} + if opts is not None: + # TODO: DeprecationWarning, people should use hookimpl + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name) or name in known_marks) + return opts + + def parse_hookspec_opts(self, module_or_class, name: str): + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + + if name.startswith("pytest_"): + # todo: deprecate hookspec hacks + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + opts = { + "firstresult": hasattr(method, "firstresult") + or "firstresult" in known_marks, + "historic": hasattr(method, "historic") + or "historic" in known_marks, + } + return opts + + def register( + self, plugin: _PluggyPlugin, name: Optional[str] = None + ) -> Optional[str]: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + ret: Optional[str] = super().register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. 
+ plugin: Optional[_PluggyPlugin] = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: "Config") -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests(self, namespace: argparse.Namespace) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + current = py.path.local() + self._confcutdir = ( + current.join(namespace.confcutdir, abs=True) + if namespace.confcutdir + else None + ) + self._noconftest = namespace.noconftest + self._using_pyargs = namespace.pyargs + testpaths = namespace.file_or_dir + foundanchor = False + for testpath in testpaths: + path = str(testpath) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if anchor.exists(): # we found some file object + self._try_load_conftest(anchor, namespace.importmode) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current, namespace.importmode) + + def _try_load_conftest( + self, anchor: py.path.local, importmode: Union[str, ImportMode] + ) -> None: + self._getconftestmodules(anchor, importmode) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x, importmode) + + @lru_cache(maxsize=128) + def _getconftestmodules( + self, path: py.path.local, importmode: Union[str, ImportMode], + ) -> List[types.ModuleType]: + if self._noconftest: + return [] + + if path.isfile(): + directory = path.dirpath() + else: + directory = path + + # XXX these days we may rather want to use config.rootpath + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir. 
+ clist = [] + for parent in directory.parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + mod = self._importconftest(conftestpath, importmode) + clist.append(mod) + self._dirpath2confmods[directory] = clist + return clist + + def _rget_with_confmod( + self, name: str, path: py.path.local, importmode: Union[str, ImportMode], + ) -> Tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path, importmode) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, conftestpath: py.path.local, importmode: Union[str, ImportMode], + ) -> types.ModuleType: + # Use a resolved Path object as key to avoid loading the same conftest + # twice with build systems that create build directories containing + # symlinks to actual files. + # Using Path().resolve() is better than py.path.realpath because + # it resolves to the correct path/drive in case-insensitive file systems (#5792) + key = Path(str(conftestpath)).resolve() + + with contextlib.suppress(KeyError): + return self._conftestpath2mod[key] + + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + + try: + mod = import_path(conftestpath, mode=importmode) + except Exception as e: + assert e.__traceback__ is not None + exc_info = (type(e), e, e.__traceback__) + raise ConftestImportFailure(conftestpath, exc_info) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[key] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod) + return mod + + def _check_non_top_pytest_plugins( + self, mod: types.ModuleType, conftestpath: py.path.local, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError("plugin %s cannot be disabled" % name) + + # PR #4304: remove stepwise if cacheprovider is blocked. 
+ if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. None indicates that it has been blocked. + # There is no interface with pluggy for this. + if self._name2plugin.get(name, -1) is None: + del self._name2plugin[name] + if not name.startswith("pytest_"): + if self._name2plugin.get("pytest_" + name, -1) is None: + del self._name2plugin["pytest_" + name] + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest(self, conftestmodule: types.ModuleType) -> None: + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self) -> None: + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod: types.ModuleType) -> None: + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs( + self, spec: Union[None, types.ModuleType, str, Sequence[str]] + ) -> None: + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: + """Import a plugin with ``modname``. + + If ``consider_entry_points`` is True, entry point names are also + considered to find a plugin. + """ + # Most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str), ( + "module name as text required, got %r" % modname + ) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + raise ImportError( + 'Error importing plugin "{}": {}'.format(modname, str(e.args[0])) + ).with_traceback(e.__traceback__) from e + + except Skipped as e: + self.skipped_plugins.append((modname, e.msg or "")) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list( + specs: Union[None, types.ModuleType, str, Sequence[str]] +) -> List[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): + return list(specs) + raise UsageError( + "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r" + % specs + ) + + +def _ensure_removed_sysmodule(modname: str) -> None: + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: + """Given an iterable of file names in a source distribution, return the "names" that should + be marked for assertion rewrite. 
+ + For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in + the assertion rewrite mechanism. + + This function has to deal with dist-info based distributions and egg based distributions + (which are still very much in use for "editable" installs). + + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + if module_name != "setup": + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +def _args_converter(args: Iterable[str]) -> Tuple[str, ...]: + return tuple(args) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @attr.s(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args = attr.ib(type=Tuple[str, ...], converter=_args_converter) + """The command-line arguments as passed to :func:`pytest.main`. + + :type: Tuple[str, ...] + """ + plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]]) + """Extra plugins, might be `None`. + + :type: Optional[Sequence[Union[str, plugin]]] + """ + dir = attr.ib(type=Path) + """The directory from which :func:`pytest.main` was invoked. 
+ + :type: pathlib.Path + """ + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: Optional[InvocationParams] = None, + ) -> None: + from .argparsing import Parser, FILE_OR_DIR + + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line option as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. + + :type: InvocationParams + """ + + _a = FILE_OR_DIR + self._parser = Parser( + usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + processopt=self._processopt, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = self.pluginmanager.hook + self._inicache: Dict[str, Any] = {} + self._override_ini: Sequence[str] = () + self._opt2dest: Dict[str, str] = {} + self._cleanup: List[Callable[[], None]] = [] + # A place where plugins can store information on the config for their + # own use. Currently only intended for internal plugins. + self._store = Store() + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + + if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + + self.cache: Optional[Cache] = None + + @property + def invocation_dir(self) -> py.path.local: + """The directory from which pytest was invoked. + + Prefer to use :attr:`invocation_params.dir `, + which is a :class:`pathlib.Path`. + + :type: py.path.local + """ + return py.path.local(str(self.invocation_params.dir)) + + @property + def rootpath(self) -> Path: + """The path to the :ref:`rootdir `. + + :type: pathlib.Path + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def rootdir(self) -> py.path.local: + """The path to the :ref:`rootdir `. + + Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. + + :type: py.path.local + """ + return py.path.local(str(self.rootpath)) + + @property + def inipath(self) -> Optional[Path]: + """The path to the :ref:`configfile `. + + :type: Optional[pathlib.Path] + + .. versionadded:: 6.1 + """ + return self._inipath + + @property + def inifile(self) -> Optional[py.path.local]: + """The path to the :ref:`configfile `. + + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. 
+ + :type: Optional[py.path.local] + """ + return py.path.local(str(self.inipath)) if self.inipath else None + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: TerminalReporter = self.pluginmanager.get_plugin( + "terminalreporter" + ) + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: List[str] + ) -> "Config": + try: + self.parse(args) + except UsageError: + + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. + if getattr(self.option, "version", False) or "--version" in args: + from _pytest.helpconfig import showversion + + showversion(self) + elif ( + getattr(self.option, "help", False) or "--help" in args or "-h" in args + ): + self._parser._getparser().print_help() + sys.stdout.write( + "\nNOTE: displaying only minimal help due to UsageError.\n\n" + ) + + raise + + return self + + def notify_exception( + self, + excinfo: ExceptionInfo[BaseException], + option: Optional[argparse.Namespace] = None, + ) -> None: + if option and getattr(option, "fulltrace", False): + style: _TracebackStyle = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" % line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid: str) -> str: + # nodeid's are relative to the rootpath, compute relative to cwd. 
+ if self.invocation_params.dir != self.rootpath: + fullpath = self.rootpath / nodeid + nodeid = bestrelpath(self.invocation_params.dir, fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args) -> "Config": + """Constructor usable for subprocesses.""" + config = get_config(args) + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt: "Argument") -> None: + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default"): + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config: "Config") -> None: + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") + self._parser.addini( + "required_plugins", + "plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. 
+ return + + package_files = ( + str(file) + for dist in importlib_metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args: List[str], via: str) -> List[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _preparse(self, args: List[str], addopts: bool = True) -> None: + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + self._initini(args) + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args, exclude_only=False) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.known_args_namespace) + ) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.strict: + self.issue_config_time_warning( + _pytest.deprecated.STRICT_OPTION, stacklevel=2 + ) + + if self.known_args_namespace.confcutdir is None and self.inipath is not None: + confcutdir = str(self.inipath.parent) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + @hookimpl(hookwrapper=True) + def pytest_collection(self) -> Generator[None, None, None]: + """Validate invalid ini keys after collection is done so we take in account + options added by late-loading conftest files.""" + yield + self._validate_config_options() + + def _checkversion(self) -> None: + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + # Imported lazily to improve start-up time. 
+ from packaging.version import Version + + if not isinstance(minver, str): + raise pytest.UsageError( + "%s: 'minversion' must be a single value" % self.inipath + ) + + if Version(minver) > Version(pytest.__version__): + raise pytest.UsageError( + "%s: 'minversion' requires pytest-%s, actual pytest-%s'" + % (self.inipath, minver, pytest.__version__,) + ) + + def _validate_config_options(self) -> None: + for key in sorted(self._get_unknown_ini_keys()): + self._warn_or_fail_if_strict(f"Unknown config option: {key}\n") + + def _validate_plugins(self) -> None: + required_plugins = sorted(self.getini("required_plugins")) + if not required_plugins: + return + + # Imported lazily to improve start-up time. + from packaging.version import Version + from packaging.requirements import InvalidRequirement, Requirement + + plugin_info = self.pluginmanager.list_plugin_distinfo() + plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info} + + missing_plugins = [] + for required_plugin in required_plugins: + try: + spec = Requirement(required_plugin) + except InvalidRequirement: + missing_plugins.append(required_plugin) + continue + + if spec.name not in plugin_dist_info: + missing_plugins.append(required_plugin) + elif Version(plugin_dist_info[spec.name]) not in spec.specifier: + missing_plugins.append(required_plugin) + + if missing_plugins: + raise UsageError( + "Missing required plugins: {}".format(", ".join(missing_plugins)), + ) + + def _warn_or_fail_if_strict(self, message: str) -> None: + if self.known_args_namespace.strict_config: + raise UsageError(message) + + self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3) + + def _get_unknown_ini_keys(self) -> List[str]: + parser_inicfg = self._parser._inidict + return [name for name in self.inicfg if name not in parser_inicfg] + + def parse(self, args: List[str], addopts: bool = True) -> None: + # Parse given cmdline arguments into this config object. + assert not hasattr( + self, "args" + ), "can only parse cmdline args at most once per Config object" + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + self._preparse(args, addopts=addopts) + # XXX deprecated hook: + self.hook.pytest_cmdline_preparse(config=self, args=args) + self._parser.after_preparse = True # type: ignore + try: + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) + if not args: + if self.invocation_params.dir == self.rootpath: + args = self.getini("testpaths") + if not args: + args = [str(self.invocation_params.dir)] + self.args = args + except PrintHelp: + pass + + def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: + """Issue and handle a warning during the "configure" stage. + + During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item`` + function because it is not possible to have hookwrappers around ``pytest_configure``. + + This function is mainly intended for plugins that need to issue warnings during + ``pytest_configure`` (or similar stages). + + :param warning: The warning instance. + :param stacklevel: stacklevel forwarded to warnings.warn. 
+ """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_captured.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + item=None, + location=location, + ) + ) + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str): + """Return configuration value from an :ref:`ini file `. + + If the specified name hasn't been registered through a prior + :py:func:`parser.addini <_pytest.config.argparsing.Parser.addini>` + call (usually from a plugin), a ValueError is raised. + """ + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + def _getini(self, name: str): + try: + description, type, default = self._parser._inidict[name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + override_value = self._get_override_ini_value(name) + if override_value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return "" + return [] + else: + value = override_value + # Coerce the values based on types. + # + # Note: some coercions are only required if we are reading from .ini files, because + # the file format doesn't contain type information, but when reading from toml we will + # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). + # For example: + # + # ini: + # a_line_list = "tests acceptance" + # in this case, we need to split the string to obtain a list of strings. + # + # toml: + # a_line_list = ["tests", "acceptance"] + # in this case, we already have a list ready to use. + # + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. 
+ assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [py.path.local(str(dp / x)) for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + else: + assert type in [None, "string"] + return value + + def _getconftest_pathlist( + self, name: str, path: py.path.local + ) -> Optional[List[py.path.local]]: + try: + mod, relroots = self.pluginmanager._rget_with_confmod( + name, path, self.getoption("importmode") + ) + except KeyError: + return None + modpath = py.path.local(mod.__file__).dirpath() + values: List[py.path.local] = [] + for relroot in relroots: + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", os.sep) + relroot = modpath.join(relroot, abs=True) + values.append(relroot) + return values + + def _get_override_ini_value(self, name: str) -> Optional[str]: + value = None + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + "-o/--override-ini expects option=value style (got: {!r}).".format( + ini_config + ) + ) from e + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name: str, default=notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Default value if no option of that name exists. + :param skip: If True, raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" 
+ ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: Optional[TextIO] = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. + + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> Tuple[str, str, Type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption, but does not apply the filter, + only parses it, and makes the escaping optional. + """ + parts = arg.split(":") + if len(parts) > 5: + raise warnings._OptionError(f"too many fields (max 5): {arg!r}") + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = [s.strip() for s in parts] + action: str = warnings._getaction(action_) # type: ignore[attr-defined] + category: Type[Warning] = warnings._getcategory(category_) # type: ignore[attr-defined] + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError) as e: + raise warnings._OptionError(f"invalid lineno {lineno_!r}") from e + else: + lineno = 0 + return action, message, category, module, lineno + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. 
+ for arg in config_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + for arg in cmdline_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) diff --git a/myenv/lib/python3.9/site-packages/_pytest/config/argparsing.py b/myenv/lib/python3.9/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000..9a48196 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,522 @@ +import argparse +import sys +import warnings +from gettext import gettext +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import py + +import _pytest._io +from _pytest.compat import final +from _pytest.config.exceptions import UsageError + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Literal + +FILE_OR_DIR = "file_or_dir" + + +@final +class Parser: + """Parser for command line arguments and ini-file values. + + :ivar extra_info: Dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + prog: Optional[str] = None + + def __init__( + self, + usage: Optional[str] = None, + processopt: Optional[Callable[["Argument"], None]] = None, + ) -> None: + self._anonymous = OptionGroup("custom options", parser=self) + self._groups: List[OptionGroup] = [] + self._processopt = processopt + self._usage = usage + self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {} + self._ininames: List[str] = [] + self.extra_info: Dict[str, Any] = {} + + def processoption(self, option: "Argument") -> None: + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup( + self, name: str, description: str = "", after: Optional[str] = None + ) -> "OptionGroup": + """Get (or create) a named option Group. + + :name: Name of the option group. + :description: Long description for --help output. + :after: Name of another group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.argparsing.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Register a command line option. + + :opts: Option names, can be short or long options. + :attrs: Same attributes which the ``add_argument()`` function of the + `argparse library `_ + accepts. + + After command line parsing, options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[Union[str, py.path.local]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + strargs = [str(x) if isinstance(x, py.path.local) else x for x in args] + return self.optparser.parse_args(strargs, namespace=namespace) + + def _getparser(self) -> "MyOptionParser": + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") + # bash like autocompletion for dirs (appending '/') + # Type ignored because typeshed doesn't know about argcomplete. + file_or_dir_arg.completer = filescompleter # type: ignore + return optparser + + def parse_setoption( + self, + args: Sequence[Union[str, py.path.local]], + option: argparse.Namespace, + namespace: Optional[argparse.Namespace] = None, + ) -> List[str]: + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return cast(List[str], getattr(parsedoption, FILE_OR_DIR)) + + def parse_known_args( + self, + args: Sequence[Union[str, py.path.local]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Parse and return a namespace object with known arguments at this point.""" + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[Union[str, py.path.local]], + namespace: Optional[argparse.Namespace] = None, + ) -> Tuple[argparse.Namespace, List[str]]: + """Parse and return a namespace object with known arguments, and + the remaining arguments unknown at this point.""" + optparser = self._getparser() + strargs = [str(x) if isinstance(x, py.path.local) else x for x in args] + return optparser.parse_known_args(strargs, namespace=namespace) + + def addini( + self, + name: str, + help: str, + type: Optional[ + "Literal['string', 'pathlist', 'args', 'linelist', 'bool']" + ] = None, + default=None, + ) -> None: + """Register an ini-file option. + + :name: Name of the ini-variable. + :type: Type of the variable, can be ``string``, ``pathlist``, ``args``, + ``linelist`` or ``bool``. Defaults to ``string`` if ``None`` or + not passed. + :default: Default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + """ + assert type in (None, "string", "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """Raised if an Argument instance is created with invalid or + inconsistent arguments.""" + + def __init__(self, msg: str, option: Union["Argument", str]) -> None: + self.msg = msg + self.option_id = str(option) + + def __str__(self) -> str: + if self.option_id: + return f"option {self.option_id}: {self.msg}" + else: + return self.msg + + +class Argument: + """Class that mimics the necessary behaviour of optparse.Option. 
+ + It's currently a least effort implementation and ignoring choices + and integer prefixes. + + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names: str, **attrs: Any) -> None: + """Store parms in private vars for use in add_argument.""" + self._attrs = attrs + self._short_opts: List[str] = [] + self._long_opts: List[str] = [] + if "%default" in (attrs.get("help") or ""): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3, + ) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # This might raise a keyerror as well, don't want to catch that. + if isinstance(typ, str): + if typ == "choice": + warnings.warn( + "`type` argument to addoption() is the string %r." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + "`type` argument to addoption() is the string %r, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + attrs["type"] = Argument._typ_map[typ] + # Used in test_parseopt -> test_parse_defaultgetter. + self.type = attrs["type"] + else: + self.type = typ + try: + # Attribute existence is tested in Config._processopt. + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + dest: Optional[str] = attrs.get("dest") + if dest: + self.dest = dest + elif self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError as e: + self.dest = "???" # Needed for the error repr. + raise ArgumentError("need a long or short option", self) from e + + def names(self) -> List[str]: + return self._short_opts + self._long_opts + + def attrs(self) -> Mapping[str, Any]: + # Update any attributes set by processopt. + attrs = "default dest help".split() + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts: Sequence[str]) -> None: + """Directly from optparse. + + Might not be necessary as this is passed to argparse later on. 
+ """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: List[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + def __init__( + self, name: str, description: str = "", parser: Optional[Parser] = None + ) -> None: + self.name = name + self.description = description + self.options: List[Argument] = [] + self.parser = parser + + def addoption(self, *optnames: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords. + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames: str, **attrs: Any) -> None: + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + extra_info: Optional[Dict[str, Any]] = None, + prog: Optional[str] = None, + ) -> None: + self._parser = parser + argparse.ArgumentParser.__init__( + self, + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. + self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> "NoReturn": + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + # Type ignored because the attribute is set dynamically. + msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. 
+ def parse_args( # type: ignore + self, + args: Optional[Sequence[str]] = None, + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + if sys.version_info[:2] < (3, 9): # pragma: no cover + # Backport of https://github.com/python/cpython/pull/14316 so we can + # disable long --argument abbreviations without breaking short flags. + def _parse_optional( + self, arg_string: str + ) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]: + if not arg_string: + return None + if not arg_string[0] in self.prefix_chars: + return None + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + if len(arg_string) == 1: + return None + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + if self.allow_abbrev or not arg_string.startswith("--"): + option_tuples = self._get_option_tuples(arg_string) + if len(option_tuples) > 1: + msg = gettext( + "ambiguous option: %(option)s could match %(matches)s" + ) + options = ", ".join(option for _, option, _ in option_tuples) + self.error(msg % {"option": arg_string, "matches": options}) + elif len(option_tuples) == 1: + (option_tuple,) = option_tuples + return option_tuple + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + if " " in arg_string: + return None + return None, arg_string, None + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. 
+ if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: Optional[str] = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: Dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. + """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines diff --git a/myenv/lib/python3.9/site-packages/_pytest/config/exceptions.py b/myenv/lib/python3.9/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000..4f1320e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,11 @@ +from _pytest.compat import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/myenv/lib/python3.9/site-packages/_pytest/config/findpaths.py b/myenv/lib/python3.9/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000..2edf545 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,211 @@ +import os +from pathlib import Path +from typing import Dict +from typing import Iterable +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath + +if TYPE_CHECKING: + from . import Config + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. 
+ """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> Optional[Dict[str, Union[str, List[str]]]]: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return dict(iniconfig["pytest"].items()) + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name == "pytest.ini": + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. + elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return dict(iniconfig["tool:pytest"].items()) + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. + elif filepath.suffix == ".toml": + import toml + + config = toml.load(str(filepath)) + + result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) + if result is not None: + # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), + # however we need to convert all scalar values to str for compatibility with the rest + # of the configuration system, which expects strings only. 
+ def make_scalar(v: object) -> Union[str, List[str]]: + return v if isinstance(v, list) else str(v) + + return {k: make_scalar(v) for k, v in result.items()} + + return None + + +def locate_config( + args: Iterable[Path], +) -> Tuple[ + Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]], +]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict).""" + config_names = [ + "pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [Path.cwd()] + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + return base, p, ini_config + return None, None, {} + + +def get_common_ancestor(paths: Iterable[Path]) -> Path: + common_ancestor: Optional[Path] = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = Path.cwd() + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> List[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + def safe_exists(path: Path) -> bool: + # This can throw on paths that contain characters unrepresentable at the OS level, + # or with invalid syntax on Windows (https://bugs.python.org/issue35306) + try: + return path.exists() + except OSError: + return False + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." 
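(Editor's aside — not part of the vendored pytest sources above: a minimal, hypothetical sketch of the config-file discovery that locate_config() implements, i.e. walking up from each argument path and trying the candidate file names in priority order. The helper name find_inifile is an assumption, and the section validation that load_config_dict_from_file performs is deliberately omitted for brevity.)

from pathlib import Path
from typing import Optional, Tuple

# Same candidate names and priority order as locate_config() above.
CONFIG_NAMES = ["pytest.ini", "pyproject.toml", "tox.ini", "setup.cfg"]

def find_inifile(start: Path) -> Optional[Tuple[Path, Path]]:
    """Hypothetical helper: return (rootdir, inifile) for the first candidate
    found while walking from `start` up to the filesystem root.

    Unlike locate_config(), this sketch does not check that the file actually
    contains a usable pytest section.
    """
    for base in (start, *start.parents):
        for name in CONFIG_NAMES:
            candidate = base / name
            if candidate.is_file():
                return base, candidate
    return None

# Usage (illustrative): find_inifile(Path.cwd()) would, for example, prefer a
# pytest.ini found two directories up over a setup.cfg sitting next to it.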
+ + +def determine_setup( + inifile: Optional[str], + args: Sequence[str], + rootdir_cmd_arg: Optional[str] = None, + config: Optional["Config"] = None, +) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]: + rootdir = None + dirs = get_dirs_from_args(args) + if inifile: + inipath_ = absolutepath(inifile) + inipath: Optional[Path] = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = get_common_ancestor(dirs) + else: + ancestor = get_common_ancestor(dirs) + rootdir, inipath, inicfg = locate_config([ancestor]) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg = locate_config(dirs) + if rootdir is None: + if config is not None: + cwd = config.invocation_params.dir + else: + cwd = Path.cwd() + rootdir = get_common_ancestor([cwd, ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" + if is_fs_root: + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir + ) + ) + assert rootdir is not None + return rootdir, inipath, inicfg or {} diff --git a/myenv/lib/python3.9/site-packages/_pytest/debugging.py b/myenv/lib/python3.9/site-packages/_pytest/debugging.py new file mode 100644 index 0000000..d3a5c61 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/debugging.py @@ -0,0 +1,388 @@ +"""Interactive debugging with PDB, the Python Debugger.""" +import argparse +import functools +import sys +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport + +if TYPE_CHECKING: + from _pytest.capture import CaptureManager + from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> Tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="start a custom interactive Python debugger on errors. 
" + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test.", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config._cleanup.append(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: Optional[PytestPluginManager] = None + _config: Optional[Config] = None + _saved: List[ + Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None + + @classmethod + def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: Optional["CaptureManager"]): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. + return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: Optional["CaptureManager"]): + import _pytest.config + + # Type ignored because mypy doesn't support "dynamic" + # inheritance like this. 
+ class PytestPdbWrapper(pdb_cls): # type: ignore[valid-type,misc] + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + "PDB continue (IO-capturing resumed for %s)" + % capturing, + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def do_quit(self, arg): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: Optional[CaptureManager] = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + "PDB %s (IO-capturing turned off for %s)" + % (method, capturing), + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: "CallInfo[Any]", report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace: + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]: + wrap_pytest_function_for_tracing(pyfuncitem) + yield + + +def wrap_pytest_function_for_tracing(pyfuncitem): + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs): + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem): + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType: + from doctest import UnexpectedException + + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.excinfo[2] + else: + assert excinfo._excinfo is not None + return excinfo._excinfo[2] + + +def post_mortem(t: types.TracebackType) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/myenv/lib/python3.9/site-packages/_pytest/deprecated.py b/myenv/lib/python3.9/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..19b31d6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/deprecated.py @@ -0,0 +1,87 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import UnformattedWarning + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + + +FILLFUNCARGS = UnformattedWarning( + PytestDeprecationWarning, + "{name} is deprecated, use " + "function._request._fillfixtures() instead if you cannot avoid reaching into internals.", +) + +PYTEST_COLLECT_MODULE = UnformattedWarning( + PytestDeprecationWarning, + "pytest.collect.{name} was moved to pytest.{name}\n" + "Please update to the new name.", +) + +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +MINUS_K_DASH = PytestDeprecationWarning( + "The `-k '-expr'` syntax to -k is deprecated.\nUse `-k 'not expr'` instead." +) + +MINUS_K_COLON = PytestDeprecationWarning( + "The `-k 'expr:'` syntax to -k is deprecated.\n" + "Please open an issue if you use this and want a replacement." +) + +WARNING_CAPTURED_HOOK = PytestDeprecationWarning( + "The pytest_warning_captured is deprecated and will be removed in a future release.\n" + "Please use pytest_warning_recorded instead." 
+) + +FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestDeprecationWarning( + "The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; " + "use self.session.gethookproxy() and self.session.isinitpath() instead. " +) + +STRICT_OPTION = PytestDeprecationWarning( + "The --strict option is deprecated, use --strict-markers instead." +) + +PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/myenv/lib/python3.9/site-packages/_pytest/doctest.py b/myenv/lib/python3.9/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..64e8f0e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/doctest.py @@ -0,0 +1,724 @@ +"""Discover and run doctests in modules and test files.""" +import bdb +import inspect +import platform +import sys +import traceback +import types +import warnings +from contextlib import contextmanager +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import py.path + +import pytest +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.outcomes import OutcomeException +from _pytest.pathlib import import_path +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + import doctest + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + 
type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + path: py.path.local, parent: Collector, +) -> Optional[Union["DoctestModule", "DoctestTextfile"]]: + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules and not _is_setup_py(path): + mod: DoctestModule = DoctestModule.from_parent(parent, fspath=path) + return mod + elif _is_doctest(config, path, parent): + txt: DoctestTextfile = DoctestTextfile.from_parent(parent, fspath=path) + return txt + return None + + +def _is_setup_py(path: py.path.local) -> bool: + if path.basename != "setup.py": + return False + contents = path.read_binary() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: py.path.local, parent) -> bool: + if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + for glob in globs: + if path.check(fnmatch=glob): + return True + return False + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> Type["doctest.DocTestRunner"]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. 
+ """ + + def __init__( + self, + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + doctest.DebugRunner.__init__( + self, checker=checker, verbose=verbose, optionflags=optionflags + ) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, out, test: "doctest.DocTest", example: "doctest.Example", got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> "doctest.DocTestRunner": + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(pytest.Item): + def __init__( + self, + name: str, + parent: "Union[DoctestTextfile, DoctestModule]", + runner: Optional["doctest.DocTestRunner"] = None, + dtest: Optional["doctest.DocTest"] = None, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request: Optional[FixtureRequest] = None + + @classmethod + def from_parent( # type: ignore + cls, + parent: "Union[DoctestTextfile, DoctestModule]", + *, + name: str, + runner: "doctest.DocTestRunner", + dtest: "doctest.DocTest", + ): + # incompatible signature due to to imposed limits on sublcass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def setup(self) -> None: + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + assert self.dtest is not None + assert self.runner is not None + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: List["doctest.DocTestFailure"] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. 
Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException], + ) -> Union[str, TerminalRepr]: + import doctest + + failures: Optional[ + Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]] + ] = (None) + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is not None: + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice( + self.config.getoption("doctestreport") + ) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." 
+ if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += [ + x.strip("\n") + for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + else: + return super().repr_failure(excinfo) + + def reportinfo(self): + assert self.dtest is not None + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup() -> Dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
+ encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) + filename = str(self.fspath) + name = self.fspath.basename + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: "doctest.DocTest") -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None, None, None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + "Got %r when unwrapping %r. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080" % (e, func), + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(pytest.Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug. + + https://github.com/pytest-dev/pytest/issues/3456 + https://bugs.python.org/issue25532 + """ + + def _find_lineno(self, obj, source_lines): + """Doctest code does not take into account `@property`, this + is a hackish way to fix it. + + https://bugs.python.org/issue17446 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + # Type ignored because this is a private function. + return doctest.DocTestFinder._find_lineno( # type: ignore + self, obj, source_lines, + ) + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + if _is_mocked(obj): + return + with _patch_unwrap_mock_aware(): + + # Type ignored because this is a private function. + doctest.DocTestFinder._find( # type: ignore + self, tests, obj, name, module, source_lines, globs, seen + ) + + if self.fspath.basename == "conftest.py": + module = self.config.pluginmanager._importconftest( + self.fspath, self.config.getoption("importmode") + ) + else: + try: + module = import_path(self.fspath) + except ImportError: + if self.config.getvalue("doctest_ignore_import_errors"): + pytest.skip("unable to import module %r" % self.fspath) + else: + raise + # Uses internal doctest module parsing mechanism. 
+ finder = MockAwareDocTestFinder() + optionflags = get_optionflags(self) + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest: + """Used by DoctestTextfile and DoctestItem to setup fixture information.""" + + def func() -> None: + pass + + doctest_item.funcargs = {} # type: ignore[attr-defined] + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined] + node=doctest_item, func=func, cls=None, funcargs=False + ) + fixture_request = FixtureRequest(doctest_item, _ispytest=True) + fixture_request._fillfixtures() + return fixture_request + + +def _init_checker_class() -> Type["doctest.OutputChecker"]: + import doctest + import re + + class LiteralsOutputChecker(doctest.OutputChecker): + # Based on doctest_nose_plugin.py from the nltk project + # (https://github.com/nltk/nltk) and on the "numtest" doctest extension + # by Sebastien Boisgerault (https://github.com/boisgera/numtest). + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + _number_re = re.compile( + r""" + (?P + (?P + (?P [+-]?\d*)\.(?P\d+) + | + (?P [+-]?\d+)\. + ) + (?: + [Ee] + (?P [+-]?\d+) + )? + | + (?P [+-]?\d+) + (?: + [Ee] + (?P [+-]?\d+) + ) + ) + """, + re.VERBOSE, + ) + + def check_output(self, want: str, got: str, optionflags: int) -> bool: + if doctest.OutputChecker.check_output(self, want, got, optionflags): + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + allow_number = optionflags & _get_number_flag() + + if not allow_unicode and not allow_bytes and not allow_number: + return False + + def remove_prefixes(regex: Pattern[str], txt: str) -> str: + return re.sub(regex, r"\1\2", txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + + if allow_number: + got = self._remove_unwanted_precision(want, got) + + return doctest.OutputChecker.check_output(self, want, got, optionflags) + + def _remove_unwanted_precision(self, want: str, got: str) -> str: + wants = list(self._number_re.finditer(want)) + gots = list(self._number_re.finditer(got)) + if len(wants) != len(gots): + return got + offset = 0 + for w, g in zip(wants, gots): + fraction: Optional[str] = w.group("fraction") + exponent: Optional[str] = w.group("exponent1") + if exponent is None: + exponent = w.group("exponent2") + if fraction is None: + precision = 0 + else: + precision = len(fraction) + if exponent is not None: + precision -= int(exponent) + if float(w.group()) == approx(float(g.group()), abs=10 ** -precision): + # They're close enough. Replace the text we actually + # got with the text we want, so that it will match when we + # check the string literally. 
+ got = ( + got[: g.start() + offset] + w.group() + got[g.end() + offset :] + ) + offset += w.end() - w.start() - (g.end() - g.start()) + return got + + return LiteralsOutputChecker + + +def _get_checker() -> "doctest.OutputChecker": + """Return a doctest.OutputChecker subclass that supports some + additional options: + + * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b'' + prefixes (respectively) in string literals. Useful when the same + doctest should run in Python 2 and Python 3. + + * NUMBER to ignore floating-point differences smaller than the + precision of the literal number in the doctest. + + An inner class is used to avoid importing "doctest" at the module + level. + """ + global CHECKER_CLASS + if CHECKER_CLASS is None: + CHECKER_CLASS = _init_checker_class() + return CHECKER_CLASS() + + +def _get_allow_unicode_flag() -> int: + """Register and return the ALLOW_UNICODE flag.""" + import doctest + + return doctest.register_optionflag("ALLOW_UNICODE") + + +def _get_allow_bytes_flag() -> int: + """Register and return the ALLOW_BYTES flag.""" + import doctest + + return doctest.register_optionflag("ALLOW_BYTES") + + +def _get_number_flag() -> int: + """Register and return the NUMBER flag.""" + import doctest + + return doctest.register_optionflag("NUMBER") + + +def _get_report_choice(key: str) -> int: + """Return the actual `doctest` module flag value. + + We want to do it as late as possible to avoid importing `doctest` and all + its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@pytest.fixture(scope="session") +def doctest_namespace() -> Dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests.""" + return dict() diff --git a/myenv/lib/python3.9/site-packages/_pytest/faulthandler.py b/myenv/lib/python3.9/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000..ff673b5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/faulthandler.py @@ -0,0 +1,116 @@ +import io +import os +import sys +from typing import Generator +from typing import TextIO + +import pytest +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.store import StoreKey + + +fault_handler_stderr_key = StoreKey[TextIO]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish." + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + if not faulthandler.is_enabled(): + # faulthhandler is not enabled, so install plugin that does the actual work + # of enabling faulthandler before each test executes. + config.pluginmanager.register(FaultHandlerHooks(), "faulthandler-hooks") + else: + # Do not handle dumping to stderr if faulthandler is already enabled, so warn + # users that the option is being ignored. 
+ timeout = FaultHandlerHooks.get_timeout_config_value(config) + if timeout > 0: + config.issue_config_time_warning( + pytest.PytestConfigWarning( + "faulthandler module enabled before pytest configuration step, " + "'faulthandler_timeout' option ignored" + ), + stacklevel=2, + ) + + +class FaultHandlerHooks: + """Implements hooks that will actually install fault handler before tests execute, + as well as correctly handle pdb and internal errors.""" + + def pytest_configure(self, config: Config) -> None: + import faulthandler + + stderr_fd_copy = os.dup(self._get_stderr_fileno()) + config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w") + faulthandler.enable(file=config._store[fault_handler_stderr_key]) + + def pytest_unconfigure(self, config: Config) -> None: + import faulthandler + + faulthandler.disable() + # close our dup file installed during pytest_configure + # re-enable the faulthandler, attaching it to the default sys.stderr + # so we can see crashes after pytest has finished, usually during + # garbage collection during interpreter shutdown + config._store[fault_handler_stderr_key].close() + del config._store[fault_handler_stderr_key] + faulthandler.enable(file=self._get_stderr_fileno()) + + @staticmethod + def _get_stderr_fileno(): + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, io.UnsupportedOperation): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
+ return sys.__stderr__.fileno() + + @staticmethod + def get_timeout_config_value(config): + return float(config.getini("faulthandler_timeout") or 0.0) + + @pytest.hookimpl(hookwrapper=True, trylast=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]: + timeout = self.get_timeout_config_value(item.config) + stderr = item.config._store[fault_handler_stderr_key] + if timeout > 0 and stderr is not None: + import faulthandler + + faulthandler.dump_traceback_later(timeout, file=stderr) + try: + yield + finally: + faulthandler.cancel_dump_traceback_later() + else: + yield + + @pytest.hookimpl(tryfirst=True) + def pytest_enter_pdb(self) -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + @pytest.hookimpl(tryfirst=True) + def pytest_exception_interact(self) -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/myenv/lib/python3.9/site-packages/_pytest/fixtures.py b/myenv/lib/python3.9/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..273bcaf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/fixtures.py @@ -0,0 +1,1680 @@ +import functools +import inspect +import os +import sys +import warnings +from collections import defaultdict +from collections import deque +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr +import py + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import _format_args +from _pytest.compat import _PytestWrapper +from _pytest.compat import assert_never +from _pytest.compat import final +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import FILLFUNCARGS +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import absolutepath +from _pytest.store import StoreKey + +if TYPE_CHECKING: + from typing import Deque + from typing import NoReturn + from typing_extensions import Literal + + from _pytest.main import Session + from _pytest.python import CallSpec2 + from _pytest.python import Function + from _pytest.python import Metafunc + + _Scope = 
Literal["session", "package", "module", "class", "function"] + + +# The value of the fixture -- return/yield of the fixture function (type variable). +_FixtureValue = TypeVar("_FixtureValue") +# The type of the fixture function (type variable). +_FixtureFunction = TypeVar("_FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Union[ + Callable[..., _FixtureValue], Callable[..., Generator[_FixtureValue, None, None]] +] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = Union[ + Tuple[ + # The result. + _FixtureValue, + # Cache key. + object, + None, + ], + Tuple[ + None, + # Cache key. + object, + # Exc info if raised. + Tuple[Type[BaseException], BaseException, TracebackType], + ], +] + + +@attr.s(frozen=True) +class PseudoFixtureDef(Generic[_FixtureValue]): + cached_result = attr.ib(type="_FixtureCachedResult[_FixtureValue]") + scope = attr.ib(type="_Scope") + + +def pytest_sessionstart(session: "Session") -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package(node, fixturedef: "FixtureDef[object]"): + import pytest + + cls = pytest.Package + current = node + fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py") + while current and ( + type(current) is not cls or fixture_package_name != current.nodeid + ): + current = current.parent + if current is None: + return node.session + return current + + +def get_scope_node( + node: nodes.Node, scope: "_Scope" +) -> Optional[Union[nodes.Item, nodes.Collector]]: + import _pytest.python + + if scope == "function": + return node.getparent(nodes.Item) + elif scope == "class": + return node.getparent(_pytest.python.Class) + elif scope == "module": + return node.getparent(_pytest.python.Module) + elif scope == "package": + return node.getparent(_pytest.python.Package) + elif scope == "session": + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +# Used for storing artificial fixturedefs for direct parametrization. +name2pseudofixturedef_key = StoreKey[Dict[str, "FixtureDef[Any]"]]() + + +def add_funcarg_pseudo_fixture_def( + collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager" +) -> None: + # This function will transform all collected calls to functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. + if not metafunc._calls[0].funcargs: + # This function call does not have direct parametrization. + return + # Collect funcargs of all callspecs into a list of values. 
+ arg2params: Dict[str, List[object]] = {} + arg2scope: Dict[str, _Scope] = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # Register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # If we have a scope that is higher than function, we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, _pytest.python.Module) + # Use module-level collector for class-scope (for now). + node = collector + if node is None: + name2pseudofixturedef = None + else: + default: Dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node._store.setdefault( + name2pseudofixturedef_key, default + ) + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + arg2fixturedefs[argname] = [name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager=fixturemanager, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=arg2scope[argname], + params=valuelist, + unittest=False, + ids=None, + ) + arg2fixturedefs[argname] = [fixturedef] + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: + """Return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + fixturemarker: Optional[FixtureFunctionMarker] = getattr( + obj, "_pytestfixturefunction", None + ) + except TEST_OUTCOME: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + return fixturemarker + + +# Parametrized fixture key, helper alias for code below. +_Key = Tuple[object, ...] + + +def get_parametrized_fixture_keys(item: nodes.Item, scopenum: int) -> Iterator[_Key]: + """Return list of keys for all parametrized arguments which match + the specified scope. """ + assert scopenum < scopenum_function # function + try: + callspec = item.callspec # type: ignore[attr-defined] + except AttributeError: + pass + else: + cs: CallSpec2 = callspec + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
+ for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key: _Key = (argname, param_index) + elif scopenum == 1: # package + key = (argname, param_index, item.fspath.dirpath()) + elif scopenum == 2: # module + key = (argname, param_index, item.fspath) + elif scopenum == 3: # class + item_cls = item.cls # type: ignore[attr-defined] + key = (argname, param_index, item.fspath, item_cls) + yield key + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: + argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]] = {} + items_by_argkey: Dict[int, Dict[_Key, Deque[nodes.Item]]] = {} + for scopenum in range(0, scopenum_function): + d: Dict[nodes.Item, Dict[_Key, None]] = {} + argkeys_cache[scopenum] = d + item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque) + items_by_argkey[scopenum] = item_d + for item in items: + keys = dict.fromkeys(get_parametrized_fixture_keys(item, scopenum), None) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items_dict = dict.fromkeys(items, None) + return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, 0)) + + +def fix_cache_order( + item: nodes.Item, + argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]], +) -> None: + for scopenum in range(0, scopenum_function): + for key in argkeys_cache[scopenum].get(item, []): + items_by_argkey[scopenum][key].appendleft(item) + + +def reorder_items_atscope( + items: Dict[nodes.Item, None], + argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]], + scopenum: int, +) -> Dict[nodes.Item, None]: + if scopenum >= scopenum_function or len(items) < 3: + return items + ignore: Set[Optional[_Key]] = set() + items_deque = deque(items) + items_done: Dict[nodes.Item, None] = {} + scoped_items_by_argkey = items_by_argkey[scopenum] + scoped_argkeys_cache = argkeys_cache[scopenum] + while items_deque: + no_argkey_group: Dict[nodes.Item, None] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = dict.fromkeys( + (k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. 
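+                # For illustration (names are examples): if test_a and test_c
+                # both carry the session-scoped key ("db", 0) while test_b does
+                # not, test_c is pulled up to run right after test_a below, so
+                # "db" is set up and torn down only once.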
+ matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def _fillfuncargs(function: "Function") -> None: + """Fill missing fixtures for a test function, old public API (deprecated).""" + warnings.warn(FILLFUNCARGS.format(name="pytest._fillfuncargs()"), stacklevel=2) + _fill_fixtures_impl(function) + + +def fillfixtures(function: "Function") -> None: + """Fill missing fixtures for a test function (deprecated).""" + warnings.warn( + FILLFUNCARGS.format(name="_pytest.fixtures.fillfixtures()"), stacklevel=2 + ) + _fill_fixtures_impl(function) + + +def _fill_fixtures_impl(function: "Function") -> None: + """Internal implementation to fill fixtures on the given function object.""" + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. + fm = function.session._fixturemanager + assert function.parent is not None + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function, _ispytest=True) + request._fillfixtures() + # Prune out funcargs for jstests. + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + +def get_direct_param_fixture_func(request): + return request.param + + +@attr.s(slots=True) +class FuncFixtureInfo: + # Original function argument names. + argnames = attr.ib(type=Tuple[str, ...]) + # Argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames = attr.ib(type=Tuple[str, ...]) + names_closure = attr.ib(type=List[str]) + name2fixturedefs = attr.ib(type=Dict[str, Sequence["FixtureDef[Any]"]]) + + def prune_dependency_tree(self) -> None: + """Recompute names_closure from initialnames and name2fixturedefs. + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure: Set[str] = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # Argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). 
+ if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest: + """A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context and has + an optional ``param`` attribute in case the fixture is parametrized + indirectly. + """ + + def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pyfuncitem = pyfuncitem + #: Fixture for which this request is being performed. + self.fixturename: Optional[str] = None + #: Scope string, one of "function", "class", "module", "session". + self.scope: _Scope = "function" + self._fixture_defs: Dict[str, FixtureDef[Any]] = {} + fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index: Dict[str, int] = {} + self._fixturemanager: FixtureManager = (pyfuncitem.session._fixturemanager) + + @property + def fixturenames(self) -> List[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """Underlying collection node (depends on current request scope).""" + return self._getscopeitem(self.scope) + + def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]": + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # We arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time. + assert self._pyfuncitem.parent is not None + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + # TODO: Fix this type ignore. Either add assert or adjust types. + # Can this be None here? + self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment] + # fixturedefs list is immutable so we maintain a decreasing index. + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config # type: ignore[no-any-return] + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + # unittest support hack, see _pytest.unittest.TestCaseFunction. 
+ try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @property + def fspath(self) -> py.path.local: + """The file system path of the test module which collected this test.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + # TODO: Remove ignore once _pyfuncitem is properly typed. + return self._pyfuncitem.fspath # type: ignore + + @property + def keywords(self): + """Keywords/markers dictionary for the underlying node.""" + return self.node.keywords + + @property + def session(self) -> "Session": + """Pytest session object.""" + return self._pyfuncitem.session # type: ignore[no-any-return] + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called after the last test + within the requesting test context finished execution.""" + # XXX usually this method is shadowed by fixturedef specific ones. + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None: + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem + ) + + def applymarker(self, marker: Union[str, MarkDecorator]) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :param marker: + A :py:class:`_pytest.mark.MarkDecorator` object created by a call + to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg: Optional[str]) -> "NoReturn": + """Raise a FixtureLookupError with the given message.""" + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self) -> None: + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def getfixturevalue(self, argname: str) -> Any: + """Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + + :raises pytest.FixtureLookupError: + If the given fixture could not be found. + """ + fixturedef = self._get_active_fixturedef(argname) + assert fixturedef.cached_result is not None + return fixturedef.cached_result[0] + + def _get_active_fixturedef( + self, argname: str + ) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]: + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + scope: _Scope = "function" + return PseudoFixtureDef(cached_result, scope) + raise + # Remove indent to prevent the python3 exception + # from leaking into the call. 
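+        # For illustration (an assumed example): request.getfixturevalue("db")
+        # inside a test body lands here on a cache miss -- the fixture value
+        # is computed on the spot below and then memoized in
+        # self._fixture_defs for later lookups of the same name.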
+ self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self) -> List["FixtureDef[Any]"]: + current = self + values: List[FixtureDef[Any]] = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + values.reverse() + return values + values.append(fixturedef) + assert isinstance(current, SubRequest) + current = current._parent_request + + def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None: + """Create a SubRequest based on "self" and call the execute method + of the given FixtureDef object. + + This will force the FixtureDef object to throw away any previous + results and compute a new fixture value, which will be stored into + the FixtureDef object itself. + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = py.path.local(frameinfo.filename) + source_lineno = frameinfo.lineno + rel_source_path = source_path.relto(funcitem.config.rootdir) + if rel_source_path: + source_path_str = rel_source_path + else: + source_path_str = str(source_path) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path_str, + source_lineno, + ) + ) + fail(msg, pytrace=False) + else: + param_index = funcitem.callspec.indices[argname] + # If a parametrize invocation set a scope it will override + # the static scope defined with the fixture function. + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Check if a higher-level scoped fixture accesses a lower level one. + subrequest._check_scope(argname, self.scope, scope) + try: + # Call the fixture function. + fixturedef.execute(request=subrequest) + finally: + self._schedule_finalizers(fixturedef, subrequest) + + def _schedule_finalizers( + self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest" + ) -> None: + # If fixture function failed it might have registered finalizers. + self.session._setupstate.addfinalizer( + functools.partial(fixturedef.finish, request=subrequest), subrequest.node + ) + + def _check_scope( + self, argname: str, invoking_scope: "_Scope", requested_scope: "_Scope", + ) -> None: + if argname == "request": + return + if scopemismatch(invoking_scope, requested_scope): + # Try to report something helpful. 
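+            # For illustration, the classic pitfall that ends up here
+            # (names are examples only):
+            #
+            #     @pytest.fixture(scope="session")
+            #     def data_dir(tmp_path):  # tmp_path is function-scoped
+            #         ...
+            #
+            # which fails with the ScopeMismatch message below.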
+            lines = self._factorytraceback()
+            fail(
+                "ScopeMismatch: You tried to access the %r scoped "
+                "fixture %r with a %r scoped request object, "
+                "involved factories\n%s"
+                % ((requested_scope, argname, invoking_scope, "\n".join(lines))),
+                pytrace=False,
+            )
+
+    def _factorytraceback(self) -> List[str]:
+        lines = []
+        for fixturedef in self._get_fixturestack():
+            factory = fixturedef.func
+            fs, lineno = getfslineno(factory)
+            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+            args = _format_args(factory)
+            lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
+        return lines
+
+    def _getscopeitem(self, scope: "_Scope") -> Union[nodes.Item, nodes.Collector]:
+        if scope == "function":
+            # This might also be a non-function Item despite its attribute name.
+            node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem
+        elif scope == "package":
+            # FIXME: _fixturedef is not defined on FixtureRequest (this class),
+            # but on SubRequest (a subclass).
+            node = get_scope_package(self._pyfuncitem, self._fixturedef)  # type: ignore[attr-defined]
+        else:
+            node = get_scope_node(self._pyfuncitem, scope)
+            if node is None and scope == "class":
+                # Fallback to function item itself.
+                node = self._pyfuncitem
+        assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
+            scope, self._pyfuncitem
+        )
+        return node
+
+    def __repr__(self) -> str:
+        return "<FixtureRequest for %r>" % (self.node)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """A sub request for handling getting a fixture from a test function/fixture."""
+
+    def __init__(
+        self,
+        request: "FixtureRequest",
+        scope: "_Scope",
+        param,
+        param_index: int,
+        fixturedef: "FixtureDef[object]",
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self.scope = scope
+        self._fixturedef = fixturedef
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        """Add finalizer/teardown function to be called after the last test
+        within the requesting test context finished execution."""
+        self._fixturedef.addfinalizer(finalizer)
+
+    def _schedule_finalizers(
+        self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
+    ) -> None:
+        # If the executing fixturedef was not explicitly requested in the argument list (via
+        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
+        # first.
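+        # For illustration (names are examples): when fixture "a" calls
+        # request.getfixturevalue("b") in its body instead of declaring "b"
+        # as a parameter, a's finish is registered as a finalizer of "b", so
+        # "a" is torn down before "b" despite the undeclared dependency.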
+        if fixturedef.argname not in self.fixturenames:
+            fixturedef.addfinalizer(
+                functools.partial(self._fixturedef.finish, request=self)
+            )
+        super()._schedule_finalizers(fixturedef, subrequest)
+
+
+scopes: List["_Scope"] = ["session", "package", "module", "class", "function"]
+scopenum_function = scopes.index("function")
+
+
+def scopemismatch(currentscope: "_Scope", newscope: "_Scope") -> bool:
+    return scopes.index(newscope) > scopes.index(currentscope)
+
+
+def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
+    """Look up the index of ``scope`` and raise a descriptive value error
+    if not defined."""
+    strscopes: Sequence[str] = scopes
+    try:
+        return strscopes.index(scope)
+    except ValueError:
+        fail(
+            "{} {}got an unexpected scope value '{}'".format(
+                descr, f"from {where} " if where else "", scope
+            ),
+            pytrace=False,
+        )
+
+
+@final
+class FixtureLookupError(LookupError):
+    """Could not return a requested fixture (missing or invalid)."""
+
+    def __init__(
+        self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
+    ) -> None:
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self) -> "FixtureLookupErrorRepr":
+        tblines: List[str] = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # The last fixture raised an error, let's present
+            # it at the requesting side.
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (OSError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno + 1))
+            else:
+                addline("file {}, line {}".format(fspath, lineno + 1))
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith("def"):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = set()
+            parentid = self.request._pyfuncitem.parent.nodeid
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parentid))
+                if faclist:
+                    available.add(name)
+            if self.argname in available:
+                msg = " recursive dependency involving fixture '{}' detected".format(
+                    self.argname
+                )
+            else:
+                msg = f"fixture '{self.argname}' not found"
+            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: Union[str, py.path.local], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: Optional[str], + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", red=True, + ) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) + + +def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn": + fs, lineno = getfslineno(fixturefunc) + location = "{}:{}".format(fs, lineno + 1) + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) + + +def call_fixture_func( + fixturefunc: "_FixtureFunc[_FixtureValue]", request: FixtureRequest, kwargs +) -> _FixtureValue: + if is_generator(fixturefunc): + fixturefunc = cast( + Callable[..., Generator[_FixtureValue, None, None]], fixturefunc + ) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., _FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc(fixturefunc, "fixture function has more than one 'yield'") + + +def _eval_scope_callable( + scope_callable: "Callable[[str, Config], _Scope]", + fixture_name: str, + config: Config, +) -> "_Scope": + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. 
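+    # For illustration, a dynamic-scope callable as accepted here (the option
+    # name is a made-up example):
+    #
+    #     def dynamic_scope(fixture_name, config):
+    #         if config.getoption("--keep-containers", None):
+    #             return "session"
+    #         return "function"
+    #
+    #     @pytest.fixture(scope=dynamic_scope)
+    #     def docker_container(): ...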
+ result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + "Error evaluating {} while defining fixture '{}'.\n" + "Expected a function with the signature (*, fixture_name, config)".format( + scope_callable, fixture_name + ) + ) from e + if not isinstance(result, str): + fail( + "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n" + "{!r}".format(scope_callable, fixture_name, result), + pytrace=False, + ) + return result + + +@final +class FixtureDef(Generic[_FixtureValue]): + """A container for a factory definition.""" + + def __init__( + self, + fixturemanager: "FixtureManager", + baseid: Optional[str], + argname: str, + func: "_FixtureFunc[_FixtureValue]", + scope: "Union[_Scope, Callable[[str, Config], _Scope]]", + params: Optional[Sequence[object]], + unittest: bool = False, + ids: Optional[ + Union[ + Tuple[Union[None, str, float, int, bool], ...], + Callable[[Any], Optional[object]], + ] + ] = None, + ) -> None: + self._fixturemanager = fixturemanager + self.baseid = baseid or "" + self.has_location = baseid is not None + self.func = func + self.argname = argname + if callable(scope): + scope_ = _eval_scope_callable(scope, argname, fixturemanager.config) + else: + scope_ = scope + self.scopenum = scope2index( + # TODO: Check if the `or` here is really necessary. + scope_ or "function", # type: ignore[unreachable] + descr=f"Fixture '{func.__name__}'", + where=baseid, + ) + self.scope = scope_ + self.params: Optional[Sequence[object]] = params + self.argnames: Tuple[str, ...] = getfuncargnames( + func, name=argname, is_method=unittest + ) + self.unittest = unittest + self.ids = ids + self.cached_result: Optional[_FixtureCachedResult[_FixtureValue]] = None + self._finalizers: List[Callable[[], object]] = [] + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._finalizers.append(finalizer) + + def finish(self, request: SubRequest) -> None: + exc = None + try: + while self._finalizers: + try: + func = self._finalizers.pop() + func() + except BaseException as e: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = e + if exc: + raise exc + finally: + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # Even if finalization fails, we invalidate the cached fixture + # value and remove all finalizers because they may be bound methods + # which will keep instances alive. + self.cached_result = None + self._finalizers = [] + + def execute(self, request: SubRequest) -> _FixtureValue: + # Get required arguments and register our own finish() + # with their finalization. + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + # PseudoFixtureDef is only for "request". + assert isinstance(fixturedef, FixtureDef) + fixturedef.addfinalizer(functools.partial(self.finish, request=request)) + + my_cache_key = self.cache_key(request) + if self.cached_result is not None: + # note: comparison with `==` can fail (or be expensive) for e.g. + # numpy arrays (#6497). 
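+            # For illustration: with a numpy-array param, `key == cached` would
+            # evaluate elementwise (truth-ambiguous) or be expensive, while the
+            # identity check below is always cheap and unambiguous.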
+            cache_key = self.cached_result[1]
+            if my_cache_key is cache_key:
+                if self.cached_result[2] is not None:
+                    _, val, tb = self.cached_result[2]
+                    raise val.with_traceback(tb)
+                else:
+                    result = self.cached_result[0]
+                    return result
+            # We have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one.
+            self.finish(request)
+            assert self.cached_result is None
+
+        hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
+        result = hook.pytest_fixture_setup(fixturedef=self, request=request)
+        return result
+
+    def cache_key(self, request: SubRequest) -> object:
+        return request.param_index if not hasattr(request, "param") else request.param
+
+    def __repr__(self) -> str:
+        return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
+            self.argname, self.scope, self.baseid
+        )
+
+
+def resolve_fixture_function(
+    fixturedef: FixtureDef[_FixtureValue], request: FixtureRequest
+) -> "_FixtureFunc[_FixtureValue]":
+    """Get the actual callable that can be called to obtain the fixture
+    value, dealing with unittest-specific instances and bound methods."""
+    fixturefunc = fixturedef.func
+    if fixturedef.unittest:
+        if request.instance is not None:
+            # Bind the unbound method to the TestCase instance.
+            fixturefunc = fixturedef.func.__get__(request.instance)  # type: ignore[union-attr]
+    else:
+        # The fixture function needs to be bound to the actual
+        # request.instance so that code working with "fixturedef" behaves
+        # as expected.
+        if request.instance is not None:
+            # Handle the case where fixture is defined not in a test class, but some other class
+            # (for example a plugin class with a fixture), see #2270.
+            if hasattr(fixturefunc, "__self__") and not isinstance(
+                request.instance, fixturefunc.__self__.__class__  # type: ignore[union-attr]
+            ):
+                return fixturefunc
+            fixturefunc = getimfunc(fixturedef.func)
+            if fixturefunc != fixturedef.func:
+                fixturefunc = fixturefunc.__get__(request.instance)  # type: ignore[union-attr]
+    return fixturefunc
+
+
+def pytest_fixture_setup(
+    fixturedef: FixtureDef[_FixtureValue], request: SubRequest
+) -> _FixtureValue:
+    """Execution of fixture setup."""
+    kwargs = {}
+    for argname in fixturedef.argnames:
+        fixdef = request._get_active_fixturedef(argname)
+        assert fixdef.cached_result is not None
+        result, arg_cache_key, exc = fixdef.cached_result
+        request._check_scope(argname, request.scope, fixdef.scope)
+        kwargs[argname] = result
+
+    fixturefunc = resolve_fixture_function(fixturedef, request)
+    my_cache_key = fixturedef.cache_key(request)
+    try:
+        result = call_fixture_func(fixturefunc, request, kwargs)
+    except TEST_OUTCOME:
+        exc_info = sys.exc_info()
+        assert exc_info[0] is not None
+        fixturedef.cached_result = (None, my_cache_key, exc_info)
+        raise
+    fixturedef.cached_result = (result, my_cache_key, None)
+    return result
+
+
+def _ensure_immutable_ids(
+    ids: Optional[
+        Union[
+            Iterable[Union[None, str, float, int, bool]],
+            Callable[[Any], Optional[object]],
+        ]
+    ],
+) -> Optional[
+    Union[
+        Tuple[Union[None, str, float, int, bool], ...],
+        Callable[[Any], Optional[object]],
+    ]
+]:
+    if ids is None:
+        return None
+    if callable(ids):
+        return ids
+    return tuple(ids)
+
+
+def _params_converter(
+    params: Optional[Iterable[object]],
+) -> Optional[Tuple[object, ...]]:
+    return tuple(params) if params is not None else None
+
+
+def wrap_function_to_error_out_if_called_directly(
+    function: _FixtureFunction, fixture_marker: "FixtureFunctionMarker",
+) -> _FixtureFunction:
+    """Wrap the given fixture function so we can raise
an error about it being called directly, + instead of used as an argument in a test function.""" + message = ( + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/fixture.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." + ).format(name=fixture_marker.name or function.__name__) + + @functools.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # Keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774). + result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] + + return cast(_FixtureFunction, result) + + +@final +@attr.s(frozen=True) +class FixtureFunctionMarker: + scope = attr.ib(type="Union[_Scope, Callable[[str, Config], _Scope]]") + params = attr.ib(type=Optional[Tuple[object, ...]], converter=_params_converter) + autouse = attr.ib(type=bool, default=False) + ids = attr.ib( + type=Union[ + Tuple[Union[None, str, float, int, bool], ...], + Callable[[Any], Optional[object]], + ], + default=None, + converter=_ensure_immutable_ids, + ) + name = attr.ib(type=Optional[str], default=None) + + def __call__(self, function: _FixtureFunction) -> _FixtureFunction: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + "'request' is a reserved word for fixtures, use another name:\n {}".format( + location + ), + pytrace=False, + ) + + # Type ignored because https://github.com/python/mypy/issues/2087. + function._pytestfixturefunction = self # type: ignore[attr-defined] + return function + + +@overload +def fixture( + fixture_function: _FixtureFunction, + *, + scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = ..., + name: Optional[str] = ..., +) -> _FixtureFunction: + ... + + +@overload +def fixture( + fixture_function: None = ..., + *, + scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = ..., + name: Optional[str] = None, +) -> FixtureFunctionMarker: + ... + + +def fixture( + fixture_function: Optional[_FixtureFunction] = None, + *, + scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = "function", + params: Optional[Iterable[object]] = None, + autouse: bool = False, + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = None, + name: Optional[str] = None, +) -> Union[FixtureFunctionMarker, _FixtureFunction]: + """Decorator to mark a fixture factory function. 
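+
+    An illustrative sketch of both forms (the fixture bodies are examples,
+    not part of this API)::
+
+        @pytest.fixture
+        def plain():
+            return 42
+
+        @pytest.fixture(scope="module", params=["a", "b"])
+        def parametrized(request):
+            yield request.param  # code after the yield runs as teardown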
+
+    This decorator can be used, with or without parameters, to define a
+    fixture function.
+
+    The name of the fixture function can later be referenced to cause its
+    invocation ahead of running tests: test modules or classes can use the
+    ``pytest.mark.usefixtures(fixturename)`` marker.
+
+    Test functions can directly use fixture names as input arguments in which
+    case the fixture instance returned from the fixture function will be
+    injected.
+
+    Fixtures can provide their values to test functions using ``return`` or
+    ``yield`` statements. When using ``yield`` the code block after the
+    ``yield`` statement is executed as teardown code regardless of the test
+    outcome, and must yield exactly once.
+
+    :param scope:
+        The scope for which this fixture is shared; one of ``"function"``
+        (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
+
+        This parameter may also be a callable which receives ``(fixture_name, config)``
+        as parameters, and must return a ``str`` with one of the values mentioned above.
+
+        See :ref:`dynamic scope` in the docs for more information.
+
+    :param params:
+        An optional list of parameters which will cause multiple invocations
+        of the fixture function and all of the tests using it. The current
+        parameter is available in ``request.param``.
+
+    :param autouse:
+        If True, the fixture func is activated for all tests that can see it.
+        If False (the default), an explicit reference is needed to activate
+        the fixture.
+
+    :param ids:
+        List of string ids each corresponding to the params so that they are
+        part of the test id. If no ids are provided they will be generated
+        automatically from the params.
+
+    :param name:
+        The name of the fixture. This defaults to the name of the decorated
+        function. If a fixture is used in the same module in which it is
+        defined, the function name of the fixture will be shadowed by the
+        function arg that requests the fixture; one way to resolve this is to
+        name the decorated function ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    fixture_marker = FixtureFunctionMarker(
+        scope=scope, params=params, autouse=autouse, ids=ids, name=name,
+    )
+
+    # Direct decoration.
+    if fixture_function:
+        return fixture_marker(fixture_function)
+
+    return fixture_marker
+
+
+def yield_fixture(
+    fixture_function=None,
+    *args,
+    scope="function",
+    params=None,
+    autouse=False,
+    ids=None,
+    name=None,
+):
+    """(Return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    warnings.warn(YIELD_FIXTURE, stacklevel=2)
+    return fixture(
+        fixture_function,
+        *args,
+        scope=scope,
+        params=params,
+        autouse=autouse,
+        ids=ids,
+        name=name,
+    )
+
+
+@fixture(scope="session")
+def pytestconfig(request: FixtureRequest) -> Config:
+    """Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
+
+    Example::
+
+        def test_foo(pytestconfig):
+            if pytestconfig.getoption("verbose") > 0:
+                ...
+
+    """
+    return request.config
+
+
+def pytest_addoption(parser: Parser) -> None:
+    parser.addini(
+        "usefixtures",
+        type="args",
+        default=[],
+        help="list of default fixtures to be used with this project",
+    )
+
+
+class FixtureManager:
+    """pytest fixture definitions and information is stored and managed
+    from this class.
+
+    During collection fm.parsefactories() is called multiple times to parse
+    fixture function definitions into FixtureDef objects and internal
+    data structures.
+ + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session: "Session") -> None: + self.session = session + self.config: Config = session.config + self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {} + self._holderobjseen: Set[object] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Dict[str, List[str]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: List[str] = [] + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.extend(p_argnames) + + return parametrize_argnames + + def getfixtureinfo( + self, node: nodes.Node, func, cls, funcargs: bool = True + ) -> FuncFixtureInfo: + if funcargs and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + + usefixtures = tuple( + arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args + ) + initialnames = usefixtures + argnames + fm = node.session._fixturemanager + initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( + initialnames, node, ignore_args=self._get_direct_parametrize_args(node) + ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + nodeid = None + try: + p = absolutepath(plugin.__file__) # type: ignore[attr-defined] + except AttributeError: + pass + else: + # Construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id). 
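+            # For illustration (paths are examples): a plugin loaded from
+            # tests/sub/conftest.py gets nodeid "tests/sub", limiting its
+            # fixtures to tests collected under that directory, while a
+            # non-conftest plugin keeps nodeid None and is visible everywhere.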
+ if p.name.startswith("conftest.py"): + try: + nodeid = str(p.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid: str) -> Iterator[str]: + """Return the names of autouse fixtures applicable to nodeid.""" + for parentnodeid in nodes.iterparentnodeids(nodeid): + basenames = self._nodeid_autousenames.get(parentnodeid) + if basenames: + yield from basenames + + def getfixtureclosure( + self, + fixturenames: Tuple[str, ...], + parentnode: nodes.Node, + ignore_args: Sequence[str] = (), + ) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). + + parentid = parentnode.nodeid + fixturenames_closure = list(self._getautousenames(parentid)) + + def merge(otherlist: Iterable[str]) -> None: + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + + # At this point, fixturenames_closure contains what we call "initialnames", + # which is a set of fixturenames the function immediately requests. We + # need to return it as well, so save this. + initialnames = tuple(fixturenames_closure) + + arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in ignore_args: + continue + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name: str) -> int: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return scopes.index("function") + else: + return fixturedefs[-1].scopenum + + fixturenames_closure.sort(key=sort_by_scope) + return initialnames, fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: "Metafunc") -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). 
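+            # For illustration (names are examples): a parametrized conftest
+            # fixture "db" overridden by a module-level "db" that requests the
+            # super fixture -- the loop below walks from the closest override
+            # outwards until it finds the parametrized definition to apply.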
+            for fixturedef in reversed(fixture_defs):
+                # Fixture is parametrized, apply it and stop.
+                if fixturedef.params is not None:
+                    metafunc.parametrize(
+                        argname,
+                        fixturedef.params,
+                        indirect=True,
+                        scope=fixturedef.scope,
+                        ids=fixturedef.ids,
+                    )
+                    break
+
+                # Not requesting the overridden super fixture, stop.
+                if argname not in fixturedef.argnames:
+                    break
+
+                # Try next super fixture, if any.
+
+    def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None:
+        # Separate parametrized setups.
+        items[:] = reorder_items(items)
+
+    def parsefactories(
+        self, node_or_obj, nodeid=NOTSET, unittest: bool = False
+    ) -> None:
+        if nodeid is not NOTSET:
+            holderobj = node_or_obj
+        else:
+            holderobj = node_or_obj.obj
+            nodeid = node_or_obj.nodeid
+        if holderobj in self._holderobjseen:
+            return
+
+        self._holderobjseen.add(holderobj)
+        autousenames = []
+        for name in dir(holderobj):
+            # The attribute can be an arbitrary descriptor, so the attribute
+            # access below can raise. safe_getattr() ignores such exceptions.
+            obj = safe_getattr(holderobj, name, None)
+            marker = getfixturemarker(obj)
+            if not isinstance(marker, FixtureFunctionMarker):
+                # Magic globals with __getattr__ might have got us a wrong
+                # fixture attribute.
+                continue
+
+            if marker.name:
+                name = marker.name
+
+            # During fixture definition we wrap the original fixture function
+            # to issue a warning if called directly, so here we unwrap it in
+            # order to not emit the warning when pytest itself calls the
+            # fixture function.
+            obj = get_real_method(obj, holderobj)
+
+            fixture_def = FixtureDef(
+                fixturemanager=self,
+                baseid=nodeid,
+                argname=name,
+                func=obj,
+                scope=marker.scope,
+                params=marker.params,
+                unittest=unittest,
+                ids=marker.ids,
+            )
+
+            faclist = self._arg2fixturedefs.setdefault(name, [])
+            if fixture_def.has_location:
+                faclist.append(fixture_def)
+            else:
+                # fixturedefs with no location are at the front
+                # so this inserts the current fixturedef after the
+                # existing fixturedefs from external plugins but
+                # before the fixturedefs provided in conftests.
+                i = len([f for f in faclist if not f.has_location])
+                faclist.insert(i, fixture_def)
+            if marker.autouse:
+                autousenames.append(name)
+
+        if autousenames:
+            self._nodeid_autousenames.setdefault(nodeid or "", []).extend(autousenames)
+
+    def getfixturedefs(
+        self, argname: str, nodeid: str
+    ) -> Optional[Sequence[FixtureDef[Any]]]:
+        """Get a list of fixtures which are applicable to the given node id.
+
+        :param str argname: Name of the fixture to search for.
+        :param str nodeid: Full node id of the requesting test.
+ :rtype: Sequence[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = set(nodes.iterparentnodeids(nodeid)) + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef diff --git a/myenv/lib/python3.9/site-packages/_pytest/freeze_support.py b/myenv/lib/python3.9/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..8b93ed5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" +import types +from typing import Iterator +from typing import List +from typing import Union + + +def freeze_includes() -> List[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import py + import _pytest + + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: Union[str, types.ModuleType], prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. + + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ # type: ignore[attr-defined] + path, prefix = package_path[0], package.__name__ + "." + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/myenv/lib/python3.9/site-packages/_pytest/helpconfig.py b/myenv/lib/python3.9/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..4384d07 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/helpconfig.py @@ -0,0 +1,261 @@ +"""Version info, help messages, tracing configuration.""" +import os +import sys +from argparse import Action +from typing import List +from typing import Optional +from typing import Union + +import py + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. 
+ if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="display pytest version and information about plugins." + "When given twice, also display information about plugins.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin module name or entry point (multi-allowed).\n" + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ) + group.addoption( + "--debug", + action="store_true", + dest="debug", + default=False, + help="store internal tracing debug information in 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config: Config = outcome.get_result() + if config.option.debug: + path = os.path.abspath("pytestdebug.log") + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytestdebug information to %s\n" % path) + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def showversion(config: Config) -> None: + if config.option.version > 1: + sys.stderr.write( + "This is pytest version {}, imported from {}\n".format( + pytest.__version__, pytest.__file__ + ) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + else: + sys.stderr.write(f"pytest {pytest.__version__}\n") + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.version > 0: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter = config.pluginmanager.get_plugin("terminalreporter") + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(" %s" % spec) + spec_len = len(spec) + if spec_len > 
(indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. + tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> List[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> List[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__} pylib-{py.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/myenv/lib/python3.9/site-packages/_pytest/hookspec.py b/myenv/lib/python3.9/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..e499b74 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/hookspec.py @@ -0,0 +1,891 @@ +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" +from typing import Any +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import py.path +from pluggy import HookspecMarker + +from _pytest.deprecated import WARNING_CAPTURED_HOOK + +if TYPE_CHECKING: + import pdb + import warnings + from typing_extensions import Literal + + from _pytest._code.code import ExceptionRepr + from _pytest.code import ExceptionInfo + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config import _PluggyPlugin + from _pytest.config.argparsing import Parser + from _pytest.fixtures import 
FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item + from _pytest.outcomes import Exit + from _pytest.python import Function + from _pytest.python import Metafunc + from _pytest.python import Module + from _pytest.python import PyCollector + from _pytest.reports import CollectReport + from _pytest.reports import TestReport + from _pytest.runner import CallInfo + from _pytest.terminal import TerminalReporter + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None: + """Called at plugin registration time to allow adding new hooks via a call to + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered( + plugin: "_PluggyPlugin", manager: "PytestPluginManager" +) -> None: + """A new pytest plugin got registered. + + :param plugin: The plugin module or instance. + :param _pytest.config.PytestPluginManager manager: pytest plugin manager. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None: + """Register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + :param _pytest.config.argparsing.Parser parser: + To add command line options, call + :py:func:`parser.addoption(...) <_pytest.config.argparsing.Parser.addoption>`. + To add ini-file values call :py:func:`parser.addini(...) + <_pytest.config.argparsing.Parser.addini>`. + + :param _pytest.config.PytestPluginManager pluginmanager: + pytest plugin manager, which can be used to install :py:func:`hookspec`'s + or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks + to change how command line options are added. + + Options can later be accessed through the + :py:class:`config <_pytest.config.Config>` object, respectively: + + - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_configure(config: "Config") -> None: + """Allow plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. 
+ + :param _pytest.config.Config config: The pytest config object. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: "PytestPluginManager", args: List[str] +) -> Optional["Config"]: + """Return an initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook will only be called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param _pytest.config.PytestPluginManager pluginmanager: Pytest plugin manager. + :param List[str] args: List of arguments passed on the command line. + """ + + +def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None: + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :func:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config config: The pytest config object. + :param List[str] args: Arguments passed on the command line. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]: + """Called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.config.Config config: The pytest config object. + """ + + +def pytest_load_initial_conftests( + early_config: "Config", parser: "Parser", args: List[str] +) -> None: + """Called to implement the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config early_config: The pytest config object. + :param List[str] args: Arguments passed on the command line. + :param _pytest.config.argparsing.Parser parser: To add command line options. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: "Session") -> Optional[object]: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. 
``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param pytest.Session session: The pytest session object. + """ + + +def pytest_collection_modifyitems( + session: "Session", config: "Config", items: List["Item"] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + :param pytest.Session session: The pytest session object. + :param _pytest.config.Config config: The pytest config object. + :param List[pytest.Item] items: List of item objects. + """ + + +def pytest_collection_finish(session: "Session") -> None: + """Called after collection has been performed and modified. + + :param pytest.Session session: The pytest session object. + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect(path: py.path.local, config: "Config") -> Optional[bool]: + """Return True to prevent considering this path for collection. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param py.path.local path: The path to analyze. + :param _pytest.config.Config config: The pytest config object. + """ + + +def pytest_collect_file( + path: py.path.local, parent: "Collector" +) -> "Optional[Collector]": + """Create a Collector for the given path, or None if not relevant. + + The new node needs to have the specified ``parent`` as a parent. + + :param py.path.local path: The path to collect. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: "Collector") -> None: + """Collector starts collecting.""" + + +def pytest_itemcollected(item: "Item") -> None: + """We just collected a test item.""" + + +def pytest_collectreport(report: "CollectReport") -> None: + """Collector finished collecting.""" + + +def pytest_deselected(items: Sequence["Item"]) -> None: + """Called for deselected test items, e.g. by keyword. + + May be called multiple times. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]": + """Perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule(path: py.path.local, parent) -> Optional["Module"]: + """Return a Module collector or None for the given path. + + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param py.path.local path: The path of module to collect. 
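The collection hooks specified above are implemented by simply defining functions of the same name in a plugin or a project-level conftest.py. The following is an illustrative sketch, not part of the vendored module: it combines pytest_addoption and pytest_collection_modifyitems; the --runslow option and the slow marker are assumptions made up for the example.

# conftest.py -- illustrative only; "--runslow" and the "slow" marker are example names.
import pytest


def pytest_addoption(parser):
    # Register a command line flag, later readable via config.getoption().
    parser.addoption(
        "--runslow", action="store_true", default=False, help="run tests marked as slow"
    )


def pytest_collection_modifyitems(config, items):
    # Filter or re-order the collected items in place: without --runslow,
    # attach a skip marker to every test marked "slow".
    if config.getoption("--runslow"):
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)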
+ """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: "PyCollector", name: str, obj: object +) -> Union[None, "Item", "Collector", List[Union["Item", "Collector"]]]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + """Generate (multiple) parametrized calls to a test function.""" + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id( + config: "Config", val: object, argname: str +) -> Optional[str]: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.config.Config config: The pytest config object. + :param val: The parametrized value. + :param str argname: The automatic parameter name produced by pytest. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: "Session") -> Optional[object]: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param pytest.Session session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol( + item: "Item", nextitem: "Optional[Item]" +) -> Optional[object]: + """Perform the runtest protocol for a single test item. 
+ + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + """ + + +def pytest_runtest_logstart( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param str nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)``. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param str nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)``. + """ + + +def pytest_runtest_setup(item: "Item") -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + """ + + +def pytest_runtest_call(item: "Item") -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + """ + + +def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument can be used to perform exact teardowns, + i.e. calling just enough finalizers so that nextitem only needs to + call setup-functions. 
+ """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport( + item: "Item", call: "CallInfo[None]" +) -> Optional["TestReport"]: + """Called to create a :py:class:`_pytest.reports.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param CallInfo[None] call: The ``CallInfo`` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_runtest_logreport(report: "TestReport") -> None: + """Process the :py:class:`_pytest.reports.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. + + See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: "Config", report: Union["CollectReport", "TestReport"], +) -> Optional[Dict[str, Any]]: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON.""" + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: "Config", data: Dict[str, Any], +) -> Optional[Union["CollectReport", "TestReport"]]: + """Restore a report object previously serialized with pytest_report_to_serializable().""" + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> Optional[object]: + """Perform fixture setup execution. + + :returns: The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``).""" + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: "Session") -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param pytest.Session session: The pytest session object. + """ + + +def pytest_sessionfinish( + session: "Session", exitstatus: Union[int, "ExitCode"], +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param pytest.Session session: The pytest session object. + :param int exitstatus: The status which pytest will return to the system. + """ + + +def pytest_unconfigure(config: "Config") -> None: + """Called before test process is exited. + + :param _pytest.config.Config config: The pytest config object. 
+ """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: "Config", op: str, left: object, right: object +) -> Optional[List[str]]: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param _pytest.config.Config config: The pytest config object. + """ + + +def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None: + """**(Experimental)** Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param pytest.Item item: pytest item object of current test. + :param int lineno: Line number of the assert statement. + :param str orig: String with the original assertion. + :param str expl: String with the assert explanation. + + .. note:: + + This hook is **experimental**, so its parameters or even the hook itself might + be changed/removed without warning in any future pytest release. + + If you find this hook useful, please share your feedback in an issue. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +def pytest_report_header( + config: "Config", startdir: py.path.local +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param _pytest.config.Config config: The pytest config object. + :param py.path.local startdir: The starting dir. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + """ + + +def pytest_report_collectionfinish( + config: "Config", startdir: py.path.local, items: Sequence["Item"], +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param _pytest.config.Config config: The pytest config object. + :param py.path.local startdir: The starting dir. + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. 
note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( + report: Union["CollectReport", "TestReport"], config: "Config" +) -> Tuple[ + str, str, Union[str, Mapping[str, bool]], +]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param _pytest.config.Config config: The pytest config object. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_terminal_summary( + terminalreporter: "TerminalReporter", exitstatus: "ExitCode", config: "Config", +) -> None: + """Add a section to terminal summary reporting. + + :param _pytest.terminal.TerminalReporter terminalreporter: The internal terminal reporter object. + :param int exitstatus: The exit status that will be reported back to the OS. + :param _pytest.config.Config config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. + """ + + +@hookspec(historic=True, warn_on_impl=WARNING_CAPTURED_HOOK) +def pytest_warning_captured( + warning_message: "warnings.WarningMessage", + when: "Literal['config', 'collect', 'runtest']", + item: Optional["Item"], + location: Optional[Tuple[str, int, str]], +) -> None: + """(**Deprecated**) Process a warning captured by the internal pytest warnings plugin. + + .. deprecated:: 6.0 + + This hook is considered deprecated and will be removed in a future pytest version. + Use :func:`pytest_warning_recorded` instead. + + :param warnings.WarningMessage warning_message: + The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param str when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param pytest.Item|None item: + The item being executed if ``when`` is ``"runtest"``, otherwise ``None``. + + :param tuple location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). ``function`` evaluates to + when the execution context is at the module level. + """ + + +@hookspec(historic=True) +def pytest_warning_recorded( + warning_message: "warnings.WarningMessage", + when: "Literal['config', 'collect', 'runtest']", + nodeid: str, + location: Optional[Tuple[str, int, str]], +) -> None: + """Process a warning captured by the internal pytest warnings plugin. + + :param warnings.WarningMessage warning_message: + The captured warning. 
This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param str when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param str nodeid: + Full id of the item. + + :param tuple|None location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). ``function`` evaluates to + when the execution context is at the module level. + + .. versionadded:: 6.0 + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing skipping +# ------------------------------------------------------------------------- + + +def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]: + """Called when constructing the globals dictionary used for + evaluating string conditions in xfail/skipif markers. + + This is useful when the condition for a marker requires + objects that are expensive or impossible to obtain during + collection time, which is required by normal boolean + conditions. + + .. versionadded:: 6.2 + + :param _pytest.config.Config config: The pytest config object. + :returns: A dictionary of additional globals to add. + """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror( + excrepr: "ExceptionRepr", excinfo: "ExceptionInfo[BaseException]", +) -> Optional[bool]: + """Called for internal errors. + + Return True to suppress the fallback handling of printing an + INTERNALERROR message directly to sys.stderr. + """ + + +def pytest_keyboard_interrupt( + excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]", +) -> None: + """Called for keyboard interrupt.""" + + +def pytest_exception_interact( + node: Union["Item", "Collector"], + call: "CallInfo[Any]", + report: Union["CollectReport", "TestReport"], +) -> None: + """Called when an exception was raised which can potentially be + interactively handled. + + May be called during collection (see :py:func:`pytest_make_collect_report`), + in which case ``report`` is a :py:class:`_pytest.reports.CollectReport`. + + May be called during runtest of an item (see :py:func:`pytest_runtest_protocol`), + in which case ``report`` is a :py:class:`_pytest.reports.TestReport`. + + This hook is not called if the exception that was raised is an internal + exception like ``skip.Exception``. + """ + + +def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called upon pdb.set_trace(). + + Can be used by plugins to take special action just before the python + debugger enters interactive mode. + + :param _pytest.config.Config config: The pytest config object. + :param pdb.Pdb pdb: The Pdb instance. + """ + + +def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param _pytest.config.Config config: The pytest config object. + :param pdb.Pdb pdb: The Pdb instance. 
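An illustrative sketch, not part of the vendored module, implementing two of the hooks specified above; the on_ci name and the summary section are assumptions for the example.

# conftest.py -- illustrative only; "on_ci" and the summary section are example content.
import os


def pytest_markeval_namespace(config):
    # Extra names usable inside string conditions, e.g. @pytest.mark.skipif("on_ci").
    return {"on_ci": bool(os.environ.get("CI"))}


def pytest_terminal_summary(terminalreporter, exitstatus, config):
    # Append a custom section at the end of the terminal report.
    passed = len(terminalreporter.stats.get("passed", []))
    terminalreporter.write_sep("=", "example summary")
    terminalreporter.line(f"{passed} passed, exit status {exitstatus}")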
+ """ diff --git a/myenv/lib/python3.9/site-packages/_pytest/junitxml.py b/myenv/lib/python3.9/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000..c4761cd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/junitxml.py @@ -0,0 +1,700 @@ +"""Report test results in JUnit-XML format, for use with Jenkins and build +integration servers. + +Based on initial code from Ross Lawley. + +Output conforms to +https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" +import functools +import os +import platform +import re +import xml.etree.ElementTree as ET +from datetime import datetime +from typing import Callable +from typing import Dict +from typing import List +from typing import Match +from typing import Optional +from typing import Tuple +from typing import Union + +import pytest +from _pytest import nodes +from _pytest import timing +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprFileLocation +from _pytest.config import Config +from _pytest.config import filename_arg +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.reports import TestReport +from _pytest.store import StoreKey +from _pytest.terminal import TerminalReporter + + +xml_key = StoreKey["LogXML"]() + + +def bin_xml_escape(arg: object) -> str: + r"""Visually escape invalid XML characters. + + For example, transforms + 'hello\aworld\b' + into + 'hello#x07world#x08' + Note that the #xABs are *not* XML escapes - missing the ampersand «. + The idea is to escape visually for the user rather than for XML itself. + """ + + def repl(matchobj: Match[str]) -> str: + i = ord(matchobj.group()) + if i <= 0xFF: + return "#x%02X" % i + else: + return "#x%04X" % i + + # The spec range of valid chars is: + # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + # For an unknown(?) reason, we disallow #x7F (DEL) as well. + illegal_xml_re = ( + "[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = {} +families["_base"] = {"testcase": ["classname", "name"]} +families["_base_legacy"] = {"testcase": ["file", "line", "url"]} + +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. 
+families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0 + self.properties: List[Tuple[str, str]] = [] + self.nodes: List[ET.Element] = [] + self.attrs: Dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: Dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. 
+ temp_attrs = {} + for key in self.attrs.keys(): + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration) + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", msg, str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", 
message=xfailreason) + self.append(skipped) + else: + assert isinstance(report.longrepr, tuple) + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = f"{filename}:{lineno}: {skipreason}" + + skipped = ET.Element("skipped", type="pytest.skip", message=skipreason) + skipped.text = bin_xml_escape(details) + self.append(skipped) + self.write_captured_output(report) + + def finalize(self) -> None: + data = self.to_xml() + self.__dict__.clear() + # Type ignored because mypy doesn't like overriding a method. + # Also the return value doesn't match... + self.to_xml = lambda: data # type: ignore[assignment] + + +def _warn_incompatibility_with_xunit2( + request: FixtureRequest, fixture_name: str +) -> None: + """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" + from _pytest.warning_types import PytestWarning + + xml = request.config._store.get(xml_key, None) + if xml is not None and xml.family not in ("xunit1", "legacy"): + request.node.warn( + PytestWarning( + "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( + fixture_name=fixture_name, family=xml.family + ) + ) + ) + + +@pytest.fixture +def record_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra properties to the calling test. + + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + + The fixture is callable with ``name, value``. The value is automatically + XML-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + _warn_incompatibility_with_xunit2(request, "record_property") + + def append_property(name: str, value: object) -> None: + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra xml attributes to the tag for the calling test. + + The fixture is callable with ``name, value``. The value is + automatically XML-encoded. + """ + from _pytest.warning_types import PytestExperimentalApiWarning + + request.node.warn( + PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") + ) + + _warn_incompatibility_with_xunit2(request, "record_xml_attribute") + + # Declare noop + def add_attr_noop(name: str, value: object) -> None: + pass + + attr_func = add_attr_noop + + xml = request.config._store.get(xml_key, None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def _check_record_param_type(param: str, v: str) -> None: + """Used by record_testsuite_property to check that the given parameter name is of the proper + type.""" + __tracebackhide__ = True + if not isinstance(v, str): + msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable] + raise TypeError(msg.format(param=param, g=type(v).__name__)) + + +@pytest.fixture(scope="session") +def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Record a new ``<property>`` tag as child of the root ``<testsuite>``. + + This is suitable to writing global information regarding the entire test + suite, and is compatible with ``xunit2`` JUnit family. + + This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: + + ..
code-block:: python + + def test_foo(record_testsuite_property): + record_testsuite_property("ARCH", "PPC") + record_testsuite_property("STORAGE_TYPE", "CEPH") + + ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped. + + .. warning:: + + Currently this fixture **does not work** with the + `pytest-xdist `__ plugin. See issue + `#7767 `__ for details. + """ + + __tracebackhide__ = True + + def record_func(name: str, value: object) -> None: + """No-op function in case --junitxml was not passed in the command-line.""" + __tracebackhide__ = True + _check_record_param_type("name", name) + + xml = request.config._store.get(xml_key, None) + if xml is not None: + record_func = xml.add_global_property # noqa + return record_func + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|log|system-out|system-err|out-err|all", + default="no", + ) + parser.addini( + "junit_log_passing_tests", + "Capture log information for passing tests to JUnit report: ", + type="bool", + default=True, + ) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit2", + ) + + +def pytest_configure(config: Config) -> None: + xmlpath = config.option.xmlpath + # Prevent opening xmllog on worker nodes (xdist). + if xmlpath and not hasattr(config, "workerinput"): + junit_family = config.getini("junit_family") + config._store[xml_key] = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + junit_family, + config.getini("junit_log_passing_tests"), + ) + config.pluginmanager.register(config._store[xml_key]) + + +def pytest_unconfigure(config: Config) -> None: + xml = config._store.get(xml_key, None) + if xml: + del config._store[xml_key] + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address: str) -> List[str]: + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + try: + names.remove("()") + except ValueError: + pass + # Convert file path to dotted path. + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = re.sub(r"\.py$", "", names[0]) + # Put any params back. 
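The junit-xml options registered above are normally given on the command line or in an ini file. A hedged sketch of passing them programmatically, not part of the vendored module; the report path, prefix and suite name are example values.

# Illustrative only; paths and names below are example values.
import pytest

exit_code = pytest.main(
    [
        "--junitxml=reports/junit.xml",        # --junitxml / --junit-xml: where to write the report
        "--junit-prefix=melli",                # prepended to classnames in the report
        "-o", "junit_suite_name=melli-tests",  # override the ini option in-line
        "-o", "junit_logging=system-out",      # capture stdout into the XML report
    ]
)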
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: Optional[str], + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: Dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: Dict[ + Tuple[Union[str, TestReport], object], _NodeReporter + ] = ({}) + self.node_reporters_ordered: List[_NodeReporter] = [] + self.global_properties: List[Tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: List[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter: + nodeid: Union[str, TestReport] = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema. 
+ self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + if not self.log_passing_tests: + reporter.write_captured_output(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report: TestReport) -> None: + """Accumulate total duration for nodeid from given report and update + the Junit.testcase with the new total if already created.""" + if self.report_duration == "total" or report.when == self.report_duration: + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report: TestReport) -> None: + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> None: + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple("error", "internal error", str(excrepr)) + + def pytest_sessionstart(self) -> None: + self.suite_start_time = timing.time() + + def pytest_sessionfinish(self) -> None: + dirname = os.path.dirname(os.path.abspath(self.logfile)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(self.logfile, "w", encoding="utf-8") + suite_stop_time = timing.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('<?xml version="1.0" encoding="utf-8"?>') + + suite_node = ET.Element( + "testsuite", + name=self.suite_name, + errors=str(self.stats["error"]), + failures=str(self.stats["failure"]), + skipped=str(self.stats["skipped"]), + tests=str(numtests), + time="%.3f" % suite_time_delta, + timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(), + hostname=platform.node(), + ) + global_properties = self._get_global_properties_node() + if global_properties is not None: + suite_node.append(global_properties) + for node_reporter in self.node_reporters_ordered: + suite_node.append(node_reporter.to_xml()) + testsuites = ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + logfile.close() + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name,
bin_xml_escape(value))) + + def _get_global_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/myenv/lib/python3.9/site-packages/_pytest/logging.py b/myenv/lib/python3.9/site-packages/_pytest/logging.py new file mode 100644 index 0000000..2e48473 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/logging.py @@ -0,0 +1,821 @@ +"""Access and control log capturing.""" +import logging +import os +import re +import sys +from contextlib import contextmanager +from io import StringIO +from pathlib import Path +from typing import AbstractSet +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import TypeVar +from typing import Union + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.compat import final +from _pytest.compat import nullcontext +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.store import StoreKey +from _pytest.terminal import TerminalReporter + + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StoreKey["LogCaptureHandler"]() +caplog_records_key = StoreKey[Dict[str, List[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class ColoredLevelFormatter(logging.Formatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: Dict[int, str] = {} + + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + formatted_levelname = levelname_fmt % { + "levelname": logging.getLevelName(level) + } + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> 
str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _update_message( + record_dict: Dict[str, object], message: str + ) -> Dict[str, object]: + tmp = record_dict.copy() + tmp["message"] = message + return tmp + + @staticmethod + def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. + + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined] + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % self._update_message(record.__dict__, lines[0]) + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. 
+                    indentation = auto_indent
+                lines[0] = formatted
+                return ("\n" + " " * indentation).join(lines)
+        return self._fmt % record.__dict__
+
+
+def get_option_ini(config: Config, *names: str):
+    for name in names:
+        ret = config.getoption(name)  # 'default' arg won't work as expected
+        if ret is None:
+            ret = config.getini(name)
+        if ret:
+            return ret
+
+
+def pytest_addoption(parser: Parser) -> None:
+    """Add options to control log capturing."""
+    group = parser.getgroup("logging")
+
+    def add_option_ini(option, dest, default=None, type=None, **kwargs):
+        parser.addini(
+            dest, default=default, type=type, help="default value for " + option
+        )
+        group.addoption(option, dest=dest, **kwargs)
+
+    add_option_ini(
+        "--log-level",
+        dest="log_level",
+        default=None,
+        metavar="LEVEL",
+        help=(
+            "level of messages to catch/display.\n"
+            "Not set by default, so it depends on the root/parent log handler's"
+            ' effective level, where it is "WARNING" by default.'
+        ),
+    )
+    add_option_ini(
+        "--log-format",
+        dest="log_format",
+        default=DEFAULT_LOG_FORMAT,
+        help="log format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-date-format",
+        dest="log_date_format",
+        default=DEFAULT_LOG_DATE_FORMAT,
+        help="log date format as used by the logging module.",
+    )
+    parser.addini(
+        "log_cli",
+        default=False,
+        type="bool",
+        help='enable log display during test run (also known as "live logging").',
+    )
+    add_option_ini(
+        "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
+    )
+    add_option_ini(
+        "--log-cli-format",
+        dest="log_cli_format",
+        default=None,
+        help="log format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-cli-date-format",
+        dest="log_cli_date_format",
+        default=None,
+        help="log date format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-file",
+        dest="log_file",
+        default=None,
+        help="path to a file where logging will be written to.",
+    )
+    add_option_ini(
+        "--log-file-level",
+        dest="log_file_level",
+        default=None,
+        help="log file logging level.",
+    )
+    add_option_ini(
+        "--log-file-format",
+        dest="log_file_format",
+        default=DEFAULT_LOG_FORMAT,
+        help="log format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-file-date-format",
+        dest="log_file_date_format",
+        default=DEFAULT_LOG_DATE_FORMAT,
+        help="log date format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-auto-indent",
+        dest="log_auto_indent",
+        default=None,
+        help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
+    )
+
+
+_HandlerType = TypeVar("_HandlerType", bound=logging.Handler)
+
+
+# Not using @contextmanager for performance reasons.
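# Editorial sketch, not part of this patch: intended use of the catching_logs
# context manager defined just below, which temporarily attaches a handler to
# the root logger:
#
#     import logging
#     with catching_logs(LogCaptureHandler(), level=logging.INFO) as handler:
#         logging.getLogger("app").info("hello")
#     assert handler.records[0].getMessage() == "hello"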
+class catching_logs: + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self): + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__(self, type, value, traceback): + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging.StreamHandler): + """A logging handler that stores log records and the log text.""" + + stream: StringIO + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: List[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: Optional[int] = None + # Dict of log name -> log level. + self._initial_logger_levels: Dict[Optional[str], int] = {} + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture. + + :rtype: LogCaptureHandler + """ + return self._item._store[caplog_handler_key] + + def get_records(self, when: str) -> List[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param str when: + Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + :rtype: List[logging.LogRecord] + + .. versionadded:: 3.4 + """ + return self._item._store[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> List[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> List[Tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. 
+ + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> List[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None: + """Set the level of a logger for the duration of a test. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + :param int level: The level. + :param str logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. + self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + + @contextmanager + def at_level( + self, level: int, logger: Optional[str] = None + ) -> Generator[None, None, None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + :param int level: The level. + :param str logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]: + """Access and control log capturing. 
+ + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting(config, "log_file_level") + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_handler = _FileHandler(log_file, mode="w", encoding="UTF-8") + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = logging.Formatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. 
+ self.log_cli_handler: Union[ + _LiveLoggingStreamHandler, _LiveLoggingNullHandler + ] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = logging.Formatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. + """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + stream = fpath.open(mode="w", encoding="UTF-8") + if sys.version_info >= (3, 7): + old_stream = self.log_file_handler.setStream(stream) + else: + old_stream = self.log_file_handler.stream + self.log_file_handler.acquire() + try: + self.log_file_handler.flush() + self.log_file_handler.stream = stream + finally: + self.log_file_handler.release() + if old_stream: + old_stream.close() + + def _log_cli_enabled(self): + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]: + if session.config.option.collectonly: + yield + return + + if self._log_cli_enabled() and self._config.getoption("verbose") < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # Run all the tests. 
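    # Editorial sketch, not part of this patch: the caplog fixture defined
    # above is the public face of this plugin; a typical test reads:
    #
    #     import logging
    #
    #     def test_emits_warning(caplog):
    #         caplog.set_level(logging.INFO, logger="app")
    #         logging.getLogger("app").warning("disk almost full")
    #         assert caplog.record_tuples == [
    #             ("app", logging.WARNING, "disk almost full")
    #         ]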
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with catching_logs( + self.caplog_handler, level=self.log_level, + ) as caplog_handler, catching_logs( + self.report_handler, level=self.log_level, + ) as report_handler: + caplog_handler.reset() + report_handler.reset() + item._store[caplog_records_key][when] = caplog_handler.records + item._store[caplog_handler_key] = caplog_handler + + yield + + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("setup") + + empty: Dict[str, List[logging.LogRecord]] = {} + item._store[caplog_records_key] = empty + yield from self._runtest_for(item, "setup") + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("call") + + yield from self._runtest_for(item, "call") + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("teardown") + + yield from self._runtest_for(item, "teardown") + del item._store[caplog_records_key] + del item._store[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging.StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: Optional[CaptureManager], + ) -> None: + logging.StreamHandler.__init__(self, stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: Optional[str]) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/myenv/lib/python3.9/site-packages/_pytest/main.py b/myenv/lib/python3.9/site-packages/_pytest/main.py new file mode 100644 index 0000000..41a33d4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/main.py @@ -0,0 +1,876 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" +import argparse +import fnmatch +import functools +import importlib +import os +import sys +from pathlib import Path +from typing import Callable +from typing import Dict +from typing import FrozenSet +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import attr +import py + +import _pytest._code +from _pytest import nodes +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureManager +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import visit +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState + + +if TYPE_CHECKING: + from typing_extensions import Literal + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + type="args", + 
default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) + group = parser.getgroup("general", "running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ) + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict-config", + action="store_true", + help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.", + ) + group._addoption( + "--strict-markers", + action="store_true", + help="markers not registered in the `markers` section of the configuration file raise errors.", + ) + group._addoption( + "--strict", action="store_true", help="(deprecated) alias to --strict-markers.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="only collect tests, don't execute them.", + ) + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="ignore path pattern during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item (via node id prefix) during collection (multi-allowed).", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="prepend/append to sys.path when importing test modules and conftest files, " + "default is to prepend.", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "base temporary directory for this test run." 
+ "(warning: this directory is removed if it exists)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + for parent in base.parents: + if parent == query: + return True + return False + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]] +) -> Union[int, ExitCode]: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc)) + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. 
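        # Editorial note, not part of this patch: the exit status computed by
        # wrap_session is what a caller of pytest.main() receives, e.g.:
        #
        #     import pytest
        #     status = pytest.main(["tests", "-q"])
        #     # ExitCode.OK, ExitCode.TESTS_FAILED, ExitCode.USAGE_ERROR, ...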
+ excinfo = None # type: ignore + session.startdir.chdir() + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc)) + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]: + return wrap_session(config, _main) + + +def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: "Session") -> None: + session.perform_collect() + + +def pytest_runtestloop(session: "Session") -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + "%d error%s during collection" + % (session.testsfailed, "s" if session.testsfailed != 1 else "") + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: py.path.local) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script.""" + bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") + if not bindir.isdir(): + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any([fname.basename in activates for fname in bindir.listdir()]) + + +def pytest_ignore_collect(path: py.path.local, config: Config) -> Optional[bool]: + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + + if py.path.local(path) in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=path.dirpath() + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend([py.path.local(x) for x in excludeglobopt]) + + if any(fnmatch.fnmatch(str(path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(path): + return True + return None + + +def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class 
FSHookProxy: + def __init__(self, pm: PytestPluginManager, remove_mods) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@attr.s +class _bestrelpath_cache(Dict[Path, str]): + path = attr.ib(type=Path) + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Session(nodes.FSCollector): + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: Union[int, ExitCode] + + def __init__(self, config: Config) -> None: + super().__init__( + config.rootdir, parent=None, config=config, session=self, nodeid="" + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop: Union[bool, str] = False + self.shouldfail: Union[bool, str] = False + self.trace = config.trace.root.get("collection") + self.startdir = config.invocation_dir + self._initialpaths: FrozenSet[py.path.local] = frozenset() + + self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> "Session": + session: Session = cls._create(config) + return session + + def __repr__(self) -> str: + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport( + self, report: Union[TestReport, CollectReport] + ) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path: py.path.local) -> bool: + return path in self._initialpaths + + def gethookproxy(self, fspath: py.path.local): + # Check if we have the common case of running + # hooks with all conftest.py files. + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules( + fspath, self.config.getoption("importmode") + ) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # One or more conftests are not in use at this fspath. + proxy = FSHookProxy(pm, remove_mods) + else: + # All plugins are active for this fspath. 
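            # Editorial note, not part of this patch: in the branch above,
            # FSHookProxy wraps pm.subset_hook_caller(), so hook calls such as
            # ihook.pytest_collect_file(...) consult only the conftest.py
            # plugins that actually apply at this fspath.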
+ proxy = self.config.hook + return proxy + + def _recurse(self, direntry: "os.DirEntry[str]") -> bool: + if direntry.name == "__pycache__": + return False + path = py.path.local(direntry.path) + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return False + norecursepatterns = self.config.getini("norecursedirs") + if any(path.check(fnmatch=pat) for pat in norecursepatterns): + return False + return True + + def _collectfile( + self, path: py.path.local, handle_dupes: bool = True + ) -> Sequence[nodes.Collector]: + assert ( + path.isfile() + ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( + path, path.isdir(), path.exists(), path.islink() + ) + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if path in duplicate_paths: + return () + else: + duplicate_paths.add(path) + + return ihook.pytest_collect_file(path=path, parent=self) # type: ignore[no-any-return] + + @overload + def perform_collect( + self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ... + ) -> Sequence[nodes.Item]: + ... + + @overload + def perform_collect( + self, args: Optional[Sequence[str]] = ..., genitems: bool = ... + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + ... + + def perform_collect( + self, args: Optional[Sequence[str]] = None, genitems: bool = True + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + """Perform the collection phase for this session. + + This is called by the default + :func:`pytest_collection <_pytest.hookspec.pytest_collection>` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. 
+ """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: List[Tuple[py.path.local, List[str]]] = [] + self.items: List[nodes.Item] = [] + + hook = self.config.hook + + items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items + try: + initialpaths: List[py.path.local] = [] + for arg in args: + fspath, parts = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append((fspath, parts)) + initialpaths.append(fspath) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, cols in self._notfound: + line = f"(no name {arg!r} in any of {cols!r})" + errors.append(f"not found: {arg}\n{line}") + raise UsageError(*errors) + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + + self.testscollected = len(items) + return items + + def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]: + from _pytest.python import Package + + # Keep track of any collected nodes in here, so we don't duplicate fixtures. + node_cache1: Dict[py.path.local, Sequence[nodes.Collector]] = {} + node_cache2: Dict[ + Tuple[Type[nodes.Collector], py.path.local], nodes.Collector + ] = ({}) + + # Keep track of any collected collectors in matchnodes paths, so they + # are not collected more than once. + matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = ({}) + + # Dirnames of pkgs with dunder-init files. + pkg_roots: Dict[str, Package] = {} + + for argpath, names in self._initial_parts: + self.trace("processing argument", (argpath, names)) + self.trace.root.indent += 1 + + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. + # No point in finding packages when collecting doctests. + if not self.config.getoption("doctestmodules", False): + pm = self.config.pluginmanager + for parent in reversed(argpath.parts()): + if pm._confcutdir and pm._confcutdir.relto(parent): + break + + if parent.isdir(): + pkginit = parent.join("__init__.py") + if pkginit.isfile() and pkginit not in node_cache1: + col = self._collectfile(pkginit, handle_dupes=False) + if col: + if isinstance(col[0], Package): + pkg_roots[str(parent)] = col[0] + node_cache1[col[0].fspath] = [col[0]] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. + if argpath.check(dir=1): + assert not names, "invalid arg {!r}".format((argpath, names)) + + seen_dirs: Set[py.path.local] = set() + for direntry in visit(str(argpath), self._recurse): + if not direntry.is_file(): + continue + + path = py.path.local(direntry.path) + dirpath = path.dirpath() + + if dirpath not in seen_dirs: + # Collect packages first. 
+ seen_dirs.add(dirpath) + pkginit = dirpath.join("__init__.py") + if pkginit.exists(): + for x in self._collectfile(pkginit): + yield x + if isinstance(x, Package): + pkg_roots[str(dirpath)] = x + if str(dirpath) in pkg_roots: + # Do not collect packages here. + continue + + for x in self._collectfile(path): + key = (type(x), x.fspath) + if key in node_cache2: + yield node_cache2[key] + else: + node_cache2[key] = x + yield x + else: + assert argpath.check(file=1) + + if argpath in node_cache1: + col = node_cache1[argpath] + else: + collect_root = pkg_roots.get(argpath.dirname, self) + col = collect_root._collectfile(argpath, handle_dupes=False) + if col: + node_cache1[argpath] = col + + matching = [] + work: List[ + Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]] + ] = [(col, names)] + while work: + self.trace("matchnodes", col, names) + self.trace.root.indent += 1 + + matchnodes, matchnames = work.pop() + for node in matchnodes: + if not matchnames: + matching.append(node) + continue + if not isinstance(node, nodes.Collector): + continue + key = (type(node), node.nodeid) + if key in matchnodes_cache: + rep = matchnodes_cache[key] + else: + rep = collect_one_node(node) + matchnodes_cache[key] = rep + if rep.passed: + submatchnodes = [] + for r in rep.result: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. + if ( + r.name == matchnames[0] + or r.name.split("[")[0] == matchnames[0] + ): + submatchnodes.append(r) + if submatchnodes: + work.append((submatchnodes, matchnames[1:])) + # XXX Accept IDs that don't have "()" for class instances. + elif len(rep.result) == 1 and rep.result[0].name == "()": + work.append((rep.result, matchnames)) + else: + # Report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134). + node.ihook.pytest_collectreport(report=rep) + + self.trace("matchnodes finished -> ", len(matching), "nodes") + self.trace.root.indent -= 1 + + if not matching: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, col)) + continue + + # If __init__.py was the only file requested, then the matched + # node will be the corresponding Package (by default), and the + # first yielded item will be the __init__ Module itself, so + # just use that. If this special case isn't taken, then all the + # files in the package will be yielded. + if argpath.basename == "__init__.py" and isinstance( + matching[0], Package + ): + try: + yield next(iter(matching[0].collect())) + except StopIteration: + # The package collects nothing with only an __init__.py + # file in it, which gets ignored by the default + # "python_files" option. 
+                    pass
+                continue
+
+            yield from matching
+
+        self.trace.root.indent -= 1
+
+    def genitems(
+        self, node: Union[nodes.Item, nodes.Collector]
+    ) -> Iterator[nodes.Item]:
+        self.trace("genitems", node)
+        if isinstance(node, nodes.Item):
+            node.ihook.pytest_itemcollected(item=node)
+            yield node
+        else:
+            assert isinstance(node, nodes.Collector)
+            rep = collect_one_node(node)
+            if rep.passed:
+                for subnode in rep.result:
+                    yield from self.genitems(subnode)
+            node.ihook.pytest_collectreport(report=rep)
+
+
+def search_pypath(module_name: str) -> str:
+    """Search sys.path for the given dotted module name, and return its file system path."""
+    try:
+        spec = importlib.util.find_spec(module_name)
+    # AttributeError: looks like package module, but actually filename
+    # ImportError: module does not exist
+    # ValueError: not a module name
+    except (AttributeError, ImportError, ValueError):
+        return module_name
+    if spec is None or spec.origin is None or spec.origin == "namespace":
+        return module_name
+    elif spec.submodule_search_locations:
+        return os.path.dirname(spec.origin)
+    else:
+        return spec.origin
+
+
+def resolve_collection_argument(
+    invocation_path: Path, arg: str, *, as_pypath: bool = False
+) -> Tuple[py.path.local, List[str]]:
+    """Parse path arguments optionally containing selection parts and return (fspath, names).
+
+    Command-line arguments can point to files and/or directories, and optionally contain
+    parts for specific tests selection, for example:
+
+        "pkg/tests/test_foo.py::TestClass::test_foo"
+
+    This function ensures the path exists, and returns a tuple:
+
+        (py.path.path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"])
+
+    When as_pypath is True, expects that the command-line argument actually contains
+    module paths instead of file-system paths:
+
+        "pkg.tests.test_foo::TestClass::test_foo"
+
+    In which case we search sys.path for a matching module, and then return the *path* to the
+    found module.
+
+    If the path doesn't exist, raise UsageError.
+    If the path is a directory and selection parts are present, raise UsageError.
+ """ + strpath, *parts = str(arg).split("::") + if as_pypath: + strpath = search_pypath(strpath) + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not fspath.exists(): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return py.path.local(str(fspath)), parts diff --git a/myenv/lib/python3.9/site-packages/_pytest/mark/__init__.py b/myenv/lib/python3.9/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..329a11c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,282 @@ +"""Generic mechanism for marking and selecting python functions.""" +import warnings +from typing import AbstractSet +from typing import Collection +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +import attr + +from .expression import Expression +from .expression import ParseError +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import MINUS_K_COLON +from _pytest.deprecated import MINUS_K_DASH +from _pytest.store import StoreKey + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StoreKey[Optional[Config]]() + + +def param( + *values: object, + marks: Union[MarkDecorator, Collection[Union[MarkDecorator, Mark]]] = (), + id: Optional[str] = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [("3+5", 8), pytest.param("6*9", 42, marks=pytest.mark.xfail),], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + :keyword marks: A single mark or a list of marks to be applied to this parameter set. + :keyword str id: The id to attribute to this parameter set. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. 
" + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="only run tests matching given mark expression.\n" + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@attr.s(slots=True) +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + _names = attr.ib(type=AbstractSet[str]) + + @classmethod + def from_item(cls, item: "Item") -> "KeywordMatcher": + mapped_names = set() + + # Add the names of the current item and any parent items. + import pytest + + for node in item.listchain(): + if not isinstance(node, (pytest.Instance, pytest.Session)): + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. + mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str) -> bool: + subname = subname.lower() + names = (name.lower() for name in self._names) + + for name in names: + if subname in name: + return True + return False + + +def deselect_by_keyword(items: "List[Item]", config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + if keywordexpr.startswith("-"): + # To be removed in pytest 7.0.0. + warnings.warn(MINUS_K_DASH, stacklevel=2) + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + # To be removed in pytest 7.0.0. 
+        warnings.warn(MINUS_K_COLON, stacklevel=2)
+        selectuntil = True
+        keywordexpr = keywordexpr[:-1]
+
+    try:
+        expression = Expression.compile(keywordexpr)
+    except ParseError as e:
+        raise UsageError(
+            f"Wrong expression passed to '-k': {keywordexpr}: {e}"
+        ) from None
+
+    remaining = []
+    deselected = []
+    for colitem in items:
+        if keywordexpr and not expression.evaluate(KeywordMatcher.from_item(colitem)):
+            deselected.append(colitem)
+        else:
+            if selectuntil:
+                keywordexpr = None
+            remaining.append(colitem)
+
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+@attr.s(slots=True)
+class MarkMatcher:
+    """A matcher for markers which are present.
+
+    Tries to match on any marker names, attached to the given colitem.
+    """
+
+    own_mark_names = attr.ib()
+
+    @classmethod
+    def from_item(cls, item) -> "MarkMatcher":
+        mark_names = {mark.name for mark in item.iter_markers()}
+        return cls(mark_names)
+
+    def __call__(self, name: str) -> bool:
+        return name in self.own_mark_names
+
+
+def deselect_by_mark(items: "List[Item]", config: Config) -> None:
+    matchexpr = config.option.markexpr
+    if not matchexpr:
+        return
+
+    try:
+        expression = Expression.compile(matchexpr)
+    except ParseError as e:
+        raise UsageError(f"Wrong expression passed to '-m': {matchexpr}: {e}") from None
+
+    remaining = []
+    deselected = []
+    for item in items:
+        if expression.evaluate(MarkMatcher.from_item(item)):
+            remaining.append(item)
+        else:
+            deselected.append(item)
+
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None:
+    deselect_by_keyword(items, config)
+    deselect_by_mark(items, config)
+
+
+def pytest_configure(config: Config) -> None:
+    config._store[old_mark_config_key] = MARK_GEN._config
+    MARK_GEN._config = config
+
+    empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
+
+    if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
+        raise UsageError(
+            "{!s} must be one of skip, xfail or fail_at_collect"
+            " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
+        )
+
+
+def pytest_unconfigure(config: Config) -> None:
+    MARK_GEN._config = config._store.get(old_mark_config_key, None)
diff --git a/myenv/lib/python3.9/site-packages/_pytest/mark/expression.py b/myenv/lib/python3.9/site-packages/_pytest/mark/expression.py
new file mode 100644
index 0000000..dc3991b
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/_pytest/mark/expression.py
@@ -0,0 +1,221 @@
+r"""Evaluate match expressions, as used by `-k` and `-m`.
+
+The grammar is:
+
+expression: expr? EOF
+expr: and_expr ('or' and_expr)*
+and_expr: not_expr ('and' not_expr)*
+not_expr: 'not' not_expr | '(' expr ')' | ident
+ident: (\w|:|\+|-|\.|\[|\])+
+
+The semantics are:
+
+- Empty expression evaluates to False.
+- ident evaluates to True or False according to a provided matcher function.
+- or/and/not evaluate according to the usual boolean semantics.
+""" +import ast +import enum +import re +import types +from typing import Callable +from typing import Iterator +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import TYPE_CHECKING + +import attr + +if TYPE_CHECKING: + from typing import NoReturn + + +__all__ = [ + "Expression", + "ParseError", +] + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + + +@attr.s(frozen=True, slots=True) +class Token: + type = attr.ib(type=TokenType) + value = attr.ib(type=str) + pos = attr.ib(type=int) + + +class ParseError(Exception): + """The expression contains invalid syntax. + + :param column: The column in the line where the error occurred (1-based). + :param message: A description of the error. + """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("tokens", "current") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\])+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, 'unexpected character "{}"'.format(input[pos]), + ) + yield Token(TokenType.EOF, "", pos) + + def accept(self, type: TokenType, *, reject: bool = False) -> Optional[Token]: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> "NoReturn": + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. 
+IDENT_PREFIX = "$"
+
+
+def expression(s: Scanner) -> ast.Expression:
+    if s.accept(TokenType.EOF):
+        ret: ast.expr = ast.NameConstant(False)
+    else:
+        ret = expr(s)
+        s.accept(TokenType.EOF, reject=True)
+    return ast.fix_missing_locations(ast.Expression(ret))
+
+
+def expr(s: Scanner) -> ast.expr:
+    ret = and_expr(s)
+    while s.accept(TokenType.OR):
+        rhs = and_expr(s)
+        ret = ast.BoolOp(ast.Or(), [ret, rhs])
+    return ret
+
+
+def and_expr(s: Scanner) -> ast.expr:
+    ret = not_expr(s)
+    while s.accept(TokenType.AND):
+        rhs = not_expr(s)
+        ret = ast.BoolOp(ast.And(), [ret, rhs])
+    return ret
+
+
+def not_expr(s: Scanner) -> ast.expr:
+    if s.accept(TokenType.NOT):
+        return ast.UnaryOp(ast.Not(), not_expr(s))
+    if s.accept(TokenType.LPAREN):
+        ret = expr(s)
+        s.accept(TokenType.RPAREN, reject=True)
+        return ret
+    ident = s.accept(TokenType.IDENT)
+    if ident:
+        return ast.Name(IDENT_PREFIX + ident.value, ast.Load())
+    s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT))
+
+
+class MatcherAdapter(Mapping[str, bool]):
+    """Adapts a matcher function to a locals mapping as required by eval()."""
+
+    def __init__(self, matcher: Callable[[str], bool]) -> None:
+        self.matcher = matcher
+
+    def __getitem__(self, key: str) -> bool:
+        return self.matcher(key[len(IDENT_PREFIX) :])
+
+    def __iter__(self) -> Iterator[str]:
+        raise NotImplementedError()
+
+    def __len__(self) -> int:
+        raise NotImplementedError()
+
+
+class Expression:
+    """A compiled match expression as used by -k and -m.
+
+    The expression can be evaluated against different matchers.
+    """
+
+    __slots__ = ("code",)
+
+    def __init__(self, code: types.CodeType) -> None:
+        self.code = code
+
+    @classmethod
+    def compile(cls, input: str) -> "Expression":
+        """Compile a match expression.
+
+        :param input: The input expression - one line.
+        """
+        astexpr = expression(Scanner(input))
+        code: types.CodeType = compile(
+            astexpr, filename="<pytest match expression>", mode="eval",
+        )
+        return cls(code)
+
+    def evaluate(self, matcher: Callable[[str], bool]) -> bool:
+        """Evaluate the match expression.
+
+        :param matcher:
+            Given an identifier, should return whether it matches or not.
+            Should be prepared to handle arbitrary strings as input.
+
+        :returns: Whether the expression matches or not.
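(Editor's aside, not part of the committed diff: the "$" prefix combined with an empty __builtins__ mapping is what makes the eval() in the next hunk safe; a standalone analogue of the trick.)

import ast

# "$True" is not a legal Python identifier, so it cannot shadow or reach
# builtins; as an ast.Name it still compiles and resolves through locals.
tree = ast.fix_missing_locations(ast.Expression(ast.Name("$True", ast.Load())))
code = compile(tree, "<sketch>", "eval")
print(eval(code, {"__builtins__": {}}, {"$True": False}))  # False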
+ """ + ret: bool = eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher)) + return ret diff --git a/myenv/lib/python3.9/site-packages/_pytest/mark/structures.py b/myenv/lib/python3.9/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000..6c126cf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/mark/structures.py @@ -0,0 +1,559 @@ +import collections.abc +import inspect +import warnings +from typing import Any +from typing import Callable +from typing import Collection +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from .._code import getfslineno +from ..compat import ascii_escaped +from ..compat import final +from ..compat import NOTSET +from ..compat import NotSetType +from _pytest.config import Config +from _pytest.outcomes import fail +from _pytest.warning_types import PytestUnknownMarkWarning + +if TYPE_CHECKING: + from ..nodes import Node + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def istestfunc(func) -> bool: + return ( + hasattr(func, "__call__") + and getattr(func, "__name__", "") != "" + ) + + +def get_empty_parameterset_mark( + config: Config, argnames: Sequence[str], func +) -> "MarkDecorator": + from ..nodes import Collector + + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip(reason=reason) + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet( + NamedTuple( + "ParameterSet", + [ + ("values", Sequence[Union[object, NotSetType]]), + ("marks", Collection[Union["MarkDecorator", "Mark"]]), + ("id", Optional[str]), + ], + ) +): + @classmethod + def param( + cls, + *values: object, + marks: Union["MarkDecorator", Collection[Union["MarkDecorator", "Mark"]]] = (), + id: Optional[str] = None, + ) -> "ParameterSet": + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + + if id is not None: + if not isinstance(id, str): + raise TypeError( + "Expected id to be a string, got {}: {!r}".format(type(id), id) + ) + id = ascii_escaped(id) + return cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: Union["ParameterSet", Sequence[object], object], + force_tuple: bool = False, + ) -> "ParameterSet": + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. 
+ """ + + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + *args, + **kwargs, + ) -> Tuple[Union[List[str], Tuple[str, ...]], bool]: + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + force_tuple: bool, + ) -> List["ParameterSet"]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + func, + config: Config, + nodeid: str, + ) -> Tuple[Union[List[str], Tuple[str, ...]], List["ParameterSet"]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@final +@attr.s(frozen=True) +class Mark: + #: Name of the mark. + name = attr.ib(type=str) + #: Positional arguments of the mark decorator. + args = attr.ib(type=Tuple[Any, ...]) + #: Keyword arguments of the mark decorator. + kwargs = attr.ib(type=Mapping[str, Any]) + + #: Source Mark for ids with parametrize Marks. + _param_ids_from = attr.ib(type=Optional["Mark"], default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. + _param_ids_generated = attr.ib( + type=Optional[Sequence[str]], default=None, repr=False + ) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: "Mark") -> "Mark": + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. 
+        param_ids_from: Optional[Mark] = None
+        if self.name == "parametrize":
+            if other._has_param_ids():
+                param_ids_from = other
+            elif self._has_param_ids():
+                param_ids_from = self
+
+        return Mark(
+            self.name,
+            self.args + other.args,
+            dict(self.kwargs, **other.kwargs),
+            param_ids_from=param_ids_from,
+        )
+
+
+# A generic parameter designating an object to which a Mark may
+# be applied -- a test function (callable) or class.
+# Note: a lambda is not allowed, but this can't be represented.
+_Markable = TypeVar("_Markable", bound=Union[Callable[..., object], type])
+
+
+@attr.s
+class MarkDecorator:
+    """A decorator for applying a mark on test functions and classes.
+
+    MarkDecorators are created with ``pytest.mark``::
+
+        mark1 = pytest.mark.NAME  # Simple MarkDecorator
+        mark2 = pytest.mark.NAME(name1=value)  # Parametrized MarkDecorator
+
+    and can then be applied as decorators to test functions::
+
+        @mark2
+        def test_function():
+            pass
+
+    When a MarkDecorator is called it does the following:
+
+    1. If called with a single class as its only positional argument and no
+       additional keyword arguments, it attaches the mark to the class so it
+       gets applied automatically to all test cases found in that class.
+
+    2. If called with a single function as its only positional argument and
+       no additional keyword arguments, it attaches the mark to the function,
+       containing all the arguments already stored internally in the
+       MarkDecorator.
+
+    3. When called in any other case, it returns a new MarkDecorator instance
+       with the original MarkDecorator's content updated with the arguments
+       passed to this call.
+
+    Note: The rules above prevent MarkDecorators from storing only a single
+    function or class reference as their positional argument with no
+    additional keyword or positional arguments. You can work around this by
+    using `with_args()`.
+    """
+
+    mark = attr.ib(type=Mark, validator=attr.validators.instance_of(Mark))
+
+    @property
+    def name(self) -> str:
+        """Alias for mark.name."""
+        return self.mark.name
+
+    @property
+    def args(self) -> Tuple[Any, ...]:
+        """Alias for mark.args."""
+        return self.mark.args
+
+    @property
+    def kwargs(self) -> Mapping[str, Any]:
+        """Alias for mark.kwargs."""
+        return self.mark.kwargs
+
+    @property
+    def markname(self) -> str:
+        return self.name  # for backward-compat (2.4.1 had this attr)
+
+    def __repr__(self) -> str:
+        return f"<MarkDecorator {self.mark!r}>"
+
+    def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator":
+        """Return a MarkDecorator with extra arguments added.
+
+        Unlike calling the MarkDecorator, with_args() can be used even
+        if the sole argument is a callable/class.
+
+        :rtype: MarkDecorator
+        """
+        mark = Mark(self.name, args, kwargs)
+        return self.__class__(self.mark.combined_with(mark))
+
+    # Type ignored because the overloads overlap with an incompatible
+    # return type. Not much we can do about that. Thankfully mypy picks
+    # the first match so it works out even if we break the rules.
+ @overload + def __call__(self, arg: _Markable) -> _Markable: # type: ignore[misc] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> "MarkDecorator": + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks(obj) -> List[Mark]: + """Obtain the unpacked marks that are stored on an object.""" + mark_list = getattr(obj, "pytestmark", []) + if not isinstance(mark_list, list): + mark_list = [mark_list] + return normalize_mark_list(mark_list) + + +def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List[Mark]: + """Normalize marker decorating helpers to mark objects. + + :type List[Union[Mark, Markdecorator]] mark_list: + :rtype: List[Mark] + """ + extracted = [ + getattr(mark, "mark", mark) for mark in mark_list + ] # unpack MarkDecorator + for mark in extracted: + if not isinstance(mark, Mark): + raise TypeError(f"got {mark!r} instead of Mark") + return [x for x in extracted if isinstance(x, Mark)] + + +def store_mark(obj, mark: Mark) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. + """ + assert isinstance(mark, Mark), mark + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = get_unpacked_marks(obj) + [mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + from _pytest.fixtures import _Scope + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: _Markable) -> _Markable: + ... + + @overload + def __call__(self, reason: str = ...) -> "MarkDecorator": + ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + ) -> MarkDecorator: + ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: _Markable) -> _Markable: + ... + + @overload + def __call__( + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + run: bool = ..., + raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ..., + strict: bool = ..., + ) -> MarkDecorator: + ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + *, + indirect: Union[bool, Sequence[str]] = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = ..., + scope: Optional[_Scope] = ..., + ) -> MarkDecorator: + ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, *fixtures: str + ) -> MarkDecorator: + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, *filters: str + ) -> MarkDecorator: + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. 
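(Editor's aside, not part of the committed diff: rule 2 above has a practical consequence - a mark whose sole argument is a callable must be built with with_args(); "slow" and "cleanup" are hypothetical marker names.)

import pytest

def report_hook():
    ...

marked = pytest.mark.slow(report_hook)             # attaches the mark, returns report_hook
deco = pytest.mark.cleanup.with_args(report_hook)  # stores the callable as a mark argument
assert deco.args == (report_hook,)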
+ + Example:: + + import pytest + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + _config: Optional[Config] = None + _markers: Set[str] = set() + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __getattr__(self, name: str) -> MarkDecorator: + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. + marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. + if name not in self._markers: + if self._config.option.strict_markers or self._config.option.strict: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + warnings.warn( + "Unknown pytest.mark.%s - is this a typo? 
You can register " + "custom marks to avoid this warning - for details, see " + "https://docs.pytest.org/en/stable/mark.html" % name, + PytestUnknownMarkWarning, + 2, + ) + + return MarkDecorator(Mark(name, (), {})) + + +MARK_GEN = MarkGenerator() + + +@final +class NodeKeywords(MutableMapping[str, Any]): + def __init__(self, node: "Node") -> None: + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key: str) -> Any: + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._markers[key] = value + + def __delitem__(self, key: str) -> None: + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self) -> Iterator[str]: + seen = self._seen() + return iter(seen) + + def _seen(self) -> Set[str]: + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return seen + + def __len__(self) -> int: + return len(self._seen()) + + def __repr__(self) -> str: + return f"" diff --git a/myenv/lib/python3.9/site-packages/_pytest/monkeypatch.py b/myenv/lib/python3.9/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000..a052f69 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,379 @@ +"""Monkeypatching and mocking functionality.""" +import os +import re +import sys +import warnings +from contextlib import contextmanager +from pathlib import Path +from typing import Any +from typing import Generator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Tuple +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.fixtures import fixture +from _pytest.warning_types import PytestWarning + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +K = TypeVar("K") +V = TypeVar("V") + + +@fixture +def monkeypatch() -> Generator["MonkeyPatch", None, None]: + """A convenient fixture for monkey-patching. + + The fixture provides these methods to modify objects, dictionaries or + os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting test function or + fixture has finished. The ``raising`` parameter determines if a KeyError + or AttributeError will be raised if the set/deletion operation has no target. + """ + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() + + +def resolve(name: str) -> object: + # Simplified from zope.dottedname. + parts = name.split(".") + + used = parts.pop(0) + found = __import__(used) + for part in parts: + used += "." + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # We use explicit un-nesting of the handling block in order + # to avoid nested exceptions. 
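(Editor's aside, not part of the committed diff: the unknown-mark warning above is silenced by registering marks, for example from a conftest.py.)

# conftest.py
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "slow: marks tests as slow (deselect with -m 'not slow')"
    )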
+ try: + __import__(used) + except ImportError as ex: + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError(f"import error in {used}: {ex}") from ex + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj: object, name: str, ann: str) -> object: + try: + obj = getattr(obj, name) + except AttributeError as e: + raise AttributeError( + "{!r} object at {} has no attribute {!r}".format( + type(obj).__name__, ann, name + ) + ) from e + return obj + + +def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]: + if not isinstance(import_path, str) or "." not in import_path: # type: ignore[unreachable] + raise TypeError(f"must be absolute import path string, not {import_path!r}") + module, attr = import_path.rsplit(".", 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset: + def __repr__(self) -> str: + return "" + + +notset = Notset() + + +@final +class MonkeyPatch: + """Helper to conveniently monkeypatch attributes/items/environment + variables/syspath. + + Returned by the :fixture:`monkeypatch` fixture. + + :versionchanged:: 6.2 + Can now also be used directly as `pytest.MonkeyPatch()`, for when + the fixture is not available. In this case, use + :meth:`with MonkeyPatch.context() as mp: ` or remember to call + :meth:`undo` explicitly. + """ + + def __init__(self) -> None: + self._setattr: List[Tuple[object, str, object]] = [] + self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = ([]) + self._cwd: Optional[str] = None + self._savesyspath: Optional[List[str]] = None + + @classmethod + @contextmanager + def context(cls) -> Generator["MonkeyPatch", None, None]: + """Context manager that returns a new :class:`MonkeyPatch` object + which undoes any patching done inside the ``with`` block upon exit. + + Example: + + .. code-block:: python + + import functools + + + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see `#3290 `_. + """ + m = cls() + try: + yield m + finally: + m.undo() + + @overload + def setattr( + self, target: str, name: object, value: Notset = ..., raising: bool = ..., + ) -> None: + ... + + @overload + def setattr( + self, target: object, name: str, value: object, raising: bool = ..., + ) -> None: + ... + + def setattr( + self, + target: Union[str, object], + name: Union[object, str], + value: object = notset, + raising: bool = True, + ) -> None: + """Set attribute value on target, memorizing the old value. + + For convenience you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name. For example, + ``monkeypatch.setattr("os.getcwd", lambda: "/")`` + would set the ``getcwd`` function of the ``os`` module. + + Raises AttributeError if the attribute does not exist, unless + ``raising`` is set to False. 
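(Editor's aside, not part of the committed diff: the fixture in use; every change is undone automatically at teardown.)

import os

def test_fake_home(monkeypatch, tmp_path):
    monkeypatch.setenv("HOME", str(tmp_path))
    monkeypatch.setattr("os.getcwd", lambda: "/somewhere")
    assert os.environ["HOME"] == str(tmp_path)
    assert os.getcwd() == "/somewhere"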
+ """ + __tracebackhide__ = True + import inspect + + if isinstance(value, Notset): + if not isinstance(target, str): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + else: + if not isinstance(name, str): + raise TypeError( + "use setattr(target, name, value) with name being a string or " + "setattr(target, value) with target being a dotted " + "import string" + ) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError(f"{target!r} has no attribute {name!r}") + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr( + self, + target: Union[object, str], + name: Union[str, Notset] = notset, + raising: bool = True, + ) -> None: + """Delete attribute ``name`` from ``target``. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + Raises AttributeError it the attribute does not exist, unless + ``raising`` is set to False. + """ + __tracebackhide__ = True + import inspect + + if isinstance(name, Notset): + if not isinstance(target, str): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None: + """Set dictionary entry ``name`` to value.""" + self._setitem.append((dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None: + """Delete ``name`` from dict. + + Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to + False. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + del dic[name] + + def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + "Value of environment variable {name} type should be str, but got " + "{value!r} (type: {type}); converted to str implicitly".format( + name=name, value=value, type=type(value).__name__ + ) + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. 
+ """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + from pkg_resources import fixup_namespace_packages + + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path) -> None: + """Change the current working directory to the specified path. + + Path can be a string or a py.path.local object. + """ + if self._cwd is None: + self._cwd = os.getcwd() + if hasattr(path, "chdir"): + path.chdir() + elif isinstance(path, Path): + # Modern python uses the fspath protocol here LEGACY + os.chdir(str(path)) + else: + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + Note that the same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + del dictionary[key] + except KeyError: + pass # Was already deleted, so we have the desired state. 
+ else: + dictionary[key] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/myenv/lib/python3.9/site-packages/_pytest/nodes.py b/myenv/lib/python3.9/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..27434fb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/nodes.py @@ -0,0 +1,591 @@ +import os +import warnings +from pathlib import Path +from typing import Callable +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import py + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import cached_property +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.store import Store + +if TYPE_CHECKING: + # Imported here due to circular import. + from _pytest.main import Session + from _pytest._code.code import _TracebackStyle + + +SEP = "/" + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + + +def iterparentnodeids(nodeid: str) -> Iterator[str]: + """Return the parent node IDs of a given node ID, inclusive. + + For the node ID + + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + the result would be + + "" + "testing" + "testing/code" + "testing/code/test_excinfo.py" + "testing/code/test_excinfo.py::TestFormattedExcinfo" + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + Note that :: parts are only considered at the last / component. + """ + pos = 0 + sep = SEP + yield "" + while True: + at = nodeid.find(sep, pos) + if at == -1 and sep == SEP: + sep = "::" + elif at == -1: + if nodeid: + yield nodeid + break + else: + if at: + yield nodeid[:at] + pos = at + len(sep) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(type): + def __call__(self, *k, **kw): + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=self.__name__) + fail(msg, pytrace=False) + + def _create(self, *k, **kw): + return super().__call__(*k, **kw) + + +class Node(metaclass=NodeMeta): + """Base class for Collector and Item, the components of the test + collection tree. + + Collector subclasses have children; Items are leaf nodes. + """ + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. 
+ __slots__ = ( + "name", + "parent", + "config", + "session", + "fspath", + "_nodeid", + "_store", + "__dict__", + ) + + def __init__( + self, + name: str, + parent: "Optional[Node]" = None, + config: Optional[Config] = None, + session: "Optional[Session]" = None, + fspath: Optional[py.path.local] = None, + nodeid: Optional[str] = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name = name + + #: The parent collector node. + self.parent = parent + + #: The pytest config object. + if config: + self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + #: The pytest session this node is part of. + if session: + self.session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + #: Filesystem path where this node was collected from (can be None). + self.fspath = fspath or getattr(parent, "fspath", None) + + #: Keywords/markers collected from all scopes. + self.keywords = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: List[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: Set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + if self.name != "()": + self._nodeid += "::" + self.name + + # A place where plugins can store information on the node for their + # own use. Currently only intended for internal plugins. + self._store = Store() + + @classmethod + def from_parent(cls, parent: "Node", **kw): + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self): + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.fspath) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. + if not isinstance(warning, Warning): + raise ValueError( + "warning must be an instance of Warning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, category=None, filename=str(path), lineno=lineno + 1, + ) + + # Methods for ordering nodes. 
+ + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def listchain(self) -> List["Node"]: + """Return list of all parent collectors up to self, starting from + the root of collection tree.""" + chain = [] + item: Optional[Node] = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker( + self, marker: Union[str, MarkDecorator], append: bool = True + ) -> None: + """Dynamically add a marker object to the node. + + :param append: + Whether to append the marker, or prepend it. + """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: Optional[str] = None + ) -> Iterator[Tuple["Node", Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Optional[Mark]: + ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: + ... + + def get_closest_marker( + self, name: str, default: Optional[Mark] = None + ) -> Optional[Mark]: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> Set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: Set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> List[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). 
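(Editor's aside, not part of the committed diff: typical plugin code drives these marker accessors like so; ``timeout`` is a hypothetical marker.)

def pytest_runtest_setup(item):
    marker = item.get_closest_marker("timeout")
    if marker is not None:
        seconds = marker.args[0] if marker.args else marker.kwargs.get("seconds")
        ...  # arm a watchdog with `seconds` here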
+ """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]: + """Get the next parent node (including self) which is an instance of + the given class.""" + current: Optional[Node] = self + while current and not isinstance(current, cls): + current = current.parent + assert current is None or isinstance(current, cls) + return current + + def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: + pass + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo(excinfo.value.excinfo) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + if self.config.getoption("fulltrace", False): + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.getoption("verbose", 0) > 1: + truncate_locals = False + else: + truncate_locals = True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=False, # pruned already, or in --fulltrace mode. + truncate_locals=truncate_locals, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection or test failure. + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item( + node: "Node", +) -> Tuple[Union[str, py.path.local], Optional[int]]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "fspath": just a path + + :rtype: A tuple of (str|py.path.local, int) with filename and line number. + """ + # See Item.location. 
+ location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "fspath", "unknown location"), -1 + + +class Collector(Node): + """Collector instances create children through collect() and thus + iteratively build a tree.""" + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + def collect(self) -> Iterable[Union["Item", "Collector"]]: + """Return a list of children (items and collectors) for this + collection node.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). + tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: + if hasattr(self, "fspath"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session, fspath): + for initial_path in session._initialpaths: + if fspath.common(initial_path) == initial_path: + return fspath.relto(initial_path) + + +class FSCollector(Collector): + def __init__( + self, + fspath: py.path.local, + parent=None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + ) -> None: + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, SEP) + self.fspath = fspath + + session = session or parent.session + + if nodeid is None: + nodeid = self.fspath.relto(session.config.rootdir) + + if not nodeid: + nodeid = _check_initialpaths_for_relpath(session, fspath) + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath) + + @classmethod + def from_parent(cls, parent, *, fspath, **kw): + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, **kw) + + def gethookproxy(self, fspath: py.path.local): + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.gethookproxy(fspath) + + def isinitpath(self, path: py.path.local) -> bool: + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.isinitpath(path) + + +class File(FSCollector): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Item(Node): + """A basic test invocation item. + + Note that for a single function there might be multiple test invocation items. 
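(Editor's aside, not part of the committed diff: these base classes are what non-Python test support builds on; a condensed sketch in the spirit of the pytest documentation, with the YAML specifics hypothetical.)

import pytest

class YamlFile(pytest.File):
    def collect(self):
        import yaml  # assumed to be installed

        for name, spec in sorted(yaml.safe_load(self.fspath.open()).items()):
            yield YamlItem.from_parent(self, name=name, spec=spec)

class YamlItem(pytest.Item):
    def __init__(self, name, parent, spec):
        super().__init__(name, parent)
        self.spec = spec

    def runtest(self):
        if not self.spec:
            raise ValueError(f"empty spec in {self.name}")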
+ """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + ) -> None: + super().__init__(name, parent, config, session, nodeid=nodeid) + self._report_sections: List[Tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: List[Tuple[str, object]] = [] + + def runtest(self) -> None: + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> Tuple[Union[py.path.local, str], Optional[int], str]: + return self.fspath, None, "" + + @cached_property + def location(self) -> Tuple[str, Optional[int], str]: + location = self.reportinfo() + fspath = absolutepath(str(location[0])) + relfspath = self.session._node_location_to_relpath(fspath) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/myenv/lib/python3.9/site-packages/_pytest/nose.py b/myenv/lib/python3.9/site-packages/_pytest/nose.py new file mode 100644 index 0000000..bb8f997 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/nose.py @@ -0,0 +1,39 @@ +"""Run testsuites written for nose.""" +from _pytest import python +from _pytest import unittest +from _pytest.config import hookimpl +from _pytest.nodes import Item + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "setup"): + # Call module level setup if there is no object level one. + call_optional(item.parent.obj, "setup") + # XXX This implies we only call teardown when setup worked. + item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + + +def teardown_nose(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "teardown"): + call_optional(item.parent.obj, "teardown") + + +def is_potential_nosetest(item: Item) -> bool: + # Extra check needed since we do not do nose style setup/teardown + # on direct unittest style classes. + return isinstance(item, python.Function) and not isinstance( + item, unittest.TestCaseFunction + ) + + +def call_optional(obj, name): + method = getattr(obj, name, None) + isfixture = hasattr(method, "_pytestfixturefunction") + if method is not None and not isfixture and callable(method): + # If there's any problems allow the exception to raise rather than + # silently ignoring them. 
+ method() + return True diff --git a/myenv/lib/python3.9/site-packages/_pytest/outcomes.py b/myenv/lib/python3.9/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..8f6203f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/outcomes.py @@ -0,0 +1,227 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import Optional +from typing import Type +from typing import TypeVar + +TYPE_CHECKING = False # Avoid circular import through compat. + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Protocol +else: + # typing.Protocol is only available starting from Python 3.8. It is also + # available from typing_extensions, but we don't want a runtime dependency + # on that. So use a dummy runtime implementation. + from typing import Generic + + Protocol = Generic + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: Optional[str] = None, + pytrace: bool = True, + allow_module_level: bool = False, + ) -> None: + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: Optional[int] = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# Elaborate hack to work around https://github.com/python/mypy/issues/2087. +# Ideally would just be `exit.Exception = Exit` etc. + +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=Type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn": + """Exit testing process. + + :param str msg: Message to display upon exit. + :param int returncode: Return code to be used when exiting pytest. 
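(Editor's aside, not part of the committed diff: these exceptions surface through the public helpers; ``database_configured`` and ``schema_version`` are hypothetical helpers.)

import pytest

def test_database_schema():
    if not database_configured():
        pytest.skip("no database configured")
    if schema_version() < 3:
        pytest.fail("schema too old to test against", pytrace=False)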
+ """ + __tracebackhide__ = True + raise Exit(msg, returncode) + + +@_with_exception(Skipped) +def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn": + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param bool allow_module_level: + Allows this function to be called at module level, skipping the rest + of the module. Defaults to False. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP + `_) + to skip a doctest statically. + """ + __tracebackhide__ = True + raise Skipped(msg=msg, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail(msg: str = "", pytrace: bool = True) -> "NoReturn": + """Explicitly fail an executing test with the given message. + + :param str msg: + The message to show the user as reason for the failure. + :param bool pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> "NoReturn": + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, minversion: Optional[str] = None, reason: Optional[str] = None +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param str modname: + The name of the module to import. + :param str minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param str reason: + If given, this reason is shown as the message when the module cannot + be imported. + + :returns: + The imported module. This should be assigned to its canonical name. + + Example:: + + docutils = pytest.importorskip("docutils") + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError as exc: + if reason is None: + reason = f"could not import {modname!r}: {exc}" + raise Skipped(reason, allow_module_level=True) from None + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. 
+ from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/myenv/lib/python3.9/site-packages/_pytest/pastebin.py b/myenv/lib/python3.9/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..131873c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/pastebin.py @@ -0,0 +1,110 @@ +"""Submit failure or test session information to a pastebin service.""" +import tempfile +from io import StringIO +from typing import IO +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.store import StoreKey +from _pytest.terminal import TerminalReporter + + +pastebinfile_key = StoreKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config._store[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config._store[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config._store: + pastebinfile = config._store[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config._store[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents: Union[str, bytes]) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. 
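(Editor's aside, not part of the committed diff: on the command line this plugin is driven by the option registered above.)

pytest --pastebin=failed   # paste the output of failing tests only
pytest --pastebin=all      # paste the whole session log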
+ """ + import re + from urllib.request import urlopen + from urllib.parse import urlencode + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpaste.net" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except OSError as exc_info: # urllib errors + return "bad response: %s" % exc_info + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return "{}/show/{}".format(url, m.group(1)) + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/myenv/lib/python3.9/site-packages/_pytest/pathlib.py b/myenv/lib/python3.9/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..7d9269a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/pathlib.py @@ -0,0 +1,654 @@ +import atexit +import contextlib +import fnmatch +import importlib.util +import itertools +import os +import shutil +import sys +import uuid +import warnings +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +from functools import partial +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +from types import ModuleType +from typing import Callable +from typing import Iterable +from typing import Iterator +from typing import Optional +from typing import Set +from typing import TypeVar +from typing import Union + +import py + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception): + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. + """ + exctype, excvalue = exc[:2] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). 
+ # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(excvalue, FileNotFoundError): + return False + + if not isinstance(excvalue, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + "(rm_rf) unknown function {} when removing {}:\n{}: {}".format( + func, path, exctype, excvalue + ) + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[Path]: + """Find all elements in root that begin with the prefix, case insensitive.""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink( + root: Path, target: Union[str, PurePath], link_to: Union[str, Path] +) -> None: + """Helper to create the current symlink. 
+ + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path: Path, register=atexit.register): + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. + if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). 
+ return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + +def make_numbered_dir_with_cleanup( + root: Path, prefix: str, keep: int, lock_timeout: float, mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. 
Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + +def parts(s: str) -> Set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + +def symlink_or_skip(src, dst, **kwargs): + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(str(src), str(dst), **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + +class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + +class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that has + the same basename but reside in packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + +def import_path( + p: Union[str, py.path.local, Path], + *, + mode: Union[str, ImportMode] = ImportMode.prepend, +) -> ModuleType: + """Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + The import mechanism used is controlled by the `mode` parameter: + + * `mode == ImportMode.prepend`: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `__import__. + + * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to use `__import__` and muck with `sys.path` + at all. It effectively allows having same-named test modules in different places. + + :raises ImportPathMismatchError: + If after importing the given `path` and the module `__file__` + are different. Only raised in `prepend` and `append` modes. + """ + mode = ImportMode(mode) + + path = Path(str(p)) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + module_name = path.stem + + for meta_importer in sys.meta_path: + spec = meta_importer.find_spec(module_name, [str(path.parent)]) + if spec is not None: + break + else: + spec = importlib.util.spec_from_file_location(module_name, str(path)) + + if spec is None: + raise ImportError( + "Can't find module {} at location {}".format(module_name, str(path)) + ) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) # type: ignore[union-attr] + return mod + + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + names = list(path.with_suffix("").relative_to(pkg_root).parts) + if names[-1] == "__init__": + names.pop() + module_name = ".".join(names) + else: + pkg_root = path.parent + module_name = path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. 
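As a small, self-contained sketch of the import modes described in the docstring above (the file demo_mod.py is created on the fly and is purely illustrative):

    from pathlib import Path

    from _pytest.pathlib import ImportMode, import_path

    demo = Path("demo_mod.py")          # scratch file used only for this sketch
    demo.write_text("ANSWER = 42\n")

    # importlib mode imports the file without touching sys.path at all,
    # so same-named modules in different directories do not clash.
    mod = import_path(demo, mode=ImportMode.importlib)
    assert mod.ANSWER == 42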
+ if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.path.sep + "__init__.py"): + module_file = module_file[: -(len(os.path.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +# Implement a special _is_same function on Windows which returns True if the two filenames +# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678). +if sys.platform.startswith("win"): + + def _is_same(f1: str, f2: str) -> bool: + return Path(f1) == Path(f2) or os.path.samefile(f1, f2) + + +else: + + def _is_same(f1: str, f2: str) -> bool: + return os.path.samefile(f1, f2) + + +def resolve_package_path(path: Path) -> Optional[Path]: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it can not be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not parent.joinpath("__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def visit( + path: str, recurse: Callable[["os.DirEntry[str]"], bool] +) -> Iterator["os.DirEntry[str]"]: + """Walk a directory recursively, in breadth-first order. + + Entries at each directory level are sorted. + """ + + # Skip entries with symlink loops and other brokenness, so the caller doesn't + # have to deal with it. + entries = [] + for entry in os.scandir(path): + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + raise + entries.append(entry) + + entries.sort(key=lambda entry: entry.name) + + yield from entries + + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: Union[Path, str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). + """ + return Path(os.path.abspath(str(path))) + + +def commonpath(path1: Path, path2: Path) -> Optional[Path]: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. 
+ # Can be the case for two relative paths without common prefix. + # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. + *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) diff --git a/myenv/lib/python3.9/site-packages/_pytest/py.typed b/myenv/lib/python3.9/site-packages/_pytest/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/_pytest/pytester.py b/myenv/lib/python3.9/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..31259d1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/pytester.py @@ -0,0 +1,1922 @@ +"""(Disabled by default) support for testing pytest and pytest plugins. + +PYTEST_DONT_REWRITE +""" +import collections.abc +import contextlib +import gc +import importlib +import os +import platform +import re +import shutil +import subprocess +import sys +import traceback +from fnmatch import fnmatch +from io import StringIO +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union +from weakref import WeakKeyDictionary + +import attr +import py +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import final +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + from typing_extensions import Literal + + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: 
+ if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> List[Tuple[str, str]]: + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + universal_newlines=True, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self) -> bool: + try: + subprocess.run(("lsof", "-v"), check=True) + except (OSError, subprocess.CalledProcessError): + return False + else: + return True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]: + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [ + "***** %s FD leakage detected" % len(leaked_files), + *(str(f) for f in leaked_files), + "*** Before:", + *(str(f) for f in lines1), + "*** After:", + *(str(f) for f in lines2), + "***** %s FD leakage detected" % len(leaked_files), + "*** function %s:%s: %s " % item.location, + "See issue #2366", + ] + item.warn(PytestWarning("\n".join(error))) + + +# used at least by pytest-xdist plugin + + +@fixture +def _pytest(request: FixtureRequest) -> "PytestArg": + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks.""" + return PytestArg(request) + + +class PytestArg: + def __init__(self, request: FixtureRequest) -> None: + self._request = request + + def gethookrecorder(self, hook) -> "HookRecorder": + hookrecorder = HookRecorder(hook._pm) + self._request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(values: Iterable[str]) -> List[str]: + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] + + +class ParsedCall: + def __init__(self, name: str, kwargs) -> None: + self.__dict__.update(kwargs) + self._name = name + + def __repr__(self) -> str: + d = self.__dict__.copy() + del d["_name"] + return f"" + + if TYPE_CHECKING: + # The class has undetermined attributes, this tells mypy about it. + def __getattr__(self, key: str): + ... + + +class HookRecorder: + """Record all hooks called in a plugin manager. + + This wraps all the hook calls in the plugin manager, recording each call + before propagating the normal calls. 
+ """ + + def __init__(self, pluginmanager: PytestPluginManager) -> None: + self._pluginmanager = pluginmanager + self.calls: List[ParsedCall] = [] + self.ret: Optional[Union[int, ExitCode]] = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: Union[str, Iterable[str]]) -> List[ParsedCall]: + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[Tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> ParsedCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([" %s" % x for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> ParsedCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... + + @overload + def getreports( + self, names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ... + + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: Union[str, Iterable[str]] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: Optional[str] = None, + ) -> Union[CollectReport, TestReport]: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" % (inamepart,) + ) + if len(values) > 1: + raise ValueError( + "found 2 or more testreports matching {!r}: {}".format( + inamepart, values + ) + ) + return values[0] + + @overload + def getfailures( + self, names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... 
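For orientation, a short sketch of how a HookRecorder is usually obtained and queried in practice, via the pytester fixture defined later in this file; the generated test file is illustrative:

    def test_records_hook_calls(pytester):
        pytester.makepyfile("def test_ok(): pass")
        reprec = pytester.inline_run()      # in-process run, returns a HookRecorder
        reports = reprec.getreports("pytest_runtest_logreport")
        assert any(rep.when == "call" and rep.passed for rep in reports)
        reprec.assertoutcome(passed=1)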
+ + @overload + def getfailures( + self, names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ... + + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> List[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, passed=passed, skipped=skipped, failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> "LineComp": + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. + """ + return LineMatcher + + +@fixture +def pytester(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> "Pytester": + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, _ispytest=True) + + +@fixture +def testdir(pytester: "Pytester") -> "Testdir": + """ + Identical to :fixture:`pytester`, and provides an instance whose methods return + legacy ``py.path.local`` objects instead when applicable. + + New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. 
+ """ + return Testdir(pytester, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None, None, None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config, None, None]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. + + +# Regex to match the session duration string in the summary: "74.34s". +rex_session_duration = re.compile(r"\d+\.\d\ds") +# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped". +rex_outcome = re.compile(r"(\d+) (\w+)") + + +class RunResult: + """The result of running a command.""" + + def __init__( + self, + ret: Union[int, ExitCode], + outlines: List[str], + errlines: List[str], + duration: float, + ) -> None: + try: + self.ret: Union[int, ExitCode] = ExitCode(ret) + """The return value.""" + except ValueError: + self.ret = ret + self.outlines = outlines + """List of lines captured from stdout.""" + self.errlines = errlines + """List of lines captured from stderr.""" + self.stdout = LineMatcher(outlines) + """:class:`LineMatcher` of stdout. + + Use e.g. :func:`str(stdout) ` to reconstruct stdout, or the commonly used + :func:`stdout.fnmatch_lines() ` method. + """ + self.stderr = LineMatcher(errlines) + """:class:`LineMatcher` of stderr.""" + self.duration = duration + """Duration in seconds.""" + + def __repr__(self) -> str: + return ( + "" + % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration) + ) + + def parseoutcomes(self) -> Dict[str, int]: + """Return a dictionary of outcome noun -> count from parsing the terminal + output that the test process produced. + + The returned nouns will always be in plural form:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. + """ + return self.parse_summary_nouns(self.outlines) + + @classmethod + def parse_summary_nouns(cls, lines) -> Dict[str, int]: + """Extract the nouns from a pytest terminal summary line. + + It always returns the plural noun for consistency:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. 
+ """ + for line in reversed(lines): + if rex_session_duration.search(line): + outcomes = rex_outcome.findall(line) + ret = {noun: int(count) for (count, noun) in outcomes} + break + else: + raise ValueError("Pytest terminal summary report not found") + + to_plural = { + "warning": "warnings", + "error": "errors", + } + return {to_plural.get(k, k): v for k, v in ret.items()} + + def assert_outcomes( + self, + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + ) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + from _pytest.pytester_assertions import assert_outcomes + + outcomes = self.parseoutcomes() + assert_outcomes( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + errors=errors, + xpassed=xpassed, + xfailed=xfailed, + ) + + +class CwdSnapshot: + def __init__(self) -> None: + self.__saved = os.getcwd() + + def restore(self) -> None: + os.chdir(self.__saved) + + +class SysModulesSnapshot: + def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None: + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self) -> None: + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot: + def __init__(self) -> None: + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self) -> None: + sys.path[:], sys.meta_path[:] = self.__saved + + +@final +class Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + Attributes: + + :ivar Path path: temporary directory path used to create files/run tests from, etc. + + :ivar plugins: + A list of plugins to use with :py:meth:`parseconfig` and + :py:meth:`runpytest`. Initially this is an empty list but plugins can + be added to the list. The type of items to add to the list depends on + the method using them so refer to them for details. + """ + + __test__ = False + + CLOSE_STDIN = object + + class TimeoutExpired(Exception): + pass + + def __init__( + self, + request: FixtureRequest, + tmp_path_factory: TempPathFactory, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._request = request + self._mod_collections: WeakKeyDictionary[ + Collector, List[Union[Item, Collector]] + ] = (WeakKeyDictionary()) + if request.function: + name: str = request.function.__name__ + else: + name = request.node.name + self._name = name + self._path: Path = tmp_path_factory.mktemp(name, numbered=True) + self.plugins: List[Union[str, _PluggyPlugin]] = [] + self._cwd_snapshot = CwdSnapshot() + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self.chdir() + self._request.addfinalizer(self._finalize) + self._method = self._request.config.getoption("--runpytest") + self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) + + self._monkeypatch = mp = MonkeyPatch() + mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) + # Ensure no unexpected caching via tox. 
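A minimal usage sketch tying the RunResult helpers above to the Pytester fixture (the generated test file is illustrative only):

    def test_summary_counts(pytester):
        pytester.makepyfile("def test_ok(): pass")
        result = pytester.runpytest()       # returns a RunResult
        assert result.ret == 0
        assert result.parseoutcomes() == {"passed": 1}
        result.assert_outcomes(passed=1)
        result.stdout.fnmatch_lines(["*1 passed*"])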
+ mp.delenv("TOX_ENV_DIR", raising=False) + # Discard outer pytest options. + mp.delenv("PYTEST_ADDOPTS", raising=False) + # Ensure no user config is used. + tmphome = str(self.path) + mp.setenv("HOME", tmphome) + mp.setenv("USERPROFILE", tmphome) + # Do not use colors for inner runs by default. + mp.setenv("PY_COLORS", "0") + + @property + def path(self) -> Path: + """Temporary directory where files are created and pytest is executed.""" + return self._path + + def __repr__(self) -> str: + return f"" + + def _finalize(self) -> None: + """ + Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + self._cwd_snapshot.restore() + self._monkeypatch.undo() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :py:class:`HookRecorder` for a PluginManager.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager) + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. + """ + os.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Union[Any, bytes]], + files: Dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + def to_text(s: Union[Any, bytes]) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new file(s) in the test directory. + + :param str ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + + Examples: + + .. 
code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a contest.py file with 'source' as contents.""" + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file with 'source' as contents.""" + return self.makefile(".ini", tox=source) + + def getinicfg(self, source: str) -> SectionWrapper: + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return IniConfig(str(p))["pytest"] + + def makepyprojecttoml(self, source: str) -> Path: + """Write a pyproject.toml file with 'source' as contents. + + .. versionadded:: 6.0 + """ + return self.makefile(".toml", pyproject=source) + + def makepyfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .py extension. + + Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.py. + pytester.makepyfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.makepyfile(custom="foobar") + # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. + + """ + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .txt extension. + + Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.txt. + pytester.maketxtfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.maketxtfile(custom="foobar") + # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. + + """ + return self._makefile(".txt", args, kwargs) + + def syspathinsert( + self, path: Optional[Union[str, "os.PathLike[str]"]] = None + ) -> None: + """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + + This is undone automatically when this object dies at the end of each + test. + """ + if path is None: + path = self.path + + self._monkeypatch.syspath_prepend(str(path)) + + def mkdir(self, name: str) -> Path: + """Create a new (sub)directory.""" + p = self.path / name + p.mkdir() + return p + + def mkpydir(self, name: str) -> Path: + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a Python package. + """ + p = self.path / name + p.mkdir() + p.joinpath("__init__.py").touch() + return p + + def copy_example(self, name: Optional[str] = None) -> Path: + """Copy file from project's directory into the testdir. + + :param str name: The name of the file to copy. + :return: path to the copied directory (inside ``self.path``). 
+ + """ + example_dir = self._request.config.getini("pytester_example_dir") + if example_dir is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir = Path(str(self._request.config.rootdir)) / example_dir + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + # TODO: py.path.local.copy can copy files to existing directories, + # while with shutil.copytree the destination directory cannot exist, + # we will need to roll our own in order to drop py.path.local completely + py.path.local(example_path).copy(py.path.local(self.path)) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + Session = Session + + def getnode( + self, config: Config, arg: Union[str, "os.PathLike[str]"] + ) -> Optional[Union[Collector, Item]]: + """Return the collection node of a file. + + :param _pytest.config.Config config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param py.path.local arg: + Path to the file. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = py.path.local(arg) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: Union[str, "os.PathLike[str]"]): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param py.path.local path: Path to the file. + """ + path = py.path.local(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = session.fspath.bestrelpath(path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Union[Item, Collector]]) -> List[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + """ + session = colitems[0].session + result: List[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. 
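As a brief sketch of the collection helpers above (getmodulecol is defined further down in this file; the module source is illustrative):

    def test_collects_two_functions(pytester):
        modcol = pytester.getmodulecol(
            "def test_a(): pass\n"
            "def test_b(): pass\n"
        )
        items = pytester.genitems([modcol])
        assert [item.name for item in items] == ["test_a", "test_b"]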
+ """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + + :param cmdlineargs: Any extra command line arguments to use. + + :returns: :py:class:`HookRecorder` instance of the result. + """ + p = self.makepyfile(source) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]: + """Run ``pytest.main(['--collectonly'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: Union[str, "os.PathLike[str]"], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + + :returns: A :py:class:`HookRecorder` instance. + """ + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect: + def pytest_configure(x, config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins.append(Collect()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret # type: ignore + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. 
+ if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + now = timing.time() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now + ) + res.reprec = reprec # type: ignore + return res + + def runpytest( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[Union[str, "os.PathLike[str]"]] + ) -> List[Union[str, "os.PathLike[str]"]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp")) + return new_args + + def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. + """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`_pytest.config.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. 
+ """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem(self, source: str, funcname: str = "test_func") -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "{!r} item not found in module:\n{}\nitems: {}".format( + funcname, source, items + ) + + def getitems(self, source: str) -> List[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, source: Union[str, Path], configargs=(), *, withinit: bool = False + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, Path): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """Return the collection node for name from the module collection. + + Searchs a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs, + stdout: Union[int, TextIO] = subprocess.PIPE, + stderr: Union[int, TextIO] = subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """Invoke subprocess.Popen. + + Calls subprocess.Popen making sure the current working directory is + in the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. 
+ """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: Union[str, "os.PathLike[str]"], + timeout: Optional[float] = None, + stdin=CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and stderr. + + :param cmdargs: + The sequence of arguments to pass to `subprocess.Popen()`, with path-like objects + being converted to ``str`` automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. Bytes are being send, closing + the pipe, otherwise it is passed through to ``popen``. + Defaults to ``CLOSE_STDIN``, which translates to using a pipe + (``subprocess.PIPE``) that gets closed. + + :rtype: RunResult + """ + __tracebackhide__ = True + + # TODO: Remove type ignore in next mypy release. + # https://github.com/python/typeshed/pull/4582 + cmdargs = tuple( + os.fspath(arg) if isinstance(arg, os.PathLike) else arg for arg in cmdargs # type: ignore[misc] + ) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + now = timing.time() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, timing.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> Tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script) -> RunResult: + """Run a python script using sys.executable as interpreter. + + :rtype: RunResult + """ + return self.run(sys.executable, script) + + def runpython_c(self, command): + """Run python -c "command". + + :rtype: RunResult + """ + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess(self, *args, timeout: Optional[float] = None) -> RunResult: + """Run pytest as a subprocess with given arguments. 
+ + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + + :rtype: RunResult + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """Run a command using pexpect. + + The pexpect child is returned. + """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines`. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +@final +@attr.s(repr=False, str=False, init=False) +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy py.path.local objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `py.path.local` objects as necessary. 
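# Illustrative sketch, not from the vendored pytest source: the legacy `testdir`
# fixture described above mirrors `pytester` but returns py.path.local objects.
def test_testdir_sketch(testdir):
    p = testdir.makepyfile("def test_ok(): assert True")
    assert p.check(file=1)                    # py.path.local API, not pathlib
    result = testdir.runpytest(str(p))
    result.assert_outcomes(passed=1)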
+ """ + + __test__ = False + + CLOSE_STDIN = Pytester.CLOSE_STDIN + TimeoutExpired = Pytester.TimeoutExpired + Session = Pytester.Session + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> py.path.local: + """Temporary directory where tests are executed.""" + return py.path.local(self._pytester.path) + + @property + def test_tmproot(self) -> py.path.local: + return py.path.local(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + """See :meth:`Pytester._finalize`.""" + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> py.path.local: + """See :meth:`Pytester.makefile`.""" + return py.path.local(str(self._pytester.makefile(ext, *args, **kwargs))) + + def makeconftest(self, source) -> py.path.local: + """See :meth:`Pytester.makeconftest`.""" + return py.path.local(str(self._pytester.makeconftest(source))) + + def makeini(self, source) -> py.path.local: + """See :meth:`Pytester.makeini`.""" + return py.path.local(str(self._pytester.makeini(source))) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> py.path.local: + """See :meth:`Pytester.makepyprojecttoml`.""" + return py.path.local(str(self._pytester.makepyprojecttoml(source))) + + def makepyfile(self, *args, **kwargs) -> py.path.local: + """See :meth:`Pytester.makepyfile`.""" + return py.path.local(str(self._pytester.makepyfile(*args, **kwargs))) + + def maketxtfile(self, *args, **kwargs) -> py.path.local: + """See :meth:`Pytester.maketxtfile`.""" + return py.path.local(str(self._pytester.maketxtfile(*args, **kwargs))) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> py.path.local: + """See :meth:`Pytester.mkdir`.""" + return py.path.local(str(self._pytester.mkdir(name))) + + def mkpydir(self, name) -> py.path.local: + """See :meth:`Pytester.mkpydir`.""" + return py.path.local(str(self._pytester.mkpydir(name))) + + def copy_example(self, name=None) -> py.path.local: + """See :meth:`Pytester.copy_example`.""" + return py.path.local(str(self._pytester.copy_example(name))) + + def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: List[Union[Item, Collector]]) -> List[Item]: + """See :meth:`Pytester.genitems`.""" + return self._pytester.genitems(colitems) + + def runitem(self, source): + """See :meth:`Pytester.runitem`.""" + return self._pytester.runitem(source) + + def inline_runsource(self, source, *cmdlineargs): + 
"""See :meth:`Pytester.inline_runsource`.""" + return self._pytester.inline_runsource(source, *cmdlineargs) + + def inline_genitems(self, *args): + """See :meth:`Pytester.inline_genitems`.""" + return self._pytester.inline_genitems(*args) + + def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): + """See :meth:`Pytester.inline_run`.""" + return self._pytester.inline_run( + *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc + ) + + def runpytest_inprocess(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest_inprocess`.""" + return self._pytester.runpytest_inprocess(*args, **kwargs) + + def runpytest(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest`.""" + return self._pytester.runpytest(*args, **kwargs) + + def parseconfig(self, *args) -> Config: + """See :meth:`Pytester.parseconfig`.""" + return self._pytester.parseconfig(*args) + + def parseconfigure(self, *args) -> Config: + """See :meth:`Pytester.parseconfigure`.""" + return self._pytester.parseconfigure(*args) + + def getitem(self, source, funcname="test_func"): + """See :meth:`Pytester.getitem`.""" + return self._pytester.getitem(source, funcname) + + def getitems(self, source): + """See :meth:`Pytester.getitems`.""" + return self._pytester.getitems(source) + + def getmodulecol(self, source, configargs=(), withinit=False): + """See :meth:`Pytester.getmodulecol`.""" + return self._pytester.getmodulecol( + source, configargs=configargs, withinit=withinit + ) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.collect_by_name`.""" + return self._pytester.collect_by_name(modcol, name) + + def popen( + self, + cmdargs, + stdout: Union[int, TextIO] = subprocess.PIPE, + stderr: Union[int, TextIO] = subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """See :meth:`Pytester.popen`.""" + return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) + + def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: + """See :meth:`Pytester.run`.""" + return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) + + def runpython(self, script) -> RunResult: + """See :meth:`Pytester.runpython`.""" + return self._pytester.runpython(script) + + def runpython_c(self, command): + """See :meth:`Pytester.runpython_c`.""" + return self._pytester.runpython_c(command) + + def runpytest_subprocess(self, *args, timeout=None) -> RunResult: + """See :meth:`Pytester.runpytest_subprocess`.""" + return self._pytester.runpytest_subprocess(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """See :meth:`Pytester.spawn_pytest`.""" + return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """See :meth:`Pytester.spawn`.""" + return self._pytester.spawn(cmd, expect_timeout=expect_timeout) + + def __repr__(self) -> str: + return f"" + + def __str__(self) -> str: + return str(self.tmpdir) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: List[str]) -> None: + self.lines = lines + self._log_output: List[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. 
versionadded:: 6.2 + You can use :meth:`str` in older versions. + """ + return "\n".join(self.lines) + + def _getlines(self, lines2: Union[str, Sequence[str], Source]) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = "line %r not found in output" % line + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. + :param consecutive: Match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive) + + def re_match_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:re.match`). + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also shown as part of the error message. + + :param lines2: string patterns to match. + :param consecutive: match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines( + lines2, + lambda name, pat: bool(re.match(pat, name)), + "re.match", + consecutive=consecutive, + ) + + def _match_lines( + self, + lines2: Sequence[str], + match_func: Callable[[str, str], bool], + match_nickname: str, + *, + consecutive: bool = False, + ) -> None: + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param Sequence[str] lines2: + List of string patterns to match. The actual format depends on + ``match_func``. + :param match_func: + A callable ``match_func(line, pattern)`` where line is the + captured line from stdout/stderr and pattern is the matching + pattern. 
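# Illustrative sketch, not from the vendored pytest source: the glob- and
# regex-based matching described above, on made-up terminal output.
from _pytest.pytester import LineMatcher

matcher = LineMatcher(["collected 3 items", "3 passed in 0.12s"])
matcher.fnmatch_lines(["collected * items", "* passed in *"])   # fnmatch-style globs
matcher.re_match_lines([r"collected \d+ items"])                # re.match patterns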
+ :param str match_nickname: + The nickname for the match function that will be logged to stdout + when a match occurs. + :param consecutive: + Match lines consecutively? + """ + if not isinstance(lines2, collections.abc.Sequence): + raise TypeError("invalid type for lines2: {}".format(type(lines2).__name__)) + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + extralines = [] + __tracebackhide__ = True + wnick = len(match_nickname) + 1 + started = False + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + started = True + break + elif match_func(nextline, line): + self._log("%s:" % match_nickname, repr(line)) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + started = True + break + else: + if consecutive and started: + msg = f"no consecutive match: {line!r}" + self._log(msg) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + self._fail(msg) + if not nomatchprinted: + self._log( + "{:>{width}}".format("nomatch:", width=wnick), repr(line) + ) + nomatchprinted = True + self._log("{:>{width}}".format("and:", width=wnick), repr(nextline)) + extralines.append(nextline) + else: + msg = f"remains unmatched: {line!r}" + self._log(msg) + self._fail(msg) + self._log_output = [] + + def no_fnmatch_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``. + + :param str pat: The pattern to match lines. + """ + __tracebackhide__ = True + self._no_match_line(pat, fnmatch, "fnmatch") + + def no_re_match_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``re.match``. + + :param str pat: The regular expression to match lines. + """ + __tracebackhide__ = True + self._no_match_line( + pat, lambda name, pat: bool(re.match(pat, name)), "re.match" + ) + + def _no_match_line( + self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str + ) -> None: + """Ensure captured lines does not have a the given pattern, using ``fnmatch.fnmatch``. + + :param str pat: The pattern to match lines. + """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/myenv/lib/python3.9/site-packages/_pytest/pytester_assertions.py b/myenv/lib/python3.9/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 0000000..630c1d3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,66 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. 
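# Illustrative sketch, not from the vendored pytest source: the negative-matching
# helpers defined just above in pytester.py, on a made-up output line.
from _pytest.pytester import LineMatcher

matcher = LineMatcher(["1 passed in 0.01s"])
matcher.no_fnmatch_line("*error*")         # passes: no captured line matches the glob
matcher.no_re_match_line(r".*FAILED.*")    # passes: no captured line matches the regex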
+from typing import Dict +from typing import Sequence +from typing import Tuple +from typing import Union + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: Dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + assert obtained == expected diff --git a/myenv/lib/python3.9/site-packages/_pytest/python.py b/myenv/lib/python3.9/site-packages/_pytest/python.py new file mode 100644 index 0000000..e48e753 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/python.py @@ -0,0 +1,1689 @@ +"""Python test discovery, setup and run of test functions.""" +import enum +import fnmatch +import inspect +import itertools +import os +import sys +import types +import warnings +from collections import Counter +from collections import defaultdict +from functools import partial +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import py + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import final +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_async_function +from _pytest.compat import is_generator +from _pytest.compat import NOTSET +from _pytest.compat import REGEX_TYPE +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import STRING_TYPES +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH 
+from _pytest.fixtures import FuncFixtureInfo +from _pytest.main import Session +from _pytest.mark import MARK_GEN +from _pytest.mark import ParameterSet +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import parts +from _pytest.pathlib import visit +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestUnhandledCoroutineWarning + +if TYPE_CHECKING: + from typing_extensions import Literal + from _pytest.fixtures import _Scope + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. + default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="disable string escape non-ascii characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + # TODO: Fix this type-ignore (overlapping kwargs). + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) # type: ignore[misc] + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. 
see " + "https://docs.pytest.org/en/stable/fixture.html#usefixtures ", + ) + + +def async_warn_and_skip(nodeid: str) -> None: + msg = "async def functions are not natively supported and have been skipped.\n" + msg += ( + "You need to install a suitable plugin for your async framework, for example:\n" + ) + msg += " - anyio\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-tornasync\n" + msg += " - pytest-trio\n" + msg += " - pytest-twisted" + warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) + skip(msg="async def function and no async plugin installed (see warnings)") + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_warn_and_skip(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_warn_and_skip(pyfuncitem.nodeid) + return True + + +def pytest_collect_file( + path: py.path.local, parent: nodes.Collector +) -> Optional["Module"]: + ext = path.ext + if ext == ".py": + if not parent.session.isinitpath(path): + if not path_matches_patterns( + path, parent.config.getini("python_files") + ["__init__.py"] + ): + return None + ihook = parent.session.gethookproxy(path) + module: Module = ihook.pytest_pycollect_makemodule(path=path, parent=parent) + return module + return None + + +def path_matches_patterns(path: py.path.local, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(path.fnmatch(pattern) for pattern in patterns) + + +def pytest_pycollect_makemodule(path: py.path.local, parent) -> "Module": + if path.basename == "__init__.py": + pkg: Package = Package.from_parent(parent, fspath=path) + return pkg + mod: Module = Module.from_parent(parent, fspath=path) + return mod + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem(collector: "PyCollector", name: str, obj: object): + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + return Class.from_parent(collector, name=name, obj=obj) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + "cannot collect %r because it is not a function." % name + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res = Function.from_parent(collector, name=name) + reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format( + name=name + ) + res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) + res.warn(PytestCollectionWarning(reason)) + else: + res = list(collector._genfunctions(name, obj)) + return res + + +class PyobjMixin: + _ALLOW_MARKERS = True + + # Function and attributes that the mixin needs (for type-checking only). 
+ if TYPE_CHECKING: + name: str = "" + parent: Optional[nodes.Node] = None + own_markers: List[Mark] = [] + + def getparent(self, cls: Type[nodes._NodeType]) -> Optional[nodes._NodeType]: + ... + + def listchain(self) -> List[nodes.Node]: + ... + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object this node was collected from (can be None).""" + node = self.getparent(Instance) + return node.obj if node is not None else None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Instance collector marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + if isinstance(node, Instance): + continue + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> Tuple[Union[py.path.local, str], int, str]: + # XXX caching? + obj = self.obj + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + file_path = sys.modules[obj.__module__].__file__ + if file_path.endswith(".pyc"): + file_path = file_path[:-1] + fspath: Union[py.path.local, str] = file_path + lineno = compat_co_firstlineno + else: + fspath, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return fspath, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( # noqa: E305 + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. 
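# Illustrative sketch, not from the vendored pytest source: the __test__ opt-out
# convention consulted by the collection code in this file (names are made up).
class TestHelperBase:
    __test__ = False           # not collected even though the name matches "Test*"

def test_factory():
    """A helper that merely looks like a test."""
test_factory.__test__ = False  # likewise opts a "test_*"-named function out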
+ """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # staticmethods need to be unwrapped. + obj = safe_getattr(obj, "__func__", False) + return ( + safe_getattr(obj, "__call__", False) + and fixtures.getfixturemarker(obj) is None + ) + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + if not getattr(self.obj, "__test__", True): + return [] + + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) + dicts = [getattr(self.obj, "__dict__", {})] + for basecls in self.obj.__class__.__mro__: + dicts.append(basecls.__dict__) + seen: Set[str] = set() + values: List[Union[nodes.Item, nodes.Collector]] = [] + ihook = self.ihook + for dic in dicts: + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + + def sort_key(item): + fspath, lineno, _ = item.reportinfo() + return (str(fspath), lineno) + + values.sort(key=sort_key) + return values + + def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]: + modulecol = self.getparent(Module) + assert modulecol is not None + module = modulecol.obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + fm = self.session._fixturemanager + + definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + fixtureinfo = definition._fixtureinfo + + metafunc = Metafunc( + definition, fixtureinfo, self.config, cls=cls, module=module + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if cls is not None and hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + + self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) + + if not metafunc._calls: + yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) + else: + # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs. 
+ fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) + + # Add_funcarg_pseudo_fixture_def may have shadowed some fixtures + # with direct parametrization, so make sure we update what the + # function really needs. + fixtureinfo.prune_dependency_tree() + + for callspec in metafunc._calls: + subname = f"{name}[{callspec.id}]" + yield Function.from_parent( + self, + name=subname, + callspec=callspec, + callobj=funcobj, + fixtureinfo=fixtureinfo, + keywords={callspec.id: True}, + originalname=name, + ) + + +class Module(nodes.File, PyCollector): + """Collector for test classes and functions.""" + + def _getobj(self): + return self._importtestmodule() + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + self._inject_setup_module_fixture() + self._inject_setup_function_fixture() + self.session._fixturemanager.parsefactories(self) + return super().collect() + + def _inject_setup_module_fixture(self) -> None: + """Inject a hidden autouse, module scoped fixture into the collected module object + that invokes setUpModule/tearDownModule if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + + if setup_module is None and teardown_module is None: + return + + @fixtures.fixture( + autouse=True, + scope="module", + # Use a unique name to speed up lookup. + name=f"xunit_setup_module_fixture_{self.obj.__name__}", + ) + def xunit_setup_module_fixture(request) -> Generator[None, None, None]: + if setup_module is not None: + _call_with_optional_argument(setup_module, request.module) + yield + if teardown_module is not None: + _call_with_optional_argument(teardown_module, request.module) + + self.obj.__pytest_setup_module = xunit_setup_module_fixture + + def _inject_setup_function_fixture(self) -> None: + """Inject a hidden autouse, function scoped fixture into the collected module object + that invokes setup_function/teardown_function if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",)) + teardown_function = _get_first_non_fixture_func( + self.obj, ("teardown_function",) + ) + if setup_function is None and teardown_function is None: + return + + @fixtures.fixture( + autouse=True, + scope="function", + # Use a unique name to speed up lookup. + name=f"xunit_setup_function_fixture_{self.obj.__name__}", + ) + def xunit_setup_function_fixture(request) -> Generator[None, None, None]: + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + if setup_function is not None: + _call_with_optional_argument(setup_function, request.function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, request.function) + + self.obj.__pytest_setup_function = xunit_setup_function_fixture + + def _importtestmodule(self): + # We assume we are only called once per module. 
+ importmode = self.config.getoption("--import-mode") + try: + mod = import_path(self.fspath, mode=importmode) + except SyntaxError as e: + raise self.CollectError( + ExceptionInfo.from_current().getrepr(style="short") + ) from e + except ImportPathMismatchError as e: + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) from e + except ImportError as e: + exc_info = ExceptionInfo.from_current() + if self.config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) from e + except skip.Exception as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." + ) from e + self.config.pluginmanager.consider_module(mod) + return mod + + +class Package(Module): + def __init__( + self, + fspath: py.path.local, + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # nodes.FSCollector.__init__(self, fspath, parent=parent) + session = parent.session + nodes.FSCollector.__init__( + self, fspath, parent=parent, config=config, session=session, nodeid=nodeid + ) + self.name = os.path.basename(str(fspath.dirname)) + + def setup(self) -> None: + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). 
+ setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + if setup_module is not None: + _call_with_optional_argument(setup_module, self.obj) + + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + if teardown_module is not None: + func = partial(_call_with_optional_argument, teardown_module, self.obj) + self.addfinalizer(func) + + def gethookproxy(self, fspath: py.path.local): + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.gethookproxy(fspath) + + def isinitpath(self, path: py.path.local) -> bool: + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.isinitpath(path) + + def _recurse(self, direntry: "os.DirEntry[str]") -> bool: + if direntry.name == "__pycache__": + return False + path = py.path.local(direntry.path) + ihook = self.session.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return False + norecursepatterns = self.config.getini("norecursedirs") + if any(path.check(fnmatch=pat) for pat in norecursepatterns): + return False + return True + + def _collectfile( + self, path: py.path.local, handle_dupes: bool = True + ) -> Sequence[nodes.Collector]: + assert ( + path.isfile() + ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( + path, path.isdir(), path.exists(), path.islink() + ) + ihook = self.session.gethookproxy(path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if path in duplicate_paths: + return () + else: + duplicate_paths.add(path) + + return ihook.pytest_collect_file(path=path, parent=self) # type: ignore[no-any-return] + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + this_path = self.fspath.dirpath() + init_module = this_path.join("__init__.py") + if init_module.check(file=1) and path_matches_patterns( + init_module, self.config.getini("python_files") + ): + yield Module.from_parent(self, fspath=init_module) + pkg_prefixes: Set[py.path.local] = set() + for direntry in visit(str(this_path), recurse=self._recurse): + path = py.path.local(direntry.path) + + # We will visit our own __init__.py file, in which case we skip it. + if direntry.is_file(): + if direntry.name == "__init__.py" and path.dirpath() == this_path: + continue + + parts_ = parts(direntry.path) + if any( + str(pkg_prefix) in parts_ and pkg_prefix.join("__init__.py") != path + for pkg_prefix in pkg_prefixes + ): + continue + + if direntry.is_file(): + yield from self._collectfile(path) + elif not direntry.is_dir(): + # Broken symlink or invalid/missing file. 
+ continue + elif path.join("__init__.py").check(file=1): + pkg_prefixes.add(path) + + +def _call_with_optional_argument(func, arg) -> None: + """Call the given function with the given argument if func accepts one argument, otherwise + calls func without arguments.""" + arg_count = func.__code__.co_argcount + if inspect.ismethod(func): + arg_count -= 1 + if arg_count: + func(arg) + else: + func() + + +def _get_first_non_fixture_func(obj: object, names: Iterable[str]): + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to avoid calling it twice.""" + for name in names: + meth = getattr(obj, name, None) + if meth is not None and fixtures.getfixturemarker(meth) is None: + return meth + + +class Class(PyCollector): + """Collector for test methods.""" + + @classmethod + def from_parent(cls, parent, *, name, obj=None): + """The public constructor.""" + return super().from_parent(name=name, parent=parent) + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + "cannot collect test class %r because it has a " + "__init__ constructor (from: %s)" + % (self.obj.__name__, self.parent.nodeid) + ) + ) + return [] + elif hasnew(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + "cannot collect test class %r because it has a " + "__new__ constructor (from: %s)" + % (self.obj.__name__, self.parent.nodeid) + ) + ) + return [] + + self._inject_setup_class_fixture() + self._inject_setup_method_fixture() + + return [Instance.from_parent(self, name="()")] + + def _inject_setup_class_fixture(self) -> None: + """Inject a hidden autouse, class scoped fixture into the collected class object + that invokes setup_class/teardown_class if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",)) + teardown_class = getattr(self.obj, "teardown_class", None) + if setup_class is None and teardown_class is None: + return + + @fixtures.fixture( + autouse=True, + scope="class", + # Use a unique name to speed up lookup. + name=f"xunit_setup_class_fixture_{self.obj.__qualname__}", + ) + def xunit_setup_class_fixture(cls) -> Generator[None, None, None]: + if setup_class is not None: + func = getimfunc(setup_class) + _call_with_optional_argument(func, self.obj) + yield + if teardown_class is not None: + func = getimfunc(teardown_class) + _call_with_optional_argument(func, self.obj) + + self.obj.__pytest_setup_class = xunit_setup_class_fixture + + def _inject_setup_method_fixture(self) -> None: + """Inject a hidden autouse, function scoped fixture into the collected class object + that invokes setup_method/teardown_method if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_method = _get_first_non_fixture_func(self.obj, ("setup_method",)) + teardown_method = getattr(self.obj, "teardown_method", None) + if setup_method is None and teardown_method is None: + return + + @fixtures.fixture( + autouse=True, + scope="function", + # Use a unique name to speed up lookup. 
+ name=f"xunit_setup_method_fixture_{self.obj.__qualname__}", + ) + def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]: + method = request.function + if setup_method is not None: + func = getattr(self, "setup_method") + _call_with_optional_argument(func, method) + yield + if teardown_method is not None: + func = getattr(self, "teardown_method") + _call_with_optional_argument(func, method) + + self.obj.__pytest_setup_method = xunit_setup_method_fixture + + +class Instance(PyCollector): + _ALLOW_MARKERS = False # hack, destroy later + # Instances share the object with their parents in a way + # that duplicates markers instances if not taken out + # can be removed at node structure reorganization time. + + def _getobj(self): + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return obj() + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + self.session._fixturemanager.parsefactories(self) + return super().collect() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + + +def hasinit(obj: object) -> bool: + init: object = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + return False + + +def hasnew(obj: object) -> bool: + new: object = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + return False + + +@final +class CallSpec2: + def __init__(self, metafunc: "Metafunc") -> None: + self.metafunc = metafunc + self.funcargs: Dict[str, object] = {} + self._idlist: List[str] = [] + self.params: Dict[str, object] = {} + # Used for sorting parametrized resources. + self._arg2scopenum: Dict[str, int] = {} + self.marks: List[Mark] = [] + self.indices: Dict[str, int] = {} + + def copy(self) -> "CallSpec2": + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs.marks.extend(self.marks) + cs.indices.update(self.indices) + cs._arg2scopenum.update(self._arg2scopenum) + cs._idlist = list(self._idlist) + return cs + + def _checkargnotcontained(self, arg: str) -> None: + if arg in self.params or arg in self.funcargs: + raise ValueError(f"duplicate {arg!r}") + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(map(str, self._idlist)) + + def setmulti2( + self, + valtypes: Mapping[str, "Literal['params', 'funcargs']"], + argnames: Sequence[str], + valset: Iterable[object], + id: str, + marks: Iterable[Union[Mark, MarkDecorator]], + scopenum: int, + param_index: int, + ) -> None: + for arg, val in zip(argnames, valset): + self._checkargnotcontained(arg) + valtype_for_arg = valtypes[arg] + if valtype_for_arg == "params": + self.params[arg] = val + elif valtype_for_arg == "funcargs": + self.funcargs[arg] = val + else: # pragma: no cover + assert False, f"Unhandled valtype for arg: {valtype_for_arg}" + self.indices[arg] = param_index + self._arg2scopenum[arg] = scopenum + self._idlist.append(id) + self.marks.extend(normalize_mark_list(marks)) + + +@final +class Metafunc: + """Objects passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. + + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
+ """ + + def __init__( + self, + definition: "FunctionDefinition", + fixtureinfo: fixtures.FuncFixtureInfo, + config: Config, + cls=None, + module=None, + ) -> None: + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. + self.definition = definition + + #: Access to the :class:`_pytest.config.Config` object for the test session. + self.config = config + + #: The module object where the test function is defined in. + self.module = module + + #: Underlying Python test function. + self.function = definition.obj + + #: Set of fixture names required by the test function. + self.fixturenames = fixtureinfo.names_closure + + #: Class object where the test function is defined in or ``None``. + self.cls = cls + + self._calls: List[CallSpec2] = [] + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + def parametrize( + self, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + indirect: Union[bool, Sequence[str]] = False, + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = None, + scope: "Optional[_Scope]" = None, + *, + _param_mark: Optional[Mark] = None, + ) -> None: + """Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather at test setup time. + + :param argnames: + A comma-separated string denoting one or more argument names, or + a list/tuple of argument strings. + + :param argvalues: + The list of argvalues determines how often a test is invoked with + different argument values. + + If only one argname was specified argvalues is a list of values. + If N argnames were specified, argvalues must be a list of + N-tuples, where each tuple-element specifies a value for its + respective argname. + + :param indirect: + A list of arguments' names (subset of argnames) or a boolean. + If True the list contains all names from the argnames. Each + argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :param ids: + Sequence of (or generator for) ids for ``argvalues``, + or a callable to return part of the id for each argvalue. + + With sequences (and generators like ``itertools.count()``) the + returned ids should be of type ``string``, ``int``, ``float``, + ``bool``, or ``None``. + They are mapped to the corresponding index in ``argvalues``. + ``None`` means to use the auto-generated id. + + If it is a callable it will be called for each entry in + ``argvalues``, and the return value is used as part of the + auto-generated id for the whole set (where parts are joined with + dashes ("-")). + This is useful to provide more specific ids for certain items, e.g. + dates. Returning ``None`` will use an auto-generated id. + + If no ids are provided they will be generated automatically from + the argvalues. + + :param scope: + If specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. 
+ """ + from _pytest.fixtures import scope2index + + argnames, parameters = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + nodeid=self.definition.nodeid, + ) + del argvalues + + if "request" in argnames: + fail( + "'request' is a reserved name and cannot be used in @pytest.mark.parametrize", + pytrace=False, + ) + + if scope is None: + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + arg_values_types = self._resolve_arg_value_types(argnames, indirect) + + # Use any already (possibly) generated ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from: + generated_ids = _param_mark._param_ids_from._param_ids_generated + if generated_ids is not None: + ids = generated_ids + + ids = self._resolve_arg_ids( + argnames, ids, parameters, nodeid=self.definition.nodeid + ) + + # Store used (possibly generated) ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from and generated_ids is None: + object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + + scopenum = scope2index( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + + # Create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls. + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): + newcallspec = callspec.copy() + newcallspec.setmulti2( + arg_values_types, + argnames, + param_set.values, + param_id, + param_set.marks, + scopenum, + param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_arg_ids( + self, + argnames: Sequence[str], + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ], + parameters: Sequence[ParameterSet], + nodeid: str, + ) -> List[str]: + """Resolve the actual ids for the given argnames, based on the ``ids`` parameter given + to ``parametrize``. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param ids: The ids parameter of the parametrized call (see docs). + :param List[ParameterSet] parameters: The list of parameter values, same size as ``argnames``. + :param str str: The nodeid of the item that generated this parametrized call. + :rtype: List[str] + :returns: The list of ids for each argname given. 
+ """ + if ids is None: + idfn = None + ids_ = None + elif callable(ids): + idfn = ids + ids_ = None + else: + idfn = None + ids_ = self._validate_ids(ids, parameters, self.function.__name__) + return idmaker(argnames, parameters, idfn, ids_, self.config, nodeid=nodeid) + + def _validate_ids( + self, + ids: Iterable[Union[None, str, float, int, bool]], + parameters: Sequence[ParameterSet], + func_name: str, + ) -> List[Union[None, str]]: + try: + num_ids = len(ids) # type: ignore[arg-type] + except TypeError: + try: + iter(ids) + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e + num_ids = len(parameters) + + # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 + if num_ids != len(parameters) and num_ids != 0: + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parameters), num_ids), pytrace=False) + + new_ids = [] + for idx, id_value in enumerate(itertools.islice(ids, num_ids)): + if id_value is None or isinstance(id_value, str): + new_ids.append(id_value) + elif isinstance(id_value, (float, int, bool)): + new_ids.append(str(id_value)) + else: + msg = ( # type: ignore[unreachable] + "In {}: ids must be list of string/float/int/bool, " + "found: {} (type: {!r}) at index {}" + ) + fail( + msg.format(func_name, saferepr(id_value), type(id_value), idx), + pytrace=False, + ) + return new_ids + + def _resolve_arg_value_types( + self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]], + ) -> Dict[str, "Literal['params', 'funcargs']"]: + """Resolve if each parametrized argument must be considered a + parameter to a fixture or a "funcarg" to the function, based on the + ``indirect`` parameter of the parametrized() call. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :rtype: Dict[str, str] + A dict mapping each arg name to either: + * "params" if the argname should be the parameter of a fixture of the same name. + * "funcargs" if the argname should be a parameter to the parametrized test function. + """ + if isinstance(indirect, bool): + valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys( + argnames, "params" if indirect else "funcargs" + ) + elif isinstance(indirect, Sequence): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + fail( + "In {}: indirect fixture '{}' doesn't exist".format( + self.function.__name__, arg + ), + pytrace=False, + ) + valtypes[arg] = "params" + else: + fail( + "In {func}: expected Sequence or boolean for indirect, got {type}".format( + type=type(indirect).__name__, func=self.function.__name__ + ), + pytrace=False, + ) + return valtypes + + def _validate_if_using_arg_names( + self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. 
+ """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: Union[bool, Sequence[str]], +) -> "fixtures._Scope": + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0].scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + if used_scopes: + # Takes the most narrow scope from used fixtures. + for scope in reversed(fixtures.scopes): + if scope in used_scopes: + return scope + + return "function" + + +def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +def _idval( + val: object, + argname: str, + idx: int, + idfn: Optional[Callable[[Any], Optional[object]]], + nodeid: Optional[str], + config: Optional[Config], +) -> str: + if idfn: + try: + generated_id = idfn(val) + if generated_id is not None: + val = generated_id + except Exception as e: + prefix = f"{nodeid}: " if nodeid is not None else "" + msg = "error raised while trying to determine id of parameter '{}' at position {}" + msg = prefix + msg.format(argname, idx) + raise ValueError(msg) from e + elif config: + hook_id: Optional[str] = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname + ) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return _ascii_escaped_by_config(val, config) + elif val is None or isinstance(val, (float, int, bool)): + return str(val) + elif isinstance(val, REGEX_TYPE): + return ascii_escaped(val.pattern) + elif val is NOTSET: + # Fallback to default. Note that NOTSET is an enum.Enum. + pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. 
+ name: str = getattr(val, "__name__") + return name + return str(argname) + str(idx) + + +def _idvalset( + idx: int, + parameterset: ParameterSet, + argnames: Iterable[str], + idfn: Optional[Callable[[Any], Optional[object]]], + ids: Optional[List[Union[None, str]]], + nodeid: Optional[str], + config: Optional[Config], +) -> str: + if parameterset.id is not None: + return parameterset.id + id = None if ids is None or idx >= len(ids) else ids[idx] + if id is None: + this_id = [ + _idval(val, argname, idx, idfn, nodeid=nodeid, config=config) + for val, argname in zip(parameterset.values, argnames) + ] + return "-".join(this_id) + else: + return _ascii_escaped_by_config(id, config) + + +def idmaker( + argnames: Iterable[str], + parametersets: Iterable[ParameterSet], + idfn: Optional[Callable[[Any], Optional[object]]] = None, + ids: Optional[List[Union[None, str]]] = None, + config: Optional[Config] = None, + nodeid: Optional[str] = None, +) -> List[str]: + resolved_ids = [ + _idvalset( + valindex, parameterset, argnames, idfn, ids, config=config, nodeid=nodeid + ) + for valindex, parameterset in enumerate(parametersets) + ] + + # All IDs must be unique! + unique_ids = set(resolved_ids) + if len(unique_ids) != len(resolved_ids): + + # Record the number of occurrences of each test ID. + test_id_counts = Counter(resolved_ids) + + # Map the test ID to its next suffix. + test_id_suffixes: Dict[str, int] = defaultdict(int) + + # Suffix non-unique IDs to make them unique. + for index, test_id in enumerate(resolved_ids): + if test_id_counts[test_id] > 1: + resolved_ids[index] = "{}{}".format(test_id, test_id_suffixes[test_id]) + test_id_suffixes[test_id] += 1 + + return resolved_ids + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func): + loc = getlocation(func, str(curdir)) + return curdir.bestrelpath(py.path.local(loc)) + + def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + bestrel = get_best_relpath(fixture_def.func) + funcargspec = f"{argname} -- {bestrel}" + else: + funcargspec = argname + tw.line(funcargspec, green=True) + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring(tw, fixture_doc) + else: + tw.line(" no docstring available", red=True) + + def write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. + info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", "({})".format(get_best_relpath(item.function))) # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. 
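# --- Editor's note: illustrative sketch, not part of the patched file. ---
# The de-duplication step in idmaker() above appends a running index to every
# test ID that occurs more than once.  A standalone sketch of that rule (not
# the pytest internal): unique ids are left alone, duplicates become "x0", "x1", ...
from collections import Counter, defaultdict
from typing import Dict, List

def deduplicate_ids(resolved_ids: List[str]) -> List[str]:
    counts = Counter(resolved_ids)
    suffixes: Dict[str, int] = defaultdict(int)
    out = []
    for test_id in resolved_ids:
        if counts[test_id] > 1:
            out.append(f"{test_id}{suffixes[test_id]}")
            suffixes[test_id] += 1
        else:
            out.append(test_id)
    return out

assert deduplicate_ids(["x", "a-b", "x"]) == ["x0", "a-b", "x1"]
# --- end editor's note ---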
+ write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> Union[int, ExitCode]: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen: Set[Tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, str(curdir)) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(py.path.local(loc)), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, bestrel, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname[0] == "_": + continue + tw.write(argname, green=True) + if fixturedef.scope != "function": + tw.write(" [%s scope]" % fixturedef.scope, cyan=True) + if verbose > 0: + tw.write(" -- %s" % bestrel, yellow=True) + tw.write("\n") + loc = getlocation(fixturedef.func, str(curdir)) + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring(tw, doc) + else: + tw.line(f" {loc}: no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) + + +class Function(PyobjMixin, nodes.Item): + """An Item responsible for setting up and executing a Python test function. + + param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + param parent: + The parent Node. + param config: + The pytest Config object. + param callspec: + If given, this is function has been parametrized and the callspec contains + meta information about the parametrization. + param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + param keywords: + Keywords bound to the function object for "-k" matching. + param session: + The pytest Session object. + param fixtureinfo: + Fixture information already resolved at this fixture node.. + param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. 
+ _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Optional[Config] = None, + callspec: Optional[CallSpec2] = None, + callobj=NOTSET, + keywords=None, + session: Optional[Session] = None, + fixtureinfo: Optional[FuncFixtureInfo] = None, + originalname: Optional[str] = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self.obj = callobj + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.keywords.update(self.obj.__dict__) + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + # this is total hostile and a mess + # keywords are broken by design by now + # this will be redeemed later + for mark in callspec.marks: + # feel free to cry, this was broken for years before + # and keywords cant fix it per design + self.keywords[mark.name] = mark + self.own_markers.extend(normalize_mark_list(callspec.marks)) + if keywords: + self.keywords.update(keywords) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + + self.keywords.update( + { + mark.name: True + for mark in self.iter_markers() + if mark.name not in self.keywords + } + ) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=True + ) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent(cls, parent, **kw): # todo: determine sound type limitations + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: Dict[str, object] = {} + self._request = fixtures.FixtureRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + def _getobj(self): + assert self.parent is not None + return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined] + + @property + def _pyfuncitem(self): + """(compatonly) for code expecting pytest-2.2 style request objects.""" + return self + + def runtest(self) -> None: + """Execute the underlying test function.""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self) -> None: + if isinstance(self.parent, Instance): + self.parent.newinstance() + self.obj = self._getobj() + self._request._fillfixtures() + + def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: + if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): + code = _pytest._code.Code.from_function(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + + excinfo.traceback = ntraceback.filter() + # issue364: mark all but first and last frames to + # 
only show a single-line message for each frame. + if self.config.getoption("tbstyle", "auto") == "auto": + if len(excinfo.traceback) > 2: + for entry in excinfo.traceback[1:-1]: + entry.set_repr_style("short") + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException], + ) -> Union[str, TerminalRepr]: + style = self.config.getoption("tbstyle", "auto") + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class FunctionDefinition(Function): + """ + This class is a step gap solution until we evolve to have actual function definition nodes + and manage to get rid of ``metafunc``. + """ + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/myenv/lib/python3.9/site-packages/_pytest/python_api.py b/myenv/lib/python3.9/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..81ce4f8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/python_api.py @@ -0,0 +1,786 @@ +import math +import pprint +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Sized +from decimal import Decimal +from numbers import Complex +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Generic +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +if TYPE_CHECKING: + from numpy import ndarray + + +import _pytest._code +from _pytest.compat import final +from _pytest.compat import STRING_TYPES +from _pytest.outcomes import fail + + +def _non_numeric_type_error(value, at: Optional[str]) -> TypeError: + at_str = f" at {at}" if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + def __ne__(self, actual) -> bool: + return not (actual == self) + + def _approx_scalar(self, x) -> "ApproxScalar": + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """Yield all the pairs of numbers to be compared. + + This is used to implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self) -> None: + """Raise a TypeError if the expected value is not a valid type.""" + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. 
For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + pass + + +def _recursive_list_map(f, x): + if isinstance(x, list): + return list(_recursive_list_map(f, xi) for xi in x) + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """Perform approximate comparisons where the expected value is numpy array.""" + + def __repr__(self) -> str: + list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) + return f"approx({list_scalars!r})" + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except Exception as e: + raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. + + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """Perform approximate comparisons where the expected value is a mapping + with numeric values (the keys can be anything).""" + + def __repr__(self) -> str: + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) + + def __eq__(self, actual) -> bool: + try: + if set(actual.keys()) != set(self.expected.keys()): + return False + except AttributeError: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self) -> None: + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + + +class ApproxSequencelike(ApproxBase): + """Perform approximate comparisons where the expected value is a sequence of numbers.""" + + def __repr__(self) -> str: + seq_type = type(self.expected) + if seq_type not in (tuple, list, set): + seq_type = list + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) + + def __eq__(self, actual) -> bool: + try: + if len(actual) != len(self.expected): + return False + except TypeError: + return False + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self) -> None: + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + + +class ApproxScalar(ApproxBase): + """Perform approximate comparisons where the expected value is a single number.""" + + # Using Real should be better than this Union, but not possible yet: + # 
https://github.com/python/typeshed/pull/3108 + DEFAULT_ABSOLUTE_TOLERANCE: Union[float, Decimal] = 1e-12 + DEFAULT_RELATIVE_TOLERANCE: Union[float, Decimal] = 1e-6 + + def __repr__(self) -> str: + """Return a string communicating both the expected value and the + tolerance for the comparison being made. + + For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``. + """ + + # Don't show a tolerance for values that aren't compared using + # tolerances, i.e. non-numerics and infinities. Need to call abs to + # handle complex numbers, e.g. (inf + 1j). + if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf( + abs(self.expected) # type: ignore[arg-type] + ): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = f"{self.tolerance:.1e}" + if ( + isinstance(self.expected, Complex) + and self.expected.imag + and not math.isinf(self.tolerance) + ): + vetted_tolerance += " ∠ ±180°" + except ValueError: + vetted_tolerance = "???" + + return f"{self.expected} ± {vetted_tolerance}" + + def __eq__(self, actual) -> bool: + """Return whether the given value is equal to the expected value + within the pre-specified tolerance.""" + asarray = _as_numpy_array(actual) + if asarray is not None: + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in asarray.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # If either type is non-numeric, fall back to strict equality. + # NB: we need Complex, rather than just Number, to ensure that __abs__, + # __sub__, and __float__ are defined. + if not ( + isinstance(self.expected, (Complex, Decimal)) + and isinstance(actual, (Complex, Decimal)) + ): + return False + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): # type: ignore[arg-type] + return self.nan_ok and math.isnan(abs(actual)) # type: ignore[arg-type] + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): # type: ignore[arg-type] + return False + + # Return true if the two numbers are within the tolerance. + result: bool = abs(self.expected - actual) <= self.tolerance + return result + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. 
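# --- Editor's note: illustrative sketch, not part of the patched file. ---
# The NaN and infinity branches of __eq__ above mean, in practice:
import math
import pytest

assert math.nan != pytest.approx(math.nan)               # NaN is unequal by default
assert math.nan == pytest.approx(math.nan, nan_ok=True)  # unless nan_ok is passed
assert math.inf == pytest.approx(math.inf)               # infinity only equals itself
assert 1e300 != pytest.approx(math.inf)
# --- end editor's note ---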
+ absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. 
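# --- Editor's note: illustrative sketch, not part of the patched file. ---
# The tolerance property above resolves to max(rel * |expected|, abs) with the
# defaults DEFAULT_RELATIVE_TOLERANCE=1e-6 and DEFAULT_ABSOLUTE_TOLERANCE=1e-12,
# which in practice behaves like this:
import pytest

assert 100.00009 == pytest.approx(100.0)        # within rel 1e-6 of 100 (about 1e-4)
assert 100.1 != pytest.approx(100.0)            # 0.1 is outside that tolerance
assert 100.1 == pytest.approx(100.0, rel=1e-2)  # widening rel makes it pass
assert 0.0 == pytest.approx(1e-13)              # caught by the absolute default 1e-12
# --- end editor's note ---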
+ + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + Dictionary *values*:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + You can also use ``approx`` to compare nonnumeric types, or dicts and + sequences containing nonnumeric types, in which case it falls back to + strict equality. This can be useful for comparing dicts and sequences that + can contain optional values:: + + >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None}) + True + >>> [None, 1.0000005] == approx([None,1]) + True + >>> ["foo", 1.0000005] == approx([None,1]) + False + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. + both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. 
`More information...`__ + + __ https://docs.python.org/3/library/math.html#math.isclose + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less that the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. Support for comparing sequences + is provided by ``numpy.allclose``. `More information...`__ + + __ https://numpy.org/doc/stable/reference/generated/numpy.isclose.html + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered and the absolute tolerance cannot be changed, so this function + is not appropriate for very large or very small numbers. Also, it's only + available in subclasses of ``unittest.TestCase`` and it's ugly because it + doesn't follow PEP8. `More information...`__ + + __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. ``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. + + .. warning:: + + .. versionchanged:: 3.2 + + In order to avoid inconsistent behavior, ``TypeError`` is + raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. + The example below illustrates the problem:: + + assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) + assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) + + In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` + to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to + comparison. This is because the call hierarchy of rich comparisons + follows a fixed behavior. `More information...`__ + + __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ + + .. versionchanged:: 3.7.1 + ``approx`` raises ``TypeError`` when it encounters a dict value or + sequence element of nonnumeric type. + + .. versionchanged:: 6.1.0 + ``approx`` falls back to strict equality for nonnumeric types instead + of raising ``TypeError``. + """ + + # Delegate the comparison to a class that knows how to deal with the type + # of the expected value (e.g. int, float, list, dict, numpy.array, etc). + # + # The primary responsibility of these classes is to implement ``__eq__()`` + # and ``__repr__()``. The former is used to actually check if some + # "actual" value is equivalent to the given expected value within the + # allowed tolerance. The latter is used to show the user the expected + # value and tolerance, in the case that a test failed. + # + # The actual logic for making approximate comparisons can be found in + # ApproxScalar, which is used to compare individual numbers. All of the + # other Approx classes eventually delegate to this class. The ApproxBase + # class provides some convenient methods and overloads, but isn't really + # essential. 
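# --- Editor's note: illustrative sketch, not part of the patched file. ---
# As the versionchanged note above says, only == and != are meaningful with
# approx(); ordering comparisons raise TypeError instead of silently calling
# an unintended rich-comparison method.
import pytest

assert (0.1 + 0.2) == pytest.approx(0.3)
with pytest.raises(TypeError):
    (0.1 + 0.2) > pytest.approx(0.3)
# --- end editor's note ---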
+ + __tracebackhide__ = True + + if isinstance(expected, Decimal): + cls: Type[ApproxBase] = ApproxDecimal + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif _is_numpy_array(expected): + expected = _as_numpy_array(expected) + cls = ApproxNumpy + elif ( + isinstance(expected, Iterable) + and isinstance(expected, Sized) + # Type ignored because the error is wrong -- not unreachable. + and not isinstance(expected, STRING_TYPES) # type: ignore[unreachable] + ): + cls = ApproxSequencelike + else: + cls = ApproxScalar + + return cls(expected, rel, abs, nan_ok) + + +def _is_numpy_array(obj: object) -> bool: + """ + Return true if the given object is implicitly convertible to ndarray, + and numpy is already imported. + """ + return _as_numpy_array(obj) is not None + + +def _as_numpy_array(obj: object) -> Optional["ndarray"]: + """ + Return an ndarray if the given object is implicitly convertible to ndarray, + and numpy is already imported, otherwise None. + """ + import sys + + np: Any = sys.modules.get("numpy") + if np is not None: + # avoid infinite recursion on numpy scalars, which have __array__ + if np.isscalar(obj): + return None + elif isinstance(obj, np.ndarray): + return obj + elif hasattr(obj, "__array__") or hasattr("obj", "__array_interface__"): + return np.asarray(obj) + return None + + +# builtin pytest.raises helper + +_E = TypeVar("_E", bound=BaseException) + + +@overload +def raises( + expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + *, + match: Optional[Union[str, Pattern[str]]] = ..., +) -> "RaisesContext[_E]": + ... + + +@overload +def raises( + expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + func: Callable[..., Any], + *args: Any, + **kwargs: Any, +) -> _pytest._code.ExceptionInfo[_E]: + ... + + +def raises( + expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], *args: Any, **kwargs: Any +) -> Union["RaisesContext[_E]", _pytest._code.ExceptionInfo[_E]]: + r"""Assert that a code block/function call raises ``expected_exception`` + or raise a failure exception otherwise. + + :kwparam match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception using ``re.search``. To match a literal + string that may contain `special characters`__, the pattern can + first be escaped with ``re.escape``. + + (This is only used when ``pytest.raises`` is used as a context manager, + and passed through to the function otherwise. + When using ``pytest.raises`` as a function, you can use: + ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) + + __ https://docs.python.org/3/library/re.html#regular-expression-syntax + + .. currentmodule:: _pytest._code + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type:: + + >>> import pytest + >>> with pytest.raises(ZeroDivisionError): + ... 1/0 + + If the code block does not raise the expected exception (``ZeroDivisionError`` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with pytest.raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with pytest.raises(ValueError, match=r'must be \d+$'): + ... 
raise ValueError("value must be 42") + + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: + + >>> with pytest.raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type is ValueError # this will not execute + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type is ValueError + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` + it is possible to parametrize tests such that + some runs raise an exception and others do not. + + See :ref:`parametrizing_conditional_raising` for an example. + + **Legacy form** + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. + More detailed information can be found in the official Python + documentation for :ref:`the try statement `. + """ + __tracebackhide__ = True + + if isinstance(expected_exception, type): + excepted_exceptions: Tuple[Type[_E], ...] = (expected_exception,) + else: + excepted_exceptions = expected_exception + for exc in excepted_exceptions: + if not isinstance(exc, type) or not issubclass(exc, BaseException): # type: ignore[unreachable] + msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable] + not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__ + raise TypeError(msg.format(not_a)) + + message = f"DID NOT RAISE {expected_exception}" + + if not args: + match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None) + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" 
+ raise TypeError(msg) + return RaisesContext(expected_exception, message, match) + else: + func = args[0] + if not callable(func): + raise TypeError( + "{!r} object (type: {}) must be callable".format(func, type(func)) + ) + try: + func(*args[1:], **kwargs) + except expected_exception as e: + # We just caught the exception - there is a traceback. + assert e.__traceback__ is not None + return _pytest._code.ExceptionInfo.from_exc_info( + (type(e), e, e.__traceback__) + ) + fail(message) + + +# This doesn't work with mypy for now. Use fail.Exception instead. +raises.Exception = fail.Exception # type: ignore + + +@final +class RaisesContext(Generic[_E]): + def __init__( + self, + expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + message: str, + match_expr: Optional[Union[str, Pattern[str]]] = None, + ) -> None: + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo: Optional[_pytest._code.ExceptionInfo[_E]] = None + + def __enter__(self) -> _pytest._code.ExceptionInfo[_E]: + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(self.message) + assert self.excinfo is not None + if not issubclass(exc_type, self.expected_exception): + return False + # Cast to narrow the exception type now that it's verified. + exc_info = cast(Tuple[Type[_E], _E, TracebackType], (exc_type, exc_val, exc_tb)) + self.excinfo.fill_unfilled(exc_info) + if self.match_expr is not None: + self.excinfo.match(self.match_expr) + return True diff --git a/myenv/lib/python3.9/site-packages/_pytest/recwarn.py b/myenv/lib/python3.9/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..d872d9d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/recwarn.py @@ -0,0 +1,296 @@ +"""Record warnings during test function execution.""" +import re +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator["WarningsRecorder", None, None]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See http://docs.python.org/library/warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call( + *, match: Optional[Union[str, Pattern[str]]] = ... +) -> "WarningsRecorder": + ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: + ... + + +def deprecated_call( + func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any +) -> Union["WarningsRecorder", Any]: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... 
warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +@overload +def warns( + expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + *, + match: Optional[Union[str, Pattern[str]]] = ..., +) -> "WarningsChecker": + ... + + +@overload +def warns( + expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: + ... + + +def warns( + expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + *args: Any, + match: Optional[Union[str, Pattern[str]]] = None, + **kwargs: Any, +) -> Union["WarningsChecker", Any]: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + :func:`pytest.raises` can be used:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... + + """ + __tracebackhide__ = True + if not args: + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.warns: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError( + "{!r} object (type: {}) must be callable".format(func, type(func)) + ) + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + # Type ignored due to the way typeshed handles warnings.catch_warnings. 
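# --- Editor's note: illustrative sketch, not part of the patched file. ---
# Typical use of the three entry points documented above (the recwarn fixture,
# deprecated_call, and warns); the function names below are made up.
import warnings
import pytest

def legacy_api():
    warnings.warn("use the v2 endpoint", DeprecationWarning)
    return 200

def test_deprecation():
    with pytest.deprecated_call():
        assert legacy_api() == 200

def test_warns_with_match():
    with pytest.warns(UserWarning, match=r"v2 endpoint"):
        warnings.warn("please use the v2 endpoint", UserWarning)

def test_recwarn_fixture(recwarn):
    warnings.warn("something odd happened", RuntimeWarning)
    assert len(recwarn) == 1
    assert recwarn.pop(RuntimeWarning).category is RuntimeWarning
# --- end editor's note ---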
+ super().__init__(record=True) # type: ignore[call-arg] + self._entered = False + self._list: List[warnings.WarningMessage] = [] + + @property + def list(self) -> List["warnings.WarningMessage"]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> "warnings.WarningMessage": + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator["warnings.WarningMessage"]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage": + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + # Type ignored because it doesn't exactly warnings.catch_warnings.__enter__ + # -- it returns a List but we only emulate one. + def __enter__(self) -> "WarningsRecorder": # type: ignore + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. + self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: Optional[ + Union[Type[Warning], Tuple[Type[Warning], ...]] + ] = None, + match_expr: Optional[Union[str, Pattern[str]]] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if expected_warning is None: + expected_warning_tup = None + elif isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif issubclass(expected_warning, Warning): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # only check if we're not currently handling an exception + if exc_type is None and exc_val is None and exc_tb is None: + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + "DID NOT WARN. No warnings of type {} was emitted. 
" + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') was emitted. The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/myenv/lib/python3.9/site-packages/_pytest/reports.py b/myenv/lib/python3.9/site-packages/_pytest/reports.py new file mode 100644 index 0000000..58f1251 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/reports.py @@ -0,0 +1,572 @@ +from io import StringIO +from pathlib import Path +from pprint import pprint +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr +import py + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import skip + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Literal + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +_R = TypeVar("_R", bound="BaseReport") + + +class BaseReport: + when: Optional[str] + location: Optional[Tuple[str, Optional[int], str]] + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ] + sections: List[Tuple[str, str]] + nodeid: str + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: + ... + + def toterminal(self, out: TerminalWriter) -> None: + if hasattr(self, "node"): + out.line(getworkerinfoline(self.node)) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr_terminal = cast(TerminalRepr, longrepr) + longrepr_terminal.toterminal(out) + else: + try: + s = str(longrepr) + except UnicodeEncodeError: + s = "" + out.line(s) + + def get_sections(self, prefix: str) -> Iterator[Tuple[str, str]]: + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self) -> str: + """Read-only property that returns the full string representation of + ``longrepr``. + + .. 
versionadded:: 3.0 + """ + file = StringIO() + tw = TerminalWriter(file) + tw.hasmarkup = False + self.toterminal(tw) + exc = file.getvalue() + return exc.strip() + + @property + def caplog(self) -> str: + """Return captured log lines, if log capturing is enabled. + + .. versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self) -> str: + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> Optional[str]: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + return None + + def _get_verbose_word(self, config: Config): + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + return verbose + + def _to_json(self) -> Dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. 
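+
+        A minimal round-trip sketch; ``report`` here is assumed to be any
+        already-built ``TestReport``:
+
+        .. code-block:: python
+
+            data = report._to_json()
+            restored = TestReport._from_json(data)
+            assert restored.nodeid == report.nodeid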
+ """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: Type[BaseReport], reportdict +) -> "NoReturn": + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream) + pprint("report_name: %s" % report_class, stream=stream) + pprint(reportdict, stream=stream) + pprint("Please report this bug at %s" % url, stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail).""" + + __test__ = False + + def __init__( + self, + nodeid: str, + location: Tuple[str, Optional[int], str], + keywords, + outcome: "Literal['passed', 'failed', 'skipped']", + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ], + when: "Literal['setup', 'call', 'teardown']", + sections: Iterable[Tuple[str, str]] = (), + duration: float = 0, + user_properties: Optional[Iterable[Tuple[str, object]]] = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location: Tuple[str, Optional[int], str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: List of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return "<{} {!r} when={!r} outcome={!r}>".format( + self.__class__.__name__, self.nodeid, self.when, self.outcome + ) + + @classmethod + def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport": + """Create and fill a TestReport with standard item and call info.""" + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. 
+ assert when != "collect" + duration = call.duration + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome: Literal["passed", "failed", "skipped"] = "passed" + longrepr: Union[ + None, + ExceptionInfo[BaseException], + Tuple[str, int, str], + str, + TerminalRepr, + ] = (None) + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif isinstance(excinfo.value, skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py( + excinfo, style=item.config.getoption("tbstyle", "auto") + ) + for rwhen, key, content in item._report_sections: + sections.append((f"Captured {key} {rwhen}", content)) + return cls( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + user_properties=item.user_properties, + ) + + +@final +class CollectReport(BaseReport): + """Collection report object.""" + + when = "collect" + + def __init__( + self, + nodeid: str, + outcome: "Literal['passed', 'skipped', 'failed']", + longrepr, + result: Optional[List[Union[Item, Collector]]], + sections: Iterable[Tuple[str, str]] = (), + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: The collected items and collection nodes. + self.result = result or [] + + #: List of pairs ``(str, str)`` of extra information which needs to + #: marshallable. + # Used by pytest to add captured text : from ``stdout`` and ``stderr``, + # but may be used by other plugins : to add arbitrary information to + # reports. + self.sections = list(sections) + + self.__dict__.update(extra) + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def __repr__(self) -> str: + return "".format( + self.nodeid, len(self.result), self.outcome + ) + + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg: str) -> None: + self.longrepr = msg + + def toterminal(self, out: TerminalWriter) -> None: + out.line(self.longrepr, red=True) + + +def pytest_report_to_serializable( + report: Union[CollectReport, TestReport] +) -> Optional[Dict[str, Any]]: + if isinstance(report, (TestReport, CollectReport)): + data = report._to_json() + data["$report_type"] = report.__class__.__name__ + return data + # TODO: Check if this is actually reachable. + return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: Dict[str, Any], +) -> Optional[Union[CollectReport, TestReport]]: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> Dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). 
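+
+    A small sketch of the intended use (``report`` is assumed to be a
+    finished report object):
+
+    .. code-block:: python
+
+        import json
+
+        d = _report_to_json(report)
+        json.dumps(d)  # only builtins remain, so a plain JSON encoder should work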
+ """ + + def serialize_repr_entry( + entry: Union[ReprEntry, ReprEntryNative] + ) -> Dict[str, Any]: + data = attr.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = attr.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]: + result = attr.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: Optional[ReprFileLocation], + ) -> Optional[Dict[str, Any]]: + if reprcrash is not None: + return attr.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> Dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: Dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], (py.path.local, Path)): + d[name] = str(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: Optional[Dict[str, Any]]): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: Union[ + ExceptionChainRepr, ReprExceptionInfo + ] = ExceptionChainRepr(chain) + else: + exception_info = ReprExceptionInfo(reprtraceback, reprcrash) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/myenv/lib/python3.9/site-packages/_pytest/runner.py b/myenv/lib/python3.9/site-packages/_pytest/runner.py new file mode 100644 index 0000000..794690d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/runner.py @@ -0,0 +1,462 @@ +"""Basic collect and runtest protocol implementations.""" +import bdb +import os +import sys +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import final +from _pytest.config.argparsing import Parser +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# 
+# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=0.005, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. Default 0.005", + ) + + +def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return] + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", "slowest %s durations" % durations) + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if verbose < 2 and rep.duration < durations_min: + tr.write_line("") + tr.write_line( + "(%s durations < %gs hidden. Use -vv to show these durations.)" + % (len(dlist) - i, durations_min) + ) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: "Session") -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: "Session") -> None: + session._setupstate.teardown_all() + + +def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Optional[Item] = None +) -> List[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
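+    # Setting _request to a falsey value (rather than deleting it) forces
+    # _initrequest() above to rebuild the request the next time this item runs.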
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.prepare(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise e + + +def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Optional["Literal['setup', 'call', 'teardown']"] +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds +) -> TestReport: + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report: TestReport = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. + return False + if hasattr(report, "wasxfail"): + # Exception was expected. + return False + if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)): + # Special control flow exception. 
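+        # A skip or a debugger quit is a normal way for a test to end, so no
+        # interactive (e.g. --pdb) session should be opened for it.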
+ return False + return True + + +def call_runtest_hook( + item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds +) -> "CallInfo[None]": + if when == "setup": + ihook: Callable[..., None] = item.ihook.pytest_runtest_setup + elif when == "call": + ihook = item.ihook.pytest_runtest_call + elif when == "teardown": + ihook = item.ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: Tuple[Type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return CallInfo.from_call( + lambda: ihook(item=item, **kwds), when=when, reraise=reraise + ) + + +TResult = TypeVar("TResult", covariant=True) + + +@final +@attr.s(repr=False) +class CallInfo(Generic[TResult]): + """Result/Exception info a function invocation. + + :param T result: + The return value of the call, if it didn't raise. Can only be + accessed if excinfo is None. + :param Optional[ExceptionInfo] excinfo: + The captured exception of the call, if it raised. + :param float start: + The system time when the call started, in seconds since the epoch. + :param float stop: + The system time when the call ended, in seconds since the epoch. + :param float duration: + The call duration, in seconds. + :param str when: + The context of invocation: "setup", "call", "teardown", ... + """ + + _result = attr.ib(type="Optional[TResult]") + excinfo = attr.ib(type=Optional[ExceptionInfo[BaseException]]) + start = attr.ib(type=float) + stop = attr.ib(type=float) + duration = attr.ib(type=float) + when = attr.ib(type="Literal['collect', 'setup', 'call', 'teardown']") + + @property + def result(self) -> TResult: + if self.excinfo is not None: + raise AttributeError(f"{self!r} has no valid result") + # The cast is safe because an exception wasn't raised, hence + # _result has the expected function return type (which may be + # None, that's why a cast and not an assert). + return cast(TResult, self._result) + + @classmethod + def from_call( + cls, + func: "Callable[[], TResult]", + when: "Literal['collect', 'setup', 'call', 'teardown']", + reraise: Optional[ + Union[Type[BaseException], Tuple[Type[BaseException], ...]] + ] = None, + ) -> "CallInfo[TResult]": + excinfo = None + start = timing.time() + precise_start = timing.perf_counter() + try: + result: Optional[TResult] = func() + except BaseException: + excinfo = ExceptionInfo.from_current() + if reraise is not None and isinstance(excinfo.value, reraise): + raise + result = None + # use the perf counter + precise_stop = timing.perf_counter() + duration = precise_stop - precise_start + stop = timing.time() + return cls( + start=start, + stop=stop, + duration=duration, + when=when, + result=result, + excinfo=excinfo, + ) + + def __repr__(self) -> str: + if self.excinfo is None: + return f"" + return f"" + + +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport: + return TestReport.from_item_and_call(item, call) + + +def pytest_make_collect_report(collector: Collector) -> CollectReport: + call = CallInfo.from_call(lambda: list(collector.collect()), "collect") + longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None + if not call.excinfo: + outcome: Literal["passed", "skipped", "failed"] = "passed" + else: + skip_exceptions = [Skipped] + unittest = sys.modules.get("unittest") + if unittest is not None: + # Type ignored because unittest is loaded dynamically. 
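+            # unittest.SkipTest is honored the same way as pytest's own
+            # Skipped outcome when a module skips itself during collection.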
+ skip_exceptions.append(unittest.SkipTest) # type: ignore + if isinstance(call.excinfo.value, tuple(skip_exceptions)): + outcome = "skipped" + r_ = collector._repr_failure_py(call.excinfo, "line") + assert isinstance(r_, ExceptionChainRepr), repr(r_) + r = r_.reprcrash + assert r + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + assert isinstance(errorinfo, str) + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + result = call.result if not call.excinfo else None + rep = CollectReport(collector.nodeid, outcome, longrepr, result) + rep.call = call # type: ignore # see collect_one_node + return rep + + +class SetupState: + """Shared state for setting up/tearing down test items or collectors.""" + + def __init__(self): + self.stack: List[Node] = [] + self._finalizers: Dict[Node, List[Callable[[], object]]] = {} + + def addfinalizer(self, finalizer: Callable[[], object], colitem) -> None: + """Attach a finalizer to the given colitem.""" + assert colitem and not isinstance(colitem, tuple) + assert callable(finalizer) + # assert colitem in self.stack # some unit tests don't setup stack :/ + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem) -> None: + finalizers = self._finalizers.pop(colitem, None) + exc = None + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = e + if exc: + raise exc + + def _teardown_with_finalization(self, colitem) -> None: + self._callfinalizers(colitem) + colitem.teardown() + for colitem in self._finalizers: + assert colitem in self.stack + + def teardown_all(self) -> None: + while self.stack: + self._pop_and_teardown() + for key in list(self._finalizers): + self._teardown_with_finalization(key) + assert not self._finalizers + + def teardown_exact(self, item, nextitem) -> None: + needed_collectors = nextitem and nextitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors) -> None: + exc = None + while self.stack: + if self.stack == needed_collectors[: len(self.stack)]: + break + try: + self._pop_and_teardown() + except TEST_OUTCOME as e: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = e + if exc: + raise exc + + def prepare(self, colitem) -> None: + """Setup objects along the collector chain to the test-method.""" + + # Check if the last collection node has raised an error. 
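+        # Re-raising a collector's stored setup error here makes every
+        # remaining item under that collector fail fast with the same error.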
+ for col in self.stack: + if hasattr(col, "_prepare_exc"): + exc = col._prepare_exc # type: ignore[attr-defined] + raise exc + + needed_collectors = colitem.listchain() + for col in needed_collectors[len(self.stack) :]: + self.stack.append(col) + try: + col.setup() + except TEST_OUTCOME as e: + col._prepare_exc = e # type: ignore[attr-defined] + raise e + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/myenv/lib/python3.9/site-packages/_pytest/setuponly.py b/myenv/lib/python3.9/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..44a1094 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/setuponly.py @@ -0,0 +1,94 @@ +from typing import Generator +from typing import Optional +from typing import Union + +import pytest +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, None, None]: + yield + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
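+            # ids may be a callable (computed per value) or a list indexed by
+            # param_index; fall back to the raw request.param if unset.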
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None: + if fixturedef.cached_result is not None: + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param # type: ignore[attr-defined] + + +def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(" " * 2 * fixturedef.scopenum) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write("[{}]".format(saferepr(fixturedef.cached_param, maxsize=42))) # type: ignore[attr-defined] + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/myenv/lib/python3.9/site-packages/_pytest/setupplan.py b/myenv/lib/python3.9/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..9ba81cc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/setupplan.py @@ -0,0 +1,40 @@ +from typing import Optional +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Optional[object]: + # Will return a dummy fixture if the setuponly option is provided. 
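+    # (The option checked below is --setupplan; pytest_cmdline_main further
+    # down turns it into --setuponly/--setupshow as well.)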
+ if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/myenv/lib/python3.9/site-packages/_pytest/skipping.py b/myenv/lib/python3.9/site-packages/_pytest/skipping.py new file mode 100644 index 0000000..9aacfec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/skipping.py @@ -0,0 +1,324 @@ +"""Support for skip/xfail functions and markers.""" +import os +import platform +import sys +import traceback +from collections.abc import Mapping +from typing import Generator +from typing import Optional +from typing import Tuple +from typing import Type + +import attr + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo +from _pytest.store import StoreKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. 
If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + "pytest_markeval_namespace() needs to return a dict, got {!r}".format( + dictionary + ) + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) # type: ignore[attr-defined] + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + "Error evaluating %r condition as a boolean" % mark.name, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + "Error evaluating %r: " % mark.name + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@attr.s(slots=True, frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason = attr.ib(type=str) + + +def evaluate_skip_marks(item: Item) -> Optional[Skip]: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + if "reason" in mark.kwargs: + reason = mark.kwargs["reason"] + elif mark.args: + reason = mark.args[0] + else: + reason = "unconditional skip" + return Skip(reason) + + return None + + +@attr.s(slots=True, frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + reason = attr.ib(type=str) + run = attr.ib(type=bool) + strict = attr.ib(type=bool) + raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]]) + + +def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. 
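+        # A bare @pytest.mark.xfail with no condition arguments always applies.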
+ if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Whether skipped due to skip or skipif marks. +skipped_by_mark_key = StoreKey[bool]() +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StoreKey[Optional[Xfail]]() +unexpectedsuccess_key = StoreKey[str]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + item._store[skipped_by_mark_key] = skipped is not None + if skipped: + skip(skipped.reason) + + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None, None, None]: + xfailed = item._store.get(xfailed_key, None) + if xfailed is None: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + yield + + # The test run may have added an xfail mark dynamically. + xfailed = item._store.get(xfailed_key, None) + if xfailed is None: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]): + outcome = yield + rep = outcome.get_result() + xfailed = item._store.get(xfailed_key, None) + # unittest special case, see setting of unexpectedsuccess_key + if unexpectedsuccess_key in item._store and rep.when == "call": + reason = item._store[unexpectedsuccess_key] + if reason: + rep.longrepr = f"Unexpected success: {reason}" + else: + rep.longrepr = "Unexpected success" + rep.outcome = "failed" + elif item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + + if ( + item._store.get(skipped_by_mark_key, True) + and rep.skipped + and type(rep.longrepr) is tuple + ): + # Skipped by mark.skipif; change the location of the failure + # to point to the item definition, otherwise it will display + # the location of where the skip exception was raised within pytest. 
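+        # longrepr here is the (file, line, reason) tuple raised by skip();
+        # the +1 below shifts it to the 1-based line shown to the user.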
+ _, _, reason = rep.longrepr + filename, line = item.reportinfo()[:2] + assert line is not None + rep.longrepr = str(filename), line + 1, reason + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/myenv/lib/python3.9/site-packages/_pytest/stepwise.py b/myenv/lib/python3.9/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000..197577c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/stepwise.py @@ -0,0 +1,119 @@ +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +import pytest +from _pytest import nodes +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + +if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="ignore the first failing test but stop on the next failing test", + ) + + +@pytest.hookimpl +def pytest_configure(config: Config) -> None: + # We should always have a cache as cache provider plugin uses tryfirst=True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + # Clear the list of failing tests if the plugin is not active. + session.config.cache.set(STEPWISE_CACHE_DIR, []) + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Optional[Session] = None + self.report_status = "" + assert config.cache is not None + self.cache: Cache = config.cache + self.lastfailed: Optional[str] = self.cache.get(STEPWISE_CACHE_DIR, None) + self.skip: bool = config.getoption("stepwise_skip") + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> None: + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + # check all item nodes until we find a match on last failed + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.lastfailed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status = "previously failed test not found, not skipping." + else: + self.report_status = f"skipping {failed_index} already passed items." + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. 
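+                # --sw-skip tolerates exactly one failure: once it fires,
+                # self.skip goes False and the next failure stops the session.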
+ if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.config.getoption("verbose") >= 0 and self.report_status: + return f"stepwise: {self.report_status}" + return None + + def pytest_sessionfinish(self) -> None: + self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed) diff --git a/myenv/lib/python3.9/site-packages/_pytest/store.py b/myenv/lib/python3.9/site-packages/_pytest/store.py new file mode 100644 index 0000000..e5008cf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/store.py @@ -0,0 +1,125 @@ +from typing import Any +from typing import cast +from typing import Dict +from typing import Generic +from typing import TypeVar +from typing import Union + + +__all__ = ["Store", "StoreKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StoreKey(Generic[T]): + """StoreKey is an object used as a key to a Store. + + A StoreKey is associated with the type T of the value of the key. + + A StoreKey is unique and cannot conflict with another key. + """ + + __slots__ = () + + +class Store: + """Store is a type-safe heterogenous mutable mapping that + allows keys and value types to be defined separately from + where it (the Store) is created. + + Usually you will be given an object which has a ``Store``: + + .. code-block:: python + + store: Store = some_object.store + + If a module wants to store data in this Store, it creates StoreKeys + for its keys (at the module level): + + .. code-block:: python + + some_str_key = StoreKey[str]() + some_bool_key = StoreKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + store[some_str_key] = "value" + store[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = store[some_str_key] + # The static type of some_bool is bool. + some_bool = store[some_bool_key] + + Why use this? + ------------- + + Problem: module Internal defines an object. Module External, which + module Internal doesn't know about, receives the object and wants to + attach information to it, to be retrieved later given the object. + + Bad solution 1: Module External assigns private attributes directly on + the object. This doesn't work well because the type checker doesn't + know about these attributes and it complains about undefined attributes. + + Bad solution 2: module Internal adds a ``Dict[str, Any]`` attribute to + the object. Module External stores its data in private keys of this dict. + This doesn't work well because retrieved values are untyped. + + Good solution: module Internal adds a ``Store`` to the object. Module + External mints StoreKeys for its own keys. Module External stores and + retrieves its data using these keys. 
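+
+    A short additional sketch of the default-aware accessors, reusing the
+    keys minted above:
+
+    .. code-block:: python
+
+        # no KeyError: returns the default when the key was never set
+        maybe_str = store.get(some_str_key, "fallback")
+        # sets and returns the default on first access
+        flag = store.setdefault(some_bool_key, False)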
+ """ + + __slots__ = ("_store",) + + def __init__(self) -> None: + self._store: Dict[StoreKey[Any], object] = {} + + def __setitem__(self, key: StoreKey[T], value: T) -> None: + """Set a value for key.""" + self._store[key] = value + + def __getitem__(self, key: StoreKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._store[key]) + + def get(self, key: StoreKey[T], default: D) -> Union[T, D]: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StoreKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StoreKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._store[key] + + def __contains__(self, key: StoreKey[T]) -> bool: + """Return whether key was set.""" + return key in self._store diff --git a/myenv/lib/python3.9/site-packages/_pytest/terminal.py b/myenv/lib/python3.9/site-packages/_pytest/terminal.py new file mode 100644 index 0000000..fbfb09a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/terminal.py @@ -0,0 +1,1405 @@ +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. +""" +import argparse +import datetime +import inspect +import platform +import sys +import warnings +from collections import Counter +from functools import partial +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import attr +import pluggy +import py + +import _pytest._version +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io.wcwidth import wcswidth +from _pytest.compat import final +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. 
+ """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: Optional[str] = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: Union[str, Sequence[object], None], + option_string: Optional[str] = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ) + group._addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="disable header", + ) + group._addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="disable summary", + ) + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ) + group._addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="set verbosity. Default is 0.", + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. 
" + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) + group._addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled)", + ) + + parser.addini( + "console_output_style", + help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").', + default="progress", + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@attr.s +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. + + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple|py.path.local fslocation: + File system location of the source of the warning (see ``get_location``). 
+ """ + + message = attr.ib(type=str) + nodeid = attr.ib(type=Optional[str], default=None) + fslocation = attr.ib( + type=Optional[Union[Tuple[str, int], py.path.local]], default=None + ) + count_towards_summary = True + + def get_location(self, config: Config) -> Optional[str]: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = bestrelpath( + config.invocation_params.dir, absolutepath(filename) + ) + return f"{relpath}:{linenum}" + else: + return str(self.fslocation) + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: Optional[TextIO] = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Optional[Session] = None + self._showfspath: Optional[bool] = None + + self.stats: Dict[str, List[Any]] = {} + self._main_color: Optional[str] = None + self._known_types: Optional[List[str]] = None + self.startdir = config.invocation_dir + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: Union[None, Path, str, int] = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported: Set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write: Optional[float] = None + self._already_displayed_warnings: Optional[int] = None + self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None + + def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]": + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption("capture", "no") == "no": + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg == "progress": + return "progress" + elif cfg == "count": + return "count" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.verbosity >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: Optional[bool]) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.verbosity > 0 + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + 
self.currentfspath = fspath + relfspath = bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: Union[str, bytes], **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, warning_message: warnings.WarningMessage, nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. + self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: Tuple[str, Optional[int], str] + ) -> None: + # Ensure that the path is printed before the + # 1st test of a module starts running. 
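+        # Editor's note (illustrative, not part of upstream pytest): the two
+        # branches below produce the two familiar terminal layouts, e.g.:
+        #
+        #   $ pytest -v    ->  test_mod.py::test_a PASSED    [ 50%]
+        #   $ pytest       ->  test_mod.py ..                [100%]
+        #
+        # showlongtestinfo is true for -v and above; showfspath covers the
+        # default verbosity, where only the file path prefix is printed here.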
+ if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + res: Tuple[ + str, str, Union[str, Tuple[str, Mapping[str, bool]]] + ] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + category, letter, word = res + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + running_xdist = hasattr(rep, "node") + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + if self.verbosity <= 0: + self._tw.write(letter, **markup) + else: + self._progress_nodeids_reported.add(rep.nodeid) + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + reason = _get_raw_skip_reason(rep) + reason_ = _format_trimmed(" ({})", reason, available_width) + if reason and reason_ is not None: + self._tw.write(reason_) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return len(self._progress_nodeids_reported) == self._session.testscollected + + def pytest_runtest_logfinish(self, nodeid: str) -> None: + assert self._session + if self.verbosity <= 0 and self._show_progress_info: + if self._show_progress_info == "count": + num_tests = self._session.testscollected + progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) + else: + progress_length = len(" [100%]") + + self._progress_nodeids_reported.add(nodeid) + + if self._is_last_item: + self._write_progress_information_filling_space() + else: + main_color, _ = self._get_main_color() + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = "{{:{}d}}".format(len(str(collected))) + format_string = f" [{counter_format}/{{}}]" + return format_string.format(len(progress), collected) + return f" [ {collected} / {collected} ]" + else: + if collected: + return " [{:3d}%]".format( + len(self._progress_nodeids_reported) * 100 // collected + ) + return " [100%]" + + def _write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = 
self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + self._collect_report_last_write = timing.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. + t = timing.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - errors - skipped - deselected + if final: + line = "collected " + else: + line = "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d error%s" % (errors, "s" if errors != 1 else "") + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected > 0: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: "Session") -> None: + self._session = session + self._sessionstarttime = timing.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3]) + msg += ", pytest-{}, py-{}, pluggy-{}".format( + _pytest._version.version, py.__version__, pluggy.__version__ + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, startdir=self.startdir + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[Union[str, Sequence[str]]] + ) -> None: + for line_or_lines in reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> List[str]: + line = "rootdir: %s" % config.rootpath + + if config.inipath: + line += ", configfile: " + bestrelpath(config.rootpath, config.inipath) + + testpaths: 
List[str] = config.getini("testpaths") + if config.invocation_params.dir == config.rootpath and config.args == testpaths: + line += ", testpaths: {}".format(", ".join(testpaths)) + + result = [line] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return result + + def pytest_collection_finish(self, session: "Session") -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + # To print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway. + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = Counter(item.nodeid.split("::", 1)[0] for item in items) + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: List[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + if col.name == "()": # Skip Instances. + continue + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if self.config.option.verbose >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(hookwrapper=True) + def pytest_sessionfinish( + self, session: "Session", exitstatus: Union[int, ExitCode] + ): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + + @hookimpl(hookwrapper=True) + def pytest_terminal_summary(self) -> Generator[None, None, None]: + self.summary_errors() + self.summary_failures() + self.summary_warnings() + self.summary_passes() + yield + self.short_test_summary() + # Display any extra warnings from teardown here (if any). 
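+        # Editor's note (illustrative sketch, not upstream code): hookwrapper
+        # ordering is what makes this layout work -- everything before the
+        # `yield` above runs before other pytest_terminal_summary
+        # implementations, everything after runs after them. A third-party
+        # plugin using the same mechanism could look like:
+        #
+        #   @pytest.hookimpl(hookwrapper=True)
+        #   def pytest_terminal_summary(terminalreporter):
+        #       terminalreporter.section("before other summaries")
+        #       yield
+        #       terminalreporter.section("after other summaries")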
+ self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path. + + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, fspath) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? + + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. + # + def getreports(self, name: str): + values = [] + for x in self.stats.get(name, []): + if not hasattr(x, "_pdbshown"): + values.append(x) + return values + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: Dict[str, List[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: List[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line("-- Docs: 
https://docs.pytest.org/en/stable/warnings.html") + + def summary_passes(self) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports: List[TestReport] = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> List[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + if self.config.option.tbstyle != "no": + reports: List[BaseReport] = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + if self.config.option.tbstyle == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + reports: List[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = timing.time() - self._sessionstarttime + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = " in {}".format(format_session_duration(session_duration)) + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg 
+= markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(stat, lines: List[str]) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + termwidth = self._tw.fullwidth + config = self.config + for rep in failed: + line = _get_line_with_reprcrash_message(config, rep, termwidth) + lines.append(line) + + def show_xfailed(lines: List[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word = rep._get_verbose_word(self.config) + pos = _get_pos(self.config, rep) + lines.append(f"{verbose_word} {pos}") + reason = rep.wasxfail + if reason: + lines.append(" " + str(reason)) + + def show_xpassed(lines: List[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word = rep._get_verbose_word(self.config) + pos = _get_pos(self.config, rep) + reason = rep.wasxfail + lines.append(f"{verbose_word} {pos} {reason}") + + def show_skipped(lines: List[str]) -> None: + skipped: List[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word = skipped[0]._get_verbose_word(self.config) + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" + % (verbose_word, num, fspath, lineno, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, "failed"), + "s": show_skipped, + "p": partial(show_simple, "passed"), + "E": partial(show_simple, "error"), + } + + lines: List[str] = [] + for char in self.reportchars: + action = REPORTCHAR_ACTIONS.get(char) + if action: # skipping e.g. "P" (passed with output) here. + action(lines) + + if lines: + self.write_sep("=", "short test summary info") + for line in lines: + self.write_line(line) + + def _get_main_color(self) -> Tuple[str, List[str]]: + if self._main_color is None or self._known_types is None or self._is_last_item: + self._set_main_color() + assert self._main_color + assert self._known_types + return self._main_color, self._known_types + + def _determine_main_color(self, unknown_type_seen: bool) -> str: + stats = self.stats + if "failed" in stats or "error" in stats: + main_color = "red" + elif "warnings" in stats or "xpassed" in stats or unknown_type_seen: + main_color = "yellow" + elif "passed" in stats or not self._is_last_item: + main_color = "green" + else: + main_color = "yellow" + return main_color + + def _set_main_color(self) -> None: + unknown_types: List[str] = [] + for found_type in self.stats.keys(): + if found_type: # setup/teardown reports have an empty key, ignore them + if found_type not in KNOWN_TYPES and found_type not in unknown_types: + unknown_types.append(found_type) + self._known_types = list(KNOWN_TYPES) + unknown_types + self._main_color = self._determine_main_color(bool(unknown_types)) + + def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + """ + Build the parts used in the last summary stats line. + + The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===". 
+ + This function builds a list of the "parts" that make up for the text in that line, in + the example above it would be: + + [ + ("12 passed", {"green": True}), + ("2 errors", {"red": True} + ] + + That last dict for each line is a "markup dictionary", used by TerminalWriter to + color output. + + The final color of the line is also determined by this function, and is the second + element of the returned tuple. + """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> List[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] + + return parts, main_color + + +def _get_pos(config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + return nodeid + + +def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." 
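+    # Editor's note (worked example, assuming wcswidth of plain ASCII equals
+    # len): _format_trimmed(" - {}", "assert 1 == 2 was false", 15) measures
+    # the empty format (" - ", width 3), sees the message does not fit,
+    # drops characters until width 3 + len(msg) fits in 15 - len("..."),
+    # and returns " - assert 1 ..." (exactly 15 columns wide).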
+ format_width = wcswidth(format.format("")) + if format_width + len(ellipsis) > available_width: + return None + + if format_width + wcswidth(msg) > available_width: + available_width -= len(ellipsis) + msg = msg[:available_width] + while format_width + wcswidth(msg) > available_width: + msg = msg[:-1] + msg += ellipsis + + return format.format(msg) + + +def _get_line_with_reprcrash_message( + config: Config, rep: BaseReport, termwidth: int +) -> str: + """Get summary line for a report, trying to add reprcrash message.""" + verbose_word = rep._get_verbose_word(config) + pos = _get_pos(config, rep) + + line = f"{verbose_word} {pos}" + line_width = wcswidth(line) + + try: + # Type ignored intentionally -- possible AttributeError expected. + msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] + except AttributeError: + pass + else: + available_width = termwidth - line_width + msg = _format_trimmed(" - {}", msg, available_width) + if msg is not None: + line += msg + + return line + + +def _folded_skips( + startpath: Path, skipped: Sequence[CollectReport], +) -> List[Tuple[int, str, Optional[int], str]]: + d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {} + for event in skipped: + assert event.longrepr is not None + assert isinstance(event.longrepr, tuple), (event, event.longrepr) + assert len(event.longrepr) == 3, (event, event.longrepr) + fspath, lineno, reason = event.longrepr + # For consistency, report all fspaths in relative form. + fspath = bestrelpath(startpath, Path(fspath)) + keywords = getattr(event, "keywords", {}) + # Folding reports with global pytestmark variable. + # This is a workaround, because for now we cannot identify the scope of a skip marker + # TODO: Revisit after marks scope would be fixed. + if ( + event.when == "setup" + and "skip" in keywords + and "pytestmark" not in keywords + ): + key: Tuple[str, Optional[int], str] = (fspath, None, reason) + else: + key = (fspath, lineno, reason) + d.setdefault(key, []).append(event) + values: List[Tuple[int, str, Optional[int], str]] = [] + for key, events in d.items(): + values.append((len(events), *key)) + return values + + +_color_for_type = { + "failed": "red", + "error": "red", + "warnings": "yellow", + "passed": "green", +} +_color_for_type_default = "yellow" + + +def pluralize(count: int, noun: str) -> Tuple[int, str]: + # No need to pluralize words such as `failed` or `passed`. + if noun not in ["error", "warnings", "test"]: + return count, noun + + # The `warnings` key is plural. To avoid API breakage, we keep it that way but + # set it to singular here so we can determine plurality in the same way as we do + # for `error`. + noun = noun.replace("warnings", "warning") + + return count, noun + "s" if count != 1 else noun + + +def _plugin_nameversions(plugininfo) -> List[str]: + values: List[str] = [] + for plugin, dist in plugininfo: + # Gets us name and version! + name = "{dist.project_name}-{dist.version}".format(dist=dist) + # Questionable convenience, but it keeps things short. + if name.startswith("pytest-"): + name = name[7:] + # We decided to print python package names they can have more than one plugin. 
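+        # Editor's note (illustrative): a dist named "pytest-cov" at version
+        # 2.12.1 becomes "cov-2.12.1", so the session header reads e.g.
+        # "plugins: cov-2.12.1, xdist-2.3.0".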
+ if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. + """ + if hasattr(report, "wasxfail"): + reason = cast(str, report.wasxfail) + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/myenv/lib/python3.9/site-packages/_pytest/threadexception.py b/myenv/lib/python3.9/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000..1c1f62f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/threadexception.py @@ -0,0 +1,90 @@ +import threading +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/threading_helper.py, with modifications. +class catch_threading_exception: + """Context manager catching threading.Thread exception using + threading.excepthook. + + Storing exc_value using a custom hook can create a reference cycle. The + reference cycle is broken explicitly when the context manager exits. + + Storing thread using a custom hook can resurrect it if it is set to an + object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with threading_helper.catch_threading_exception() as cm: + # code spawning a thread which raises an exception + ... + # check the thread exception: use cm.args + ... + # cm.args attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + # See https://github.com/python/typeshed/issues/4767 regarding the underscore. 
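+        # Editor's note (illustrative usage, mirroring the class docstring):
+        #
+        #   with catch_threading_exception() as cm:
+        #       t = threading.Thread(target=lambda: 1 / 0)
+        #       t.start()
+        #       t.join()
+        #       assert cm.args.exc_type is ZeroDivisionError
+        #
+        # The check must happen inside the `with` block: __exit__ deletes
+        # cm.args to break the reference cycle described above.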
+ self.args: Optional["threading._ExceptHookArgs"] = None + self._old_hook: Optional[Callable[["threading._ExceptHookArgs"], Any]] = None + + def _hook(self, args: "threading._ExceptHookArgs") -> None: + self.args = args + + def __enter__(self) -> "catch_threading_exception": + self._old_hook = threading.excepthook + threading.excepthook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + threading.excepthook = self._old_hook + self._old_hook = None + del self.args + + +def thread_exception_runtest_hook() -> Generator[None, None, None]: + with catch_threading_exception() as cm: + yield + if cm.args: + if cm.args.thread is not None: + thread_name = cm.args.thread.name + else: + thread_name = "" + msg = f"Exception in thread {thread_name}\n\n" + msg += "".join( + traceback.format_exception( + cm.args.exc_type, cm.args.exc_value, cm.args.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, trylast=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() diff --git a/myenv/lib/python3.9/site-packages/_pytest/timing.py b/myenv/lib/python3.9/site-packages/_pytest/timing.py new file mode 100644 index 0000000..925163a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/timing.py @@ -0,0 +1,12 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. +""" +from time import perf_counter +from time import sleep +from time import time + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/myenv/lib/python3.9/site-packages/_pytest/tmpdir.py b/myenv/lib/python3.9/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..a6bd383 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/tmpdir.py @@ -0,0 +1,254 @@ +"""Support for providing temporary directories to test functions.""" +import os +import re +import sys +import tempfile +from pathlib import Path +from typing import Optional + +import attr +import py + +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import final +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch + + +@final +@attr.s(init=False) +class TempPathFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. 
+ """ + + _given_basetemp = attr.ib(type=Optional[Path]) + _trace = attr.ib() + _basetemp = attr.ib(type=Optional[Path]) + + def __init__( + self, + given_basetemp: Optional[Path], + trace, + basetemp: Optional[Path] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._basetemp = basetemp + + @classmethod + def from_config( + cls, config: Config, *, _ispytest: bool = False, + ) -> "TempPathFactory": + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. + """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed.""" + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath(f"pytest-of-{user}") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + if sys.platform != "win32": + uid = os.getuid() + rootdir_stat = rootdir.stat() + # getuid shouldn't fail, but cpython defines such a case. + # Let's hope for the best. + if uid != -1: + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again." 
+ ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=3, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +@final +@attr.s(init=False) +class TempdirFactory: + """Backward comptibility wrapper that implements :class:``py.path.local`` + for :class:``TempPathFactory``.""" + + _tmppath_factory = attr.ib(type=TempPathFactory) + + def __init__( + self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._tmppath_factory = tmppath_factory + + def mktemp(self, basename: str, numbered: bool = True) -> py.path.local: + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" + return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self) -> py.path.local: + """Backward compat wrapper for ``_tmppath_factory.getbasetemp``.""" + return py.path.local(self._tmppath_factory.getbasetemp().resolve()) + + +def get_user() -> Optional[str]: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + import getpass + + try: + return getpass.getuser() + except (ImportError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempdirFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmpdir_factory session fixture. + """ + mp = MonkeyPatch() + tmppath_handler = TempPathFactory.from_config(config, _ispytest=True) + t = TempdirFactory(tmppath_handler, _ispytest=True) + config._cleanup.append(mp.undo) + mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) + mp.setattr(config, "_tmpdirhandler", t, raising=False) + + +@fixture(scope="session") +def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: + """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmpdirhandler # type: ignore + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmpdir(tmp_path: Path) -> py.path.local: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. If + ``--basetemp`` is used then it is cleared each session. See :ref:`base + temporary directory`. + + The returned object is a `py.path.local`_ path object. + + .. 
_`py.path.local`: https://py.readthedocs.io/en/latest/path.html + """ + return py.path.local(tmp_path) + + +@fixture +def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. If + ``--basetemp`` is used then it is cleared each session. See :ref:`base + temporary directory`. + + The returned object is a :class:`pathlib.Path` object. + """ + + return _mk_tmp(request, tmp_path_factory) diff --git a/myenv/lib/python3.9/site-packages/_pytest/unittest.py b/myenv/lib/python3.9/site-packages/_pytest/unittest.py new file mode 100644 index 0000000..55f15ef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/unittest.py @@ -0,0 +1,405 @@ +"""Discover and run std-library "unittest" style tests.""" +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +import pytest +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import PyCollector +from _pytest.runner import CallInfo +from _pytest.skipping import skipped_by_mark_key +from _pytest.skipping import unexpectedsuccess_key + +if TYPE_CHECKING: + import unittest + + from _pytest.fixtures import _Scope + + _SysExcInfoType = Union[ + Tuple[Type[BaseException], BaseException, types.TracebackType], + Tuple[None, None, None], + ] + + +def pytest_pycollect_makeitem( + collector: PyCollector, name: str, obj: object +) -> Optional["UnitTestCase"]: + # Has unittest been imported and is obj a subclass of its TestCase? + try: + ut = sys.modules["unittest"] + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Yes, so let's collect it. + item: UnitTestCase = UnitTestCase.from_parent(collector, name=name, obj=obj) + return item + + +class UnitTestCase(Class): + # Marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs. 
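+    # Editor's note (illustrative): given a module containing
+    #
+    #   import unittest
+    #   class TestMath(unittest.TestCase):
+    #       def test_add(self):
+    #           self.assertEqual(1 + 1, 2)
+    #
+    # pytest_pycollect_makeitem() above wraps TestMath in a UnitTestCase
+    # collector, and collect() below yields one TestCaseFunction per
+    # test method reported by unittest's TestLoader.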
+ nofuncargs = True + + def collect(self) -> Iterable[Union[Item, Collector]]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._inject_setup_teardown_fixtures(cls) + self._inject_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + yield TestCaseFunction.from_parent(self, name=name, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + # Type ignored because `ut` is an opaque module. + if ut is None or runtest != ut.TestCase.runTest: # type: ignore + yield TestCaseFunction.from_parent(self, name="runTest") + + def _inject_setup_teardown_fixtures(self, cls: type) -> None: + """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding + teardown functions (#517).""" + class_fixture = _make_xunit_fixture( + cls, + "setUpClass", + "tearDownClass", + "doClassCleanups", + scope="class", + pass_self=False, + ) + if class_fixture: + cls.__pytest_class_setup = class_fixture # type: ignore[attr-defined] + + method_fixture = _make_xunit_fixture( + cls, + "setup_method", + "teardown_method", + None, + scope="function", + pass_self=True, + ) + if method_fixture: + cls.__pytest_method_setup = method_fixture # type: ignore[attr-defined] + + +def _make_xunit_fixture( + obj: type, + setup_name: str, + teardown_name: str, + cleanup_name: Optional[str], + scope: "_Scope", + pass_self: bool, +): + setup = getattr(obj, setup_name, None) + teardown = getattr(obj, teardown_name, None) + if setup is None and teardown is None: + return None + + if cleanup_name: + cleanup = getattr(obj, cleanup_name, lambda *args: None) + else: + + def cleanup(*args): + pass + + @pytest.fixture( + scope=scope, + autouse=True, + # Use a unique name to speed up lookup. + name=f"unittest_{setup_name}_fixture_{obj.__qualname__}", + ) + def fixture(self, request: FixtureRequest) -> Generator[None, None, None]: + if _is_skipped(self): + reason = self.__unittest_skip_why__ + pytest.skip(reason) + if setup is not None: + try: + if pass_self: + setup(self, request.function) + else: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + if pass_self: + cleanup(self) + else: + cleanup() + + raise + yield + try: + if teardown is not None: + if pass_self: + teardown(self, request.function) + else: + teardown() + finally: + if pass_self: + cleanup(self) + else: + cleanup() + + return fixture + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None + _testcase: Optional["unittest.TestCase"] = None + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). 
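+        # Editor's note (illustrative): for `class TestMath(TestCase)` with a
+        # `test_add` method, the assignment below is equivalent to
+        # `self._testcase = TestMath("test_add")` -- one TestCase instance
+        # per test method, exactly as unittest.TestLoader would build it.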
+ self._explicit_tearDown: Optional[Callable[[], None]] = None + assert self.parent is not None + self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined] + self._obj = getattr(self._testcase, self.name) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._testcase = None + self._obj = None + + def startTest(self, testcase: "unittest.TestCase") -> None: + pass + + def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None: + # Unwrap potential exception info (see twisted trial support below). + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo(rawexcinfo) # type: ignore[arg-type] + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + excinfo.value + excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None: + try: + skip(reason) + except skip.Exception: + self._store[skipped_by_mark_key] = True + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: "unittest.TestCase", + rawexcinfo: "_SysExcInfoType", + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, testcase: "unittest.TestCase", reason: str = "" + ) -> None: + self._store[unexpectedsuccess_key] = reason + + def addSuccess(self, testcase: "unittest.TestCase") -> None: + pass + + def stopTest(self, testcase: "unittest.TestCase") -> None: + pass + + def _expecting_failure(self, test_method) -> bool: + """Return True if the given unittest method (or the entire class) is marked + with @expectedFailure.""" + expecting_failure_method = getattr( + test_method, "__unittest_expecting_failure__", False + ) + expecting_failure_class = getattr(self, "__unittest_expecting_failure__", False) + return bool(expecting_failure_class or expecting_failure_method) + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + assert self._testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + # Type ignored because self acts as the TestResult, but is not actually one. 
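+            # Editor's note (illustrative): calling a TestCase instance with
+            # a `result` argument runs the full setUp/test/tearDown protocol
+            # and reports through the standard unittest result API. The
+            # addError/addFailure/addSkip/addSuccess methods defined on this
+            # class above are that API, which is why `self` can stand in for
+            # a unittest.TestResult here.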
+ self._testcase(result=self) # type: ignore[arg-type] + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + if self.config.getoption("usepdb") and not _is_skipped(self.obj): + self._explicit_tearDown = self._testcase.tearDown + setattr(self._testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(self._testcase, self.name, self.obj) + try: + self._testcase(result=self) # type: ignore[arg-type] + finally: + delattr(self._testcase, self.name) + + def _prunetraceback( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> None: + Function._prunetraceback(self, excinfo) + traceback = excinfo.traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest") + ) + if traceback: + excinfo.traceback = traceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + unittest = sys.modules.get("unittest") + if ( + unittest + and call.excinfo + and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] + ): + excinfo = call.excinfo + # Let's substitute the excinfo with a pytest.skip one. + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +# Twisted trial support. 
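+# Editor's note (illustrative sketch, not upstream code): the hookwrapper
+# below is a save/patch/restore cycle around a single test run, roughly:
+#
+#   original = Failure.__init__
+#   Failure.__init__ = excstore   # stash (exc_type, exc_value, exc_tb)
+#   yield                         # run the test protocol
+#   Failure.__init__ = original   # restore
+#
+# _addexcinfo() above then unwraps the stashed `_rawexcinfo` attribute to
+# rebuild a pytest ExceptionInfo from the Failure.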
+ + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut: Any = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done: List[int] = []) -> None: + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) diff --git a/myenv/lib/python3.9/site-packages/_pytest/unraisableexception.py b/myenv/lib/python3.9/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000..fcb5d82 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,93 @@ +import sys +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/__init__.py, with modifications. +class catch_unraisable_exception: + """Context manager catching unraisable exception using sys.unraisablehook. + + Storing the exception value (cm.unraisable.exc_value) creates a reference + cycle. The reference cycle is broken explicitly when the context manager + exits. + + Storing the object (cm.unraisable.object) can resurrect it if it is set to + an object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with catch_unraisable_exception() as cm: + # code creating an "unraisable exception" + ... + # check the unraisable exception: use cm.unraisable + ... + # cm.unraisable attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.unraisable: Optional["sys.UnraisableHookArgs"] = None + self._old_hook: Optional[Callable[["sys.UnraisableHookArgs"], Any]] = None + + def _hook(self, unraisable: "sys.UnraisableHookArgs") -> None: + # Storing unraisable.object can resurrect an object which is being + # finalized. Storing unraisable.exc_value creates a reference cycle. 
+ self.unraisable = unraisable + + def __enter__(self) -> "catch_unraisable_exception": + self._old_hook = sys.unraisablehook + sys.unraisablehook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + sys.unraisablehook = self._old_hook + self._old_hook = None + del self.unraisable + + +def unraisable_exception_runtest_hook() -> Generator[None, None, None]: + with catch_unraisable_exception() as cm: + yield + if cm.unraisable: + if cm.unraisable.err_msg is not None: + err_msg = cm.unraisable.err_msg + else: + err_msg = "Exception ignored in" + msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" + msg += "".join( + traceback.format_exception( + cm.unraisable.exc_type, + cm.unraisable.exc_value, + cm.unraisable.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() diff --git a/myenv/lib/python3.9/site-packages/_pytest/warning_types.py b/myenv/lib/python3.9/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..2eadd9f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/warning_types.py @@ -0,0 +1,132 @@ +from typing import Any +from typing import Generic +from typing import Type +from typing import TypeVar + +import attr + +from _pytest.compat import final + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +@final +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future version.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. + """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> "PytestExperimentalApiWarning": + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@final +class PytestUnhandledCoroutineWarning(PytestWarning): + """Warning emitted for an unhandled coroutine. + + A coroutine was encountered when collecting test functions, but was not + handled by any async-aware plugin. + Coroutine test functions are not natively supported. 
+ """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@attr.s +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category = attr.ib(type=Type["_W"]) + template = attr.ib(type=str) + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) diff --git a/myenv/lib/python3.9/site-packages/_pytest/warnings.py b/myenv/lib/python3.9/site-packages/_pytest/warnings.py new file mode 100644 index 0000000..35eed96 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_pytest/warnings.py @@ -0,0 +1,139 @@ +import sys +import warnings +from contextlib import contextmanager +from typing import Generator +from typing import Optional +from typing import TYPE_CHECKING + +import pytest +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter + +if TYPE_CHECKING: + from typing_extensions import Literal + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/stable/warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: "Literal['config', 'collect', 'runtest']", + item: Optional[Item], +) -> Generator[None, None, None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=True) as log: + # mypy can't infer that record=True means log is not None; help it. + assert log is not None + + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). 
+ warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + yield + + for warning_message in log: + ihook.pytest_warning_captured.call_historic( + kwargs=dict( + warning_message=warning_message, + when=when, + item=item, + location=None, + ) + ) + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + warn_msg = warning_message.message + msg = warnings.formatwarning( + str(warn_msg), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None, None, None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests( + early_config: "Config", +) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + yield diff --git a/myenv/lib/python3.9/site-packages/_yaml/__init__.py b/myenv/lib/python3.9/site-packages/_yaml/__init__.py new file mode 100644 index 0000000..7baa8c4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. 
+import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/myenv/lib/python3.9/site-packages/aaaaa_future_fstrings.pth b/myenv/lib/python3.9/site-packages/aaaaa_future_fstrings.pth new file mode 100644 index 0000000..74682bf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/aaaaa_future_fstrings.pth @@ -0,0 +1,2 @@ +import sys; exec('try:\n import future_fstrings\nexcept ImportError:\n pass\nelse:\n future_fstrings.register()\n') + diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/LICENSE new file mode 100644 index 0000000..104eebf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2018 Alex Grönholm + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
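The _yaml stub added above only redirects legacy imports; the pattern its deprecation warning points to is the usual LibYAML fallback, sketched minimally below (illustrative only, assuming PyYAML is installed; it prefers the C-accelerated classes and falls back to the pure-Python ones when the extension is unavailable):

    import yaml

    # Prefer the LibYAML-based loader/dumper when PyYAML was built with the C
    # extension; otherwise fall back to the pure-Python implementations.
    try:
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        from yaml import Loader, Dumper

    data = yaml.load("greeting: hello\nlanguages: [de, en, es]", Loader=Loader)
    print(yaml.dump(data, Dumper=Dumper))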
diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/METADATA new file mode 100644 index 0000000..600c1fe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/METADATA @@ -0,0 +1,102 @@ +Metadata-Version: 2.1 +Name: anyio +Version: 3.6.1 +Summary: High level compatibility layer for multiple asynchronous event loop implementations +Author: Alex Grönholm +Author-email: alex.gronholm@nextday.fi +License: MIT +Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/ +Project-URL: Source code, https://github.com/agronholm/anyio +Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Framework :: AnyIO +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.6.2 +License-File: LICENSE +Requires-Dist: idna (>=2.8) +Requires-Dist: sniffio (>=1.1) +Requires-Dist: contextvars ; python_version < "3.7" +Requires-Dist: dataclasses ; python_version < "3.7" +Requires-Dist: typing-extensions ; python_version < "3.8" +Provides-Extra: doc +Requires-Dist: packaging ; extra == 'doc' +Requires-Dist: sphinx-rtd-theme ; extra == 'doc' +Requires-Dist: sphinx-autodoc-typehints (>=1.2.0) ; extra == 'doc' +Provides-Extra: test +Requires-Dist: coverage[toml] (>=4.5) ; extra == 'test' +Requires-Dist: hypothesis (>=4.0) ; extra == 'test' +Requires-Dist: pytest (>=7.0) ; extra == 'test' +Requires-Dist: pytest-mock (>=3.6.1) ; extra == 'test' +Requires-Dist: trustme ; extra == 'test' +Requires-Dist: contextlib2 ; (python_version < "3.7") and extra == 'test' +Requires-Dist: uvloop (<0.15) ; (python_version < "3.7" and (platform_python_implementation == "CPython" and platform_system != "Windows")) and extra == 'test' +Requires-Dist: mock (>=4) ; (python_version < "3.8") and extra == 'test' +Requires-Dist: uvloop (>=0.15) ; (python_version >= "3.7" and (platform_python_implementation == "CPython" and platform_system != "Windows")) and extra == 'test' +Provides-Extra: trio +Requires-Dist: trio (>=0.16) ; extra == 'trio' + +.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg + :target: https://github.com/agronholm/anyio/actions/workflows/test.yml + :alt: Build Status +.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master + :target: https://coveralls.io/github/agronholm/anyio?branch=master + :alt: Code Coverage +.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest + :target: https://anyio.readthedocs.io/en/latest/?badge=latest + :alt: Documentation +.. image:: https://badges.gitter.im/gitterHQ/gitter.svg + :target: https://gitter.im/python-trio/AnyIO + :alt: Gitter chat + +AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or +trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio, and works in harmony +with the native SC of trio itself. + +Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or +trio_. 
AnyIO can also be adopted into a library or application incrementally – bit by bit, no full +refactoring necessary. It will blend in with native libraries of your chosen backend. + +Documentation +------------- + +View full documentation at: https://anyio.readthedocs.io/ + +Features +-------- + +AnyIO offers the following functionality: + +* Task groups (nurseries_ in trio terminology) +* High level networking (TCP, UDP and UNIX sockets) + + * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python + 3.8) + * async/await style UDP sockets (unlike asyncio where you still have to use Transports and + Protocols) + +* A versatile API for byte streams and object streams +* Inter-task synchronization and communication (locks, conditions, events, semaphores, object + streams) +* Worker threads +* Subprocesses +* Asynchronous file I/O (using worker threads) +* Signal handling + +AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures. +It even works with the popular Hypothesis_ library. + +.. _asyncio: https://docs.python.org/3/library/asyncio.html +.. _trio: https://github.com/python-trio/trio +.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency +.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning +.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs +.. _pytest: https://docs.pytest.org/en/latest/ +.. _Hypothesis: https://hypothesis.works/ diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/RECORD new file mode 100644 index 0000000..7864a5f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/RECORD @@ -0,0 +1,45 @@ +anyio/__init__.py,sha256=M2R8dk6L5gL5lXHArzpSfEn2oH5jMyUKhzyrkRiv2AM,4037 +anyio/from_thread.py,sha256=nSq6mafYMqwxKmzdJyISg8cp-AyBj9rxZPMt_b7klSM,16497 +anyio/lowlevel.py,sha256=W4ydshns7f86YuSESFc2igTf46AWMXnGPQGsY_Esl2E,4679 +anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/pytest_plugin.py,sha256=kWj2B8BJehePJd1sztRBmJBRh8O4hk1oGSYQRlX5Gr8,5134 +anyio/to_process.py,sha256=hu0ES3HJC-VEjcdPJMzAzjyTaekaCNToO3coj3jvnus,9247 +anyio/to_thread.py,sha256=VeMQoo8Va2zz0WFk2p123QikDpqk2wYZGw20COC3wqw,2124 +anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_backends/_asyncio.py,sha256=ZJDvRwfS4wv9WWcqWledNJyl8hx8A8-m9-gSKAJ6nBM,69238 +anyio/_backends/_trio.py,sha256=CebCaqr8Szi6uCnUzwtBRLfUitR5OnDT_wfH-KiqvBQ,29696 +anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_core/_compat.py,sha256=X99W70r-O-JLdkKNtbddcIY5H2Nyg3Nk34oUYE9WZRs,5790 +anyio/_core/_eventloop.py,sha256=DRn_hy679LtsJFsPX7dXjDv72bLtSFkTnWY9WVVfgCQ,4108 +anyio/_core/_exceptions.py,sha256=1wqraNldZroYkoyB0HZStAruz_7yDCBaW-4zYwsKj8s,2904 +anyio/_core/_fileio.py,sha256=au82uZXZX4fia8EoZq_E-JDwZFKe6ZtI0J6IkxK8FmQ,18298 +anyio/_core/_resources.py,sha256=M_uN-90N8eSsWuvo-0xluWU_OG2BTyccAgsQ7XtHxzs,399 +anyio/_core/_signals.py,sha256=D4btJN527tAADspKBeNKaCds-ZcEZJP8LWM_MjVuQRA,827 +anyio/_core/_sockets.py,sha256=fW_Cbg6kfw4xgYuVuWbcWrAYspOcDSEjwxVATMzf2fo,19820 +anyio/_core/_streams.py,sha256=gjT5xChJ1OoV8nNinljSv1yW4nqUS-QzZzIydQz3exQ,1494 +anyio/_core/_subprocesses.py,sha256=pcchMI2OII0QSjiVxRiTEz4M0B7TlQPzGurfCuka-xc,5049 +anyio/_core/_synchronization.py,sha256=xOOG4hF9783N6E2IcD3YKiukguA5bPrj6BodDsKNaJY,16822 
+anyio/_core/_tasks.py,sha256=ebGLjHvwL6I9aGyPwvCig1drebSVYFzvY3pnN3TsB4o,5273 +anyio/_core/_testing.py,sha256=VZka_yebIhJ6mJ6Vo_ilO3Nbz53ieqg0WBijwciMwdY,2196 +anyio/_core/_typedattr.py,sha256=k5-wBvMlDlKHIpn18INVnXAlGwI3CrAvPmWoceHjnOQ,2534 +anyio/abc/__init__.py,sha256=hMa47CMs5O1twC2bBcSbzwX-3Q08BAgAPTRekQobb3E,2123 +anyio/abc/_resources.py,sha256=js737mWPG6IW0fH8W4Tz9eNWLztse7dKxEC61z934Vk,752 +anyio/abc/_sockets.py,sha256=i1VdcJTLAuRlYeZoL6s5RBSWbX62Cu6ln5YZBL2YrWk,5754 +anyio/abc/_streams.py,sha256=0g70fhKAzbnK0KKmWwRgwmKdApBwduAcVj4TpjSzjzU,6501 +anyio/abc/_subprocesses.py,sha256=iREP_YQ91it88lDU4XIcI3HZ9HUvV5UmjQk_sSPonrw,2071 +anyio/abc/_tasks.py,sha256=mQQd1DANqpySKyehVVPdMfi_UEG49zZUJpt5blunOjg,3119 +anyio/abc/_testing.py,sha256=ifKCUPzcQdHAEGO-weu2GQvzjMQPPIWO24mQ0z6zkdU,1928 +anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/streams/buffered.py,sha256=FegOSO4Xcxa5SaDfU1A3ZkTTxaPrv6G435Y_giZ8k44,4437 +anyio/streams/file.py,sha256=pujJ-m6BX-gOLnVoZwkE5kh-YDs5Vx9eJFVkvliQ0S4,4353 +anyio/streams/memory.py,sha256=3RGeZoevoGIgBWfD2_X1cqxIPOz-BqQkRf6lUcOnBYc,9209 +anyio/streams/stapled.py,sha256=0E0V15v8M5GVelpHe5RT0S33tQ9hGe4ZCXo_KJEjtt4,4258 +anyio/streams/text.py,sha256=WRFyjsRpBjQKdCmR4ZuzYTEAJqGx2s5oTJmGI1C6Ng0,5014 +anyio/streams/tls.py,sha256=-WXGsMV14XHXAxc38WpBvGusjuY7e449g4UCEHIlnWw,12040 +anyio-3.6.1.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081 +anyio-3.6.1.dist-info/METADATA,sha256=cXu3CLppFqT_rBl4Eo2HOb0J1zm6Ltu_tMFuzjuQnew,4654 +anyio-3.6.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +anyio-3.6.1.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39 +anyio-3.6.1.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6 +anyio-3.6.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +anyio-3.6.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/entry_points.txt new file mode 100644 index 0000000..44dd9bd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[pytest11] +anyio = anyio.pytest_plugin diff --git a/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/top_level.txt new file mode 100644 index 0000000..c77c069 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio-3.6.1.dist-info/top_level.txt @@ -0,0 +1 @@ +anyio diff --git a/myenv/lib/python3.9/site-packages/anyio/__init__.py b/myenv/lib/python3.9/site-packages/anyio/__init__.py new file mode 100644 index 0000000..6e81178 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/__init__.py @@ -0,0 +1,167 @@ +__all__ = ( + "maybe_async", + "maybe_async_cm", + "run", + "sleep", + "sleep_forever", + "sleep_until", + "current_time", + "get_all_backends", + "get_cancelled_exc_class", + "BrokenResourceError", + "BrokenWorkerProcess", + "BusyResourceError", + "ClosedResourceError", + "DelimiterNotFound", + "EndOfStream", + "ExceptionGroup", + 
"IncompleteRead", + "TypedAttributeLookupError", + "WouldBlock", + "AsyncFile", + "Path", + "open_file", + "wrap_file", + "aclose_forcefully", + "open_signal_receiver", + "connect_tcp", + "connect_unix", + "create_tcp_listener", + "create_unix_listener", + "create_udp_socket", + "create_connected_udp_socket", + "getaddrinfo", + "getnameinfo", + "wait_socket_readable", + "wait_socket_writable", + "create_memory_object_stream", + "run_process", + "open_process", + "create_lock", + "CapacityLimiter", + "CapacityLimiterStatistics", + "Condition", + "ConditionStatistics", + "Event", + "EventStatistics", + "Lock", + "LockStatistics", + "Semaphore", + "SemaphoreStatistics", + "create_condition", + "create_event", + "create_semaphore", + "create_capacity_limiter", + "open_cancel_scope", + "fail_after", + "move_on_after", + "current_effective_deadline", + "TASK_STATUS_IGNORED", + "CancelScope", + "create_task_group", + "TaskInfo", + "get_current_task", + "get_running_tasks", + "wait_all_tasks_blocked", + "run_sync_in_worker_thread", + "run_async_from_thread", + "run_sync_from_thread", + "current_default_worker_thread_limiter", + "create_blocking_portal", + "start_blocking_portal", + "typed_attribute", + "TypedAttributeSet", + "TypedAttributeProvider", +) + +from typing import Any + +from ._core._compat import maybe_async, maybe_async_cm +from ._core._eventloop import ( + current_time, + get_all_backends, + get_cancelled_exc_class, + run, + sleep, + sleep_forever, + sleep_until, +) +from ._core._exceptions import ( + BrokenResourceError, + BrokenWorkerProcess, + BusyResourceError, + ClosedResourceError, + DelimiterNotFound, + EndOfStream, + ExceptionGroup, + IncompleteRead, + TypedAttributeLookupError, + WouldBlock, +) +from ._core._fileio import AsyncFile, Path, open_file, wrap_file +from ._core._resources import aclose_forcefully +from ._core._signals import open_signal_receiver +from ._core._sockets import ( + connect_tcp, + connect_unix, + create_connected_udp_socket, + create_tcp_listener, + create_udp_socket, + create_unix_listener, + getaddrinfo, + getnameinfo, + wait_socket_readable, + wait_socket_writable, +) +from ._core._streams import create_memory_object_stream +from ._core._subprocesses import open_process, run_process +from ._core._synchronization import ( + CapacityLimiter, + CapacityLimiterStatistics, + Condition, + ConditionStatistics, + Event, + EventStatistics, + Lock, + LockStatistics, + Semaphore, + SemaphoreStatistics, + create_capacity_limiter, + create_condition, + create_event, + create_lock, + create_semaphore, +) +from ._core._tasks import ( + TASK_STATUS_IGNORED, + CancelScope, + create_task_group, + current_effective_deadline, + fail_after, + move_on_after, + open_cancel_scope, +) +from ._core._testing import ( + TaskInfo, + get_current_task, + get_running_tasks, + wait_all_tasks_blocked, +) +from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute + +# Re-exported here, for backwards compatibility +# isort: off +from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread +from .from_thread import ( + create_blocking_portal, + run_async_from_thread, + run_sync_from_thread, + start_blocking_portal, +) + +# Re-export imports so they look like they live directly in this package +key: str +value: Any +for key, value in list(locals().items()): + if getattr(value, "__module__", "").startswith("anyio."): + value.__module__ = __name__ diff --git a/myenv/lib/python3.9/site-packages/anyio/_backends/__init__.py 
b/myenv/lib/python3.9/site-packages/anyio/_backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/anyio/_backends/_asyncio.py b/myenv/lib/python3.9/site-packages/anyio/_backends/_asyncio.py new file mode 100644 index 0000000..d2bbc94 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_backends/_asyncio.py @@ -0,0 +1,2181 @@ +import array +import asyncio +import concurrent.futures +import math +import socket +import sys +from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined] +from collections import OrderedDict, deque +from concurrent.futures import Future +from contextvars import Context, copy_context +from dataclasses import dataclass +from functools import partial, wraps +from inspect import ( + CORO_RUNNING, + CORO_SUSPENDED, + GEN_RUNNING, + GEN_SUSPENDED, + getcoroutinestate, + getgeneratorstate, +) +from io import IOBase +from os import PathLike +from queue import Queue +from socket import AddressFamily, SocketKind +from threading import Thread +from types import TracebackType +from typing import ( + IO, + Any, + AsyncGenerator, + Awaitable, + Callable, + Collection, + Coroutine, + Deque, + Dict, + Generator, + Iterable, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) +from weakref import WeakKeyDictionary + +import sniffio + +from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc +from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable +from .._core._eventloop import claim_worker_thread, threadlocals +from .._core._exceptions import ( + BrokenResourceError, + BusyResourceError, + ClosedResourceError, + EndOfStream, +) +from .._core._exceptions import ExceptionGroup as BaseExceptionGroup +from .._core._exceptions import WouldBlock +from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr +from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter +from .._core._synchronization import Event as BaseEvent +from .._core._synchronization import ResourceGuard +from .._core._tasks import CancelScope as BaseCancelScope +from ..abc import IPSockAddrType, UDPPacketType +from ..lowlevel import RunVar + +if sys.version_info >= (3, 8): + + def get_coro(task: asyncio.Task) -> Union[Generator, Awaitable[Any]]: + return task.get_coro() + +else: + + def get_coro(task: asyncio.Task) -> Union[Generator, Awaitable[Any]]: + return task._coro + + +if sys.version_info >= (3, 7): + from asyncio import all_tasks, create_task, current_task, get_running_loop + from asyncio import run as native_run + + def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: + return [cb for cb, context in task._callbacks] # type: ignore[attr-defined] + +else: + _T = TypeVar("_T") + + def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: + return task._callbacks + + def native_run(main, *, debug=False): + # Snatched from Python 3.7 + from asyncio import coroutines, events, tasks + + def _cancel_all_tasks(loop): + to_cancel = all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete( + tasks.gather(*to_cancel, loop=loop, return_exceptions=True) + ) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler( + { + "message": "unhandled exception during asyncio.run() shutdown", + "exception": task.exception(), + "task": task, + } + ) + + if events._get_running_loop() is 
not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop" + ) + + if not coroutines.iscoroutine(main): + raise ValueError(f"a coroutine was expected, got {main!r}") + + loop = events.new_event_loop() + try: + events.set_event_loop(loop) + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + events.set_event_loop(None) + loop.close() + + def create_task( + coro: Union[Generator[Any, None, _T], Awaitable[_T]], *, name: object = None + ) -> asyncio.Task: + return get_running_loop().create_task(coro) + + def get_running_loop() -> asyncio.AbstractEventLoop: + loop = asyncio._get_running_loop() + if loop is not None: + return loop + else: + raise RuntimeError("no running event loop") + + def all_tasks( + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> Set[asyncio.Task]: + """Return a set of all tasks for the loop.""" + from asyncio import Task + + if loop is None: + loop = get_running_loop() + + return {t for t in Task.all_tasks(loop) if not t.done()} + + def current_task( + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> Optional[asyncio.Task]: + if loop is None: + loop = get_running_loop() + + return asyncio.Task.current_task(loop) + + +T_Retval = TypeVar("T_Retval") + +# Check whether there is native support for task names in asyncio (3.8+) +_native_task_names = hasattr(asyncio.Task, "get_name") + + +_root_task: RunVar[Optional[asyncio.Task]] = RunVar("_root_task") + + +def find_root_task() -> asyncio.Task: + root_task = _root_task.get(None) + if root_task is not None and not root_task.done(): + return root_task + + # Look for a task that has been started via run_until_complete() + for task in all_tasks(): + if task._callbacks and not task.done(): + for cb in _get_task_callbacks(task): + if ( + cb is _run_until_complete_cb + or getattr(cb, "__module__", None) == "uvloop.loop" + ): + _root_task.set(task) + return task + + # Look up the topmost task in the AnyIO task tree, if possible + task = cast(asyncio.Task, current_task()) + state = _task_states.get(task) + if state: + cancel_scope = state.cancel_scope + while cancel_scope and cancel_scope._parent_scope is not None: + cancel_scope = cancel_scope._parent_scope + + if cancel_scope is not None: + return cast(asyncio.Task, cancel_scope._host_task) + + return task + + +def get_callable_name(func: Callable) -> str: + module = getattr(func, "__module__", None) + qualname = getattr(func, "__qualname__", None) + return ".".join([x for x in (module, qualname) if x]) + + +# +# Event loop +# + +_run_vars = ( + WeakKeyDictionary() +) # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] + +current_token = get_running_loop + + +def _task_started(task: asyncio.Task) -> bool: + """Return ``True`` if the task has been started and has not finished.""" + coro = cast(Coroutine[Any, Any, Any], get_coro(task)) + try: + return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED) + except AttributeError: + try: + return getgeneratorstate(cast(Generator, coro)) in ( + GEN_RUNNING, + GEN_SUSPENDED, + ) + except AttributeError: + # task coro is async_genenerator_asend https://bugs.python.org/issue37771 + raise Exception(f"Cannot determine if task {task} has started or not") + + +def _maybe_set_event_loop_policy( + policy: Optional[asyncio.AbstractEventLoopPolicy], use_uvloop: bool +) -> None: + # On CPython, use uvloop when possible if no other policy has been given and if not + # 
explicitly disabled + if policy is None and use_uvloop and sys.implementation.name == "cpython": + try: + import uvloop + except ImportError: + pass + else: + # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier) + if not hasattr( + asyncio.AbstractEventLoop, "shutdown_default_executor" + ) or hasattr(uvloop.loop.Loop, "shutdown_default_executor"): + policy = uvloop.EventLoopPolicy() + + if policy is not None: + asyncio.set_event_loop_policy(policy) + + +def run( + func: Callable[..., Awaitable[T_Retval]], + *args: object, + debug: bool = False, + use_uvloop: bool = False, + policy: Optional[asyncio.AbstractEventLoopPolicy] = None, +) -> T_Retval: + @wraps(func) + async def wrapper() -> T_Retval: + task = cast(asyncio.Task, current_task()) + task_state = TaskState(None, get_callable_name(func), None) + _task_states[task] = task_state + if _native_task_names: + task.set_name(task_state.name) + + try: + return await func(*args) + finally: + del _task_states[task] + + _maybe_set_event_loop_policy(policy, use_uvloop) + return native_run(wrapper(), debug=debug) + + +# +# Miscellaneous +# + +sleep = asyncio.sleep + + +# +# Timeouts and cancellation +# + +CancelledError = asyncio.CancelledError + + +class CancelScope(BaseCancelScope): + def __new__( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> "CancelScope": + return object.__new__(cls) + + def __init__(self, deadline: float = math.inf, shield: bool = False): + self._deadline = deadline + self._shield = shield + self._parent_scope: Optional[CancelScope] = None + self._cancel_called = False + self._active = False + self._timeout_handle: Optional[asyncio.TimerHandle] = None + self._cancel_handle: Optional[asyncio.Handle] = None + self._tasks: Set[asyncio.Task] = set() + self._host_task: Optional[asyncio.Task] = None + self._timeout_expired = False + + def __enter__(self) -> "CancelScope": + if self._active: + raise RuntimeError( + "Each CancelScope may only be used for a single 'with' block" + ) + + self._host_task = host_task = cast(asyncio.Task, current_task()) + self._tasks.add(host_task) + try: + task_state = _task_states[host_task] + except KeyError: + task_name = host_task.get_name() if _native_task_names else None + task_state = TaskState(None, task_name, self) + _task_states[host_task] = task_state + else: + self._parent_scope = task_state.cancel_scope + task_state.cancel_scope = self + + self._timeout() + self._active = True + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + if not self._active: + raise RuntimeError("This cancel scope is not active") + if current_task() is not self._host_task: + raise RuntimeError( + "Attempted to exit cancel scope in a different task than it was " + "entered in" + ) + + assert self._host_task is not None + host_task_state = _task_states.get(self._host_task) + if host_task_state is None or host_task_state.cancel_scope is not self: + raise RuntimeError( + "Attempted to exit a cancel scope that isn't the current tasks's " + "current cancel scope" + ) + + self._active = False + if self._timeout_handle: + self._timeout_handle.cancel() + self._timeout_handle = None + + self._tasks.remove(self._host_task) + + host_task_state.cancel_scope = self._parent_scope + + # Restart the cancellation effort in the farthest directly cancelled parent scope if this + # one was shielded + if self._shield: + self._deliver_cancellation_to_parent() + + if exc_val 
is not None: + exceptions = ( + exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val] + ) + if all(isinstance(exc, CancelledError) for exc in exceptions): + if self._timeout_expired: + return True + elif not self._cancel_called: + # Task was cancelled natively + return None + elif not self._parent_cancelled(): + # This scope was directly cancelled + return True + + return None + + def _timeout(self) -> None: + if self._deadline != math.inf: + loop = get_running_loop() + if loop.time() >= self._deadline: + self._timeout_expired = True + self.cancel() + else: + self._timeout_handle = loop.call_at(self._deadline, self._timeout) + + def _deliver_cancellation(self) -> None: + """ + Deliver cancellation to directly contained tasks and nested cancel scopes. + + Schedule another run at the end if we still have tasks eligible for cancellation. + """ + should_retry = False + current = current_task() + for task in self._tasks: + if task._must_cancel: # type: ignore[attr-defined] + continue + + # The task is eligible for cancellation if it has started and is not in a cancel + # scope shielded from this one + cancel_scope = _task_states[task].cancel_scope + while cancel_scope is not self: + if cancel_scope is None or cancel_scope._shield: + break + else: + cancel_scope = cancel_scope._parent_scope + else: + should_retry = True + if task is not current and ( + task is self._host_task or _task_started(task) + ): + task.cancel() + + # Schedule another callback if there are still tasks left + if should_retry: + self._cancel_handle = get_running_loop().call_soon( + self._deliver_cancellation + ) + else: + self._cancel_handle = None + + def _deliver_cancellation_to_parent(self) -> None: + """Start cancellation effort in the farthest directly cancelled parent scope""" + scope = self._parent_scope + scope_to_cancel: Optional[CancelScope] = None + while scope is not None: + if scope._cancel_called and scope._cancel_handle is None: + scope_to_cancel = scope + + # No point in looking beyond any shielded scope + if scope._shield: + break + + scope = scope._parent_scope + + if scope_to_cancel is not None: + scope_to_cancel._deliver_cancellation() + + def _parent_cancelled(self) -> bool: + # Check whether any parent has been cancelled + cancel_scope = self._parent_scope + while cancel_scope is not None and not cancel_scope._shield: + if cancel_scope._cancel_called: + return True + else: + cancel_scope = cancel_scope._parent_scope + + return False + + def cancel(self) -> DeprecatedAwaitable: + if not self._cancel_called: + if self._timeout_handle: + self._timeout_handle.cancel() + self._timeout_handle = None + + self._cancel_called = True + self._deliver_cancellation() + + return DeprecatedAwaitable(self.cancel) + + @property + def deadline(self) -> float: + return self._deadline + + @deadline.setter + def deadline(self, value: float) -> None: + self._deadline = float(value) + if self._timeout_handle is not None: + self._timeout_handle.cancel() + self._timeout_handle = None + + if self._active and not self._cancel_called: + self._timeout() + + @property + def cancel_called(self) -> bool: + return self._cancel_called + + @property + def shield(self) -> bool: + return self._shield + + @shield.setter + def shield(self, value: bool) -> None: + if self._shield != value: + self._shield = value + if not value: + self._deliver_cancellation_to_parent() + + +async def checkpoint() -> None: + await sleep(0) + + +async def checkpoint_if_cancelled() -> None: + task = current_task() + if task is None: + return + 
+ try: + cancel_scope = _task_states[task].cancel_scope + except KeyError: + return + + while cancel_scope: + if cancel_scope.cancel_called: + await sleep(0) + elif cancel_scope.shield: + break + else: + cancel_scope = cancel_scope._parent_scope + + +async def cancel_shielded_checkpoint() -> None: + with CancelScope(shield=True): + await sleep(0) + + +def current_effective_deadline() -> float: + try: + cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index] + except KeyError: + return math.inf + + deadline = math.inf + while cancel_scope: + deadline = min(deadline, cancel_scope.deadline) + if cancel_scope.shield: + break + else: + cancel_scope = cancel_scope._parent_scope + + return deadline + + +def current_time() -> float: + return get_running_loop().time() + + +# +# Task states +# + + +class TaskState: + """ + Encapsulates auxiliary task information that cannot be added to the Task instance itself + because there are no guarantees about its implementation. + """ + + __slots__ = "parent_id", "name", "cancel_scope" + + def __init__( + self, + parent_id: Optional[int], + name: Optional[str], + cancel_scope: Optional[CancelScope], + ): + self.parent_id = parent_id + self.name = name + self.cancel_scope = cancel_scope + + +_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState] + + +# +# Task groups +# + + +class ExceptionGroup(BaseExceptionGroup): + def __init__(self, exceptions: List[BaseException]): + super().__init__() + self.exceptions = exceptions + + +class _AsyncioTaskStatus(abc.TaskStatus): + def __init__(self, future: asyncio.Future, parent_id: int): + self._future = future + self._parent_id = parent_id + + def started(self, value: object = None) -> None: + try: + self._future.set_result(value) + except asyncio.InvalidStateError: + raise RuntimeError( + "called 'started' twice on the same task status" + ) from None + + task = cast(asyncio.Task, current_task()) + _task_states[task].parent_id = self._parent_id + + +class TaskGroup(abc.TaskGroup): + def __init__(self) -> None: + self.cancel_scope: CancelScope = CancelScope() + self._active = False + self._exceptions: List[BaseException] = [] + + async def __aenter__(self) -> "TaskGroup": + self.cancel_scope.__enter__() + self._active = True + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb) + if exc_val is not None: + self.cancel_scope.cancel() + self._exceptions.append(exc_val) + + while self.cancel_scope._tasks: + try: + await asyncio.wait(self.cancel_scope._tasks) + except asyncio.CancelledError: + self.cancel_scope.cancel() + + self._active = False + if not self.cancel_scope._parent_cancelled(): + exceptions = self._filter_cancellation_errors(self._exceptions) + else: + exceptions = self._exceptions + + try: + if len(exceptions) > 1: + if all( + isinstance(e, CancelledError) and not e.args for e in exceptions + ): + # Tasks were cancelled natively, without a cancellation message + raise CancelledError + else: + raise ExceptionGroup(exceptions) + elif exceptions and exceptions[0] is not exc_val: + raise exceptions[0] + except BaseException as exc: + # Clear the context here, as it can only be done in-flight. + # If the context is not cleared, it can result in recursive tracebacks (see #145). 
+ exc.__context__ = None + raise + + return ignore_exception + + @staticmethod + def _filter_cancellation_errors( + exceptions: Sequence[BaseException], + ) -> List[BaseException]: + filtered_exceptions: List[BaseException] = [] + for exc in exceptions: + if isinstance(exc, ExceptionGroup): + new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions) + if len(new_exceptions) > 1: + filtered_exceptions.append(exc) + elif len(new_exceptions) == 1: + filtered_exceptions.append(new_exceptions[0]) + elif new_exceptions: + new_exc = ExceptionGroup(new_exceptions) + new_exc.__cause__ = exc.__cause__ + new_exc.__context__ = exc.__context__ + new_exc.__traceback__ = exc.__traceback__ + filtered_exceptions.append(new_exc) + elif not isinstance(exc, CancelledError) or exc.args: + filtered_exceptions.append(exc) + + return filtered_exceptions + + async def _run_wrapped_task( + self, coro: Coroutine, task_status_future: Optional[asyncio.Future] + ) -> None: + # This is the code path for Python 3.6 and 3.7 on which asyncio freaks out if a task raises + # a BaseException. + __traceback_hide__ = __tracebackhide__ = True # noqa: F841 + task = cast(asyncio.Task, current_task()) + try: + await coro + except BaseException as exc: + if task_status_future is None or task_status_future.done(): + self._exceptions.append(exc) + self.cancel_scope.cancel() + else: + task_status_future.set_exception(exc) + else: + if task_status_future is not None and not task_status_future.done(): + task_status_future.set_exception( + RuntimeError("Child exited without calling task_status.started()") + ) + finally: + if task in self.cancel_scope._tasks: + self.cancel_scope._tasks.remove(task) + del _task_states[task] + + def _spawn( + self, + func: Callable[..., Coroutine], + args: tuple, + name: object, + task_status_future: Optional[asyncio.Future] = None, + ) -> asyncio.Task: + def task_done(_task: asyncio.Task) -> None: + # This is the code path for Python 3.8+ + assert _task in self.cancel_scope._tasks + self.cancel_scope._tasks.remove(_task) + del _task_states[_task] + + try: + exc = _task.exception() + except CancelledError as e: + while isinstance(e.__context__, CancelledError): + e = e.__context__ + + exc = e + + if exc is not None: + if task_status_future is None or task_status_future.done(): + self._exceptions.append(exc) + self.cancel_scope.cancel() + else: + task_status_future.set_exception(exc) + elif task_status_future is not None and not task_status_future.done(): + task_status_future.set_exception( + RuntimeError("Child exited without calling task_status.started()") + ) + + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." 
+ ) + + options = {} + name = get_callable_name(func) if name is None else str(name) + if _native_task_names: + options["name"] = name + + kwargs = {} + if task_status_future: + parent_id = id(current_task()) + kwargs["task_status"] = _AsyncioTaskStatus( + task_status_future, id(self.cancel_scope._host_task) + ) + else: + parent_id = id(self.cancel_scope._host_task) + + coro = func(*args, **kwargs) + if not asyncio.iscoroutine(coro): + raise TypeError( + f"Expected an async function, but {func} appears to be synchronous" + ) + + foreign_coro = not hasattr(coro, "cr_frame") and not hasattr(coro, "gi_frame") + if foreign_coro or sys.version_info < (3, 8): + coro = self._run_wrapped_task(coro, task_status_future) + + task = create_task(coro, **options) + if not foreign_coro and sys.version_info >= (3, 8): + task.add_done_callback(task_done) + + # Make the spawned task inherit the task group's cancel scope + _task_states[task] = TaskState( + parent_id=parent_id, name=name, cancel_scope=self.cancel_scope + ) + self.cancel_scope._tasks.add(task) + return task + + def start_soon( + self, func: Callable[..., Coroutine], *args: object, name: object = None + ) -> None: + self._spawn(func, args, name) + + async def start( + self, func: Callable[..., Coroutine], *args: object, name: object = None + ) -> None: + future: asyncio.Future = asyncio.Future() + task = self._spawn(func, args, name, future) + + # If the task raises an exception after sending a start value without a switch point + # between, the task group is cancelled and this method never proceeds to process the + # completed future. That's why we have to have a shielded cancel scope here. + with CancelScope(shield=True): + try: + return await future + except CancelledError: + task.cancel() + raise + + +# +# Threads +# + +_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]] + + +class WorkerThread(Thread): + MAX_IDLE_TIME = 10 # seconds + + def __init__( + self, + root_task: asyncio.Task, + workers: Set["WorkerThread"], + idle_workers: Deque["WorkerThread"], + ): + super().__init__(name="AnyIO worker thread") + self.root_task = root_task + self.workers = workers + self.idle_workers = idle_workers + self.loop = root_task._loop + self.queue: Queue[ + Union[Tuple[Context, Callable, tuple, asyncio.Future], None] + ] = Queue(2) + self.idle_since = current_time() + self.stopping = False + + def _report_result( + self, future: asyncio.Future, result: Any, exc: Optional[BaseException] + ) -> None: + self.idle_since = current_time() + if not self.stopping: + self.idle_workers.append(self) + + if not future.cancelled(): + if exc is not None: + future.set_exception(exc) + else: + future.set_result(result) + + def run(self) -> None: + with claim_worker_thread("asyncio"): + threadlocals.loop = self.loop + while True: + item = self.queue.get() + if item is None: + # Shutdown command received + return + + context, func, args, future = item + if not future.cancelled(): + result = None + exception: Optional[BaseException] = None + try: + result = context.run(func, *args) + except BaseException as exc: + exception = exc + + if not self.loop.is_closed(): + self.loop.call_soon_threadsafe( + self._report_result, future, result, exception + ) + + self.queue.task_done() + + def stop(self, f: Optional[asyncio.Task] = None) -> None: + self.stopping = True + self.queue.put_nowait(None) + self.workers.discard(self) + try: + self.idle_workers.remove(self) + except ValueError: + pass + + +_threadpool_idle_workers: RunVar[Deque[WorkerThread]] = RunVar( 
+ "_threadpool_idle_workers" +) +_threadpool_workers: RunVar[Set[WorkerThread]] = RunVar("_threadpool_workers") + + +async def run_sync_in_worker_thread( + func: Callable[..., T_Retval], + *args: object, + cancellable: bool = False, + limiter: Optional["CapacityLimiter"] = None, +) -> T_Retval: + await checkpoint() + + # If this is the first run in this event loop thread, set up the necessary variables + try: + idle_workers = _threadpool_idle_workers.get() + workers = _threadpool_workers.get() + except LookupError: + idle_workers = deque() + workers = set() + _threadpool_idle_workers.set(idle_workers) + _threadpool_workers.set(workers) + + async with (limiter or current_default_thread_limiter()): + with CancelScope(shield=not cancellable): + future: asyncio.Future = asyncio.Future() + root_task = find_root_task() + if not idle_workers: + worker = WorkerThread(root_task, workers, idle_workers) + worker.start() + workers.add(worker) + root_task.add_done_callback(worker.stop) + else: + worker = idle_workers.pop() + + # Prune any other workers that have been idle for MAX_IDLE_TIME seconds or longer + now = current_time() + while idle_workers: + if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME: + break + + expired_worker = idle_workers.popleft() + expired_worker.root_task.remove_done_callback(expired_worker.stop) + expired_worker.stop() + + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, None) + worker.queue.put_nowait((context, func, args, future)) + return await future + + +def run_sync_from_thread( + func: Callable[..., T_Retval], + *args: object, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> T_Retval: + @wraps(func) + def wrapper() -> None: + try: + f.set_result(func(*args)) + except BaseException as exc: + f.set_exception(exc) + if not isinstance(exc, Exception): + raise + + f: concurrent.futures.Future[T_Retval] = Future() + loop = loop or threadlocals.loop + if sys.version_info < (3, 7): + loop.call_soon_threadsafe(copy_context().run, wrapper) + else: + loop.call_soon_threadsafe(wrapper) + + return f.result() + + +def run_async_from_thread( + func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object +) -> T_Retval: + f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe( + func(*args), threadlocals.loop + ) + return f.result() + + +class BlockingPortal(abc.BlockingPortal): + def __new__(cls) -> "BlockingPortal": + return object.__new__(cls) + + def __init__(self) -> None: + super().__init__() + self._loop = get_running_loop() + + def _spawn_task_from_thread( + self, + func: Callable, + args: tuple, + kwargs: Dict[str, Any], + name: object, + future: Future, + ) -> None: + run_sync_from_thread( + partial(self._task_group.start_soon, name=name), + self._call_func, + func, + args, + kwargs, + future, + loop=self._loop, + ) + + +# +# Subprocesses +# + + +@dataclass(eq=False) +class StreamReaderWrapper(abc.ByteReceiveStream): + _stream: asyncio.StreamReader + + async def receive(self, max_bytes: int = 65536) -> bytes: + data = await self._stream.read(max_bytes) + if data: + return data + else: + raise EndOfStream + + async def aclose(self) -> None: + self._stream.feed_eof() + + +@dataclass(eq=False) +class StreamWriterWrapper(abc.ByteSendStream): + _stream: asyncio.StreamWriter + + async def send(self, item: bytes) -> None: + self._stream.write(item) + await self._stream.drain() + + async def aclose(self) -> None: + self._stream.close() + + +@dataclass(eq=False) +class Process(abc.Process): + _process: 
asyncio.subprocess.Process + _stdin: Optional[StreamWriterWrapper] + _stdout: Optional[StreamReaderWrapper] + _stderr: Optional[StreamReaderWrapper] + + async def aclose(self) -> None: + if self._stdin: + await self._stdin.aclose() + if self._stdout: + await self._stdout.aclose() + if self._stderr: + await self._stderr.aclose() + + await self.wait() + + async def wait(self) -> int: + return await self._process.wait() + + def terminate(self) -> None: + self._process.terminate() + + def kill(self) -> None: + self._process.kill() + + def send_signal(self, signal: int) -> None: + self._process.send_signal(signal) + + @property + def pid(self) -> int: + return self._process.pid + + @property + def returncode(self) -> Optional[int]: + return self._process.returncode + + @property + def stdin(self) -> Optional[abc.ByteSendStream]: + return self._stdin + + @property + def stdout(self) -> Optional[abc.ByteReceiveStream]: + return self._stdout + + @property + def stderr(self) -> Optional[abc.ByteReceiveStream]: + return self._stderr + + +async def open_process( + command: Union[str, bytes, Sequence[Union[str, bytes]]], + *, + shell: bool, + stdin: Union[int, IO[Any], None], + stdout: Union[int, IO[Any], None], + stderr: Union[int, IO[Any], None], + cwd: Union[str, bytes, PathLike, None] = None, + env: Optional[Mapping[str, str]] = None, + start_new_session: bool = False, +) -> Process: + await checkpoint() + if shell: + process = await asyncio.create_subprocess_shell( + cast(Union[str, bytes], command), + stdin=stdin, + stdout=stdout, + stderr=stderr, + cwd=cwd, + env=env, + start_new_session=start_new_session, + ) + else: + process = await asyncio.create_subprocess_exec( + *command, + stdin=stdin, + stdout=stdout, + stderr=stderr, + cwd=cwd, + env=env, + start_new_session=start_new_session, + ) + + stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None + stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None + stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None + return Process(process, stdin_stream, stdout_stream, stderr_stream) + + +def _forcibly_shutdown_process_pool_on_exit( + workers: Set[Process], _task: object +) -> None: + """ + Forcibly shuts down worker processes belonging to this event loop.""" + child_watcher: Optional[asyncio.AbstractChildWatcher] + try: + child_watcher = asyncio.get_event_loop_policy().get_child_watcher() + except NotImplementedError: + child_watcher = None + + # Close as much as possible (w/o async/await) to avoid warnings + for process in workers: + if process.returncode is None: + continue + + process._stdin._stream._transport.close() # type: ignore[union-attr] + process._stdout._stream._transport.close() # type: ignore[union-attr] + process._stderr._stream._transport.close() # type: ignore[union-attr] + process.kill() + if child_watcher: + child_watcher.remove_child_handler(process.pid) + + +async def _shutdown_process_pool_on_exit(workers: Set[Process]) -> None: + """ + Shuts down worker processes belonging to this event loop. + + NOTE: this only works when the event loop was started using asyncio.run() or anyio.run(). 
+ + """ + process: Process + try: + await sleep(math.inf) + except asyncio.CancelledError: + for process in workers: + if process.returncode is None: + process.kill() + + for process in workers: + await process.aclose() + + +def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None: + kwargs = {"name": "AnyIO process pool shutdown task"} if _native_task_names else {} + create_task(_shutdown_process_pool_on_exit(workers), **kwargs) + find_root_task().add_done_callback( + partial(_forcibly_shutdown_process_pool_on_exit, workers) + ) + + +# +# Sockets and networking +# + + +class StreamProtocol(asyncio.Protocol): + read_queue: Deque[bytes] + read_event: asyncio.Event + write_event: asyncio.Event + exception: Optional[Exception] = None + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + self.read_queue = deque() + self.read_event = asyncio.Event() + self.write_event = asyncio.Event() + self.write_event.set() + cast(asyncio.Transport, transport).set_write_buffer_limits(0) + + def connection_lost(self, exc: Optional[Exception]) -> None: + if exc: + self.exception = BrokenResourceError() + self.exception.__cause__ = exc + + self.read_event.set() + self.write_event.set() + + def data_received(self, data: bytes) -> None: + self.read_queue.append(data) + self.read_event.set() + + def eof_received(self) -> Optional[bool]: + self.read_event.set() + return True + + def pause_writing(self) -> None: + self.write_event = asyncio.Event() + + def resume_writing(self) -> None: + self.write_event.set() + + +class DatagramProtocol(asyncio.DatagramProtocol): + read_queue: Deque[Tuple[bytes, IPSockAddrType]] + read_event: asyncio.Event + write_event: asyncio.Event + exception: Optional[Exception] = None + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + self.read_queue = deque(maxlen=100) # arbitrary value + self.read_event = asyncio.Event() + self.write_event = asyncio.Event() + self.write_event.set() + + def connection_lost(self, exc: Optional[Exception]) -> None: + self.read_event.set() + self.write_event.set() + + def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None: + addr = convert_ipv6_sockaddr(addr) + self.read_queue.append((data, addr)) + self.read_event.set() + + def error_received(self, exc: Exception) -> None: + self.exception = exc + + def pause_writing(self) -> None: + self.write_event.clear() + + def resume_writing(self) -> None: + self.write_event.set() + + +class SocketStream(abc.SocketStream): + def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol): + self._transport = transport + self._protocol = protocol + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + self._closed = False + + @property + def _raw_socket(self) -> socket.socket: + return self._transport.get_extra_info("socket") + + async def receive(self, max_bytes: int = 65536) -> bytes: + with self._receive_guard: + await checkpoint() + + if ( + not self._protocol.read_event.is_set() + and not self._transport.is_closing() + ): + self._transport.resume_reading() + await self._protocol.read_event.wait() + self._transport.pause_reading() + + try: + chunk = self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + elif self._protocol.exception: + raise self._protocol.exception + else: + raise EndOfStream from None + + if len(chunk) > max_bytes: + # Split the oversized chunk + chunk, leftover = chunk[:max_bytes], chunk[max_bytes:] + 
self._protocol.read_queue.appendleft(leftover) + + # If the read queue is empty, clear the flag so that the next call will block until + # data is available + if not self._protocol.read_queue: + self._protocol.read_event.clear() + + return chunk + + async def send(self, item: bytes) -> None: + with self._send_guard: + await checkpoint() + + if self._closed: + raise ClosedResourceError + elif self._protocol.exception is not None: + raise self._protocol.exception + + try: + self._transport.write(item) + except RuntimeError as exc: + if self._transport.is_closing(): + raise BrokenResourceError from exc + else: + raise + + await self._protocol.write_event.wait() + + async def send_eof(self) -> None: + try: + self._transport.write_eof() + except OSError: + pass + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + try: + self._transport.write_eof() + except OSError: + pass + + self._transport.close() + await sleep(0) + self._transport.abort() + + +class UNIXSocketStream(abc.SocketStream): + _receive_future: Optional[asyncio.Future] = None + _send_future: Optional[asyncio.Future] = None + _closing = False + + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._loop = get_running_loop() + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: + def callback(f: object) -> None: + del self._receive_future + loop.remove_reader(self.__raw_socket) + + f = self._receive_future = asyncio.Future() + self._loop.add_reader(self.__raw_socket, f.set_result, None) + f.add_done_callback(callback) + return f + + def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: + def callback(f: object) -> None: + del self._send_future + loop.remove_writer(self.__raw_socket) + + f = self._send_future = asyncio.Future() + self._loop.add_writer(self.__raw_socket, f.set_result, None) + f.add_done_callback(callback) + return f + + async def send_eof(self) -> None: + with self._send_guard: + self._raw_socket.shutdown(socket.SHUT_WR) + + async def receive(self, max_bytes: int = 65536) -> bytes: + loop = get_running_loop() + await checkpoint() + with self._receive_guard: + while True: + try: + data = self.__raw_socket.recv(max_bytes) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + if not data: + raise EndOfStream + + return data + + async def send(self, item: bytes) -> None: + loop = get_running_loop() + await checkpoint() + with self._send_guard: + view = memoryview(item) + while view: + try: + bytes_sent = self.__raw_socket.send(item) + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + view = view[bytes_sent:] + + async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]: + if not isinstance(msglen, int) or msglen < 0: + raise ValueError("msglen must be a non-negative integer") + if not isinstance(maxfds, int) or maxfds < 1: + raise ValueError("maxfds must be a positive integer") + + loop = get_running_loop() + fds = array.array("i") + await checkpoint() + with self._receive_guard: + 
while True: + try: + message, ancdata, flags, addr = self.__raw_socket.recvmsg( + msglen, socket.CMSG_LEN(maxfds * fds.itemsize) + ) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + if not message and not ancdata: + raise EndOfStream + + break + + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: + raise RuntimeError( + f"Received unexpected ancillary data; message = {message!r}, " + f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" + ) + + fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return message, list(fds) + + async def send_fds( + self, message: bytes, fds: Collection[Union[int, IOBase]] + ) -> None: + if not message: + raise ValueError("message must not be empty") + if not fds: + raise ValueError("fds must not be empty") + + loop = get_running_loop() + filenos: List[int] = [] + for fd in fds: + if isinstance(fd, int): + filenos.append(fd) + elif isinstance(fd, IOBase): + filenos.append(fd.fileno()) + + fdarray = array.array("i", filenos) + await checkpoint() + with self._send_guard: + while True: + try: + # The ignore can be removed after mypy picks up + # https://github.com/python/typeshed/pull/5545 + self.__raw_socket.sendmsg( + [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)] + ) + break + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + + async def aclose(self) -> None: + if not self._closing: + self._closing = True + if self.__raw_socket.fileno() != -1: + self.__raw_socket.close() + + if self._receive_future: + self._receive_future.set_result(None) + if self._send_future: + self._send_future.set_result(None) + + +class TCPSocketListener(abc.SocketListener): + _accept_scope: Optional[CancelScope] = None + _closed = False + + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._loop = cast(asyncio.BaseEventLoop, get_running_loop()) + self._accept_guard = ResourceGuard("accepting connections from") + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + async def accept(self) -> abc.SocketStream: + if self._closed: + raise ClosedResourceError + + with self._accept_guard: + await checkpoint() + with CancelScope() as self._accept_scope: + try: + client_sock, _addr = await self._loop.sock_accept(self._raw_socket) + except asyncio.CancelledError: + # Workaround for https://bugs.python.org/issue41317 + try: + self._loop.remove_reader(self._raw_socket) + except (ValueError, NotImplementedError): + pass + + if self._closed: + raise ClosedResourceError from None + + raise + finally: + self._accept_scope = None + + client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + transport, protocol = await self._loop.connect_accepted_socket( + StreamProtocol, client_sock + ) + return SocketStream(cast(asyncio.Transport, transport), protocol) + + async def aclose(self) -> None: + if self._closed: + return + + self._closed = True + if self._accept_scope: + # Workaround for https://bugs.python.org/issue41317 + try: + self._loop.remove_reader(self._raw_socket) + except (ValueError, NotImplementedError): + pass + + self._accept_scope.cancel() + await sleep(0) + + self._raw_socket.close() + + +class 
UNIXSocketListener(abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._loop = get_running_loop() + self._accept_guard = ResourceGuard("accepting connections from") + self._closed = False + + async def accept(self) -> abc.SocketStream: + await checkpoint() + with self._accept_guard: + while True: + try: + client_sock, _ = self.__raw_socket.accept() + client_sock.setblocking(False) + return UNIXSocketStream(client_sock) + except BlockingIOError: + f: asyncio.Future = asyncio.Future() + self._loop.add_reader(self.__raw_socket, f.set_result, None) + f.add_done_callback( + lambda _: self._loop.remove_reader(self.__raw_socket) + ) + await f + except OSError as exc: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + + async def aclose(self) -> None: + self._closed = True + self.__raw_socket.close() + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + +class UDPSocket(abc.UDPSocket): + def __init__( + self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol + ): + self._transport = transport + self._protocol = protocol + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + self._closed = False + + @property + def _raw_socket(self) -> socket.socket: + return self._transport.get_extra_info("socket") + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + self._transport.close() + + async def receive(self) -> Tuple[bytes, IPSockAddrType]: + with self._receive_guard: + await checkpoint() + + # If the buffer is empty, ask for more data + if not self._protocol.read_queue and not self._transport.is_closing(): + self._protocol.read_event.clear() + await self._protocol.read_event.wait() + + try: + return self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from None + + async def send(self, item: UDPPacketType) -> None: + with self._send_guard: + await checkpoint() + await self._protocol.write_event.wait() + if self._closed: + raise ClosedResourceError + elif self._transport.is_closing(): + raise BrokenResourceError + else: + self._transport.sendto(*item) + + +class ConnectedUDPSocket(abc.ConnectedUDPSocket): + def __init__( + self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol + ): + self._transport = transport + self._protocol = protocol + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + self._closed = False + + @property + def _raw_socket(self) -> socket.socket: + return self._transport.get_extra_info("socket") + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + self._transport.close() + + async def receive(self) -> bytes: + with self._receive_guard: + await checkpoint() + + # If the buffer is empty, ask for more data + if not self._protocol.read_queue and not self._transport.is_closing(): + self._protocol.read_event.clear() + await self._protocol.read_event.wait() + + try: + packet = self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from None + + return packet[0] + + async def send(self, item: bytes) -> None: + with self._send_guard: + await checkpoint() + await self._protocol.write_event.wait() + if self._closed: + raise ClosedResourceError + 
elif self._transport.is_closing(): + raise BrokenResourceError + else: + self._transport.sendto(item) + + +async def connect_tcp( + host: str, port: int, local_addr: Optional[Tuple[str, int]] = None +) -> SocketStream: + transport, protocol = cast( + Tuple[asyncio.Transport, StreamProtocol], + await get_running_loop().create_connection( + StreamProtocol, host, port, local_addr=local_addr + ), + ) + transport.pause_reading() + return SocketStream(transport, protocol) + + +async def connect_unix(path: str) -> UNIXSocketStream: + await checkpoint() + loop = get_running_loop() + raw_socket = socket.socket(socket.AF_UNIX) + raw_socket.setblocking(False) + while True: + try: + raw_socket.connect(path) + except BlockingIOError: + f: asyncio.Future = asyncio.Future() + loop.add_writer(raw_socket, f.set_result, None) + f.add_done_callback(lambda _: loop.remove_writer(raw_socket)) + await f + except BaseException: + raw_socket.close() + raise + else: + return UNIXSocketStream(raw_socket) + + +async def create_udp_socket( + family: socket.AddressFamily, + local_address: Optional[IPSockAddrType], + remote_address: Optional[IPSockAddrType], + reuse_port: bool, +) -> Union[UDPSocket, ConnectedUDPSocket]: + result = await get_running_loop().create_datagram_endpoint( + DatagramProtocol, + local_addr=local_address, + remote_addr=remote_address, + family=family, + reuse_port=reuse_port, + ) + transport = cast(asyncio.DatagramTransport, result[0]) + protocol = result[1] + if protocol.exception: + transport.close() + raise protocol.exception + + if not remote_address: + return UDPSocket(transport, protocol) + else: + return ConnectedUDPSocket(transport, protocol) + + +async def getaddrinfo( + host: Union[bytes, str], + port: Union[str, int, None], + *, + family: Union[int, AddressFamily] = 0, + type: Union[int, SocketKind] = 0, + proto: int = 0, + flags: int = 0, +) -> GetAddrInfoReturnType: + # https://github.com/python/typeshed/pull/4304 + result = await get_running_loop().getaddrinfo( + host, port, family=family, type=type, proto=proto, flags=flags + ) + return cast(GetAddrInfoReturnType, result) + + +async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]: + return await get_running_loop().getnameinfo(sockaddr, flags) + + +_read_events: RunVar[Dict[Any, asyncio.Event]] = RunVar("read_events") +_write_events: RunVar[Dict[Any, asyncio.Event]] = RunVar("write_events") + + +async def wait_socket_readable(sock: socket.socket) -> None: + await checkpoint() + try: + read_events = _read_events.get() + except LookupError: + read_events = {} + _read_events.set(read_events) + + if read_events.get(sock): + raise BusyResourceError("reading from") from None + + loop = get_running_loop() + event = read_events[sock] = asyncio.Event() + loop.add_reader(sock, event.set) + try: + await event.wait() + finally: + if read_events.pop(sock, None) is not None: + loop.remove_reader(sock) + readable = True + else: + readable = False + + if not readable: + raise ClosedResourceError + + +async def wait_socket_writable(sock: socket.socket) -> None: + await checkpoint() + try: + write_events = _write_events.get() + except LookupError: + write_events = {} + _write_events.set(write_events) + + if write_events.get(sock): + raise BusyResourceError("writing to") from None + + loop = get_running_loop() + event = write_events[sock] = asyncio.Event() + loop.add_writer(sock.fileno(), event.set) + try: + await event.wait() + finally: + if write_events.pop(sock, None) is not None: + loop.remove_writer(sock) + writable = 
True + else: + writable = False + + if not writable: + raise ClosedResourceError + + +# +# Synchronization +# + + +class Event(BaseEvent): + def __new__(cls) -> "Event": + return object.__new__(cls) + + def __init__(self) -> None: + self._event = asyncio.Event() + + def set(self) -> DeprecatedAwaitable: + self._event.set() + return DeprecatedAwaitable(self.set) + + def is_set(self) -> bool: + return self._event.is_set() + + async def wait(self) -> None: + if await self._event.wait(): + await checkpoint() + + def statistics(self) -> EventStatistics: + return EventStatistics(len(self._event._waiters)) # type: ignore[attr-defined] + + +class CapacityLimiter(BaseCapacityLimiter): + _total_tokens: float = 0 + + def __new__(cls, total_tokens: float) -> "CapacityLimiter": + return object.__new__(cls) + + def __init__(self, total_tokens: float): + self._borrowers: Set[Any] = set() + self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict() + self.total_tokens = total_tokens + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.release() + + @property + def total_tokens(self) -> float: + return self._total_tokens + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + if not isinstance(value, int) and not math.isinf(value): + raise TypeError("total_tokens must be an int or math.inf") + if value < 1: + raise ValueError("total_tokens must be >= 1") + + old_value = self._total_tokens + self._total_tokens = value + events = [] + for event in self._wait_queue.values(): + if value <= old_value: + break + + if not event.is_set(): + events.append(event) + old_value += 1 + + for event in events: + event.set() + + @property + def borrowed_tokens(self) -> int: + return len(self._borrowers) + + @property + def available_tokens(self) -> float: + return self._total_tokens - len(self._borrowers) + + def acquire_nowait(self) -> DeprecatedAwaitable: + self.acquire_on_behalf_of_nowait(current_task()) + return DeprecatedAwaitable(self.acquire_nowait) + + def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: + if borrower in self._borrowers: + raise RuntimeError( + "this borrower is already holding one of this CapacityLimiter's " + "tokens" + ) + + if self._wait_queue or len(self._borrowers) >= self._total_tokens: + raise WouldBlock + + self._borrowers.add(borrower) + return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) + + async def acquire(self) -> None: + return await self.acquire_on_behalf_of(current_task()) + + async def acquire_on_behalf_of(self, borrower: object) -> None: + await checkpoint_if_cancelled() + try: + self.acquire_on_behalf_of_nowait(borrower) + except WouldBlock: + event = asyncio.Event() + self._wait_queue[borrower] = event + try: + await event.wait() + except BaseException: + self._wait_queue.pop(borrower, None) + raise + + self._borrowers.add(borrower) + else: + try: + await cancel_shielded_checkpoint() + except BaseException: + self.release() + raise + + def release(self) -> None: + self.release_on_behalf_of(current_task()) + + def release_on_behalf_of(self, borrower: object) -> None: + try: + self._borrowers.remove(borrower) + except KeyError: + raise RuntimeError( + "this borrower isn't holding any of this CapacityLimiter's " "tokens" + ) from None + + # Notify the next task in line if this limiter has free capacity now + if self._wait_queue and len(self._borrowers) 
< self._total_tokens: + event = self._wait_queue.popitem()[1] + event.set() + + def statistics(self) -> CapacityLimiterStatistics: + return CapacityLimiterStatistics( + self.borrowed_tokens, + self.total_tokens, + tuple(self._borrowers), + len(self._wait_queue), + ) + + +_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter") + + +def current_default_thread_limiter() -> CapacityLimiter: + try: + return _default_thread_limiter.get() + except LookupError: + limiter = CapacityLimiter(40) + _default_thread_limiter.set(limiter) + return limiter + + +# +# Operating system signals +# + + +class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]): + def __init__(self, signals: Tuple[int, ...]): + self._signals = signals + self._loop = get_running_loop() + self._signal_queue: Deque[int] = deque() + self._future: asyncio.Future = asyncio.Future() + self._handled_signals: Set[int] = set() + + def _deliver(self, signum: int) -> None: + self._signal_queue.append(signum) + if not self._future.done(): + self._future.set_result(None) + + def __enter__(self) -> "_SignalReceiver": + for sig in set(self._signals): + self._loop.add_signal_handler(sig, self._deliver, sig) + self._handled_signals.add(sig) + + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + for sig in self._handled_signals: + self._loop.remove_signal_handler(sig) + return None + + def __aiter__(self) -> "_SignalReceiver": + return self + + async def __anext__(self) -> int: + await checkpoint() + if not self._signal_queue: + self._future = asyncio.Future() + await self._future + + return self._signal_queue.popleft() + + +def open_signal_receiver(*signals: int) -> _SignalReceiver: + return _SignalReceiver(signals) + + +# +# Testing and debugging +# + + +def _create_task_info(task: asyncio.Task) -> TaskInfo: + task_state = _task_states.get(task) + if task_state is None: + name = task.get_name() if _native_task_names else None + parent_id = None + else: + name = task_state.name + parent_id = task_state.parent_id + + return TaskInfo(id(task), parent_id, name, get_coro(task)) + + +def get_current_task() -> TaskInfo: + return _create_task_info(current_task()) # type: ignore[arg-type] + + +def get_running_tasks() -> List[TaskInfo]: + return [_create_task_info(task) for task in all_tasks() if not task.done()] + + +async def wait_all_tasks_blocked() -> None: + await checkpoint() + this_task = current_task() + while True: + for task in all_tasks(): + if task is this_task: + continue + + if task._fut_waiter is None or task._fut_waiter.done(): # type: ignore[attr-defined] + await sleep(0.1) + break + else: + return + + +class TestRunner(abc.TestRunner): + def __init__( + self, + debug: bool = False, + use_uvloop: bool = False, + policy: Optional[asyncio.AbstractEventLoopPolicy] = None, + ): + self._exceptions: List[BaseException] = [] + _maybe_set_event_loop_policy(policy, use_uvloop) + self._loop = asyncio.new_event_loop() + self._loop.set_debug(debug) + self._loop.set_exception_handler(self._exception_handler) + asyncio.set_event_loop(self._loop) + + def _cancel_all_tasks(self) -> None: + to_cancel = all_tasks(self._loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + self._loop.run_until_complete( + asyncio.gather(*to_cancel, return_exceptions=True) + ) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + raise 
cast(BaseException, task.exception()) + + def _exception_handler( + self, loop: asyncio.AbstractEventLoop, context: Dict[str, Any] + ) -> None: + if isinstance(context.get("exception"), Exception): + self._exceptions.append(context["exception"]) + else: + loop.default_exception_handler(context) + + def _raise_async_exceptions(self) -> None: + # Re-raise any exceptions raised in asynchronous callbacks + if self._exceptions: + exceptions, self._exceptions = self._exceptions, [] + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise ExceptionGroup(exceptions) + + def close(self) -> None: + try: + self._cancel_all_tasks() + self._loop.run_until_complete(self._loop.shutdown_asyncgens()) + finally: + asyncio.set_event_loop(None) + self._loop.close() + + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], + kwargs: Dict[str, Any], + ) -> Iterable[T_Retval]: + async def fixture_runner() -> None: + agen = fixture_func(**kwargs) + try: + retval = await agen.asend(None) + self._raise_async_exceptions() + except BaseException as exc: + f.set_exception(exc) + return + else: + f.set_result(retval) + + await event.wait() + try: + await agen.asend(None) + except StopAsyncIteration: + pass + else: + await agen.aclose() + raise RuntimeError("Async generator fixture did not stop") + + f = self._loop.create_future() + event = asyncio.Event() + fixture_task = self._loop.create_task(fixture_runner()) + self._loop.run_until_complete(f) + yield f.result() + event.set() + self._loop.run_until_complete(fixture_task) + self._raise_async_exceptions() + + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], + kwargs: Dict[str, Any], + ) -> T_Retval: + retval = self._loop.run_until_complete(fixture_func(**kwargs)) + self._raise_async_exceptions() + return retval + + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: Dict[str, Any] + ) -> None: + try: + self._loop.run_until_complete(test_func(**kwargs)) + except Exception as exc: + self._exceptions.append(exc) + + self._raise_async_exceptions() diff --git a/myenv/lib/python3.9/site-packages/anyio/_backends/_trio.py b/myenv/lib/python3.9/site-packages/anyio/_backends/_trio.py new file mode 100644 index 0000000..cf2aaec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_backends/_trio.py @@ -0,0 +1,988 @@ +import array +import math +import socket +from concurrent.futures import Future +from contextvars import copy_context +from dataclasses import dataclass +from functools import partial +from io import IOBase +from os import PathLike +from signal import Signals +from types import TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + AsyncGenerator, + Awaitable, + Callable, + Collection, + ContextManager, + Coroutine, + Deque, + Dict, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +import sniffio +import trio.from_thread +from outcome import Error, Outcome, Value +from trio.socket import SocketType as TrioSocketType +from trio.to_thread import run_sync + +from .. 
import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc +from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable, T +from .._core._eventloop import claim_worker_thread +from .._core._exceptions import ( + BrokenResourceError, + BusyResourceError, + ClosedResourceError, + EndOfStream, +) +from .._core._exceptions import ExceptionGroup as BaseExceptionGroup +from .._core._sockets import convert_ipv6_sockaddr +from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter +from .._core._synchronization import Event as BaseEvent +from .._core._synchronization import ResourceGuard +from .._core._tasks import CancelScope as BaseCancelScope +from ..abc import IPSockAddrType, UDPPacketType + +if TYPE_CHECKING: + from trio_typing import TaskStatus + +try: + from trio import lowlevel as trio_lowlevel +except ImportError: + from trio import hazmat as trio_lowlevel # type: ignore[no-redef] + from trio.hazmat import wait_readable, wait_writable +else: + from trio.lowlevel import wait_readable, wait_writable + +try: + trio_open_process = trio_lowlevel.open_process # type: ignore[attr-defined] +except AttributeError: + from trio import open_process as trio_open_process + +T_Retval = TypeVar("T_Retval") +T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType) + + +# +# Event loop +# + +run = trio.run +current_token = trio.lowlevel.current_trio_token +RunVar = trio.lowlevel.RunVar + + +# +# Miscellaneous +# + +sleep = trio.sleep + + +# +# Timeouts and cancellation +# + + +class CancelScope(BaseCancelScope): + def __new__( + cls, original: Optional[trio.CancelScope] = None, **kwargs: object + ) -> "CancelScope": + return object.__new__(cls) + + def __init__( + self, original: Optional[trio.CancelScope] = None, **kwargs: Any + ) -> None: + self.__original = original or trio.CancelScope(**kwargs) + + def __enter__(self) -> "CancelScope": + self.__original.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return self.__original.__exit__(exc_type, exc_val, exc_tb) + + def cancel(self) -> DeprecatedAwaitable: + self.__original.cancel() + return DeprecatedAwaitable(self.cancel) + + @property + def deadline(self) -> float: + return self.__original.deadline + + @deadline.setter + def deadline(self, value: float) -> None: + self.__original.deadline = value + + @property + def cancel_called(self) -> bool: + return self.__original.cancel_called + + @property + def shield(self) -> bool: + return self.__original.shield + + @shield.setter + def shield(self, value: bool) -> None: + self.__original.shield = value + + +CancelledError = trio.Cancelled +checkpoint = trio.lowlevel.checkpoint +checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled +cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint +current_effective_deadline = trio.current_effective_deadline +current_time = trio.current_time + + +# +# Task groups +# + + +class ExceptionGroup(BaseExceptionGroup, trio.MultiError): + pass + + +class TaskGroup(abc.TaskGroup): + def __init__(self) -> None: + self._active = False + self._nursery_manager = trio.open_nursery() + self.cancel_scope = None # type: ignore[assignment] + + async def __aenter__(self) -> "TaskGroup": + self._active = True + self._nursery = await self._nursery_manager.__aenter__() + self.cancel_scope = CancelScope(self._nursery.cancel_scope) + return self + + async def __aexit__( + self, + exc_type: 
Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + try: + return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) + except trio.MultiError as exc: + raise ExceptionGroup(exc.exceptions) from None + finally: + self._active = False + + def start_soon(self, func: Callable, *args: object, name: object = None) -> None: + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." + ) + + self._nursery.start_soon(func, *args, name=name) + + async def start( + self, func: Callable[..., Coroutine], *args: object, name: object = None + ) -> object: + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." + ) + + return await self._nursery.start(func, *args, name=name) + + +# +# Threads +# + + +async def run_sync_in_worker_thread( + func: Callable[..., T_Retval], + *args: object, + cancellable: bool = False, + limiter: Optional[trio.CapacityLimiter] = None, +) -> T_Retval: + def wrapper() -> T_Retval: + with claim_worker_thread("trio"): + return func(*args) + + # TODO: remove explicit context copying when trio 0.20 is the minimum requirement + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, None) + return await run_sync( + context.run, wrapper, cancellable=cancellable, limiter=limiter + ) + + +# TODO: remove this workaround when trio 0.20 is the minimum requirement +def run_async_from_thread( + fn: Callable[..., Awaitable[T_Retval]], *args: Any +) -> T_Retval: + async def wrapper() -> T_Retval: + retval: T_Retval + + async def inner() -> None: + nonlocal retval + __tracebackhide__ = True + retval = await fn(*args) + + async with trio.open_nursery() as n: + context.run(n.start_soon, inner) + + __tracebackhide__ = True + return retval + + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, "trio") + return trio.from_thread.run(wrapper) + + +def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval: + # TODO: remove explicit context copying when trio 0.20 is the minimum requirement + retval = trio.from_thread.run_sync(copy_context().run, fn, *args) + return cast(T_Retval, retval) + + +class BlockingPortal(abc.BlockingPortal): + def __new__(cls) -> "BlockingPortal": + return object.__new__(cls) + + def __init__(self) -> None: + super().__init__() + self._token = trio.lowlevel.current_trio_token() + + def _spawn_task_from_thread( + self, + func: Callable, + args: tuple, + kwargs: Dict[str, Any], + name: object, + future: Future, + ) -> None: + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, "trio") + trio.from_thread.run_sync( + context.run, + partial(self._task_group.start_soon, name=name), + self._call_func, + func, + args, + kwargs, + future, + trio_token=self._token, + ) + + +# +# Subprocesses +# + + +@dataclass(eq=False) +class ReceiveStreamWrapper(abc.ByteReceiveStream): + _stream: trio.abc.ReceiveStream + + async def receive(self, max_bytes: Optional[int] = None) -> bytes: + try: + data = await self._stream.receive_some(max_bytes) + except trio.ClosedResourceError as exc: + raise ClosedResourceError from exc.__cause__ + except trio.BrokenResourceError as exc: + raise BrokenResourceError from exc.__cause__ + + if data: + return data + else: + raise EndOfStream + + async def aclose(self) -> None: + await self._stream.aclose() + + +@dataclass(eq=False) +class SendStreamWrapper(abc.ByteSendStream): + 
_stream: trio.abc.SendStream + + async def send(self, item: bytes) -> None: + try: + await self._stream.send_all(item) + except trio.ClosedResourceError as exc: + raise ClosedResourceError from exc.__cause__ + except trio.BrokenResourceError as exc: + raise BrokenResourceError from exc.__cause__ + + async def aclose(self) -> None: + await self._stream.aclose() + + +@dataclass(eq=False) +class Process(abc.Process): + _process: trio.Process + _stdin: Optional[abc.ByteSendStream] + _stdout: Optional[abc.ByteReceiveStream] + _stderr: Optional[abc.ByteReceiveStream] + + async def aclose(self) -> None: + if self._stdin: + await self._stdin.aclose() + if self._stdout: + await self._stdout.aclose() + if self._stderr: + await self._stderr.aclose() + + await self.wait() + + async def wait(self) -> int: + return await self._process.wait() + + def terminate(self) -> None: + self._process.terminate() + + def kill(self) -> None: + self._process.kill() + + def send_signal(self, signal: Signals) -> None: + self._process.send_signal(signal) + + @property + def pid(self) -> int: + return self._process.pid + + @property + def returncode(self) -> Optional[int]: + return self._process.returncode + + @property + def stdin(self) -> Optional[abc.ByteSendStream]: + return self._stdin + + @property + def stdout(self) -> Optional[abc.ByteReceiveStream]: + return self._stdout + + @property + def stderr(self) -> Optional[abc.ByteReceiveStream]: + return self._stderr + + +async def open_process( + command: Union[str, bytes, Sequence[Union[str, bytes]]], + *, + shell: bool, + stdin: Union[int, IO[Any], None], + stdout: Union[int, IO[Any], None], + stderr: Union[int, IO[Any], None], + cwd: Union[str, bytes, PathLike, None] = None, + env: Optional[Mapping[str, str]] = None, + start_new_session: bool = False, +) -> Process: + process = await trio_open_process( + command, + stdin=stdin, + stdout=stdout, + stderr=stderr, + shell=shell, + cwd=cwd, + env=env, + start_new_session=start_new_session, + ) + stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None + stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None + stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None + return Process(process, stdin_stream, stdout_stream, stderr_stream) + + +class _ProcessPoolShutdownInstrument(trio.abc.Instrument): + def after_run(self) -> None: + super().after_run() + + +current_default_worker_process_limiter: RunVar = RunVar( + "current_default_worker_process_limiter" +) + + +async def _shutdown_process_pool(workers: Set[Process]) -> None: + process: Process + try: + await sleep(math.inf) + except trio.Cancelled: + for process in workers: + if process.returncode is None: + process.kill() + + with CancelScope(shield=True): + for process in workers: + await process.aclose() + + +def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None: + trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers) + + +# +# Sockets and networking +# + + +class _TrioSocketMixin(Generic[T_SockAddr]): + def __init__(self, trio_socket: TrioSocketType) -> None: + self._trio_socket = trio_socket + self._closed = False + + def _check_closed(self) -> None: + if self._closed: + raise ClosedResourceError + if self._trio_socket.fileno() < 0: + raise BrokenResourceError + + @property + def _raw_socket(self) -> socket.socket: + return self._trio_socket._sock # type: ignore[attr-defined] + + async def aclose(self) -> None: + if self._trio_socket.fileno() >= 0: + self._closed = True + 
self._trio_socket.close() + + def _convert_socket_error(self, exc: BaseException) -> "NoReturn": + if isinstance(exc, trio.ClosedResourceError): + raise ClosedResourceError from exc + elif self._trio_socket.fileno() < 0 and self._closed: + raise ClosedResourceError from None + elif isinstance(exc, OSError): + raise BrokenResourceError from exc + else: + raise exc + + +class SocketStream(_TrioSocketMixin, abc.SocketStream): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self, max_bytes: int = 65536) -> bytes: + with self._receive_guard: + try: + data = await self._trio_socket.recv(max_bytes) + except BaseException as exc: + self._convert_socket_error(exc) + + if data: + return data + else: + raise EndOfStream + + async def send(self, item: bytes) -> None: + with self._send_guard: + view = memoryview(item) + while view: + try: + bytes_sent = await self._trio_socket.send(view) + except BaseException as exc: + self._convert_socket_error(exc) + + view = view[bytes_sent:] + + async def send_eof(self) -> None: + self._trio_socket.shutdown(socket.SHUT_WR) + + +class UNIXSocketStream(SocketStream, abc.UNIXSocketStream): + async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]: + if not isinstance(msglen, int) or msglen < 0: + raise ValueError("msglen must be a non-negative integer") + if not isinstance(maxfds, int) or maxfds < 1: + raise ValueError("maxfds must be a positive integer") + + fds = array.array("i") + await checkpoint() + with self._receive_guard: + while True: + try: + message, ancdata, flags, addr = await self._trio_socket.recvmsg( + msglen, socket.CMSG_LEN(maxfds * fds.itemsize) + ) + except BaseException as exc: + self._convert_socket_error(exc) + else: + if not message and not ancdata: + raise EndOfStream + + break + + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: + raise RuntimeError( + f"Received unexpected ancillary data; message = {message!r}, " + f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" + ) + + fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return message, list(fds) + + async def send_fds( + self, message: bytes, fds: Collection[Union[int, IOBase]] + ) -> None: + if not message: + raise ValueError("message must not be empty") + if not fds: + raise ValueError("fds must not be empty") + + filenos: List[int] = [] + for fd in fds: + if isinstance(fd, int): + filenos.append(fd) + elif isinstance(fd, IOBase): + filenos.append(fd.fileno()) + + fdarray = array.array("i", filenos) + await checkpoint() + with self._send_guard: + while True: + try: + await self._trio_socket.sendmsg( + [message], + [ + ( + socket.SOL_SOCKET, + socket.SCM_RIGHTS, # type: ignore[list-item] + fdarray, + ) + ], + ) + break + except BaseException as exc: + self._convert_socket_error(exc) + + +class TCPSocketListener(_TrioSocketMixin, abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + super().__init__(trio.socket.from_stdlib_socket(raw_socket)) + self._accept_guard = ResourceGuard("accepting connections from") + + async def accept(self) -> SocketStream: + with self._accept_guard: + try: + trio_socket, _addr = await self._trio_socket.accept() + except BaseException as exc: + self._convert_socket_error(exc) + + trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + 
return SocketStream(trio_socket) + + +class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + super().__init__(trio.socket.from_stdlib_socket(raw_socket)) + self._accept_guard = ResourceGuard("accepting connections from") + + async def accept(self) -> UNIXSocketStream: + with self._accept_guard: + try: + trio_socket, _addr = await self._trio_socket.accept() + except BaseException as exc: + self._convert_socket_error(exc) + + return UNIXSocketStream(trio_socket) + + +class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> Tuple[bytes, IPSockAddrType]: + with self._receive_guard: + try: + data, addr = await self._trio_socket.recvfrom(65536) + return data, convert_ipv6_sockaddr(addr) + except BaseException as exc: + self._convert_socket_error(exc) + + async def send(self, item: UDPPacketType) -> None: + with self._send_guard: + try: + await self._trio_socket.sendto(*item) + except BaseException as exc: + self._convert_socket_error(exc) + + +class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> bytes: + with self._receive_guard: + try: + return await self._trio_socket.recv(65536) + except BaseException as exc: + self._convert_socket_error(exc) + + async def send(self, item: bytes) -> None: + with self._send_guard: + try: + await self._trio_socket.send(item) + except BaseException as exc: + self._convert_socket_error(exc) + + +async def connect_tcp( + host: str, port: int, local_address: Optional[IPSockAddrType] = None +) -> SocketStream: + family = socket.AF_INET6 if ":" in host else socket.AF_INET + trio_socket = trio.socket.socket(family) + trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + if local_address: + await trio_socket.bind(local_address) + + try: + await trio_socket.connect((host, port)) + except BaseException: + trio_socket.close() + raise + + return SocketStream(trio_socket) + + +async def connect_unix(path: str) -> UNIXSocketStream: + trio_socket = trio.socket.socket(socket.AF_UNIX) + try: + await trio_socket.connect(path) + except BaseException: + trio_socket.close() + raise + + return UNIXSocketStream(trio_socket) + + +async def create_udp_socket( + family: socket.AddressFamily, + local_address: Optional[IPSockAddrType], + remote_address: Optional[IPSockAddrType], + reuse_port: bool, +) -> Union[UDPSocket, ConnectedUDPSocket]: + trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM) + + if reuse_port: + trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + + if local_address: + await trio_socket.bind(local_address) + + if remote_address: + await trio_socket.connect(remote_address) + return ConnectedUDPSocket(trio_socket) + else: + return UDPSocket(trio_socket) + + +getaddrinfo = trio.socket.getaddrinfo +getnameinfo = trio.socket.getnameinfo + + +async def wait_socket_readable(sock: socket.socket) -> None: + try: + await wait_readable(sock) + except trio.ClosedResourceError as exc: + raise ClosedResourceError().with_traceback(exc.__traceback__) from None + except trio.BusyResourceError: + 
raise BusyResourceError("reading from") from None + + +async def wait_socket_writable(sock: socket.socket) -> None: + try: + await wait_writable(sock) + except trio.ClosedResourceError as exc: + raise ClosedResourceError().with_traceback(exc.__traceback__) from None + except trio.BusyResourceError: + raise BusyResourceError("writing to") from None + + +# +# Synchronization +# + + +class Event(BaseEvent): + def __new__(cls) -> "Event": + return object.__new__(cls) + + def __init__(self) -> None: + self.__original = trio.Event() + + def is_set(self) -> bool: + return self.__original.is_set() + + async def wait(self) -> None: + return await self.__original.wait() + + def statistics(self) -> EventStatistics: + orig_statistics = self.__original.statistics() + return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting) + + def set(self) -> DeprecatedAwaitable: + self.__original.set() + return DeprecatedAwaitable(self.set) + + +class CapacityLimiter(BaseCapacityLimiter): + def __new__(cls, *args: object, **kwargs: object) -> "CapacityLimiter": + return object.__new__(cls) + + def __init__( + self, *args: Any, original: Optional[trio.CapacityLimiter] = None + ) -> None: + self.__original = original or trio.CapacityLimiter(*args) + + async def __aenter__(self) -> None: + return await self.__original.__aenter__() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return await self.__original.__aexit__(exc_type, exc_val, exc_tb) + + @property + def total_tokens(self) -> float: + return self.__original.total_tokens + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + self.__original.total_tokens = value + + @property + def borrowed_tokens(self) -> int: + return self.__original.borrowed_tokens + + @property + def available_tokens(self) -> float: + return self.__original.available_tokens + + def acquire_nowait(self) -> DeprecatedAwaitable: + self.__original.acquire_nowait() + return DeprecatedAwaitable(self.acquire_nowait) + + def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: + self.__original.acquire_on_behalf_of_nowait(borrower) + return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) + + async def acquire(self) -> None: + await self.__original.acquire() + + async def acquire_on_behalf_of(self, borrower: object) -> None: + await self.__original.acquire_on_behalf_of(borrower) + + def release(self) -> None: + return self.__original.release() + + def release_on_behalf_of(self, borrower: object) -> None: + return self.__original.release_on_behalf_of(borrower) + + def statistics(self) -> CapacityLimiterStatistics: + orig = self.__original.statistics() + return CapacityLimiterStatistics( + borrowed_tokens=orig.borrowed_tokens, + total_tokens=orig.total_tokens, + borrowers=orig.borrowers, + tasks_waiting=orig.tasks_waiting, + ) + + +_capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper") + + +def current_default_thread_limiter() -> CapacityLimiter: + try: + return _capacity_limiter_wrapper.get() + except LookupError: + limiter = CapacityLimiter( + original=trio.to_thread.current_default_thread_limiter() + ) + _capacity_limiter_wrapper.set(limiter) + return limiter + + +# +# Signal handling +# + + +class _SignalReceiver(DeprecatedAsyncContextManager[T]): + def __init__(self, cm: ContextManager[T]): + self._cm = cm + + def __enter__(self) -> T: + return self._cm.__enter__() + + def __exit__( + self, + exc_type: 
Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return self._cm.__exit__(exc_type, exc_val, exc_tb) + + +def open_signal_receiver(*signals: Signals) -> _SignalReceiver: + cm = trio.open_signal_receiver(*signals) + return _SignalReceiver(cm) + + +# +# Testing and debugging +# + + +def get_current_task() -> TaskInfo: + task = trio_lowlevel.current_task() + + parent_id = None + if task.parent_nursery and task.parent_nursery.parent_task: + parent_id = id(task.parent_nursery.parent_task) + + return TaskInfo(id(task), parent_id, task.name, task.coro) + + +def get_running_tasks() -> List[TaskInfo]: + root_task = trio_lowlevel.current_root_task() + task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)] + nurseries = root_task.child_nurseries + while nurseries: + new_nurseries: List[trio.Nursery] = [] + for nursery in nurseries: + for task in nursery.child_tasks: + task_infos.append( + TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro) + ) + new_nurseries.extend(task.child_nurseries) + + nurseries = new_nurseries + + return task_infos + + +def wait_all_tasks_blocked() -> Awaitable[None]: + import trio.testing + + return trio.testing.wait_all_tasks_blocked() + + +class TestRunner(abc.TestRunner): + def __init__(self, **options: Any) -> None: + from collections import deque + from queue import Queue + + self._call_queue: "Queue[Callable[..., object]]" = Queue() + self._result_queue: Deque[Outcome] = deque() + self._stop_event: Optional[trio.Event] = None + self._nursery: Optional[trio.Nursery] = None + self._options = options + + async def _trio_main(self) -> None: + self._stop_event = trio.Event() + async with trio.open_nursery() as self._nursery: + await self._stop_event.wait() + + async def _call_func( + self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict + ) -> None: + try: + retval = await func(*args, **kwargs) + except BaseException as exc: + self._result_queue.append(Error(exc)) + else: + self._result_queue.append(Value(retval)) + + def _main_task_finished(self, outcome: object) -> None: + self._nursery = None + + def _get_nursery(self) -> trio.Nursery: + if self._nursery is None: + trio.lowlevel.start_guest_run( + self._trio_main, + run_sync_soon_threadsafe=self._call_queue.put, + done_callback=self._main_task_finished, + **self._options, + ) + while self._nursery is None: + self._call_queue.get()() + + return self._nursery + + def _call( + self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object + ) -> T_Retval: + self._get_nursery().start_soon(self._call_func, func, args, kwargs) + while not self._result_queue: + self._call_queue.get()() + + outcome = self._result_queue.pop() + return outcome.unwrap() + + def close(self) -> None: + if self._stop_event: + self._stop_event.set() + while self._nursery is not None: + self._call_queue.get()() + + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], + kwargs: Dict[str, Any], + ) -> Iterable[T_Retval]: + async def fixture_runner(*, task_status: "TaskStatus") -> None: + agen = fixture_func(**kwargs) + retval = await agen.asend(None) + task_status.started(retval) + await teardown_event.wait() + try: + await agen.asend(None) + except StopAsyncIteration: + pass + else: + await agen.aclose() + raise RuntimeError("Async generator fixture did not stop") + + teardown_event = trio.Event() + fixture_value = self._call(lambda: 
self._get_nursery().start(fixture_runner)) + yield fixture_value + teardown_event.set() + + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], + kwargs: Dict[str, Any], + ) -> T_Retval: + return self._call(fixture_func, **kwargs) + + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: Dict[str, Any] + ) -> None: + self._call(test_func, **kwargs) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/__init__.py b/myenv/lib/python3.9/site-packages/anyio/_core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_compat.py b/myenv/lib/python3.9/site-packages/anyio/_core/_compat.py new file mode 100644 index 0000000..7062be5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_compat.py @@ -0,0 +1,218 @@ +from abc import ABCMeta, abstractmethod +from contextlib import AbstractContextManager +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + ContextManager, + Generator, + Generic, + Iterable, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + overload, +) +from warnings import warn + +if TYPE_CHECKING: + from ._testing import TaskInfo +else: + TaskInfo = object + +T = TypeVar("T") +AnyDeprecatedAwaitable = Union[ + "DeprecatedAwaitable", + "DeprecatedAwaitableFloat", + "DeprecatedAwaitableList[T]", + TaskInfo, +] + + +@overload +async def maybe_async(__obj: TaskInfo) -> TaskInfo: + ... + + +@overload +async def maybe_async(__obj: "DeprecatedAwaitableFloat") -> float: + ... + + +@overload +async def maybe_async(__obj: "DeprecatedAwaitableList[T]") -> List[T]: + ... + + +@overload +async def maybe_async(__obj: "DeprecatedAwaitable") -> None: + ... + + +async def maybe_async( + __obj: "AnyDeprecatedAwaitable[T]", +) -> Union[TaskInfo, float, List[T], None]: + """ + Await on the given object if necessary. + + This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions and + methods were converted from coroutine functions into regular functions. + + Do **not** try to use this for any other purpose! + + :return: the result of awaiting on the object if coroutine, or the object itself otherwise + + .. versionadded:: 2.2 + + """ + return __obj._unwrap() + + +class _ContextManagerWrapper: + def __init__(self, cm: ContextManager[T]): + self._cm = cm + + async def __aenter__(self) -> T: + return self._cm.__enter__() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return self._cm.__exit__(exc_type, exc_val, exc_tb) + + +def maybe_async_cm( + cm: Union[ContextManager[T], AsyncContextManager[T]] +) -> AsyncContextManager[T]: + """ + Wrap a regular context manager as an async one if necessary. + + This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions and + methods were changed to return regular context managers instead of async ones. + + :param cm: a regular or async context manager + :return: an async context manager + + .. versionadded:: 2.2 + + """ + if not isinstance(cm, AbstractContextManager): + raise TypeError("Given object is not an context manager") + + return _ContextManagerWrapper(cm) + + +def _warn_deprecation( + awaitable: "AnyDeprecatedAwaitable[Any]", stacklevel: int = 1 +) -> None: + warn( + f'Awaiting on {awaitable._name}() is deprecated. 
Use "await ' + f"anyio.maybe_async({awaitable._name}(...)) if you have to support both AnyIO 2.x " + f'and 3.x, or just remove the "await" if you are completely migrating to AnyIO 3+.', + DeprecationWarning, + stacklevel=stacklevel + 1, + ) + + +class DeprecatedAwaitable: + def __init__(self, func: Callable[..., "DeprecatedAwaitable"]): + self._name = f"{func.__module__}.{func.__qualname__}" + + def __await__(self) -> Generator[None, None, None]: + _warn_deprecation(self) + if False: + yield + + def __reduce__(self) -> Tuple[Type[None], Tuple[()]]: + return type(None), () + + def _unwrap(self) -> None: + return None + + +class DeprecatedAwaitableFloat(float): + def __new__( + cls, x: float, func: Callable[..., "DeprecatedAwaitableFloat"] + ) -> "DeprecatedAwaitableFloat": + return super().__new__(cls, x) + + def __init__(self, x: float, func: Callable[..., "DeprecatedAwaitableFloat"]): + self._name = f"{func.__module__}.{func.__qualname__}" + + def __await__(self) -> Generator[None, None, float]: + _warn_deprecation(self) + if False: + yield + + return float(self) + + def __reduce__(self) -> Tuple[Type[float], Tuple[float]]: + return float, (float(self),) + + def _unwrap(self) -> float: + return float(self) + + +class DeprecatedAwaitableList(List[T]): + def __init__( + self, + iterable: Iterable[T] = (), + *, + func: Callable[..., "DeprecatedAwaitableList[T]"], + ): + super().__init__(iterable) + self._name = f"{func.__module__}.{func.__qualname__}" + + def __await__(self) -> Generator[None, None, List[T]]: + _warn_deprecation(self) + if False: + yield + + return list(self) + + def __reduce__(self) -> Tuple[Type[List[T]], Tuple[List[T]]]: + return list, (list(self),) + + def _unwrap(self) -> List[T]: + return list(self) + + +class DeprecatedAsyncContextManager(Generic[T], metaclass=ABCMeta): + @abstractmethod + def __enter__(self) -> T: + pass + + @abstractmethod + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + pass + + async def __aenter__(self) -> T: + warn( + f"Using {self.__class__.__name__} as an async context manager has been deprecated. 
" + f'Use "async with anyio.maybe_async_cm(yourcontextmanager) as foo:" if you have to ' + f'support both AnyIO 2.x and 3.x, or just remove the "async" from "async with" if ' + f"you are completely migrating to AnyIO 3+.", + DeprecationWarning, + ) + return self.__enter__() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return self.__exit__(exc_type, exc_val, exc_tb) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_eventloop.py b/myenv/lib/python3.9/site-packages/anyio/_core/_eventloop.py new file mode 100644 index 0000000..f027ae5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_eventloop.py @@ -0,0 +1,155 @@ +import math +import sys +import threading +from contextlib import contextmanager +from importlib import import_module +from typing import ( + Any, + Callable, + Coroutine, + Dict, + Generator, + Optional, + Tuple, + Type, + TypeVar, +) + +import sniffio + +# This must be updated when new backends are introduced +from ._compat import DeprecatedAwaitableFloat + +BACKENDS = "asyncio", "trio" + +T_Retval = TypeVar("T_Retval") +threadlocals = threading.local() + + +def run( + func: Callable[..., Coroutine[Any, Any, T_Retval]], + *args: object, + backend: str = "asyncio", + backend_options: Optional[Dict[str, Any]] = None, +) -> T_Retval: + """ + Run the given coroutine function in an asynchronous event loop. + + The current thread must not be already running an event loop. + + :param func: a coroutine function + :param args: positional arguments to ``func`` + :param backend: name of the asynchronous event loop implementation – currently either + ``asyncio`` or ``trio`` + :param backend_options: keyword arguments to call the backend ``run()`` implementation with + (documented :ref:`here `) + :return: the return value of the coroutine function + :raises RuntimeError: if an asynchronous event loop is already running in this thread + :raises LookupError: if the named backend is not found + + """ + try: + asynclib_name = sniffio.current_async_library() + except sniffio.AsyncLibraryNotFoundError: + pass + else: + raise RuntimeError(f"Already running {asynclib_name} in this thread") + + try: + asynclib = import_module(f"..._backends._{backend}", package=__name__) + except ImportError as exc: + raise LookupError(f"No such backend: {backend}") from exc + + token = None + if sniffio.current_async_library_cvar.get(None) is None: + # Since we're in control of the event loop, we can cache the name of the async library + token = sniffio.current_async_library_cvar.set(backend) + + try: + backend_options = backend_options or {} + return asynclib.run(func, *args, **backend_options) + finally: + if token: + sniffio.current_async_library_cvar.reset(token) + + +async def sleep(delay: float) -> None: + """ + Pause the current task for the specified duration. + + :param delay: the duration, in seconds + + """ + return await get_asynclib().sleep(delay) + + +async def sleep_forever() -> None: + """ + Pause the current task until it's cancelled. + + This is a shortcut for ``sleep(math.inf)``. + + .. versionadded:: 3.1 + + """ + await sleep(math.inf) + + +async def sleep_until(deadline: float) -> None: + """ + Pause the current task until the given time. + + :param deadline: the absolute time to wake up at (according to the internal monotonic clock of + the event loop) + + .. 
versionadded:: 3.1 + + """ + now = current_time() + await sleep(max(deadline - now, 0)) + + +def current_time() -> DeprecatedAwaitableFloat: + """ + Return the current value of the event loop's internal clock. + + :return: the clock value (seconds) + + """ + return DeprecatedAwaitableFloat(get_asynclib().current_time(), current_time) + + +def get_all_backends() -> Tuple[str, ...]: + """Return a tuple of the names of all built-in backends.""" + return BACKENDS + + +def get_cancelled_exc_class() -> Type[BaseException]: + """Return the current async library's cancellation exception class.""" + return get_asynclib().CancelledError + + +# +# Private API +# + + +@contextmanager +def claim_worker_thread(backend: str) -> Generator[Any, None, None]: + module = sys.modules["anyio._backends._" + backend] + threadlocals.current_async_module = module + try: + yield + finally: + del threadlocals.current_async_module + + +def get_asynclib(asynclib_name: Optional[str] = None) -> Any: + if asynclib_name is None: + asynclib_name = sniffio.current_async_library() + + modulename = "anyio._backends._" + asynclib_name + try: + return sys.modules[modulename] + except KeyError: + return import_module(modulename) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_exceptions.py b/myenv/lib/python3.9/site-packages/anyio/_core/_exceptions.py new file mode 100644 index 0000000..db2bbcf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_exceptions.py @@ -0,0 +1,93 @@ +from traceback import format_exception +from typing import List + + +class BrokenResourceError(Exception): + """ + Raised when trying to use a resource that has been rendered unusable due to external causes + (e.g. a send stream whose peer has disconnected). + """ + + +class BrokenWorkerProcess(Exception): + """ + Raised by :func:`run_sync_in_process` if the worker process terminates abruptly or otherwise + misbehaves. + """ + + +class BusyResourceError(Exception): + """Raised when two tasks are trying to read from or write to the same resource concurrently.""" + + def __init__(self, action: str): + super().__init__(f"Another task is already {action} this resource") + + +class ClosedResourceError(Exception): + """Raised when trying to use a resource that has been closed.""" + + +class DelimiterNotFound(Exception): + """ + Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the + maximum number of bytes has been read without the delimiter being found. + """ + + def __init__(self, max_bytes: int) -> None: + super().__init__( + f"The delimiter was not found among the first {max_bytes} bytes" + ) + + +class EndOfStream(Exception): + """Raised when trying to read from a stream that has been closed from the other end.""" + + +class ExceptionGroup(BaseException): + """ + Raised when multiple exceptions have been raised in a task group. 
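For orientation only, a minimal sketch of how the ExceptionGroup defined above typically surfaces from an anyio task group; the failing coroutine and its messages are made up for illustration, and since a lone failure propagates unwrapped, both exception types are caught::

    import anyio

    async def fail(msg: str) -> None:
        # Raises before the first checkpoint, so both failures can be collected
        raise RuntimeError(msg)

    async def main() -> None:
        try:
            async with anyio.create_task_group() as tg:
                tg.start_soon(fail, "first")
                tg.start_soon(fail, "second")
        except anyio.ExceptionGroup as excgrp:
            # __str__ uses the SEPARATOR/traceback formatting shown above
            print(f"{len(excgrp.exceptions)} child tasks failed")
        except RuntimeError:
            # A single failure is re-raised as-is instead of being wrapped
            print("one child task failed")

    anyio.run(main)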
+ + :var ~typing.Sequence[BaseException] exceptions: the sequence of exceptions raised together + """ + + SEPARATOR = "----------------------------\n" + + exceptions: List[BaseException] + + def __str__(self) -> str: + tracebacks = [ + "".join(format_exception(type(exc), exc, exc.__traceback__)) + for exc in self.exceptions + ] + return ( + f"{len(self.exceptions)} exceptions were raised in the task group:\n" + f"{self.SEPARATOR}{self.SEPARATOR.join(tracebacks)}" + ) + + def __repr__(self) -> str: + exception_reprs = ", ".join(repr(exc) for exc in self.exceptions) + return f"<{self.__class__.__name__}: {exception_reprs}>" + + +class IncompleteRead(Exception): + """ + Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or + :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the + connection is closed before the requested amount of bytes has been read. + """ + + def __init__(self) -> None: + super().__init__( + "The stream was closed before the read operation could be completed" + ) + + +class TypedAttributeLookupError(LookupError): + """ + Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute is not + found and no default value has been given. + """ + + +class WouldBlock(Exception): + """Raised by ``X_nowait`` functions if ``X()`` would block.""" diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_fileio.py b/myenv/lib/python3.9/site-packages/anyio/_core/_fileio.py new file mode 100644 index 0000000..19c1e83 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_fileio.py @@ -0,0 +1,607 @@ +import os +import pathlib +import sys +from dataclasses import dataclass +from functools import partial +from os import PathLike +from typing import ( + IO, + TYPE_CHECKING, + Any, + AnyStr, + AsyncIterator, + Callable, + Generic, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, + overload, +) + +from .. import to_thread +from ..abc import AsyncResource + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +if TYPE_CHECKING: + from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer +else: + ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object + + +class AsyncFile(AsyncResource, Generic[AnyStr]): + """ + An asynchronous file object. + + This class wraps a standard file object and provides async friendly versions of the following + blocking methods (where available on the original file object): + + * read + * read1 + * readline + * readlines + * readinto + * readinto1 + * write + * writelines + * truncate + * seek + * tell + * flush + + All other methods are directly passed through. + + This class supports the asynchronous context manager protocol which closes the underlying file + at the end of the context block. + + This class also supports asynchronous iteration:: + + async with await open_file(...) 
as f: + async for line in f: + print(line) + """ + + def __init__(self, fp: IO[AnyStr]) -> None: + self._fp: Any = fp + + def __getattr__(self, name: str) -> object: + return getattr(self._fp, name) + + @property + def wrapped(self) -> IO[AnyStr]: + """The wrapped file object.""" + return self._fp + + async def __aiter__(self) -> AsyncIterator[AnyStr]: + while True: + line = await self.readline() + if line: + yield line + else: + break + + async def aclose(self) -> None: + return await to_thread.run_sync(self._fp.close) + + async def read(self, size: int = -1) -> AnyStr: + return await to_thread.run_sync(self._fp.read, size) + + async def read1(self: "AsyncFile[bytes]", size: int = -1) -> bytes: + return await to_thread.run_sync(self._fp.read1, size) + + async def readline(self) -> AnyStr: + return await to_thread.run_sync(self._fp.readline) + + async def readlines(self) -> List[AnyStr]: + return await to_thread.run_sync(self._fp.readlines) + + async def readinto(self: "AsyncFile[bytes]", b: WriteableBuffer) -> bytes: + return await to_thread.run_sync(self._fp.readinto, b) + + async def readinto1(self: "AsyncFile[bytes]", b: WriteableBuffer) -> bytes: + return await to_thread.run_sync(self._fp.readinto1, b) + + @overload + async def write(self: "AsyncFile[bytes]", b: ReadableBuffer) -> int: + ... + + @overload + async def write(self: "AsyncFile[str]", b: str) -> int: + ... + + async def write(self, b: Union[ReadableBuffer, str]) -> int: + return await to_thread.run_sync(self._fp.write, b) + + @overload + async def writelines( + self: "AsyncFile[bytes]", lines: Iterable[ReadableBuffer] + ) -> None: + ... + + @overload + async def writelines(self: "AsyncFile[str]", lines: Iterable[str]) -> None: + ... + + async def writelines( + self, lines: Union[Iterable[ReadableBuffer], Iterable[str]] + ) -> None: + return await to_thread.run_sync(self._fp.writelines, lines) + + async def truncate(self, size: Optional[int] = None) -> int: + return await to_thread.run_sync(self._fp.truncate, size) + + async def seek(self, offset: int, whence: Optional[int] = os.SEEK_SET) -> int: + return await to_thread.run_sync(self._fp.seek, offset, whence) + + async def tell(self) -> int: + return await to_thread.run_sync(self._fp.tell) + + async def flush(self) -> None: + return await to_thread.run_sync(self._fp.flush) + + +@overload +async def open_file( + file: Union[str, "PathLike[str]", int], + mode: OpenBinaryMode, + buffering: int = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + closefd: bool = ..., + opener: Optional[Callable[[str, int], int]] = ..., +) -> AsyncFile[bytes]: + ... + + +@overload +async def open_file( + file: Union[str, "PathLike[str]", int], + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + closefd: bool = ..., + opener: Optional[Callable[[str, int], int]] = ..., +) -> AsyncFile[str]: + ... + + +async def open_file( + file: Union[str, "PathLike[str]", int], + mode: str = "r", + buffering: int = -1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + closefd: bool = True, + opener: Optional[Callable[[str, int], int]] = None, +) -> AsyncFile[Any]: + """ + Open a file asynchronously. + + The arguments are exactly the same as for the builtin :func:`open`. 
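A rough usage sketch for the AsyncFile wrapper and the open_file() helper shown above (the file name example.txt is hypothetical)::

    import anyio

    async def main() -> None:
        # open_file() takes the same arguments as the builtin open()
        async with await anyio.open_file("example.txt", "w") as f:
            await f.write("hello\n")

        async with await anyio.open_file("example.txt") as f:
            # AsyncFile supports async iteration over lines
            async for line in f:
                print(line.rstrip())

    anyio.run(main)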
+ + :return: an asynchronous file object + + """ + fp = await to_thread.run_sync( + open, file, mode, buffering, encoding, errors, newline, closefd, opener + ) + return AsyncFile(fp) + + +def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]: + """ + Wrap an existing file as an asynchronous file. + + :param file: an existing file-like object + :return: an asynchronous file object + + """ + return AsyncFile(file) + + +@dataclass(eq=False) +class _PathIterator(AsyncIterator["Path"]): + iterator: Iterator["PathLike[str]"] + + async def __anext__(self) -> "Path": + nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True) + if nextval is None: + raise StopAsyncIteration from None + + return Path(cast("PathLike[str]", nextval)) + + +class Path: + """ + An asynchronous version of :class:`pathlib.Path`. + + This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but + it is compatible with the :class:`os.PathLike` interface. + + It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the + deprecated :meth:`~pathlib.Path.link_to` method. + + Any methods that do disk I/O need to be awaited on. These methods are: + + * :meth:`~pathlib.Path.absolute` + * :meth:`~pathlib.Path.chmod` + * :meth:`~pathlib.Path.cwd` + * :meth:`~pathlib.Path.exists` + * :meth:`~pathlib.Path.expanduser` + * :meth:`~pathlib.Path.group` + * :meth:`~pathlib.Path.hardlink_to` + * :meth:`~pathlib.Path.home` + * :meth:`~pathlib.Path.is_block_device` + * :meth:`~pathlib.Path.is_char_device` + * :meth:`~pathlib.Path.is_dir` + * :meth:`~pathlib.Path.is_fifo` + * :meth:`~pathlib.Path.is_file` + * :meth:`~pathlib.Path.is_mount` + * :meth:`~pathlib.Path.lchmod` + * :meth:`~pathlib.Path.lstat` + * :meth:`~pathlib.Path.mkdir` + * :meth:`~pathlib.Path.open` + * :meth:`~pathlib.Path.owner` + * :meth:`~pathlib.Path.read_bytes` + * :meth:`~pathlib.Path.read_text` + * :meth:`~pathlib.Path.readlink` + * :meth:`~pathlib.Path.rename` + * :meth:`~pathlib.Path.replace` + * :meth:`~pathlib.Path.rmdir` + * :meth:`~pathlib.Path.samefile` + * :meth:`~pathlib.Path.stat` + * :meth:`~pathlib.Path.touch` + * :meth:`~pathlib.Path.unlink` + * :meth:`~pathlib.Path.write_bytes` + * :meth:`~pathlib.Path.write_text` + + Additionally, the following methods return an async iterator yielding :class:`~.Path` objects: + + * :meth:`~pathlib.Path.glob` + * :meth:`~pathlib.Path.iterdir` + * :meth:`~pathlib.Path.rglob` + """ + + __slots__ = "_path", "__weakref__" + + __weakref__: Any + + def __init__(self, *args: Union[str, "PathLike[str]"]) -> None: + self._path: Final[pathlib.Path] = pathlib.Path(*args) + + def __fspath__(self) -> str: + return self._path.__fspath__() + + def __str__(self) -> str: + return self._path.__str__() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.as_posix()!r})" + + def __bytes__(self) -> bytes: + return self._path.__bytes__() + + def __hash__(self) -> int: + return self._path.__hash__() + + def __eq__(self, other: object) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__eq__(target) + + def __lt__(self, other: "Path") -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__lt__(target) + + def __le__(self, other: "Path") -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__le__(target) + + def __gt__(self, other: "Path") -> bool: + target = other._path if isinstance(other, Path) else other + return 
self._path.__gt__(target) + + def __ge__(self, other: "Path") -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__ge__(target) + + def __truediv__(self, other: Any) -> "Path": + return Path(self._path / other) + + def __rtruediv__(self, other: Any) -> "Path": + return Path(other) / self + + @property + def parts(self) -> Tuple[str, ...]: + return self._path.parts + + @property + def drive(self) -> str: + return self._path.drive + + @property + def root(self) -> str: + return self._path.root + + @property + def anchor(self) -> str: + return self._path.anchor + + @property + def parents(self) -> Sequence["Path"]: + return tuple(Path(p) for p in self._path.parents) + + @property + def parent(self) -> "Path": + return Path(self._path.parent) + + @property + def name(self) -> str: + return self._path.name + + @property + def suffix(self) -> str: + return self._path.suffix + + @property + def suffixes(self) -> List[str]: + return self._path.suffixes + + @property + def stem(self) -> str: + return self._path.stem + + async def absolute(self) -> "Path": + path = await to_thread.run_sync(self._path.absolute) + return Path(path) + + def as_posix(self) -> str: + return self._path.as_posix() + + def as_uri(self) -> str: + return self._path.as_uri() + + def match(self, path_pattern: str) -> bool: + return self._path.match(path_pattern) + + def is_relative_to(self, *other: Union[str, "PathLike[str]"]) -> bool: + try: + self.relative_to(*other) + return True + except ValueError: + return False + + async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None: + func = partial(os.chmod, follow_symlinks=follow_symlinks) + return await to_thread.run_sync(func, self._path, mode) + + @classmethod + async def cwd(cls) -> "Path": + path = await to_thread.run_sync(pathlib.Path.cwd) + return cls(path) + + async def exists(self) -> bool: + return await to_thread.run_sync(self._path.exists, cancellable=True) + + async def expanduser(self) -> "Path": + return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True)) + + def glob(self, pattern: str) -> AsyncIterator["Path"]: + gen = self._path.glob(pattern) + return _PathIterator(gen) + + async def group(self) -> str: + return await to_thread.run_sync(self._path.group, cancellable=True) + + async def hardlink_to(self, target: Union[str, pathlib.Path, "Path"]) -> None: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(os.link, target, self) + + @classmethod + async def home(cls) -> "Path": + home_path = await to_thread.run_sync(pathlib.Path.home) + return cls(home_path) + + def is_absolute(self) -> bool: + return self._path.is_absolute() + + async def is_block_device(self) -> bool: + return await to_thread.run_sync(self._path.is_block_device, cancellable=True) + + async def is_char_device(self) -> bool: + return await to_thread.run_sync(self._path.is_char_device, cancellable=True) + + async def is_dir(self) -> bool: + return await to_thread.run_sync(self._path.is_dir, cancellable=True) + + async def is_fifo(self) -> bool: + return await to_thread.run_sync(self._path.is_fifo, cancellable=True) + + async def is_file(self) -> bool: + return await to_thread.run_sync(self._path.is_file, cancellable=True) + + async def is_mount(self) -> bool: + return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True) + + def is_reserved(self) -> bool: + return self._path.is_reserved() + + async def is_socket(self) -> bool: + return await 
to_thread.run_sync(self._path.is_socket, cancellable=True) + + async def is_symlink(self) -> bool: + return await to_thread.run_sync(self._path.is_symlink, cancellable=True) + + def iterdir(self) -> AsyncIterator["Path"]: + gen = self._path.iterdir() + return _PathIterator(gen) + + def joinpath(self, *args: Union[str, "PathLike[str]"]) -> "Path": + return Path(self._path.joinpath(*args)) + + async def lchmod(self, mode: int) -> None: + await to_thread.run_sync(self._path.lchmod, mode) + + async def lstat(self) -> os.stat_result: + return await to_thread.run_sync(self._path.lstat, cancellable=True) + + async def mkdir( + self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False + ) -> None: + await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok) + + @overload + async def open( + self, + mode: OpenBinaryMode, + buffering: int = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + ) -> AsyncFile[bytes]: + ... + + @overload + async def open( + self, + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + ) -> AsyncFile[str]: + ... + + async def open( + self, + mode: str = "r", + buffering: int = -1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + ) -> AsyncFile[Any]: + fp = await to_thread.run_sync( + self._path.open, mode, buffering, encoding, errors, newline + ) + return AsyncFile(fp) + + async def owner(self) -> str: + return await to_thread.run_sync(self._path.owner, cancellable=True) + + async def read_bytes(self) -> bytes: + return await to_thread.run_sync(self._path.read_bytes) + + async def read_text( + self, encoding: Optional[str] = None, errors: Optional[str] = None + ) -> str: + return await to_thread.run_sync(self._path.read_text, encoding, errors) + + def relative_to(self, *other: Union[str, "PathLike[str]"]) -> "Path": + return Path(self._path.relative_to(*other)) + + async def readlink(self) -> "Path": + target = await to_thread.run_sync(os.readlink, self._path) + return Path(cast(str, target)) + + async def rename(self, target: Union[str, pathlib.PurePath, "Path"]) -> "Path": + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.rename, target) + return Path(target) + + async def replace(self, target: Union[str, pathlib.PurePath, "Path"]) -> "Path": + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.replace, target) + return Path(target) + + async def resolve(self, strict: bool = False) -> "Path": + func = partial(self._path.resolve, strict=strict) + return Path(await to_thread.run_sync(func, cancellable=True)) + + def rglob(self, pattern: str) -> AsyncIterator["Path"]: + gen = self._path.rglob(pattern) + return _PathIterator(gen) + + async def rmdir(self) -> None: + await to_thread.run_sync(self._path.rmdir) + + async def samefile( + self, other_path: Union[str, bytes, int, pathlib.Path, "Path"] + ) -> bool: + if isinstance(other_path, Path): + other_path = other_path._path + + return await to_thread.run_sync( + self._path.samefile, other_path, cancellable=True + ) + + async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result: + func = partial(os.stat, follow_symlinks=follow_symlinks) + return await to_thread.run_sync(func, self._path, cancellable=True) + + async def symlink_to( + self, + target: Union[str, pathlib.Path, "Path"], + target_is_directory: bool = 
False, + ) -> None: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.symlink_to, target, target_is_directory) + + async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None: + await to_thread.run_sync(self._path.touch, mode, exist_ok) + + async def unlink(self, missing_ok: bool = False) -> None: + try: + await to_thread.run_sync(self._path.unlink) + except FileNotFoundError: + if not missing_ok: + raise + + def with_name(self, name: str) -> "Path": + return Path(self._path.with_name(name)) + + def with_stem(self, stem: str) -> "Path": + return Path(self._path.with_name(stem + self._path.suffix)) + + def with_suffix(self, suffix: str) -> "Path": + return Path(self._path.with_suffix(suffix)) + + async def write_bytes(self, data: bytes) -> int: + return await to_thread.run_sync(self._path.write_bytes, data) + + async def write_text( + self, + data: str, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + ) -> int: + # Path.write_text() does not support the "newline" parameter before Python 3.10 + def sync_write_text() -> int: + with self._path.open( + "w", encoding=encoding, errors=errors, newline=newline + ) as fp: + return fp.write(data) + + return await to_thread.run_sync(sync_write_text) + + +PathLike.register(Path) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_resources.py b/myenv/lib/python3.9/site-packages/anyio/_core/_resources.py new file mode 100644 index 0000000..b9414f7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_resources.py @@ -0,0 +1,16 @@ +from ..abc import AsyncResource +from ._tasks import CancelScope + + +async def aclose_forcefully(resource: AsyncResource) -> None: + """ + Close an asynchronous resource in a cancelled scope. + + Doing this closes the resource without waiting on anything. + + :param resource: the resource to close + + """ + with CancelScope() as scope: + scope.cancel() + await resource.aclose() diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_signals.py b/myenv/lib/python3.9/site-packages/anyio/_core/_signals.py new file mode 100644 index 0000000..02234fd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_signals.py @@ -0,0 +1,24 @@ +from typing import AsyncIterator + +from ._compat import DeprecatedAsyncContextManager +from ._eventloop import get_asynclib + + +def open_signal_receiver( + *signals: int, +) -> DeprecatedAsyncContextManager[AsyncIterator[int]]: + """ + Start receiving operating system signals. + + :param signals: signals to receive (e.g. ``signal.SIGINT``) + :return: an asynchronous context manager for an asynchronous iterator which yields signal + numbers + + .. warning:: Windows does not support signals natively so it is best to avoid relying on this + in cross-platform applications. + + .. warning:: On asyncio, this permanently replaces any previous signal handler for the given + signals, as set via :meth:`~asyncio.loop.add_signal_handler`. 
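A minimal, hedged sketch of the open_signal_receiver() helper above, run from the main task of an anyio program (not available on Windows, per the warning in the docstring)::

    import signal

    import anyio

    async def main() -> None:
        with anyio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
            async for signum in signals:
                print(f"received {signal.Signals(signum).name}, shutting down")
                break

    anyio.run(main)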
+ + """ + return get_asynclib().open_signal_receiver(*signals) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_sockets.py b/myenv/lib/python3.9/site-packages/anyio/_core/_sockets.py new file mode 100644 index 0000000..ca85d30 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_sockets.py @@ -0,0 +1,587 @@ +import socket +import ssl +import sys +from ipaddress import IPv6Address, ip_address +from os import PathLike, chmod +from pathlib import Path +from socket import AddressFamily, SocketKind +from typing import Awaitable, List, Optional, Tuple, Union, cast, overload + +from .. import to_thread +from ..abc import ( + ConnectedUDPSocket, + IPAddressType, + IPSockAddrType, + SocketListener, + SocketStream, + UDPSocket, + UNIXSocketStream, +) +from ..streams.stapled import MultiListener +from ..streams.tls import TLSStream +from ._eventloop import get_asynclib +from ._resources import aclose_forcefully +from ._synchronization import Event +from ._tasks import create_task_group, move_on_after + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41) # https://bugs.python.org/issue29515 + +GetAddrInfoReturnType = List[ + Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]] +] +AnyIPAddressFamily = Literal[ + AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6 +] +IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6] + + +# tls_hostname given +@overload +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = ..., + ssl_context: Optional[ssl.SSLContext] = ..., + tls_standard_compatible: bool = ..., + tls_hostname: str, + happy_eyeballs_delay: float = ..., +) -> TLSStream: + ... + + +# ssl_context given +@overload +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = ..., + ssl_context: ssl.SSLContext, + tls_standard_compatible: bool = ..., + tls_hostname: Optional[str] = ..., + happy_eyeballs_delay: float = ..., +) -> TLSStream: + ... + + +# tls=True +@overload +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = ..., + tls: Literal[True], + ssl_context: Optional[ssl.SSLContext] = ..., + tls_standard_compatible: bool = ..., + tls_hostname: Optional[str] = ..., + happy_eyeballs_delay: float = ..., +) -> TLSStream: + ... + + +# tls=False +@overload +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = ..., + tls: Literal[False], + ssl_context: Optional[ssl.SSLContext] = ..., + tls_standard_compatible: bool = ..., + tls_hostname: Optional[str] = ..., + happy_eyeballs_delay: float = ..., +) -> SocketStream: + ... + + +# No TLS arguments +@overload +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = ..., + happy_eyeballs_delay: float = ..., +) -> SocketStream: + ... + + +async def connect_tcp( + remote_host: IPAddressType, + remote_port: int, + *, + local_host: Optional[IPAddressType] = None, + tls: bool = False, + ssl_context: Optional[ssl.SSLContext] = None, + tls_standard_compatible: bool = True, + tls_hostname: Optional[str] = None, + happy_eyeballs_delay: float = 0.25, +) -> Union[SocketStream, TLSStream]: + """ + Connect to a host using the TCP protocol. 
+ + This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). + If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until + one connection attempt succeeds. If the first attempt does not connected within 250 + milliseconds, a second attempt is started using the next address in the list, and so on. + On IPv6 enabled systems, an IPv6 address (if available) is tried first. + + When the connection has been established, a TLS handshake will be done if either + ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. + + :param remote_host: the IP address or host name to connect to + :param remote_port: port on the target host to connect to + :param local_host: the interface address or name to bind the socket to before connecting + :param tls: ``True`` to do a TLS handshake with the connected stream and return a + :class:`~anyio.streams.tls.TLSStream` instead + :param ssl_context: the SSL context object to use (if omitted, a default context is created) + :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing + the stream and requires that the server does this as well. Otherwise, + :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. + Some protocols, such as HTTP, require this option to be ``False``. + See :meth:`~ssl.SSLContext.wrap_socket` for details. + :param tls_hostname: host name to check the server certificate against (defaults to the value + of ``remote_host``) + :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt + :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream + :raises OSError: if the connection attempt fails + + """ + # Placed here due to https://github.com/python/mypy/issues/7057 + connected_stream: Optional[SocketStream] = None + + async def try_connect(remote_host: str, event: Event) -> None: + nonlocal connected_stream + try: + stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) + except OSError as exc: + oserrors.append(exc) + return + else: + if connected_stream is None: + connected_stream = stream + tg.cancel_scope.cancel() + else: + await stream.aclose() + finally: + event.set() + + asynclib = get_asynclib() + local_address: Optional[IPSockAddrType] = None + family = socket.AF_UNSPEC + if local_host: + gai_res = await getaddrinfo(str(local_host), None) + family, *_, local_address = gai_res[0] + + target_host = str(remote_host) + try: + addr_obj = ip_address(remote_host) + except ValueError: + # getaddrinfo() will raise an exception if name resolution fails + gai_res = await getaddrinfo( + target_host, remote_port, family=family, type=socket.SOCK_STREAM + ) + + # Organize the list so that the first address is an IPv6 address (if available) and the + # second one is an IPv4 addresses. The rest can be in whatever order. 
+ v6_found = v4_found = False + target_addrs: List[Tuple[socket.AddressFamily, str]] = [] + for af, *rest, sa in gai_res: + if af == socket.AF_INET6 and not v6_found: + v6_found = True + target_addrs.insert(0, (af, sa[0])) + elif af == socket.AF_INET and not v4_found and v6_found: + v4_found = True + target_addrs.insert(1, (af, sa[0])) + else: + target_addrs.append((af, sa[0])) + else: + if isinstance(addr_obj, IPv6Address): + target_addrs = [(socket.AF_INET6, addr_obj.compressed)] + else: + target_addrs = [(socket.AF_INET, addr_obj.compressed)] + + oserrors: List[OSError] = [] + async with create_task_group() as tg: + for i, (af, addr) in enumerate(target_addrs): + event = Event() + tg.start_soon(try_connect, addr, event) + with move_on_after(happy_eyeballs_delay): + await event.wait() + + if connected_stream is None: + cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(oserrors) + raise OSError("All connection attempts failed") from cause + + if tls or tls_hostname or ssl_context: + try: + return await TLSStream.wrap( + connected_stream, + server_side=False, + hostname=tls_hostname or str(remote_host), + ssl_context=ssl_context, + standard_compatible=tls_standard_compatible, + ) + except BaseException: + await aclose_forcefully(connected_stream) + raise + + return connected_stream + + +async def connect_unix(path: Union[str, "PathLike[str]"]) -> UNIXSocketStream: + """ + Connect to the given UNIX socket. + + Not available on Windows. + + :param path: path to the socket + :return: a socket stream object + + """ + path = str(Path(path)) + return await get_asynclib().connect_unix(path) + + +async def create_tcp_listener( + *, + local_host: Optional[IPAddressType] = None, + local_port: int = 0, + family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC, + backlog: int = 65536, + reuse_port: bool = False, +) -> MultiListener[SocketStream]: + """ + Create a TCP socket listener. + + :param local_port: port number to listen on + :param local_host: IP address of the interface to listen on. If omitted, listen on all IPv4 + and IPv6 interfaces. To listen on all interfaces on a specific address family, use + ``0.0.0.0`` for IPv4 or ``::`` for IPv6. + :param family: address family (used if ``interface`` was omitted) + :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or + 65536) + :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port + (not supported on Windows) + :return: a list of listener objects + + """ + asynclib = get_asynclib() + backlog = min(backlog, 65536) + local_host = str(local_host) if local_host is not None else None + gai_res = await getaddrinfo( + local_host, # type: ignore[arg-type] + local_port, + family=family, + type=socket.SOCK_STREAM, + flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG, + ) + listeners: List[SocketListener] = [] + try: + # The set() is here to work around a glibc bug: + # https://sourceware.org/bugzilla/show_bug.cgi?id=14969 + for fam, *_, sockaddr in sorted(set(gai_res)): + raw_socket = socket.socket(fam) + raw_socket.setblocking(False) + + # For Windows, enable exclusive address use. For others, enable address reuse. 
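For context, a minimal sketch of the connect_tcp() call whose Happy Eyeballs implementation appears above (the host example.com and the hand-written HTTP request are illustrative only)::

    import anyio

    async def main() -> None:
        # Each resolved address is tried in turn, 0.25 s apart by default
        async with await anyio.connect_tcp("example.com", 80) as stream:
            await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
            reply = await stream.receive()
            print(reply.split(b"\r\n", 1)[0])

    anyio.run(main)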
+ if sys.platform == "win32": + raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + else: + raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + if reuse_port: + raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + + # If only IPv6 was requested, disable dual stack operation + if fam == socket.AF_INET6: + raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) + + raw_socket.bind(sockaddr) + raw_socket.listen(backlog) + listener = asynclib.TCPSocketListener(raw_socket) + listeners.append(listener) + except BaseException: + for listener in listeners: + await listener.aclose() + + raise + + return MultiListener(listeners) + + +async def create_unix_listener( + path: Union[str, "PathLike[str]"], + *, + mode: Optional[int] = None, + backlog: int = 65536, +) -> SocketListener: + """ + Create a UNIX socket listener. + + Not available on Windows. + + :param path: path of the socket + :param mode: permissions to set on the socket + :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or + 65536) + :return: a listener object + + .. versionchanged:: 3.0 + If a socket already exists on the file system in the given path, it will be removed first. + + """ + path_str = str(path) + path = Path(path) + if path.is_socket(): + path.unlink() + + backlog = min(backlog, 65536) + raw_socket = socket.socket(socket.AF_UNIX) + raw_socket.setblocking(False) + try: + await to_thread.run_sync(raw_socket.bind, path_str, cancellable=True) + if mode is not None: + await to_thread.run_sync(chmod, path_str, mode, cancellable=True) + + raw_socket.listen(backlog) + return get_asynclib().UNIXSocketListener(raw_socket) + except BaseException: + raw_socket.close() + raise + + +async def create_udp_socket( + family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC, + *, + local_host: Optional[IPAddressType] = None, + local_port: int = 0, + reuse_port: bool = False, +) -> UDPSocket: + """ + Create a UDP socket. + + If ``port`` has been given, the socket will be bound to this port on the local machine, + making this socket suitable for providing UDP based services. + + :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from + ``local_host`` if omitted + :param local_host: IP address or host name of the local interface to bind to + :param local_port: local port to bind to + :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port + (not supported on Windows) + :return: a UDP socket + + """ + if family is AddressFamily.AF_UNSPEC and not local_host: + raise ValueError('Either "family" or "local_host" must be given') + + if local_host: + gai_res = await getaddrinfo( + str(local_host), + local_port, + family=family, + type=socket.SOCK_DGRAM, + flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG, + ) + family = cast(AnyIPAddressFamily, gai_res[0][0]) + local_address = gai_res[0][-1] + elif family is AddressFamily.AF_INET6: + local_address = ("::", 0) + else: + local_address = ("0.0.0.0", 0) + + return await get_asynclib().create_udp_socket( + family, local_address, None, reuse_port + ) + + +async def create_connected_udp_socket( + remote_host: IPAddressType, + remote_port: int, + *, + family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC, + local_host: Optional[IPAddressType] = None, + local_port: int = 0, + reuse_port: bool = False, +) -> ConnectedUDPSocket: + """ + Create a connected UDP socket. 
+ + Connected UDP sockets can only communicate with the specified remote host/port, and any packets + sent from other sources are dropped. + + :param remote_host: remote host to set as the default target + :param remote_port: port on the remote host to set as the default target + :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from + ``local_host`` or ``remote_host`` if omitted + :param local_host: IP address or host name of the local interface to bind to + :param local_port: local port to bind to + :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port + (not supported on Windows) + :return: a connected UDP socket + + """ + local_address = None + if local_host: + gai_res = await getaddrinfo( + str(local_host), + local_port, + family=family, + type=socket.SOCK_DGRAM, + flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG, + ) + family = cast(AnyIPAddressFamily, gai_res[0][0]) + local_address = gai_res[0][-1] + + gai_res = await getaddrinfo( + str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM + ) + family = cast(AnyIPAddressFamily, gai_res[0][0]) + remote_address = gai_res[0][-1] + + return await get_asynclib().create_udp_socket( + family, local_address, remote_address, reuse_port + ) + + +async def getaddrinfo( + host: Union[bytearray, bytes, str], + port: Union[str, int, None], + *, + family: Union[int, AddressFamily] = 0, + type: Union[int, SocketKind] = 0, + proto: int = 0, + flags: int = 0, +) -> GetAddrInfoReturnType: + """ + Look up a numeric IP address given a host name. + + Internationalized domain names are translated according to the (non-transitional) IDNA 2008 + standard. + + .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of + (host, port), unlike what :func:`socket.getaddrinfo` does. + + :param host: host name + :param port: port number + :param family: socket family (`'AF_INET``, ...) + :param type: socket type (``SOCK_STREAM``, ...) + :param proto: protocol number + :param flags: flags to pass to upstream ``getaddrinfo()`` + :return: list of tuples containing (family, type, proto, canonname, sockaddr) + + .. seealso:: :func:`socket.getaddrinfo` + + """ + # Handle unicode hostnames + if isinstance(host, str): + try: + encoded_host = host.encode("ascii") + except UnicodeEncodeError: + import idna + + encoded_host = idna.encode(host, uts46=True) + else: + encoded_host = host + + gai_res = await get_asynclib().getaddrinfo( + encoded_host, port, family=family, type=type, proto=proto, flags=flags + ) + return [ + (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr)) + for family, type, proto, canonname, sockaddr in gai_res + ] + + +def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[Tuple[str, str]]: + """ + Look up the host name of an IP address. + + :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4) + :param flags: flags to pass to upstream ``getnameinfo()`` + :return: a tuple of (host name, service name) + + .. seealso:: :func:`socket.getnameinfo` + + """ + return get_asynclib().getnameinfo(sockaddr, flags) + + +def wait_socket_readable(sock: socket.socket) -> Awaitable[None]: + """ + Wait until the given socket has data to be read. + + This does **NOT** work on Windows when using the asyncio backend with a proactor event loop + (default on py3.8+). + + .. warning:: Only use this on raw sockets that have not been wrapped by any higher level + constructs like socket streams! 
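A small sketch of create_connected_udp_socket() as described above (the loopback address and port are placeholders)::

    import anyio

    async def main() -> None:
        # Packets go to, and are accepted only from, the configured peer
        async with await anyio.create_connected_udp_socket("127.0.0.1", 9000) as udp:
            await udp.send(b"ping")

    anyio.run(main)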
+ + :param sock: a socket object + :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the + socket to become readable + :raises ~anyio.BusyResourceError: if another task is already waiting for the socket + to become readable + + """ + return get_asynclib().wait_socket_readable(sock) + + +def wait_socket_writable(sock: socket.socket) -> Awaitable[None]: + """ + Wait until the given socket can be written to. + + This does **NOT** work on Windows when using the asyncio backend with a proactor event loop + (default on py3.8+). + + .. warning:: Only use this on raw sockets that have not been wrapped by any higher level + constructs like socket streams! + + :param sock: a socket object + :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the + socket to become writable + :raises ~anyio.BusyResourceError: if another task is already waiting for the socket + to become writable + + """ + return get_asynclib().wait_socket_writable(sock) + + +# +# Private API +# + + +def convert_ipv6_sockaddr( + sockaddr: Union[Tuple[str, int, int, int], Tuple[str, int]] +) -> Tuple[str, int]: + """ + Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format. + + If the scope ID is nonzero, it is added to the address, separated with ``%``. + Otherwise the flow id and scope id are simply cut off from the tuple. + Any other kinds of socket addresses are returned as-is. + + :param sockaddr: the result of :meth:`~socket.socket.getsockname` + :return: the converted socket address + + """ + # This is more complicated than it should be because of MyPy + if isinstance(sockaddr, tuple) and len(sockaddr) == 4: + host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr) + if scope_id: + # Add scope_id to the address + return f"{host}%{scope_id}", port + else: + return host, port + else: + return cast(Tuple[str, int], sockaddr) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_streams.py b/myenv/lib/python3.9/site-packages/anyio/_core/_streams.py new file mode 100644 index 0000000..58954a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_streams.py @@ -0,0 +1,45 @@ +import math +from typing import Any, Optional, Tuple, Type, TypeVar, overload + +from ..streams.memory import ( + MemoryObjectReceiveStream, + MemoryObjectSendStream, + MemoryObjectStreamState, +) + +T_Item = TypeVar("T_Item") + + +@overload +def create_memory_object_stream( + max_buffer_size: float, item_type: Type[T_Item] +) -> Tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]: + ... + + +@overload +def create_memory_object_stream( + max_buffer_size: float = 0, +) -> Tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: + ... + + +def create_memory_object_stream( + max_buffer_size: float = 0, item_type: Optional[Type[T_Item]] = None +) -> Tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: + """ + Create a memory object stream. 
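A minimal producer/consumer sketch of create_memory_object_stream() as documented above::

    import anyio

    async def main() -> None:
        # A buffer size of 0 makes send() wait until a receiver is ready
        send_stream, receive_stream = anyio.create_memory_object_stream(0)

        async def producer() -> None:
            async with send_stream:
                for i in range(3):
                    await send_stream.send(i)

        async with anyio.create_task_group() as tg:
            tg.start_soon(producer)
            async with receive_stream:
                # Iteration ends once the send side has been closed
                async for item in receive_stream:
                    print("received", item)

    anyio.run(main)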
+ + :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking + :param item_type: type of item, for marking the streams with the right generic type for + static typing (not used at run time) + :return: a tuple of (send stream, receive stream) + + """ + if max_buffer_size != math.inf and not isinstance(max_buffer_size, int): + raise ValueError("max_buffer_size must be either an integer or math.inf") + if max_buffer_size < 0: + raise ValueError("max_buffer_size cannot be negative") + + state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size) + return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_subprocesses.py b/myenv/lib/python3.9/site-packages/anyio/_core/_subprocesses.py new file mode 100644 index 0000000..43fa6b6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_subprocesses.py @@ -0,0 +1,136 @@ +from io import BytesIO +from os import PathLike +from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess +from typing import ( + IO, + Any, + AsyncIterable, + List, + Mapping, + Optional, + Sequence, + Union, + cast, +) + +from ..abc import Process +from ._eventloop import get_asynclib +from ._tasks import create_task_group + + +async def run_process( + command: Union[str, bytes, Sequence[Union[str, bytes]]], + *, + input: Optional[bytes] = None, + stdout: Union[int, IO[Any], None] = PIPE, + stderr: Union[int, IO[Any], None] = PIPE, + check: bool = True, + cwd: Union[str, bytes, "PathLike[str]", None] = None, + env: Optional[Mapping[str, str]] = None, + start_new_session: bool = False, +) -> "CompletedProcess[bytes]": + """ + Run an external command in a subprocess and wait until it completes. + + .. seealso:: :func:`subprocess.run` + + :param command: either a string to pass to the shell, or an iterable of strings containing the + executable name or path and its arguments + :param input: bytes passed to the standard input of the subprocess + :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL` + :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or + :data:`subprocess.STDOUT` + :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process + terminates with a return code other than 0 + :param cwd: If not ``None``, change the working directory to this before running the command + :param env: if not ``None``, this mapping replaces the inherited environment variables from the + parent process + :param start_new_session: if ``true`` the setsid() system call will be made in the child + process prior to the execution of the subprocess. 
(POSIX only) + :return: an object representing the completed process + :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with a + nonzero return code + + """ + + async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None: + buffer = BytesIO() + async for chunk in stream: + buffer.write(chunk) + + stream_contents[index] = buffer.getvalue() + + async with await open_process( + command, + stdin=PIPE if input else DEVNULL, + stdout=stdout, + stderr=stderr, + cwd=cwd, + env=env, + start_new_session=start_new_session, + ) as process: + stream_contents: List[Optional[bytes]] = [None, None] + try: + async with create_task_group() as tg: + if process.stdout: + tg.start_soon(drain_stream, process.stdout, 0) + if process.stderr: + tg.start_soon(drain_stream, process.stderr, 1) + if process.stdin and input: + await process.stdin.send(input) + await process.stdin.aclose() + + await process.wait() + except BaseException: + process.kill() + raise + + output, errors = stream_contents + if check and process.returncode != 0: + raise CalledProcessError(cast(int, process.returncode), command, output, errors) + + return CompletedProcess(command, cast(int, process.returncode), output, errors) + + +async def open_process( + command: Union[str, bytes, Sequence[Union[str, bytes]]], + *, + stdin: Union[int, IO[Any], None] = PIPE, + stdout: Union[int, IO[Any], None] = PIPE, + stderr: Union[int, IO[Any], None] = PIPE, + cwd: Union[str, bytes, "PathLike[str]", None] = None, + env: Optional[Mapping[str, str]] = None, + start_new_session: bool = False, +) -> Process: + """ + Start an external command in a subprocess. + + .. seealso:: :class:`subprocess.Popen` + + :param command: either a string to pass to the shell, or an iterable of strings containing the + executable name or path and its arguments + :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a + file-like object, or ``None`` + :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, + a file-like object, or ``None`` + :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, + :data:`subprocess.STDOUT`, a file-like object, or ``None`` + :param cwd: If not ``None``, the working directory is changed before executing + :param env: If env is not ``None``, it must be a mapping that defines the environment + variables for the new process + :param start_new_session: if ``true`` the setsid() system call will be made in the child + process prior to the execution of the subprocess. 
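A minimal sketch of run_process() as described above (assumes a POSIX-style echo command on PATH)::

    import anyio

    async def main() -> None:
        # check=True (the default) raises CalledProcessError on a nonzero exit status
        result = await anyio.run_process(["echo", "hello"])
        print(result.returncode, result.stdout.decode().strip())

    anyio.run(main)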
(POSIX only) + :return: an asynchronous process object + + """ + shell = isinstance(command, str) + return await get_asynclib().open_process( + command, + shell=shell, + stdin=stdin, + stdout=stdout, + stderr=stderr, + cwd=cwd, + env=env, + start_new_session=start_new_session, + ) diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_synchronization.py b/myenv/lib/python3.9/site-packages/anyio/_core/_synchronization.py new file mode 100644 index 0000000..15d4afc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_synchronization.py @@ -0,0 +1,595 @@ +from collections import deque +from dataclasses import dataclass +from types import TracebackType +from typing import Deque, Optional, Tuple, Type +from warnings import warn + +from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled +from ._compat import DeprecatedAwaitable +from ._eventloop import get_asynclib +from ._exceptions import BusyResourceError, WouldBlock +from ._tasks import CancelScope +from ._testing import TaskInfo, get_current_task + + +@dataclass(frozen=True) +class EventStatistics: + """ + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait` + """ + + tasks_waiting: int + + +@dataclass(frozen=True) +class CapacityLimiterStatistics: + """ + :ivar int borrowed_tokens: number of tokens currently borrowed by tasks + :ivar float total_tokens: total number of available tokens + :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this + limiter + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or + :meth:`~.CapacityLimiter.acquire_on_behalf_of` + """ + + borrowed_tokens: int + total_tokens: float + borrowers: Tuple[object, ...] + tasks_waiting: int + + +@dataclass(frozen=True) +class LockStatistics: + """ + :ivar bool locked: flag indicating if this lock is locked or not + :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not + held by any task) + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire` + """ + + locked: bool + owner: Optional[TaskInfo] + tasks_waiting: int + + +@dataclass(frozen=True) +class ConditionStatistics: + """ + :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait` + :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock` + """ + + tasks_waiting: int + lock_statistics: LockStatistics + + +@dataclass(frozen=True) +class SemaphoreStatistics: + """ + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire` + + """ + + tasks_waiting: int + + +class Event: + def __new__(cls) -> "Event": + return get_asynclib().Event() + + def set(self) -> DeprecatedAwaitable: + """Set the flag, notifying all listeners.""" + raise NotImplementedError + + def is_set(self) -> bool: + """Return ``True`` if the flag is set, ``False`` if not.""" + raise NotImplementedError + + async def wait(self) -> None: + """ + Wait until the flag has been set. + + If the flag has already been set when this method is called, it returns immediately. 
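A short sketch of the Event primitive declared above::

    import anyio

    async def waiter(event: anyio.Event) -> None:
        await event.wait()
        print("the flag was set")

    async def main() -> None:
        event = anyio.Event()
        async with anyio.create_task_group() as tg:
            tg.start_soon(waiter, event)
            await anyio.sleep(0.1)
            event.set()  # wakes every task blocked in wait()

    anyio.run(main)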
+ + """ + raise NotImplementedError + + def statistics(self) -> EventStatistics: + """Return statistics about the current state of this event.""" + raise NotImplementedError + + +class Lock: + _owner_task: Optional[TaskInfo] = None + + def __init__(self) -> None: + self._waiters: Deque[Tuple[TaskInfo, Event]] = deque() + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.release() + + async def acquire(self) -> None: + """Acquire the lock.""" + await checkpoint_if_cancelled() + try: + self.acquire_nowait() + except WouldBlock: + task = get_current_task() + event = Event() + token = task, event + self._waiters.append(token) + try: + await event.wait() + except BaseException: + if not event.is_set(): + self._waiters.remove(token) + elif self._owner_task == task: + self.release() + + raise + + assert self._owner_task == task + else: + try: + await cancel_shielded_checkpoint() + except BaseException: + self.release() + raise + + def acquire_nowait(self) -> None: + """ + Acquire the lock, without blocking. + + :raises ~WouldBlock: if the operation would block + + """ + task = get_current_task() + if self._owner_task == task: + raise RuntimeError("Attempted to acquire an already held Lock") + + if self._owner_task is not None: + raise WouldBlock + + self._owner_task = task + + def release(self) -> DeprecatedAwaitable: + """Release the lock.""" + if self._owner_task != get_current_task(): + raise RuntimeError("The current task is not holding this lock") + + if self._waiters: + self._owner_task, event = self._waiters.popleft() + event.set() + else: + del self._owner_task + + return DeprecatedAwaitable(self.release) + + def locked(self) -> bool: + """Return True if the lock is currently held.""" + return self._owner_task is not None + + def statistics(self) -> LockStatistics: + """ + Return statistics about the current state of this lock. + + .. versionadded:: 3.0 + """ + return LockStatistics(self.locked(), self._owner_task, len(self._waiters)) + + +class Condition: + _owner_task: Optional[TaskInfo] = None + + def __init__(self, lock: Optional[Lock] = None): + self._lock = lock or Lock() + self._waiters: Deque[Event] = deque() + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.release() + + def _check_acquired(self) -> None: + if self._owner_task != get_current_task(): + raise RuntimeError("The current task is not holding the underlying lock") + + async def acquire(self) -> None: + """Acquire the underlying lock.""" + await self._lock.acquire() + self._owner_task = get_current_task() + + def acquire_nowait(self) -> None: + """ + Acquire the underlying lock, without blocking. 
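A small sketch of the Lock primitive shown above, used as an async context manager::

    import anyio

    async def worker(lock: anyio.Lock, name: str) -> None:
        async with lock:  # acquire() on entry, release() on exit
            print(f"{name} holds the lock")
            await anyio.sleep(0.1)

    async def main() -> None:
        lock = anyio.Lock()
        async with anyio.create_task_group() as tg:
            for name in ("a", "b"):
                tg.start_soon(worker, lock, name)

    anyio.run(main)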
+ + :raises ~WouldBlock: if the operation would block + + """ + self._lock.acquire_nowait() + self._owner_task = get_current_task() + + def release(self) -> DeprecatedAwaitable: + """Release the underlying lock.""" + self._lock.release() + return DeprecatedAwaitable(self.release) + + def locked(self) -> bool: + """Return True if the lock is set.""" + return self._lock.locked() + + def notify(self, n: int = 1) -> None: + """Notify exactly n listeners.""" + self._check_acquired() + for _ in range(n): + try: + event = self._waiters.popleft() + except IndexError: + break + + event.set() + + def notify_all(self) -> None: + """Notify all the listeners.""" + self._check_acquired() + for event in self._waiters: + event.set() + + self._waiters.clear() + + async def wait(self) -> None: + """Wait for a notification.""" + await checkpoint() + event = Event() + self._waiters.append(event) + self.release() + try: + await event.wait() + except BaseException: + if not event.is_set(): + self._waiters.remove(event) + + raise + finally: + with CancelScope(shield=True): + await self.acquire() + + def statistics(self) -> ConditionStatistics: + """ + Return statistics about the current state of this condition. + + .. versionadded:: 3.0 + """ + return ConditionStatistics(len(self._waiters), self._lock.statistics()) + + +class Semaphore: + def __init__(self, initial_value: int, *, max_value: Optional[int] = None): + if not isinstance(initial_value, int): + raise TypeError("initial_value must be an integer") + if initial_value < 0: + raise ValueError("initial_value must be >= 0") + if max_value is not None: + if not isinstance(max_value, int): + raise TypeError("max_value must be an integer or None") + if max_value < initial_value: + raise ValueError( + "max_value must be equal to or higher than initial_value" + ) + + self._value = initial_value + self._max_value = max_value + self._waiters: Deque[Event] = deque() + + async def __aenter__(self) -> "Semaphore": + await self.acquire() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.release() + + async def acquire(self) -> None: + """Decrement the semaphore value, blocking if necessary.""" + await checkpoint_if_cancelled() + try: + self.acquire_nowait() + except WouldBlock: + event = Event() + self._waiters.append(event) + try: + await event.wait() + except BaseException: + if not event.is_set(): + self._waiters.remove(event) + else: + self.release() + + raise + else: + try: + await cancel_shielded_checkpoint() + except BaseException: + self.release() + raise + + def acquire_nowait(self) -> None: + """ + Acquire the underlying lock, without blocking. 
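# Illustrative usage sketch for the Condition class above (not from the vendored
# source; assumes anyio 3.x, and producer/consumer are made-up names):
import anyio

async def consumer(cond: anyio.Condition, items: list) -> None:
    async with cond:
        while not items:
            await cond.wait()          # releases the lock while waiting
        print("got", items.pop())

async def producer(cond: anyio.Condition, items: list) -> None:
    async with cond:
        items.append("hello")
        cond.notify()                  # wake one waiting consumer

async def main() -> None:
    cond = anyio.Condition()
    items: list = []
    async with anyio.create_task_group() as tg:
        tg.start_soon(consumer, cond, items)
        tg.start_soon(producer, cond, items)

anyio.run(main)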
+ + :raises ~WouldBlock: if the operation would block + + """ + if self._value == 0: + raise WouldBlock + + self._value -= 1 + + def release(self) -> DeprecatedAwaitable: + """Increment the semaphore value.""" + if self._max_value is not None and self._value == self._max_value: + raise ValueError("semaphore released too many times") + + if self._waiters: + self._waiters.popleft().set() + else: + self._value += 1 + + return DeprecatedAwaitable(self.release) + + @property + def value(self) -> int: + """The current value of the semaphore.""" + return self._value + + @property + def max_value(self) -> Optional[int]: + """The maximum value of the semaphore.""" + return self._max_value + + def statistics(self) -> SemaphoreStatistics: + """ + Return statistics about the current state of this semaphore. + + .. versionadded:: 3.0 + """ + return SemaphoreStatistics(len(self._waiters)) + + +class CapacityLimiter: + def __new__(cls, total_tokens: float) -> "CapacityLimiter": + return get_asynclib().CapacityLimiter(total_tokens) + + async def __aenter__(self) -> None: + raise NotImplementedError + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + raise NotImplementedError + + @property + def total_tokens(self) -> float: + """ + The total number of tokens available for borrowing. + + This is a read-write property. If the total number of tokens is increased, the + proportionate number of tasks waiting on this limiter will be granted their tokens. + + .. versionchanged:: 3.0 + The property is now writable. + + """ + raise NotImplementedError + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + raise NotImplementedError + + async def set_total_tokens(self, value: float) -> None: + warn( + "CapacityLimiter.set_total_tokens has been deprecated. Set the value of the" + '"total_tokens" attribute directly.', + DeprecationWarning, + ) + self.total_tokens = value + + @property + def borrowed_tokens(self) -> int: + """The number of tokens that have currently been borrowed.""" + raise NotImplementedError + + @property + def available_tokens(self) -> float: + """The number of tokens currently available to be borrowed""" + raise NotImplementedError + + def acquire_nowait(self) -> DeprecatedAwaitable: + """ + Acquire a token for the current task without waiting for one to become available. + + :raises ~anyio.WouldBlock: if there are no tokens available for borrowing + + """ + raise NotImplementedError + + def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: + """ + Acquire a token without waiting for one to become available. + + :param borrower: the entity borrowing a token + :raises ~anyio.WouldBlock: if there are no tokens available for borrowing + + """ + raise NotImplementedError + + async def acquire(self) -> None: + """ + Acquire a token for the current task, waiting if necessary for one to become available. + + """ + raise NotImplementedError + + async def acquire_on_behalf_of(self, borrower: object) -> None: + """ + Acquire a token, waiting if necessary for one to become available. + + :param borrower: the entity borrowing a token + + """ + raise NotImplementedError + + def release(self) -> None: + """ + Release the token held by the current task. + :raises RuntimeError: if the current task has not borrowed a token from this limiter. 
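# Illustrative usage sketch for the Semaphore class above (not from the vendored
# source; assumes anyio 3.x, `worker` is a made-up name):
import anyio

async def worker(sem: anyio.Semaphore, i: int) -> None:
    async with sem:
        # at most two workers are inside this block at any time
        await anyio.sleep(0.1)
        print("worker", i, "done")

async def main() -> None:
    sem = anyio.Semaphore(2)
    async with anyio.create_task_group() as tg:
        for i in range(5):
            tg.start_soon(worker, sem, i)

anyio.run(main)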
+ + """ + raise NotImplementedError + + def release_on_behalf_of(self, borrower: object) -> None: + """ + Release the token held by the given borrower. + + :raises RuntimeError: if the borrower has not borrowed a token from this limiter. + + """ + raise NotImplementedError + + def statistics(self) -> CapacityLimiterStatistics: + """ + Return statistics about the current state of this limiter. + + .. versionadded:: 3.0 + + """ + raise NotImplementedError + + +def create_lock() -> Lock: + """ + Create an asynchronous lock. + + :return: a lock object + + .. deprecated:: 3.0 + Use :class:`~Lock` directly. + + """ + warn("create_lock() is deprecated -- use Lock() directly", DeprecationWarning) + return Lock() + + +def create_condition(lock: Optional[Lock] = None) -> Condition: + """ + Create an asynchronous condition. + + :param lock: the lock to base the condition object on + :return: a condition object + + .. deprecated:: 3.0 + Use :class:`~Condition` directly. + + """ + warn( + "create_condition() is deprecated -- use Condition() directly", + DeprecationWarning, + ) + return Condition(lock=lock) + + +def create_event() -> Event: + """ + Create an asynchronous event object. + + :return: an event object + + .. deprecated:: 3.0 + Use :class:`~Event` directly. + + """ + warn("create_event() is deprecated -- use Event() directly", DeprecationWarning) + return get_asynclib().Event() + + +def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore: + """ + Create an asynchronous semaphore. + + :param value: the semaphore's initial value + :param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the + semaphore's value would exceed this number + :return: a semaphore object + + .. deprecated:: 3.0 + Use :class:`~Semaphore` directly. + + """ + warn( + "create_semaphore() is deprecated -- use Semaphore() directly", + DeprecationWarning, + ) + return Semaphore(value, max_value=max_value) + + +def create_capacity_limiter(total_tokens: float) -> CapacityLimiter: + """ + Create a capacity limiter. + + :param total_tokens: the total number of tokens available for borrowing (can be an integer or + :data:`math.inf`) + :return: a capacity limiter object + + .. deprecated:: 3.0 + Use :class:`~CapacityLimiter` directly. 
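# Illustrative usage sketch for the CapacityLimiter above (not from the vendored
# source; assumes anyio 3.x, `fetch` is a made-up name). Unlike a Semaphore, a
# limiter remembers which task borrowed each token, so releasing from a task
# that holds no token raises RuntimeError, as documented above.
import anyio

async def fetch(limiter: anyio.CapacityLimiter, i: int) -> None:
    async with limiter:
        # no more than four tasks hold a token at once
        await anyio.sleep(0.1)

async def main() -> None:
    limiter = anyio.CapacityLimiter(4)
    async with anyio.create_task_group() as tg:
        for i in range(10):
            tg.start_soon(fetch, limiter, i)

anyio.run(main)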
+ + """ + warn( + "create_capacity_limiter() is deprecated -- use CapacityLimiter() directly", + DeprecationWarning, + ) + return get_asynclib().CapacityLimiter(total_tokens) + + +class ResourceGuard: + __slots__ = "action", "_guarded" + + def __init__(self, action: str): + self.action = action + self._guarded = False + + def __enter__(self) -> None: + if self._guarded: + raise BusyResourceError(self.action) + + self._guarded = True + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._guarded = False + return None diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_tasks.py b/myenv/lib/python3.9/site-packages/anyio/_core/_tasks.py new file mode 100644 index 0000000..f24764c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_tasks.py @@ -0,0 +1,178 @@ +import math +from types import TracebackType +from typing import Optional, Type +from warnings import warn + +from ..abc._tasks import TaskGroup, TaskStatus +from ._compat import ( + DeprecatedAsyncContextManager, + DeprecatedAwaitable, + DeprecatedAwaitableFloat, +) +from ._eventloop import get_asynclib + + +class _IgnoredTaskStatus(TaskStatus): + def started(self, value: object = None) -> None: + pass + + +TASK_STATUS_IGNORED = _IgnoredTaskStatus() + + +class CancelScope(DeprecatedAsyncContextManager["CancelScope"]): + """ + Wraps a unit of work that can be made separately cancellable. + + :param deadline: The time (clock value) when this scope is cancelled automatically + :param shield: ``True`` to shield the cancel scope from external cancellation + """ + + def __new__( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> "CancelScope": + return get_asynclib().CancelScope(shield=shield, deadline=deadline) + + def cancel(self) -> DeprecatedAwaitable: + """Cancel this scope immediately.""" + raise NotImplementedError + + @property + def deadline(self) -> float: + """ + The time (clock value) when this scope is cancelled automatically. + + Will be ``float('inf')`` if no timeout has been set. + + """ + raise NotImplementedError + + @deadline.setter + def deadline(self, value: float) -> None: + raise NotImplementedError + + @property + def cancel_called(self) -> bool: + """``True`` if :meth:`cancel` has been called.""" + raise NotImplementedError + + @property + def shield(self) -> bool: + """ + ``True`` if this scope is shielded from external cancellation. + + While a scope is shielded, it will not receive cancellations from outside. + + """ + raise NotImplementedError + + @shield.setter + def shield(self, value: bool) -> None: + raise NotImplementedError + + def __enter__(self) -> "CancelScope": + raise NotImplementedError + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + raise NotImplementedError + + +def open_cancel_scope(*, shield: bool = False) -> CancelScope: + """ + Open a cancel scope. + + :param shield: ``True`` to shield the cancel scope from external cancellation + :return: a cancel scope + + .. deprecated:: 3.0 + Use :class:`~CancelScope` directly. 
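# Illustrative usage sketch for the CancelScope above (not from the vendored
# source; assumes anyio 3.x): cancelling the scope cancels everything awaited
# inside it, and the cancellation is absorbed when the scope exits.
import anyio

async def main() -> None:
    with anyio.CancelScope() as scope:
        scope.cancel()
        await anyio.sleep(10)        # returns immediately: the scope is cancelled
    print("cancel_called:", scope.cancel_called)

anyio.run(main)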
+ + """ + warn( + "open_cancel_scope() is deprecated -- use CancelScope() directly", + DeprecationWarning, + ) + return get_asynclib().CancelScope(shield=shield) + + +class FailAfterContextManager(DeprecatedAsyncContextManager[CancelScope]): + def __init__(self, cancel_scope: CancelScope): + self._cancel_scope = cancel_scope + + def __enter__(self) -> CancelScope: + return self._cancel_scope.__enter__() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + retval = self._cancel_scope.__exit__(exc_type, exc_val, exc_tb) + if self._cancel_scope.cancel_called: + raise TimeoutError + + return retval + + +def fail_after(delay: Optional[float], shield: bool = False) -> FailAfterContextManager: + """ + Create a context manager which raises a :class:`TimeoutError` if does not finish in time. + + :param delay: maximum allowed time (in seconds) before raising the exception, or ``None`` to + disable the timeout + :param shield: ``True`` to shield the cancel scope from external cancellation + :return: a context manager that yields a cancel scope + :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.abc.CancelScope`\\] + + """ + deadline = ( + (get_asynclib().current_time() + delay) if delay is not None else math.inf + ) + cancel_scope = get_asynclib().CancelScope(deadline=deadline, shield=shield) + return FailAfterContextManager(cancel_scope) + + +def move_on_after(delay: Optional[float], shield: bool = False) -> CancelScope: + """ + Create a cancel scope with a deadline that expires after the given delay. + + :param delay: maximum allowed time (in seconds) before exiting the context block, or ``None`` + to disable the timeout + :param shield: ``True`` to shield the cancel scope from external cancellation + :return: a cancel scope + + """ + deadline = ( + (get_asynclib().current_time() + delay) if delay is not None else math.inf + ) + return get_asynclib().CancelScope(deadline=deadline, shield=shield) + + +def current_effective_deadline() -> DeprecatedAwaitableFloat: + """ + Return the nearest deadline among all the cancel scopes effective for the current task. + + :return: a clock value from the event loop's internal clock (``float('inf')`` if there is no + deadline in effect) + :rtype: float + + """ + return DeprecatedAwaitableFloat( + get_asynclib().current_effective_deadline(), current_effective_deadline + ) + + +def create_task_group() -> "TaskGroup": + """ + Create a task group. + + :return: a task group + + """ + return get_asynclib().TaskGroup() diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_testing.py b/myenv/lib/python3.9/site-packages/anyio/_core/_testing.py new file mode 100644 index 0000000..4998753 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_testing.py @@ -0,0 +1,80 @@ +from typing import Any, Awaitable, Generator, Optional, Union + +from ._compat import DeprecatedAwaitableList, _warn_deprecation +from ._eventloop import get_asynclib + + +class TaskInfo: + """ + Represents an asynchronous task. 
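# Illustrative timeout sketch for the fail_after()/move_on_after() helpers above
# (not from the vendored source; assumes anyio 3.x as committed here):
import anyio

async def main() -> None:
    # move_on_after: silently give up after one second
    with anyio.move_on_after(1) as scope:
        await anyio.sleep(10)
    print("timed out:", scope.cancel_called)

    # fail_after: same idea, but raise TimeoutError instead of continuing
    try:
        with anyio.fail_after(1):
            await anyio.sleep(10)
    except TimeoutError:
        print("gave up after one second")

anyio.run(main)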
+ + :ivar int id: the unique identifier of the task + :ivar parent_id: the identifier of the parent task, if any + :vartype parent_id: Optional[int] + :ivar str name: the description of the task (if any) + :ivar ~collections.abc.Coroutine coro: the coroutine object of the task + """ + + __slots__ = "_name", "id", "parent_id", "name", "coro" + + def __init__( + self, + id: int, + parent_id: Optional[int], + name: Optional[str], + coro: Union[Generator, Awaitable[Any]], + ): + func = get_current_task + self._name = f"{func.__module__}.{func.__qualname__}" + self.id: int = id + self.parent_id: Optional[int] = parent_id + self.name: Optional[str] = name + self.coro: Union[Generator, Awaitable[Any]] = coro + + def __eq__(self, other: object) -> bool: + if isinstance(other, TaskInfo): + return self.id == other.id + + return NotImplemented + + def __hash__(self) -> int: + return hash(self.id) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})" + + def __await__(self) -> Generator[None, None, "TaskInfo"]: + _warn_deprecation(self) + if False: + yield + + return self + + def _unwrap(self) -> "TaskInfo": + return self + + +def get_current_task() -> TaskInfo: + """ + Return the current task. + + :return: a representation of the current task + + """ + return get_asynclib().get_current_task() + + +def get_running_tasks() -> DeprecatedAwaitableList[TaskInfo]: + """ + Return a list of running tasks in the current event loop. + + :return: a list of task info objects + + """ + tasks = get_asynclib().get_running_tasks() + return DeprecatedAwaitableList(tasks, func=get_running_tasks) + + +async def wait_all_tasks_blocked() -> None: + """Wait until all other tasks are waiting for something.""" + await get_asynclib().wait_all_tasks_blocked() diff --git a/myenv/lib/python3.9/site-packages/anyio/_core/_typedattr.py b/myenv/lib/python3.9/site-packages/anyio/_core/_typedattr.py new file mode 100644 index 0000000..424836a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/_core/_typedattr.py @@ -0,0 +1,81 @@ +import sys +from typing import Any, Callable, Dict, Mapping, TypeVar, Union, overload + +from ._exceptions import TypedAttributeLookupError + +if sys.version_info >= (3, 8): + from typing import final +else: + from typing_extensions import final + +T_Attr = TypeVar("T_Attr") +T_Default = TypeVar("T_Default") +undefined = object() + + +def typed_attribute() -> Any: + """Return a unique object, used to mark typed attributes.""" + return object() + + +class TypedAttributeSet: + """ + Superclass for typed attribute collections. + + Checks that every public attribute of every subclass has a type annotation. + """ + + def __init_subclass__(cls) -> None: + annotations: Dict[str, Any] = getattr(cls, "__annotations__", {}) + for attrname in dir(cls): + if not attrname.startswith("_") and attrname not in annotations: + raise TypeError( + f"Attribute {attrname!r} is missing its type annotation" + ) + + super().__init_subclass__() + + +class TypedAttributeProvider: + """Base class for classes that wish to provide typed extra attributes.""" + + @property + def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]: + """ + A mapping of the extra attributes to callables that return the corresponding values. + + If the provider wraps another provider, the attributes from that wrapper should also be + included in the returned mapping (but the wrapper may override the callables from the + wrapped instance). 
+ + """ + return {} + + @overload + def extra(self, attribute: T_Attr) -> T_Attr: + ... + + @overload + def extra(self, attribute: T_Attr, default: T_Default) -> Union[T_Attr, T_Default]: + ... + + @final + def extra(self, attribute: Any, default: object = undefined) -> object: + """ + extra(attribute, default=undefined) + + Return the value of the given typed extra attribute. + + :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for + :param default: the value that should be returned if no value is found for the attribute + :raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was + given + + """ + try: + return self.extra_attributes[attribute]() + except KeyError: + if default is undefined: + raise TypedAttributeLookupError("Attribute not found") from None + else: + return default diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/__init__.py b/myenv/lib/python3.9/site-packages/anyio/abc/__init__.py new file mode 100644 index 0000000..72c4444 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/__init__.py @@ -0,0 +1,88 @@ +__all__ = ( + "AsyncResource", + "IPAddressType", + "IPSockAddrType", + "SocketAttribute", + "SocketStream", + "SocketListener", + "UDPSocket", + "UNIXSocketStream", + "UDPPacketType", + "ConnectedUDPSocket", + "UnreliableObjectReceiveStream", + "UnreliableObjectSendStream", + "UnreliableObjectStream", + "ObjectReceiveStream", + "ObjectSendStream", + "ObjectStream", + "ByteReceiveStream", + "ByteSendStream", + "ByteStream", + "AnyUnreliableByteReceiveStream", + "AnyUnreliableByteSendStream", + "AnyUnreliableByteStream", + "AnyByteReceiveStream", + "AnyByteSendStream", + "AnyByteStream", + "Listener", + "Process", + "Event", + "Condition", + "Lock", + "Semaphore", + "CapacityLimiter", + "CancelScope", + "TaskGroup", + "TaskStatus", + "TestRunner", + "BlockingPortal", +) + +from typing import Any + +from ._resources import AsyncResource +from ._sockets import ( + ConnectedUDPSocket, + IPAddressType, + IPSockAddrType, + SocketAttribute, + SocketListener, + SocketStream, + UDPPacketType, + UDPSocket, + UNIXSocketStream, +) +from ._streams import ( + AnyByteReceiveStream, + AnyByteSendStream, + AnyByteStream, + AnyUnreliableByteReceiveStream, + AnyUnreliableByteSendStream, + AnyUnreliableByteStream, + ByteReceiveStream, + ByteSendStream, + ByteStream, + Listener, + ObjectReceiveStream, + ObjectSendStream, + ObjectStream, + UnreliableObjectReceiveStream, + UnreliableObjectSendStream, + UnreliableObjectStream, +) +from ._subprocesses import Process +from ._tasks import TaskGroup, TaskStatus +from ._testing import TestRunner + +# Re-exported here, for backwards compatibility +# isort: off +from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore +from .._core._tasks import CancelScope +from ..from_thread import BlockingPortal + +# Re-export imports so they look like they live directly in this package +key: str +value: Any +for key, value in list(locals().items()): + if getattr(value, "__module__", "").startswith("anyio.abc."): + value.__module__ = __name__ diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_resources.py b/myenv/lib/python3.9/site-packages/anyio/abc/_resources.py new file mode 100644 index 0000000..4f66c38 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_resources.py @@ -0,0 +1,29 @@ +from abc import ABCMeta, abstractmethod +from types import TracebackType +from typing import Optional, Type, TypeVar + +T = TypeVar("T") + + +class 
AsyncResource(metaclass=ABCMeta): + """ + Abstract base class for all closeable asynchronous resources. + + Works as an asynchronous context manager which returns the instance itself on enter, and calls + :meth:`aclose` on exit. + """ + + async def __aenter__(self: T) -> T: + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + await self.aclose() + + @abstractmethod + async def aclose(self) -> None: + """Close the resource.""" diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_sockets.py b/myenv/lib/python3.9/site-packages/anyio/abc/_sockets.py new file mode 100644 index 0000000..f73e795 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_sockets.py @@ -0,0 +1,183 @@ +import socket +from abc import abstractmethod +from io import IOBase +from ipaddress import IPv4Address, IPv6Address +from socket import AddressFamily +from types import TracebackType +from typing import ( + Any, + AsyncContextManager, + Callable, + Collection, + Dict, + List, + Mapping, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +from .._core._typedattr import ( + TypedAttributeProvider, + TypedAttributeSet, + typed_attribute, +) +from ._streams import ByteStream, Listener, T_Stream, UnreliableObjectStream +from ._tasks import TaskGroup + +IPAddressType = Union[str, IPv4Address, IPv6Address] +IPSockAddrType = Tuple[str, int] +SockAddrType = Union[IPSockAddrType, str] +UDPPacketType = Tuple[bytes, IPSockAddrType] +T_Retval = TypeVar("T_Retval") + + +class _NullAsyncContextManager: + async def __aenter__(self) -> None: + pass + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + return None + + +class SocketAttribute(TypedAttributeSet): + #: the address family of the underlying socket + family: AddressFamily = typed_attribute() + #: the local socket address of the underlying socket + local_address: SockAddrType = typed_attribute() + #: for IP addresses, the local port the underlying socket is bound to + local_port: int = typed_attribute() + #: the underlying stdlib socket object + raw_socket: socket.socket = typed_attribute() + #: the remote address the underlying socket is connected to + remote_address: SockAddrType = typed_attribute() + #: for IP addresses, the remote port the underlying socket is connected to + remote_port: int = typed_attribute() + + +class _SocketProvider(TypedAttributeProvider): + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + from .._core._sockets import convert_ipv6_sockaddr as convert + + attributes: Dict[Any, Callable[[], Any]] = { + SocketAttribute.family: lambda: self._raw_socket.family, + SocketAttribute.local_address: lambda: convert( + self._raw_socket.getsockname() + ), + SocketAttribute.raw_socket: lambda: self._raw_socket, + } + try: + peername: Optional[Tuple[str, int]] = convert( + self._raw_socket.getpeername() + ) + except OSError: + peername = None + + # Provide the remote address for connected sockets + if peername is not None: + attributes[SocketAttribute.remote_address] = lambda: peername + + # Provide local and remote ports for IP based sockets + if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6): + attributes[ + SocketAttribute.local_port + ] = lambda: self._raw_socket.getsockname()[1] + if peername is not None: + remote_port = peername[1] + 
attributes[SocketAttribute.remote_port] = lambda: remote_port + + return attributes + + @property + @abstractmethod + def _raw_socket(self) -> socket.socket: + pass + + +class SocketStream(ByteStream, _SocketProvider): + """ + Transports bytes over a socket. + + Supports all relevant extra attributes from :class:`~SocketAttribute`. + """ + + +class UNIXSocketStream(SocketStream): + @abstractmethod + async def send_fds( + self, message: bytes, fds: Collection[Union[int, IOBase]] + ) -> None: + """ + Send file descriptors along with a message to the peer. + + :param message: a non-empty bytestring + :param fds: a collection of files (either numeric file descriptors or open file or socket + objects) + """ + + @abstractmethod + async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]: + """ + Receive file descriptors along with a message from the peer. + + :param msglen: length of the message to expect from the peer + :param maxfds: maximum number of file descriptors to expect from the peer + :return: a tuple of (message, file descriptors) + """ + + +class SocketListener(Listener[SocketStream], _SocketProvider): + """ + Listens to incoming socket connections. + + Supports all relevant extra attributes from :class:`~SocketAttribute`. + """ + + @abstractmethod + async def accept(self) -> SocketStream: + """Accept an incoming connection.""" + + async def serve( + self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None + ) -> None: + from .. import create_task_group + + context_manager: AsyncContextManager + if task_group is None: + task_group = context_manager = create_task_group() + else: + # Can be replaced with AsyncExitStack once on py3.7+ + context_manager = _NullAsyncContextManager() + + async with context_manager: + while True: + stream = await self.accept() + task_group.start_soon(handler, stream) + + +class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider): + """ + Represents an unconnected UDP socket. + + Supports all relevant extra attributes from :class:`~SocketAttribute`. + """ + + async def sendto(self, data: bytes, host: str, port: int) -> None: + """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).""" + return await self.send((data, (host, port))) + + +class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider): + """ + Represents an connected UDP socket. + + Supports all relevant extra attributes from :class:`~SocketAttribute`. + """ diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_streams.py b/myenv/lib/python3.9/site-packages/anyio/abc/_streams.py new file mode 100644 index 0000000..4980ef4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_streams.py @@ -0,0 +1,198 @@ +from abc import abstractmethod +from typing import Any, Callable, Generic, Optional, TypeVar, Union + +from .._core._exceptions import EndOfStream +from .._core._typedattr import TypedAttributeProvider +from ._resources import AsyncResource +from ._tasks import TaskGroup + +T_Item = TypeVar("T_Item") +T_Stream = TypeVar("T_Stream") + + +class UnreliableObjectReceiveStream( + Generic[T_Item], AsyncResource, TypedAttributeProvider +): + """ + An interface for receiving objects. + + This interface makes no guarantees that the received messages arrive in the order in which they + were sent, or that no messages are missed. + + Asynchronously iterating over objects of this type will yield objects matching the given type + parameter. 
+ """ + + def __aiter__(self) -> "UnreliableObjectReceiveStream[T_Item]": + return self + + async def __anext__(self) -> T_Item: + try: + return await self.receive() + except EndOfStream: + raise StopAsyncIteration + + @abstractmethod + async def receive(self) -> T_Item: + """ + Receive the next item. + + :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly + closed + :raises ~anyio.EndOfStream: if this stream has been closed from the other end + :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable + due to external causes + """ + + +class UnreliableObjectSendStream( + Generic[T_Item], AsyncResource, TypedAttributeProvider +): + """ + An interface for sending objects. + + This interface makes no guarantees that the messages sent will reach the recipient(s) in the + same order in which they were sent, or at all. + """ + + @abstractmethod + async def send(self, item: T_Item) -> None: + """ + Send an item to the peer(s). + + :param item: the item to send + :raises ~anyio.ClosedResourceError: if the send stream has been explicitly + closed + :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable + due to external causes + """ + + +class UnreliableObjectStream( + UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item] +): + """ + A bidirectional message stream which does not guarantee the order or reliability of message + delivery. + """ + + +class ObjectReceiveStream(UnreliableObjectReceiveStream[T_Item]): + """ + A receive message stream which guarantees that messages are received in the same order in + which they were sent, and that no messages are missed. + """ + + +class ObjectSendStream(UnreliableObjectSendStream[T_Item]): + """ + A send message stream which guarantees that messages are delivered in the same order in which + they were sent, without missing any messages in the middle. + """ + + +class ObjectStream( + ObjectReceiveStream[T_Item], + ObjectSendStream[T_Item], + UnreliableObjectStream[T_Item], +): + """ + A bidirectional message stream which guarantees the order and reliability of message delivery. + """ + + @abstractmethod + async def send_eof(self) -> None: + """ + Send an end-of-file indication to the peer. + + You should not try to send any further data to this stream after calling this method. + This method is idempotent (does nothing on successive calls). + """ + + +class ByteReceiveStream(AsyncResource, TypedAttributeProvider): + """ + An interface for receiving bytes from a single peer. + + Iterating this byte stream will yield a byte string of arbitrary length, but no more than + 65536 bytes. + """ + + def __aiter__(self) -> "ByteReceiveStream": + return self + + async def __anext__(self) -> bytes: + try: + return await self.receive() + except EndOfStream: + raise StopAsyncIteration + + @abstractmethod + async def receive(self, max_bytes: int = 65536) -> bytes: + """ + Receive at most ``max_bytes`` bytes from the peer. + + .. note:: Implementors of this interface should not return an empty :class:`bytes` object, + and users should ignore them. + + :param max_bytes: maximum number of bytes to receive + :return: the received bytes + :raises ~anyio.EndOfStream: if this stream has been closed from the other end + """ + + +class ByteSendStream(AsyncResource, TypedAttributeProvider): + """An interface for sending bytes to a single peer.""" + + @abstractmethod + async def send(self, item: bytes) -> None: + """ + Send the given bytes to the peer. 
+ + :param item: the bytes to send + """ + + +class ByteStream(ByteReceiveStream, ByteSendStream): + """A bidirectional byte stream.""" + + @abstractmethod + async def send_eof(self) -> None: + """ + Send an end-of-file indication to the peer. + + You should not try to send any further data to this stream after calling this method. + This method is idempotent (does nothing on successive calls). + """ + + +#: Type alias for all unreliable bytes-oriented receive streams. +AnyUnreliableByteReceiveStream = Union[ + UnreliableObjectReceiveStream[bytes], ByteReceiveStream +] +#: Type alias for all unreliable bytes-oriented send streams. +AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream] +#: Type alias for all unreliable bytes-oriented streams. +AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream] +#: Type alias for all bytes-oriented receive streams. +AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream] +#: Type alias for all bytes-oriented send streams. +AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream] +#: Type alias for all bytes-oriented streams. +AnyByteStream = Union[ObjectStream[bytes], ByteStream] + + +class Listener(Generic[T_Stream], AsyncResource, TypedAttributeProvider): + """An interface for objects that let you accept incoming connections.""" + + @abstractmethod + async def serve( + self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None + ) -> None: + """ + Accept incoming connections as they come in and start tasks to handle them. + + :param handler: a callable that will be used to handle each accepted connection + :param task_group: the task group that will be used to start tasks for handling each + accepted connection (if omitted, an ad-hoc task group will be created) + """ diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py b/myenv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py new file mode 100644 index 0000000..1e633fb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py @@ -0,0 +1,78 @@ +from abc import abstractmethod +from signal import Signals +from typing import Optional + +from ._resources import AsyncResource +from ._streams import ByteReceiveStream, ByteSendStream + + +class Process(AsyncResource): + """An asynchronous version of :class:`subprocess.Popen`.""" + + @abstractmethod + async def wait(self) -> int: + """ + Wait until the process exits. + + :return: the exit code of the process + """ + + @abstractmethod + def terminate(self) -> None: + """ + Terminates the process, gracefully if possible. + + On Windows, this calls ``TerminateProcess()``. + On POSIX systems, this sends ``SIGTERM`` to the process. + + .. seealso:: :meth:`subprocess.Popen.terminate` + """ + + @abstractmethod + def kill(self) -> None: + """ + Kills the process. + + On Windows, this calls ``TerminateProcess()``. + On POSIX systems, this sends ``SIGKILL`` to the process. + + .. seealso:: :meth:`subprocess.Popen.kill` + """ + + @abstractmethod + def send_signal(self, signal: Signals) -> None: + """ + Send a signal to the subprocess. + + .. seealso:: :meth:`subprocess.Popen.send_signal` + + :param signal: the signal number (e.g. :data:`signal.SIGHUP`) + """ + + @property + @abstractmethod + def pid(self) -> int: + """The process ID of the process.""" + + @property + @abstractmethod + def returncode(self) -> Optional[int]: + """ + The return code of the process. 
If the process has not yet terminated, this will be + ``None``. + """ + + @property + @abstractmethod + def stdin(self) -> Optional[ByteSendStream]: + """The stream for the standard input of the process.""" + + @property + @abstractmethod + def stdout(self) -> Optional[ByteReceiveStream]: + """The stream for the standard output of the process.""" + + @property + @abstractmethod + def stderr(self) -> Optional[ByteReceiveStream]: + """The stream for the standard error output of the process.""" diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_tasks.py b/myenv/lib/python3.9/site-packages/anyio/abc/_tasks.py new file mode 100644 index 0000000..99928a1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_tasks.py @@ -0,0 +1,104 @@ +import typing +from abc import ABCMeta, abstractmethod +from types import TracebackType +from typing import Any, Callable, Coroutine, Optional, Type, TypeVar +from warnings import warn + +if typing.TYPE_CHECKING: + from anyio._core._tasks import CancelScope + +T_Retval = TypeVar("T_Retval") + + +class TaskStatus(metaclass=ABCMeta): + @abstractmethod + def started(self, value: object = None) -> None: + """ + Signal that the task has started. + + :param value: object passed back to the starter of the task + """ + + +class TaskGroup(metaclass=ABCMeta): + """ + Groups several asynchronous tasks together. + + :ivar cancel_scope: the cancel scope inherited by all child tasks + :vartype cancel_scope: CancelScope + """ + + cancel_scope: "CancelScope" + + async def spawn( + self, + func: Callable[..., Coroutine[Any, Any, Any]], + *args: object, + name: object = None + ) -> None: + """ + Start a new task in this task group. + + :param func: a coroutine function + :param args: positional arguments to call the function with + :param name: name of the task, for the purposes of introspection and debugging + + .. deprecated:: 3.0 + Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you + can keep using this until AnyIO 4. + + """ + warn( + 'spawn() is deprecated -- use start_soon() (without the "await") instead', + DeprecationWarning, + ) + self.start_soon(func, *args, name=name) + + @abstractmethod + def start_soon( + self, + func: Callable[..., Coroutine[Any, Any, Any]], + *args: object, + name: object = None + ) -> None: + """ + Start a new task in this task group. + + :param func: a coroutine function + :param args: positional arguments to call the function with + :param name: name of the task, for the purposes of introspection and debugging + + .. versionadded:: 3.0 + """ + + @abstractmethod + async def start( + self, + func: Callable[..., Coroutine[Any, Any, Any]], + *args: object, + name: object = None + ) -> object: + """ + Start a new task and wait until it signals for readiness. + + :param func: a coroutine function + :param args: positional arguments to call the function with + :param name: name of the task, for the purposes of introspection and debugging + :return: the value passed to ``task_status.started()`` + :raises RuntimeError: if the task finishes without calling ``task_status.started()`` + + .. 
versionadded:: 3.0 + """ + + @abstractmethod + async def __aenter__(self) -> "TaskGroup": + """Enter the task group context and allow starting new tasks.""" + + @abstractmethod + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + """Exit the task group context waiting for all tasks to finish.""" diff --git a/myenv/lib/python3.9/site-packages/anyio/abc/_testing.py b/myenv/lib/python3.9/site-packages/anyio/abc/_testing.py new file mode 100644 index 0000000..4e3621d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/abc/_testing.py @@ -0,0 +1,68 @@ +import types +from abc import ABCMeta, abstractmethod +from collections.abc import AsyncGenerator, Iterable +from typing import Any, Callable, Coroutine, Dict, Optional, Type, TypeVar + +_T = TypeVar("_T") + + +class TestRunner(metaclass=ABCMeta): + """ + Encapsulates a running event loop. Every call made through this object will use the same event + loop. + """ + + def __enter__(self) -> "TestRunner": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[types.TracebackType], + ) -> Optional[bool]: + self.close() + return None + + @abstractmethod + def close(self) -> None: + """Close the event loop.""" + + @abstractmethod + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., "AsyncGenerator[_T, Any]"], + kwargs: Dict[str, Any], + ) -> "Iterable[_T]": + """ + Run an async generator fixture. + + :param fixture_func: the fixture function + :param kwargs: keyword arguments to call the fixture function with + :return: an iterator yielding the value yielded from the async generator + """ + + @abstractmethod + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, _T]], + kwargs: Dict[str, Any], + ) -> _T: + """ + Run an async fixture. + + :param fixture_func: the fixture function + :param kwargs: keyword arguments to call the fixture function with + :return: the return value of the fixture function + """ + + @abstractmethod + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: Dict[str, Any] + ) -> None: + """ + Run an async test function. 
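# Illustrative sketch of the TaskGroup.start() readiness protocol defined above
# (not from the vendored source; assumes anyio 3.x, `service` is a made-up name):
import anyio
from anyio.abc import TaskStatus

async def service(task_status: TaskStatus = anyio.TASK_STATUS_IGNORED) -> None:
    # ...perform whatever setup the caller must wait for...
    task_status.started("ready")     # unblocks the tg.start() call below
    await anyio.sleep(0.1)

async def main() -> None:
    async with anyio.create_task_group() as tg:
        value = await tg.start(service)
        print(value)                 # "ready"

anyio.run(main)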
+ + :param test_func: the test function + :param kwargs: keyword arguments to call the test function with + """ diff --git a/myenv/lib/python3.9/site-packages/anyio/from_thread.py b/myenv/lib/python3.9/site-packages/anyio/from_thread.py new file mode 100644 index 0000000..e4f871f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/from_thread.py @@ -0,0 +1,502 @@ +import threading +from asyncio import iscoroutine +from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait +from contextlib import AbstractContextManager, contextmanager +from types import TracebackType +from typing import ( + Any, + AsyncContextManager, + Callable, + ContextManager, + Coroutine, + Dict, + Generator, + Iterable, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) +from warnings import warn + +from ._core import _eventloop +from ._core._eventloop import get_asynclib, get_cancelled_exc_class, threadlocals +from ._core._synchronization import Event +from ._core._tasks import CancelScope, create_task_group +from .abc._tasks import TaskStatus + +T_Retval = TypeVar("T_Retval") +T_co = TypeVar("T_co") + + +def run(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object) -> T_Retval: + """ + Call a coroutine function from a worker thread. + + :param func: a coroutine function + :param args: positional arguments for the callable + :return: the return value of the coroutine function + + """ + try: + asynclib = threadlocals.current_async_module + except AttributeError: + raise RuntimeError("This function can only be run from an AnyIO worker thread") + + return asynclib.run_async_from_thread(func, *args) + + +def run_async_from_thread( + func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object +) -> T_Retval: + warn( + "run_async_from_thread() has been deprecated, use anyio.from_thread.run() instead", + DeprecationWarning, + ) + return run(func, *args) + + +def run_sync(func: Callable[..., T_Retval], *args: object) -> T_Retval: + """ + Call a function in the event loop thread from a worker thread. + + :param func: a callable + :param args: positional arguments for the callable + :return: the return value of the callable + + """ + try: + asynclib = threadlocals.current_async_module + except AttributeError: + raise RuntimeError("This function can only be run from an AnyIO worker thread") + + return asynclib.run_sync_from_thread(func, *args) + + +def run_sync_from_thread(func: Callable[..., T_Retval], *args: object) -> T_Retval: + warn( + "run_sync_from_thread() has been deprecated, use anyio.from_thread.run_sync() instead", + DeprecationWarning, + ) + return run_sync(func, *args) + + +class _BlockingAsyncContextManager(AbstractContextManager): + _enter_future: Future + _exit_future: Future + _exit_event: Event + _exit_exc_info: Tuple[ + Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType] + ] = (None, None, None) + + def __init__(self, async_cm: AsyncContextManager[T_co], portal: "BlockingPortal"): + self._async_cm = async_cm + self._portal = portal + + async def run_async_cm(self) -> Optional[bool]: + try: + self._exit_event = Event() + value = await self._async_cm.__aenter__() + except BaseException as exc: + self._enter_future.set_exception(exc) + raise + else: + self._enter_future.set_result(value) + + try: + # Wait for the sync context manager to exit. + # This next statement can raise `get_cancelled_exc_class()` if + # something went wrong in a task group in this async context + # manager. 
+ await self._exit_event.wait() + finally: + # In case of cancellation, it could be that we end up here before + # `_BlockingAsyncContextManager.__exit__` is called, and an + # `_exit_exc_info` has been set. + result = await self._async_cm.__aexit__(*self._exit_exc_info) + return result + + def __enter__(self) -> T_co: + self._enter_future = Future() + self._exit_future = self._portal.start_task_soon(self.run_async_cm) + cm = self._enter_future.result() + return cast(T_co, cm) + + def __exit__( + self, + __exc_type: Optional[Type[BaseException]], + __exc_value: Optional[BaseException], + __traceback: Optional[TracebackType], + ) -> Optional[bool]: + self._exit_exc_info = __exc_type, __exc_value, __traceback + self._portal.call(self._exit_event.set) + return self._exit_future.result() + + +class _BlockingPortalTaskStatus(TaskStatus): + def __init__(self, future: Future): + self._future = future + + def started(self, value: object = None) -> None: + self._future.set_result(value) + + +class BlockingPortal: + """An object that lets external threads run code in an asynchronous event loop.""" + + def __new__(cls) -> "BlockingPortal": + return get_asynclib().BlockingPortal() + + def __init__(self) -> None: + self._event_loop_thread_id: Optional[int] = threading.get_ident() + self._stop_event = Event() + self._task_group = create_task_group() + self._cancelled_exc_class = get_cancelled_exc_class() + + async def __aenter__(self) -> "BlockingPortal": + await self._task_group.__aenter__() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + await self.stop() + return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) + + def _check_running(self) -> None: + if self._event_loop_thread_id is None: + raise RuntimeError("This portal is not running") + if self._event_loop_thread_id == threading.get_ident(): + raise RuntimeError( + "This method cannot be called from the event loop thread" + ) + + async def sleep_until_stopped(self) -> None: + """Sleep until :meth:`stop` is called.""" + await self._stop_event.wait() + + async def stop(self, cancel_remaining: bool = False) -> None: + """ + Signal the portal to shut down. + + This marks the portal as no longer accepting new calls and exits from + :meth:`sleep_until_stopped`. 
+ + :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False`` to let them + finish before returning + + """ + self._event_loop_thread_id = None + self._stop_event.set() + if cancel_remaining: + self._task_group.cancel_scope.cancel() + + async def _call_func( + self, func: Callable, args: tuple, kwargs: Dict[str, Any], future: Future + ) -> None: + def callback(f: Future) -> None: + if f.cancelled() and self._event_loop_thread_id not in ( + None, + threading.get_ident(), + ): + self.call(scope.cancel) + + try: + retval = func(*args, **kwargs) + if iscoroutine(retval): + with CancelScope() as scope: + if future.cancelled(): + scope.cancel() + else: + future.add_done_callback(callback) + + retval = await retval + except self._cancelled_exc_class: + future.cancel() + except BaseException as exc: + if not future.cancelled(): + future.set_exception(exc) + + # Let base exceptions fall through + if not isinstance(exc, Exception): + raise + else: + if not future.cancelled(): + future.set_result(retval) + finally: + scope = None # type: ignore[assignment] + + def _spawn_task_from_thread( + self, + func: Callable, + args: tuple, + kwargs: Dict[str, Any], + name: object, + future: Future, + ) -> None: + """ + Spawn a new task using the given callable. + + Implementors must ensure that the future is resolved when the task finishes. + + :param func: a callable + :param args: positional arguments to be passed to the callable + :param kwargs: keyword arguments to be passed to the callable + :param name: name of the task (will be coerced to a string if not ``None``) + :param future: a future that will resolve to the return value of the callable, or the + exception raised during its execution + + """ + raise NotImplementedError + + @overload + def call( + self, func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object + ) -> T_Retval: + ... + + @overload + def call(self, func: Callable[..., T_Retval], *args: object) -> T_Retval: + ... + + def call( + self, + func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], + *args: object + ) -> T_Retval: + """ + Call the given function in the event loop thread. + + If the callable returns a coroutine object, it is awaited on. + + :param func: any callable + :raises RuntimeError: if the portal is not running or if this method is called from within + the event loop thread + + """ + return cast(T_Retval, self.start_task_soon(func, *args).result()) + + @overload + def spawn_task( + self, + func: Callable[..., Coroutine[Any, Any, T_Retval]], + *args: object, + name: object = None + ) -> "Future[T_Retval]": + ... + + @overload + def spawn_task( + self, func: Callable[..., T_Retval], *args: object, name: object = None + ) -> "Future[T_Retval]": + ... + + def spawn_task( + self, + func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], + *args: object, + name: object = None + ) -> "Future[T_Retval]": + """ + Start a task in the portal's task group. + + :param func: the target coroutine function + :param args: positional arguments passed to ``func`` + :param name: name of the task (will be coerced to a string if not ``None``) + :return: a future that resolves with the return value of the callable if the task completes + successfully, or with the exception raised in the task + :raises RuntimeError: if the portal is not running or if this method is called from within + the event loop thread + + .. versionadded:: 2.1 + .. deprecated:: 3.0 + Use :meth:`start_task_soon` instead. 
If your code needs AnyIO 2 compatibility, you + can keep using this until AnyIO 4. + + """ + warn( + "spawn_task() is deprecated -- use start_task_soon() instead", + DeprecationWarning, + ) + return self.start_task_soon(func, *args, name=name) # type: ignore[arg-type] + + @overload + def start_task_soon( + self, + func: Callable[..., Coroutine[Any, Any, T_Retval]], + *args: object, + name: object = None + ) -> "Future[T_Retval]": + ... + + @overload + def start_task_soon( + self, func: Callable[..., T_Retval], *args: object, name: object = None + ) -> "Future[T_Retval]": + ... + + def start_task_soon( + self, + func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], + *args: object, + name: object = None + ) -> "Future[T_Retval]": + """ + Start a task in the portal's task group. + + The task will be run inside a cancel scope which can be cancelled by cancelling the + returned future. + + :param func: the target coroutine function + :param args: positional arguments passed to ``func`` + :param name: name of the task (will be coerced to a string if not ``None``) + :return: a future that resolves with the return value of the callable if the task completes + successfully, or with the exception raised in the task + :raises RuntimeError: if the portal is not running or if this method is called from within + the event loop thread + + .. versionadded:: 3.0 + + """ + self._check_running() + f: Future = Future() + self._spawn_task_from_thread(func, args, {}, name, f) + return f + + def start_task( + self, + func: Callable[..., Coroutine[Any, Any, Any]], + *args: object, + name: object = None + ) -> Tuple["Future[Any]", Any]: + """ + Start a task in the portal's task group and wait until it signals for readiness. + + This method works the same way as :meth:`TaskGroup.start`. + + :param func: the target coroutine function + :param args: positional arguments passed to ``func`` + :param name: name of the task (will be coerced to a string if not ``None``) + :return: a tuple of (future, task_status_value) where the ``task_status_value`` is the + value passed to ``task_status.started()`` from within the target function + + .. versionadded:: 3.0 + + """ + + def task_done(future: Future) -> None: + if not task_status_future.done(): + if future.cancelled(): + task_status_future.cancel() + elif future.exception(): + task_status_future.set_exception(future.exception()) + else: + exc = RuntimeError( + "Task exited without calling task_status.started()" + ) + task_status_future.set_exception(exc) + + self._check_running() + task_status_future: Future = Future() + task_status = _BlockingPortalTaskStatus(task_status_future) + f: Future = Future() + f.add_done_callback(task_done) + self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f) + return f, task_status_future.result() + + def wrap_async_context_manager( + self, cm: AsyncContextManager[T_co] + ) -> ContextManager[T_co]: + """ + Wrap an async context manager as a synchronous context manager via this portal. + + Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping in the + middle until the synchronous context manager exits. + + :param cm: an asynchronous context manager + :return: a synchronous context manager + + .. versionadded:: 2.1 + + """ + return _BlockingAsyncContextManager(cm, self) + + +def create_blocking_portal() -> BlockingPortal: + """ + Create a portal for running functions in the event loop thread from external threads. 
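# Illustrative sketch of driving a BlockingPortal from a plain synchronous
# thread (not from the vendored source; assumes anyio 3.x, `answer` is made up):
import anyio
from anyio.from_thread import start_blocking_portal

async def answer() -> int:
    await anyio.sleep(0.1)
    return 42

with start_blocking_portal() as portal:
    print(portal.call(answer))               # runs in the portal's event loop
    future = portal.start_task_soon(answer)  # returns a concurrent.futures.Future
    print(future.result())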
+ + Use this function in asynchronous code when you need to allow external threads access to the + event loop where your asynchronous code is currently running. + + .. deprecated:: 3.0 + Use :class:`.BlockingPortal` directly. + + """ + warn( + "create_blocking_portal() has been deprecated -- use anyio.from_thread.BlockingPortal() " + "directly", + DeprecationWarning, + ) + return BlockingPortal() + + +@contextmanager +def start_blocking_portal( + backend: str = "asyncio", backend_options: Optional[Dict[str, Any]] = None +) -> Generator[BlockingPortal, Any, None]: + """ + Start a new event loop in a new thread and run a blocking portal in its main task. + + The parameters are the same as for :func:`~anyio.run`. + + :param backend: name of the backend + :param backend_options: backend options + :return: a context manager that yields a blocking portal + + .. versionchanged:: 3.0 + Usage as a context manager is now required. + + """ + + async def run_portal() -> None: + async with BlockingPortal() as portal_: + if future.set_running_or_notify_cancel(): + future.set_result(portal_) + await portal_.sleep_until_stopped() + + future: Future[BlockingPortal] = Future() + with ThreadPoolExecutor(1) as executor: + run_future = executor.submit( + _eventloop.run, + run_portal, # type: ignore[arg-type] + backend=backend, + backend_options=backend_options, + ) + try: + wait( + cast(Iterable[Future], [run_future, future]), + return_when=FIRST_COMPLETED, + ) + except BaseException: + future.cancel() + run_future.cancel() + raise + + if future.done(): + portal = future.result() + try: + yield portal + except BaseException: + portal.call(portal.stop, True) + raise + + portal.call(portal.stop, False) + + run_future.result() diff --git a/myenv/lib/python3.9/site-packages/anyio/lowlevel.py b/myenv/lib/python3.9/site-packages/anyio/lowlevel.py new file mode 100644 index 0000000..c1da8fa --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/lowlevel.py @@ -0,0 +1,170 @@ +import enum +import sys +from dataclasses import dataclass +from typing import Any, Dict, Generic, Set, TypeVar, Union, overload +from weakref import WeakKeyDictionary + +from ._core._eventloop import get_asynclib + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +T = TypeVar("T") +D = TypeVar("D") + + +async def checkpoint() -> None: + """ + Check for cancellation and allow the scheduler to switch to another task. + + Equivalent to (but more efficient than):: + + await checkpoint_if_cancelled() + await cancel_shielded_checkpoint() + + .. versionadded:: 3.0 + + """ + await get_asynclib().checkpoint() + + +async def checkpoint_if_cancelled() -> None: + """ + Enter a checkpoint if the enclosing cancel scope has been cancelled. + + This does not allow the scheduler to switch to a different task. + + .. versionadded:: 3.0 + + """ + await get_asynclib().checkpoint_if_cancelled() + + +async def cancel_shielded_checkpoint() -> None: + """ + Allow the scheduler to switch to another task but without checking for cancellation. + + Equivalent to (but potentially more efficient than):: + + with CancelScope(shield=True): + await checkpoint() + + .. 
versionadded:: 3.0 + + """ + await get_asynclib().cancel_shielded_checkpoint() + + +def current_token() -> object: + """Return a backend specific token object that can be used to get back to the event loop.""" + return get_asynclib().current_token() + + +_run_vars = WeakKeyDictionary() # type: WeakKeyDictionary[Any, Dict[str, Any]] +_token_wrappers: Dict[Any, "_TokenWrapper"] = {} + + +@dataclass(frozen=True) +class _TokenWrapper: + __slots__ = "_token", "__weakref__" + _token: object + + +class _NoValueSet(enum.Enum): + NO_VALUE_SET = enum.auto() + + +class RunvarToken(Generic[T]): + __slots__ = "_var", "_value", "_redeemed" + + def __init__( + self, var: "RunVar[T]", value: Union[T, Literal[_NoValueSet.NO_VALUE_SET]] + ): + self._var = var + self._value: Union[T, Literal[_NoValueSet.NO_VALUE_SET]] = value + self._redeemed = False + + +class RunVar(Generic[T]): + """Like a :class:`~contextvars.ContextVar`, expect scoped to the running event loop.""" + + __slots__ = "_name", "_default" + + NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET + + _token_wrappers: Set[_TokenWrapper] = set() + + def __init__( + self, + name: str, + default: Union[T, Literal[_NoValueSet.NO_VALUE_SET]] = NO_VALUE_SET, + ): + self._name = name + self._default = default + + @property + def _current_vars(self) -> Dict[str, T]: + token = current_token() + while True: + try: + return _run_vars[token] + except TypeError: + # Happens when token isn't weak referable (TrioToken). + # This workaround does mean that some memory will leak on Trio until the problem + # is fixed on their end. + token = _TokenWrapper(token) + self._token_wrappers.add(token) + except KeyError: + run_vars = _run_vars[token] = {} + return run_vars + + @overload + def get(self, default: D) -> Union[T, D]: + ... + + @overload + def get(self) -> T: + ... 
+ + def get( + self, default: Union[D, Literal[_NoValueSet.NO_VALUE_SET]] = NO_VALUE_SET + ) -> Union[T, D]: + try: + return self._current_vars[self._name] + except KeyError: + if default is not RunVar.NO_VALUE_SET: + return default + elif self._default is not RunVar.NO_VALUE_SET: + return self._default + + raise LookupError( + f'Run variable "{self._name}" has no value and no default set' + ) + + def set(self, value: T) -> RunvarToken[T]: + current_vars = self._current_vars + token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET)) + current_vars[self._name] = value + return token + + def reset(self, token: RunvarToken[T]) -> None: + if token._var is not self: + raise ValueError("This token does not belong to this RunVar") + + if token._redeemed: + raise ValueError("This token has already been used") + + if token._value is _NoValueSet.NO_VALUE_SET: + try: + del self._current_vars[self._name] + except KeyError: + pass + else: + self._current_vars[self._name] = token._value + + token._redeemed = True + + def __repr__(self) -> str: + return f"" diff --git a/myenv/lib/python3.9/site-packages/anyio/py.typed b/myenv/lib/python3.9/site-packages/anyio/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/anyio/pytest_plugin.py b/myenv/lib/python3.9/site-packages/anyio/pytest_plugin.py new file mode 100644 index 0000000..432eee3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/pytest_plugin.py @@ -0,0 +1,144 @@ +from contextlib import contextmanager +from inspect import isasyncgenfunction, iscoroutinefunction +from typing import TYPE_CHECKING, Any, Dict, Generator, Optional, Tuple, cast + +import pytest +import sniffio +from _pytest.fixtures import FixtureRequest + +from ._core._eventloop import get_all_backends, get_asynclib +from .abc import TestRunner + +if TYPE_CHECKING: + from _pytest.config import Config + +_current_runner: Optional[TestRunner] = None + + +def extract_backend_and_options(backend: object) -> Tuple[str, Dict[str, Any]]: + if isinstance(backend, str): + return backend, {} + elif isinstance(backend, tuple) and len(backend) == 2: + if isinstance(backend[0], str) and isinstance(backend[1], dict): + return cast(Tuple[str, Dict[str, Any]], backend) + + raise TypeError("anyio_backend must be either a string or tuple of (string, dict)") + + +@contextmanager +def get_runner( + backend_name: str, backend_options: Dict[str, Any] +) -> Generator[TestRunner, object, None]: + global _current_runner + if _current_runner: + yield _current_runner + return + + asynclib = get_asynclib(backend_name) + token = None + if sniffio.current_async_library_cvar.get(None) is None: + # Since we're in control of the event loop, we can cache the name of the async library + token = sniffio.current_async_library_cvar.set(backend_name) + + try: + backend_options = backend_options or {} + with asynclib.TestRunner(**backend_options) as runner: + _current_runner = runner + yield runner + finally: + _current_runner = None + if token: + sniffio.current_async_library_cvar.reset(token) + + +def pytest_configure(config: "Config") -> None: + config.addinivalue_line( + "markers", + "anyio: mark the (coroutine function) test to be run " + "asynchronously via anyio.", + ) + + +def pytest_fixture_setup(fixturedef: Any, request: FixtureRequest) -> None: + def wrapper(*args, anyio_backend, **kwargs): # type: ignore[no-untyped-def] + backend_name, backend_options = extract_backend_and_options(anyio_backend) + if has_backend_arg: + 
kwargs["anyio_backend"] = anyio_backend + + with get_runner(backend_name, backend_options) as runner: + if isasyncgenfunction(func): + yield from runner.run_asyncgen_fixture(func, kwargs) + else: + yield runner.run_fixture(func, kwargs) + + # Only apply this to coroutine functions and async generator functions in requests that involve + # the anyio_backend fixture + func = fixturedef.func + if isasyncgenfunction(func) or iscoroutinefunction(func): + if "anyio_backend" in request.fixturenames: + has_backend_arg = "anyio_backend" in fixturedef.argnames + fixturedef.func = wrapper + if not has_backend_arg: + fixturedef.argnames += ("anyio_backend",) + + +@pytest.hookimpl(tryfirst=True) +def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None: + if collector.istestfunction(obj, name): + inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj + if iscoroutinefunction(inner_func): + marker = collector.get_closest_marker("anyio") + own_markers = getattr(obj, "pytestmark", ()) + if marker or any(marker.name == "anyio" for marker in own_markers): + pytest.mark.usefixtures("anyio_backend")(obj) + + +@pytest.hookimpl(tryfirst=True) +def pytest_pyfunc_call(pyfuncitem: Any) -> Optional[bool]: + def run_with_hypothesis(**kwargs: Any) -> None: + with get_runner(backend_name, backend_options) as runner: + runner.run_test(original_func, kwargs) + + backend = pyfuncitem.funcargs.get("anyio_backend") + if backend: + backend_name, backend_options = extract_backend_and_options(backend) + + if hasattr(pyfuncitem.obj, "hypothesis"): + # Wrap the inner test function unless it's already wrapped + original_func = pyfuncitem.obj.hypothesis.inner_test + if original_func.__qualname__ != run_with_hypothesis.__qualname__: + if iscoroutinefunction(original_func): + pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis + + return None + + if iscoroutinefunction(pyfuncitem.obj): + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + with get_runner(backend_name, backend_options) as runner: + runner.run_test(pyfuncitem.obj, testargs) + + return True + + return None + + +@pytest.fixture(params=get_all_backends()) +def anyio_backend(request: Any) -> Any: + return request.param + + +@pytest.fixture +def anyio_backend_name(anyio_backend: Any) -> str: + if isinstance(anyio_backend, str): + return anyio_backend + else: + return anyio_backend[0] + + +@pytest.fixture +def anyio_backend_options(anyio_backend: Any) -> Dict[str, Any]: + if isinstance(anyio_backend, str): + return {} + else: + return anyio_backend[1] diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/__init__.py b/myenv/lib/python3.9/site-packages/anyio/streams/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/buffered.py b/myenv/lib/python3.9/site-packages/anyio/streams/buffered.py new file mode 100644 index 0000000..1503b3e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/buffered.py @@ -0,0 +1,116 @@ +from dataclasses import dataclass, field +from typing import Any, Callable, Mapping + +from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead +from ..abc import AnyByteReceiveStream, ByteReceiveStream + + +@dataclass(eq=False) +class BufferedByteReceiveStream(ByteReceiveStream): + """ + Wraps any bytes-based receive stream and uses a buffer to provide sophisticated receiving + capabilities in the form of a byte stream. 
+ """ + + receive_stream: AnyByteReceiveStream + _buffer: bytearray = field(init=False, default_factory=bytearray) + _closed: bool = field(init=False, default=False) + + async def aclose(self) -> None: + await self.receive_stream.aclose() + self._closed = True + + @property + def buffer(self) -> bytes: + """The bytes currently in the buffer.""" + return bytes(self._buffer) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.receive_stream.extra_attributes + + async def receive(self, max_bytes: int = 65536) -> bytes: + if self._closed: + raise ClosedResourceError + + if self._buffer: + chunk = bytes(self._buffer[:max_bytes]) + del self._buffer[:max_bytes] + return chunk + elif isinstance(self.receive_stream, ByteReceiveStream): + return await self.receive_stream.receive(max_bytes) + else: + # With a bytes-oriented object stream, we need to handle any surplus bytes we get from + # the receive() call + chunk = await self.receive_stream.receive() + if len(chunk) > max_bytes: + # Save the surplus bytes in the buffer + self._buffer.extend(chunk[max_bytes:]) + return chunk[:max_bytes] + else: + return chunk + + async def receive_exactly(self, nbytes: int) -> bytes: + """ + Read exactly the given amount of bytes from the stream. + + :param nbytes: the number of bytes to read + :return: the bytes read + :raises ~anyio.IncompleteRead: if the stream was closed before the requested + amount of bytes could be read from the stream + + """ + while True: + remaining = nbytes - len(self._buffer) + if remaining <= 0: + retval = self._buffer[:nbytes] + del self._buffer[:nbytes] + return bytes(retval) + + try: + if isinstance(self.receive_stream, ByteReceiveStream): + chunk = await self.receive_stream.receive(remaining) + else: + chunk = await self.receive_stream.receive() + except EndOfStream as exc: + raise IncompleteRead from exc + + self._buffer.extend(chunk) + + async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes: + """ + Read from the stream until the delimiter is found or max_bytes have been read. 
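A small illustrative sketch of how ``receive_exactly()`` and ``receive_until()`` behave, here wrapping a bytes-carrying memory object stream for simplicity::

    import anyio
    from anyio.streams.buffered import BufferedByteReceiveStream

    async def main() -> None:
        send, receive = anyio.create_memory_object_stream(10)
        buffered = BufferedByteReceiveStream(receive)
        await send.send(b"HELLO\nworld")
        print(await buffered.receive_until(b"\n", 64))  # b'HELLO' (the delimiter is consumed)
        print(await buffered.receive_exactly(5))        # b'world' (served from the surplus buffer)

    anyio.run(main)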
+ + :param delimiter: the marker to look for in the stream + :param max_bytes: maximum number of bytes that will be read before raising + :exc:`~anyio.DelimiterNotFound` + :return: the bytes read (not including the delimiter) + :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter + was found + :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the + bytes read up to the maximum allowed + + """ + delimiter_size = len(delimiter) + offset = 0 + while True: + # Check if the delimiter can be found in the current buffer + index = self._buffer.find(delimiter, offset) + if index >= 0: + found = self._buffer[:index] + del self._buffer[: index + len(delimiter) :] + return bytes(found) + + # Check if the buffer is already at or over the limit + if len(self._buffer) >= max_bytes: + raise DelimiterNotFound(max_bytes) + + # Read more data into the buffer from the socket + try: + data = await self.receive_stream.receive() + except EndOfStream as exc: + raise IncompleteRead from exc + + # Move the offset forward and add the new data to the buffer + offset = max(len(self._buffer) - delimiter_size + 1, 0) + self._buffer.extend(data) diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/file.py b/myenv/lib/python3.9/site-packages/anyio/streams/file.py new file mode 100644 index 0000000..938d1da --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/file.py @@ -0,0 +1,145 @@ +from io import SEEK_SET, UnsupportedOperation +from os import PathLike +from pathlib import Path +from typing import Any, BinaryIO, Callable, Dict, Mapping, Union, cast + +from .. import ( + BrokenResourceError, + ClosedResourceError, + EndOfStream, + TypedAttributeSet, + to_thread, + typed_attribute, +) +from ..abc import ByteReceiveStream, ByteSendStream + + +class FileStreamAttribute(TypedAttributeSet): + #: the open file descriptor + file: BinaryIO = typed_attribute() + #: the path of the file on the file system, if available (file must be a real file) + path: Path = typed_attribute() + #: the file number, if available (file must be a real file or a TTY) + fileno: int = typed_attribute() + + +class _BaseFileStream: + def __init__(self, file: BinaryIO): + self._file = file + + async def aclose(self) -> None: + await to_thread.run_sync(self._file.close) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + attributes: Dict[Any, Callable[[], Any]] = { + FileStreamAttribute.file: lambda: self._file, + } + + if hasattr(self._file, "name"): + attributes[FileStreamAttribute.path] = lambda: Path(self._file.name) + + try: + self._file.fileno() + except UnsupportedOperation: + pass + else: + attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno() + + return attributes + + +class FileReadStream(_BaseFileStream, ByteReceiveStream): + """ + A byte stream that reads from a file in the file system. + + :param file: a file that has been opened for reading in binary mode + + .. versionadded:: 3.0 + """ + + @classmethod + async def from_path(cls, path: Union[str, "PathLike[str]"]) -> "FileReadStream": + """ + Create a file read stream by opening the given file. 
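For orientation, a minimal round trip with ``FileReadStream`` and its ``FileWriteStream`` counterpart defined just below; the file name is only illustrative::

    import anyio
    from anyio.streams.file import FileReadStream, FileWriteStream

    async def main() -> None:
        async with await FileWriteStream.from_path("demo.bin") as out:
            await out.send(b"hello file streams")
        async with await FileReadStream.from_path("demo.bin") as inp:
            print(await inp.receive())  # b'hello file streams'

    anyio.run(main)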
+ + :param path: path of the file to read from + + """ + file = await to_thread.run_sync(Path(path).open, "rb") + return cls(cast(BinaryIO, file)) + + async def receive(self, max_bytes: int = 65536) -> bytes: + try: + data = await to_thread.run_sync(self._file.read, max_bytes) + except ValueError: + raise ClosedResourceError from None + except OSError as exc: + raise BrokenResourceError from exc + + if data: + return data + else: + raise EndOfStream + + async def seek(self, position: int, whence: int = SEEK_SET) -> int: + """ + Seek the file to the given position. + + .. seealso:: :meth:`io.IOBase.seek` + + .. note:: Not all file descriptors are seekable. + + :param position: position to seek the file to + :param whence: controls how ``position`` is interpreted + :return: the new absolute position + :raises OSError: if the file is not seekable + + """ + return await to_thread.run_sync(self._file.seek, position, whence) + + async def tell(self) -> int: + """ + Return the current stream position. + + .. note:: Not all file descriptors are seekable. + + :return: the current absolute position + :raises OSError: if the file is not seekable + + """ + return await to_thread.run_sync(self._file.tell) + + +class FileWriteStream(_BaseFileStream, ByteSendStream): + """ + A byte stream that writes to a file in the file system. + + :param file: a file that has been opened for writing in binary mode + + .. versionadded:: 3.0 + """ + + @classmethod + async def from_path( + cls, path: Union[str, "PathLike[str]"], append: bool = False + ) -> "FileWriteStream": + """ + Create a file write stream by opening the given file for writing. + + :param path: path of the file to write to + :param append: if ``True``, open the file for appending; if ``False``, any existing file + at the given path will be truncated + + """ + mode = "ab" if append else "wb" + file = await to_thread.run_sync(Path(path).open, mode) + return cls(cast(BinaryIO, file)) + + async def send(self, item: bytes) -> None: + try: + await to_thread.run_sync(self._file.write, item) + except ValueError: + raise ClosedResourceError from None + except OSError as exc: + raise BrokenResourceError from exc diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/memory.py b/myenv/lib/python3.9/site-packages/anyio/streams/memory.py new file mode 100644 index 0000000..d8a958c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/memory.py @@ -0,0 +1,275 @@ +from collections import OrderedDict, deque +from dataclasses import dataclass, field +from types import TracebackType +from typing import Deque, Generic, List, NamedTuple, Optional, Type, TypeVar + +from .. 
import ( + BrokenResourceError, + ClosedResourceError, + EndOfStream, + WouldBlock, + get_cancelled_exc_class, +) +from .._core._compat import DeprecatedAwaitable +from ..abc import Event, ObjectReceiveStream, ObjectSendStream +from ..lowlevel import checkpoint + +T_Item = TypeVar("T_Item") + + +class MemoryObjectStreamStatistics(NamedTuple): + current_buffer_used: int #: number of items stored in the buffer + #: maximum number of items that can be stored on this stream (or :data:`math.inf`) + max_buffer_size: float + open_send_streams: int #: number of unclosed clones of the send stream + open_receive_streams: int #: number of unclosed clones of the receive stream + tasks_waiting_send: int #: number of tasks blocked on :meth:`MemoryObjectSendStream.send` + #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive` + tasks_waiting_receive: int + + +@dataclass(eq=False) +class MemoryObjectStreamState(Generic[T_Item]): + max_buffer_size: float = field() + buffer: Deque[T_Item] = field(init=False, default_factory=deque) + open_send_channels: int = field(init=False, default=0) + open_receive_channels: int = field(init=False, default=0) + waiting_receivers: "OrderedDict[Event, List[T_Item]]" = field( + init=False, default_factory=OrderedDict + ) + waiting_senders: "OrderedDict[Event, T_Item]" = field( + init=False, default_factory=OrderedDict + ) + + def statistics(self) -> MemoryObjectStreamStatistics: + return MemoryObjectStreamStatistics( + len(self.buffer), + self.max_buffer_size, + self.open_send_channels, + self.open_receive_channels, + len(self.waiting_senders), + len(self.waiting_receivers), + ) + + +@dataclass(eq=False) +class MemoryObjectReceiveStream(Generic[T_Item], ObjectReceiveStream[T_Item]): + _state: MemoryObjectStreamState[T_Item] + _closed: bool = field(init=False, default=False) + + def __post_init__(self) -> None: + self._state.open_receive_channels += 1 + + def receive_nowait(self) -> T_Item: + """ + Receive the next item if it can be done without waiting. + + :return: the received item + :raises ~anyio.ClosedResourceError: if this send stream has been closed + :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been + closed from the sending end + :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks + waiting to send + + """ + if self._closed: + raise ClosedResourceError + + if self._state.waiting_senders: + # Get the item from the next sender + send_event, item = self._state.waiting_senders.popitem(last=False) + self._state.buffer.append(item) + send_event.set() + + if self._state.buffer: + return self._state.buffer.popleft() + elif not self._state.open_send_channels: + raise EndOfStream + + raise WouldBlock + + async def receive(self) -> T_Item: + await checkpoint() + try: + return self.receive_nowait() + except WouldBlock: + # Add ourselves in the queue + receive_event = Event() + container: List[T_Item] = [] + self._state.waiting_receivers[receive_event] = container + + try: + await receive_event.wait() + except get_cancelled_exc_class(): + # Ignore the immediate cancellation if we already received an item, so as not to + # lose it + if not container: + raise + finally: + self._state.waiting_receivers.pop(receive_event, None) + + if container: + return container[0] + else: + raise EndOfStream + + def clone(self) -> "MemoryObjectReceiveStream[T_Item]": + """ + Create a clone of this receive stream. + + Each clone can be closed separately. 
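A sketch of the typical producer/consumer pattern for these memory object streams; a buffer size of 0 makes ``send()`` wait for a matching ``receive()``::

    import anyio

    async def producer(send_stream) -> None:
        async with send_stream:            # closing the send side ends the async-for below
            for i in range(3):
                await send_stream.send(i)

    async def main() -> None:
        send, receive = anyio.create_memory_object_stream(0)
        async with anyio.create_task_group() as tg:
            tg.start_soon(producer, send)
            async with receive:
                async for item in receive:
                    print("got", item)

    anyio.run(main)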
Only when all clones have been closed will the + receiving end of the memory stream be considered closed by the sending ends. + + :return: the cloned stream + + """ + if self._closed: + raise ClosedResourceError + + return MemoryObjectReceiveStream(_state=self._state) + + def close(self) -> None: + """ + Close the stream. + + This works the exact same way as :meth:`aclose`, but is provided as a special case for the + benefit of synchronous callbacks. + + """ + if not self._closed: + self._closed = True + self._state.open_receive_channels -= 1 + if self._state.open_receive_channels == 0: + send_events = list(self._state.waiting_senders.keys()) + for event in send_events: + event.set() + + async def aclose(self) -> None: + self.close() + + def statistics(self) -> MemoryObjectStreamStatistics: + """ + Return statistics about the current state of this stream. + + .. versionadded:: 3.0 + """ + return self._state.statistics() + + def __enter__(self) -> "MemoryObjectReceiveStream[T_Item]": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.close() + + +@dataclass(eq=False) +class MemoryObjectSendStream(Generic[T_Item], ObjectSendStream[T_Item]): + _state: MemoryObjectStreamState[T_Item] + _closed: bool = field(init=False, default=False) + + def __post_init__(self) -> None: + self._state.open_send_channels += 1 + + def send_nowait(self, item: T_Item) -> DeprecatedAwaitable: + """ + Send an item immediately if it can be done without waiting. + + :param item: the item to send + :raises ~anyio.ClosedResourceError: if this send stream has been closed + :raises ~anyio.BrokenResourceError: if the stream has been closed from the + receiving end + :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting + to receive + + """ + if self._closed: + raise ClosedResourceError + if not self._state.open_receive_channels: + raise BrokenResourceError + + if self._state.waiting_receivers: + receive_event, container = self._state.waiting_receivers.popitem(last=False) + container.append(item) + receive_event.set() + elif len(self._state.buffer) < self._state.max_buffer_size: + self._state.buffer.append(item) + else: + raise WouldBlock + + return DeprecatedAwaitable(self.send_nowait) + + async def send(self, item: T_Item) -> None: + await checkpoint() + try: + self.send_nowait(item) + except WouldBlock: + # Wait until there's someone on the receiving end + send_event = Event() + self._state.waiting_senders[send_event] = item + try: + await send_event.wait() + except BaseException: + self._state.waiting_senders.pop(send_event, None) # type: ignore[arg-type] + raise + + if self._state.waiting_senders.pop(send_event, None): # type: ignore[arg-type] + raise BrokenResourceError + + def clone(self) -> "MemoryObjectSendStream[T_Item]": + """ + Create a clone of this send stream. + + Each clone can be closed separately. Only when all clones have been closed will the + sending end of the memory stream be considered closed by the receiving ends. + + :return: the cloned stream + + """ + if self._closed: + raise ClosedResourceError + + return MemoryObjectSendStream(_state=self._state) + + def close(self) -> None: + """ + Close the stream. + + This works the exact same way as :meth:`aclose`, but is provided as a special case for the + benefit of synchronous callbacks. 
+ + """ + if not self._closed: + self._closed = True + self._state.open_send_channels -= 1 + if self._state.open_send_channels == 0: + receive_events = list(self._state.waiting_receivers.keys()) + self._state.waiting_receivers.clear() + for event in receive_events: + event.set() + + async def aclose(self) -> None: + self.close() + + def statistics(self) -> MemoryObjectStreamStatistics: + """ + Return statistics about the current state of this stream. + + .. versionadded:: 3.0 + """ + return self._state.statistics() + + def __enter__(self) -> "MemoryObjectSendStream[T_Item]": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.close() diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/stapled.py b/myenv/lib/python3.9/site-packages/anyio/streams/stapled.py new file mode 100644 index 0000000..a71ffb0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/stapled.py @@ -0,0 +1,138 @@ +from dataclasses import dataclass +from typing import Any, Callable, Generic, List, Mapping, Optional, Sequence, TypeVar + +from ..abc import ( + ByteReceiveStream, + ByteSendStream, + ByteStream, + Listener, + ObjectReceiveStream, + ObjectSendStream, + ObjectStream, + TaskGroup, +) + +T_Item = TypeVar("T_Item") +T_Stream = TypeVar("T_Stream") + + +@dataclass(eq=False) +class StapledByteStream(ByteStream): + """ + Combines two byte streams into a single, bidirectional byte stream. + + Extra attributes will be provided from both streams, with the receive stream providing the + values in case of a conflict. + + :param ByteSendStream send_stream: the sending byte stream + :param ByteReceiveStream receive_stream: the receiving byte stream + """ + + send_stream: ByteSendStream + receive_stream: ByteReceiveStream + + async def receive(self, max_bytes: int = 65536) -> bytes: + return await self.receive_stream.receive(max_bytes) + + async def send(self, item: bytes) -> None: + await self.send_stream.send(item) + + async def send_eof(self) -> None: + await self.send_stream.aclose() + + async def aclose(self) -> None: + await self.send_stream.aclose() + await self.receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.send_stream.extra_attributes, + **self.receive_stream.extra_attributes, + } + + +@dataclass(eq=False) +class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]): + """ + Combines two object streams into a single, bidirectional object stream. + + Extra attributes will be provided from both streams, with the receive stream providing the + values in case of a conflict. 
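As an illustrative sketch, stapling the two ends of a memory object stream yields a single bidirectional loopback stream::

    import anyio
    from anyio.streams.stapled import StapledObjectStream

    async def main() -> None:
        send, receive = anyio.create_memory_object_stream(10)
        loopback = StapledObjectStream(send, receive)  # one object exposing both send() and receive()
        await loopback.send("ping")
        print(await loopback.receive())  # 'ping'
        await loopback.aclose()

    anyio.run(main)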
+ + :param ObjectSendStream send_stream: the sending object stream + :param ObjectReceiveStream receive_stream: the receiving object stream + """ + + send_stream: ObjectSendStream[T_Item] + receive_stream: ObjectReceiveStream[T_Item] + + async def receive(self) -> T_Item: + return await self.receive_stream.receive() + + async def send(self, item: T_Item) -> None: + await self.send_stream.send(item) + + async def send_eof(self) -> None: + await self.send_stream.aclose() + + async def aclose(self) -> None: + await self.send_stream.aclose() + await self.receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.send_stream.extra_attributes, + **self.receive_stream.extra_attributes, + } + + +@dataclass(eq=False) +class MultiListener(Generic[T_Stream], Listener[T_Stream]): + """ + Combines multiple listeners into one, serving connections from all of them at once. + + Any MultiListeners in the given collection of listeners will have their listeners moved into + this one. + + Extra attributes are provided from each listener, with each successive listener overriding any + conflicting attributes from the previous one. + + :param listeners: listeners to serve + :type listeners: Sequence[Listener[T_Stream]] + """ + + listeners: Sequence[Listener[T_Stream]] + + def __post_init__(self) -> None: + listeners: List[Listener[T_Stream]] = [] + for listener in self.listeners: + if isinstance(listener, MultiListener): + listeners.extend(listener.listeners) + del listener.listeners[:] # type: ignore[attr-defined] + else: + listeners.append(listener) + + self.listeners = listeners + + async def serve( + self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None + ) -> None: + from .. import create_task_group + + async with create_task_group() as tg: + for listener in self.listeners: + tg.start_soon(listener.serve, handler, task_group) + + async def aclose(self) -> None: + for listener in self.listeners: + await listener.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + attributes: dict = {} + for listener in self.listeners: + attributes.update(listener.extra_attributes) + + return attributes diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/text.py b/myenv/lib/python3.9/site-packages/anyio/streams/text.py new file mode 100644 index 0000000..ccb683c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/text.py @@ -0,0 +1,141 @@ +import codecs +from dataclasses import InitVar, dataclass, field +from typing import Any, Callable, Mapping, Tuple + +from ..abc import ( + AnyByteReceiveStream, + AnyByteSendStream, + AnyByteStream, + ObjectReceiveStream, + ObjectSendStream, + ObjectStream, +) + + +@dataclass(eq=False) +class TextReceiveStream(ObjectReceiveStream[str]): + """ + Stream wrapper that decodes bytes to strings using the given encoding. + + Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any completely + received unicode characters as soon as they come in. + + :param transport_stream: any bytes-based receive stream + :param encoding: character encoding to use for decoding bytes to strings (defaults to + ``utf-8``) + :param errors: handling scheme for decoding errors (defaults to ``strict``; see the + `codecs module documentation`_ for a comprehensive list of options) + + .. 
_codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteReceiveStream + encoding: InitVar[str] = "utf-8" + errors: InitVar[str] = "strict" + _decoder: codecs.IncrementalDecoder = field(init=False) + + def __post_init__(self, encoding: str, errors: str) -> None: + decoder_class = codecs.getincrementaldecoder(encoding) + self._decoder = decoder_class(errors=errors) + + async def receive(self) -> str: + while True: + chunk = await self.transport_stream.receive() + decoded = self._decoder.decode(chunk) + if decoded: + return decoded + + async def aclose(self) -> None: + await self.transport_stream.aclose() + self._decoder.reset() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.transport_stream.extra_attributes + + +@dataclass(eq=False) +class TextSendStream(ObjectSendStream[str]): + """ + Sends strings to the wrapped stream as bytes using the given encoding. + + :param AnyByteSendStream transport_stream: any bytes-based send stream + :param str encoding: character encoding to use for encoding strings to bytes (defaults to + ``utf-8``) + :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the + `codecs module documentation`_ for a comprehensive list of options) + + .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteSendStream + encoding: InitVar[str] = "utf-8" + errors: str = "strict" + _encoder: Callable[..., Tuple[bytes, int]] = field(init=False) + + def __post_init__(self, encoding: str) -> None: + self._encoder = codecs.getencoder(encoding) + + async def send(self, item: str) -> None: + encoded = self._encoder(item, self.errors)[0] + await self.transport_stream.send(encoded) + + async def aclose(self) -> None: + await self.transport_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.transport_stream.extra_attributes + + +@dataclass(eq=False) +class TextStream(ObjectStream[str]): + """ + A bidirectional stream that decodes bytes to strings on receive and encodes strings to bytes on + send. + + Extra attributes will be provided from both streams, with the receive stream providing the + values in case of a conflict. + + :param AnyByteStream transport_stream: any bytes-based stream + :param str encoding: character encoding to use for encoding/decoding strings to/from bytes + (defaults to ``utf-8``) + :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the + `codecs module documentation`_ for a comprehensive list of options) + + .. 
_codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteStream + encoding: InitVar[str] = "utf-8" + errors: InitVar[str] = "strict" + _receive_stream: TextReceiveStream = field(init=False) + _send_stream: TextSendStream = field(init=False) + + def __post_init__(self, encoding: str, errors: str) -> None: + self._receive_stream = TextReceiveStream( + self.transport_stream, encoding=encoding, errors=errors + ) + self._send_stream = TextSendStream( + self.transport_stream, encoding=encoding, errors=errors + ) + + async def receive(self) -> str: + return await self._receive_stream.receive() + + async def send(self, item: str) -> None: + await self._send_stream.send(item) + + async def send_eof(self) -> None: + await self.transport_stream.send_eof() + + async def aclose(self) -> None: + await self._send_stream.aclose() + await self._receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self._send_stream.extra_attributes, + **self._receive_stream.extra_attributes, + } diff --git a/myenv/lib/python3.9/site-packages/anyio/streams/tls.py b/myenv/lib/python3.9/site-packages/anyio/streams/tls.py new file mode 100644 index 0000000..c8e19e2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/streams/tls.py @@ -0,0 +1,317 @@ +import logging +import re +import ssl +from dataclasses import dataclass +from functools import wraps +from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, TypeVar, Union + +from .. import ( + BrokenResourceError, + EndOfStream, + aclose_forcefully, + get_cancelled_exc_class, +) +from .._core._typedattr import TypedAttributeSet, typed_attribute +from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup + +T_Retval = TypeVar("T_Retval") +_PCTRTT = Tuple[Tuple[str, str], ...] +_PCTRTTT = Tuple[_PCTRTT, ...] + + +class TLSAttribute(TypedAttributeSet): + """Contains Transport Layer Security related attributes.""" + + #: the selected ALPN protocol + alpn_protocol: Optional[str] = typed_attribute() + #: the channel binding for type ``tls-unique`` + channel_binding_tls_unique: bytes = typed_attribute() + #: the selected cipher + cipher: Tuple[str, str, int] = typed_attribute() + #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert` for more + #: information) + peer_certificate: Optional[ + Dict[str, Union[str, _PCTRTTT, _PCTRTT]] + ] = typed_attribute() + #: the peer certificate in binary form + peer_certificate_binary: Optional[bytes] = typed_attribute() + #: ``True`` if this is the server side of the connection + server_side: bool = typed_attribute() + #: ciphers shared between both ends of the TLS connection + shared_ciphers: List[Tuple[str, str, int]] = typed_attribute() + #: the :class:`~ssl.SSLObject` used for encryption + ssl_object: ssl.SSLObject = typed_attribute() + #: ``True`` if this stream does (and expects) a closing TLS handshake when the stream is being + #: closed + standard_compatible: bool = typed_attribute() + #: the TLS protocol version (e.g. ``TLSv1.2``) + tls_version: str = typed_attribute() + + +@dataclass(eq=False) +class TLSStream(ByteStream): + """ + A stream wrapper that encrypts all sent data and decrypts received data. + + This class has no public initializer; use :meth:`wrap` instead. + All extra attributes from :class:`~TLSAttribute` are supported. 
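A brief sketch of the text wrappers defined above, using a memory object stream as the byte transport::

    import anyio
    from anyio.streams.text import TextReceiveStream, TextSendStream

    async def main() -> None:
        send, receive = anyio.create_memory_object_stream(1)
        text_send = TextSendStream(send, encoding="utf-8")
        text_receive = TextReceiveStream(receive, encoding="utf-8")
        await text_send.send("grüße")         # encoded to bytes on the way out
        print(await text_receive.receive())   # decoded back to 'grüße'

    anyio.run(main)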
+ + :var AnyByteStream transport_stream: the wrapped stream + + """ + + transport_stream: AnyByteStream + standard_compatible: bool + _ssl_object: ssl.SSLObject + _read_bio: ssl.MemoryBIO + _write_bio: ssl.MemoryBIO + + @classmethod + async def wrap( + cls, + transport_stream: AnyByteStream, + *, + server_side: Optional[bool] = None, + hostname: Optional[str] = None, + ssl_context: Optional[ssl.SSLContext] = None, + standard_compatible: bool = True, + ) -> "TLSStream": + """ + Wrap an existing stream with Transport Layer Security. + + This performs a TLS handshake with the peer. + + :param transport_stream: a bytes-transporting stream to wrap + :param server_side: ``True`` if this is the server side of the connection, ``False`` if + this is the client side (if omitted, will be set to ``False`` if ``hostname`` has been + provided, ``False`` otherwise). Used only to create a default context when an explicit + context has not been provided. + :param hostname: host name of the peer (if host name checking is desired) + :param ssl_context: the SSLContext object to use (if not provided, a secure default will be + created) + :param standard_compatible: if ``False``, skip the closing handshake when closing the + connection, and don't raise an exception if the peer does the same + :raises ~ssl.SSLError: if the TLS handshake fails + + """ + if server_side is None: + server_side = not hostname + + if not ssl_context: + purpose = ( + ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH + ) + ssl_context = ssl.create_default_context(purpose) + + # Re-enable detection of unexpected EOFs if it was disabled by Python + if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"): + ssl_context.options ^= ssl.OP_IGNORE_UNEXPECTED_EOF # type: ignore[attr-defined] + + bio_in = ssl.MemoryBIO() + bio_out = ssl.MemoryBIO() + ssl_object = ssl_context.wrap_bio( + bio_in, bio_out, server_side=server_side, server_hostname=hostname + ) + wrapper = cls( + transport_stream=transport_stream, + standard_compatible=standard_compatible, + _ssl_object=ssl_object, + _read_bio=bio_in, + _write_bio=bio_out, + ) + await wrapper._call_sslobject_method(ssl_object.do_handshake) + return wrapper + + async def _call_sslobject_method( + self, func: Callable[..., T_Retval], *args: object + ) -> T_Retval: + while True: + try: + result = func(*args) + except ssl.SSLWantReadError: + try: + # Flush any pending writes first + if self._write_bio.pending: + await self.transport_stream.send(self._write_bio.read()) + + data = await self.transport_stream.receive() + except EndOfStream: + self._read_bio.write_eof() + except OSError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + raise BrokenResourceError from exc + else: + self._read_bio.write(data) + except ssl.SSLWantWriteError: + await self.transport_stream.send(self._write_bio.read()) + except ssl.SSLSyscallError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + raise BrokenResourceError from exc + except ssl.SSLError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + if ( + isinstance(exc, ssl.SSLEOFError) + or "UNEXPECTED_EOF_WHILE_READING" in exc.strerror + ): + if self.standard_compatible: + raise BrokenResourceError from exc + else: + raise EndOfStream from None + + raise + else: + # Flush any pending writes first + if self._write_bio.pending: + await self.transport_stream.send(self._write_bio.read()) + + return result + + async def unwrap(self) -> Tuple[AnyByteStream, bytes]: + """ + Does the TLS closing handshake. 
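A hedged usage sketch for ``wrap()``; it needs network access, and ``anyio.connect_tcp()`` can also do the same in one step via its ``tls=`` parameter::

    import anyio
    from anyio.streams.tls import TLSStream

    async def main() -> None:
        tcp = await anyio.connect_tcp("example.com", 443)
        tls = await TLSStream.wrap(tcp, hostname="example.com")  # performs the TLS handshake
        await tls.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await tls.receive())
        await tls.aclose()

    anyio.run(main)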
+ + :return: a tuple of (wrapped byte stream, bytes left in the read buffer) + + """ + await self._call_sslobject_method(self._ssl_object.unwrap) + self._read_bio.write_eof() + self._write_bio.write_eof() + return self.transport_stream, self._read_bio.read() + + async def aclose(self) -> None: + if self.standard_compatible: + try: + await self.unwrap() + except BaseException: + await aclose_forcefully(self.transport_stream) + raise + + await self.transport_stream.aclose() + + async def receive(self, max_bytes: int = 65536) -> bytes: + data = await self._call_sslobject_method(self._ssl_object.read, max_bytes) + if not data: + raise EndOfStream + + return data + + async def send(self, item: bytes) -> None: + await self._call_sslobject_method(self._ssl_object.write, item) + + async def send_eof(self) -> None: + tls_version = self.extra(TLSAttribute.tls_version) + match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version) + if match: + major, minor = int(match.group(1)), int(match.group(2) or 0) + if (major, minor) < (1, 3): + raise NotImplementedError( + f"send_eof() requires at least TLSv1.3; current " + f"session uses {tls_version}" + ) + + raise NotImplementedError( + "send_eof() has not yet been implemented for TLS streams" + ) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.transport_stream.extra_attributes, + TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol, + TLSAttribute.channel_binding_tls_unique: self._ssl_object.get_channel_binding, + TLSAttribute.cipher: self._ssl_object.cipher, + TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False), + TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert( + True + ), + TLSAttribute.server_side: lambda: self._ssl_object.server_side, + TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers(), + TLSAttribute.standard_compatible: lambda: self.standard_compatible, + TLSAttribute.ssl_object: lambda: self._ssl_object, + TLSAttribute.tls_version: self._ssl_object.version, + } + + +@dataclass(eq=False) +class TLSListener(Listener[TLSStream]): + """ + A convenience listener that wraps another listener and auto-negotiates a TLS session on every + accepted connection. + + If the TLS handshake times out or raises an exception, :meth:`handle_handshake_error` is + called to do whatever post-mortem processing is deemed necessary. + + Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute. + + :param Listener listener: the listener to wrap + :param ssl_context: the SSL context object + :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap` + :param handshake_timeout: time limit for the TLS handshake + (passed to :func:`~anyio.fail_after`) + """ + + listener: Listener[Any] + ssl_context: ssl.SSLContext + standard_compatible: bool = True + handshake_timeout: float = 30 + + @staticmethod + async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None: + f""" + Handle an exception raised during the TLS handshake. + + This method does 3 things: + + #. Forcefully closes the original stream + #. Logs the exception (unless it was a cancellation exception) using the ``{__name__}`` + logger + #. 
Reraises the exception if it was a base exception or a cancellation exception + + :param exc: the exception + :param stream: the original stream + + """ + await aclose_forcefully(stream) + + # Log all except cancellation exceptions + if not isinstance(exc, get_cancelled_exc_class()): + logging.getLogger(__name__).exception("Error during TLS handshake") + + # Only reraise base exceptions and cancellation exceptions + if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()): + raise + + async def serve( + self, + handler: Callable[[TLSStream], Any], + task_group: Optional[TaskGroup] = None, + ) -> None: + @wraps(handler) + async def handler_wrapper(stream: AnyByteStream) -> None: + from .. import fail_after + + try: + with fail_after(self.handshake_timeout): + wrapped_stream = await TLSStream.wrap( + stream, + ssl_context=self.ssl_context, + standard_compatible=self.standard_compatible, + ) + except BaseException as exc: + await self.handle_handshake_error(exc, stream) + else: + await handler(wrapped_stream) + + await self.listener.serve(handler_wrapper, task_group) + + async def aclose(self) -> None: + await self.listener.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + TLSAttribute.standard_compatible: lambda: self.standard_compatible, + } diff --git a/myenv/lib/python3.9/site-packages/anyio/to_process.py b/myenv/lib/python3.9/site-packages/anyio/to_process.py new file mode 100644 index 0000000..39a3173 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/to_process.py @@ -0,0 +1,247 @@ +import os +import pickle +import subprocess +import sys +from collections import deque +from importlib.util import module_from_spec, spec_from_file_location +from typing import Callable, Deque, List, Optional, Set, Tuple, TypeVar, cast + +from ._core._eventloop import current_time, get_asynclib, get_cancelled_exc_class +from ._core._exceptions import BrokenWorkerProcess +from ._core._subprocesses import open_process +from ._core._synchronization import CapacityLimiter +from ._core._tasks import CancelScope, fail_after +from .abc import ByteReceiveStream, ByteSendStream, Process +from .lowlevel import RunVar, checkpoint_if_cancelled +from .streams.buffered import BufferedByteReceiveStream + +WORKER_MAX_IDLE_TIME = 300 # 5 minutes + +T_Retval = TypeVar("T_Retval") +_process_pool_workers: RunVar[Set[Process]] = RunVar("_process_pool_workers") +_process_pool_idle_workers: RunVar[Deque[Tuple[Process, float]]] = RunVar( + "_process_pool_idle_workers" +) +_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter") + + +async def run_sync( + func: Callable[..., T_Retval], + *args: object, + cancellable: bool = False, + limiter: Optional[CapacityLimiter] = None, +) -> T_Retval: + """ + Call the given function with the given arguments in a worker process. + + If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, + the worker process running it will be abruptly terminated using SIGKILL (or + ``terminateProcess()`` on Windows). + + :param func: a callable + :param args: positional arguments for the callable + :param cancellable: ``True`` to allow cancellation of the operation while it's running + :param limiter: capacity limiter to use to limit the total amount of processes running + (if omitted, the default limiter is used) + :return: an awaitable that yields the return value of the function. 
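A short sketch of offloading CPU-bound work with this function; the callable must be defined at module level so it can be pickled for the worker process, and the ``__main__`` guard keeps the worker from re-running the script::

    import anyio
    from anyio import to_process

    def cpu_bound(n: int) -> int:
        # Pure-Python number crunching that would otherwise hog the event loop thread
        return sum(i * i for i in range(n))

    async def main() -> None:
        result = await to_process.run_sync(cpu_bound, 100_000)
        print(result)

    if __name__ == "__main__":
        anyio.run(main)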
+ + """ + + async def send_raw_command(pickled_cmd: bytes) -> object: + try: + await stdin.send(pickled_cmd) + response = await buffered.receive_until(b"\n", 50) + status, length = response.split(b" ") + if status not in (b"RETURN", b"EXCEPTION"): + raise RuntimeError( + f"Worker process returned unexpected response: {response!r}" + ) + + pickled_response = await buffered.receive_exactly(int(length)) + except BaseException as exc: + workers.discard(process) + try: + process.kill() + with CancelScope(shield=True): + await process.aclose() + except ProcessLookupError: + pass + + if isinstance(exc, get_cancelled_exc_class()): + raise + else: + raise BrokenWorkerProcess from exc + + retval = pickle.loads(pickled_response) + if status == b"EXCEPTION": + assert isinstance(retval, BaseException) + raise retval + else: + return retval + + # First pickle the request before trying to reserve a worker process + await checkpoint_if_cancelled() + request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL) + + # If this is the first run in this event loop thread, set up the necessary variables + try: + workers = _process_pool_workers.get() + idle_workers = _process_pool_idle_workers.get() + except LookupError: + workers = set() + idle_workers = deque() + _process_pool_workers.set(workers) + _process_pool_idle_workers.set(idle_workers) + get_asynclib().setup_process_pool_exit_at_shutdown(workers) + + async with (limiter or current_default_process_limiter()): + # Pop processes from the pool (starting from the most recently used) until we find one that + # hasn't exited yet + process: Process + while idle_workers: + process, idle_since = idle_workers.pop() + if process.returncode is None: + stdin = cast(ByteSendStream, process.stdin) + buffered = BufferedByteReceiveStream( + cast(ByteReceiveStream, process.stdout) + ) + + # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME seconds or + # longer + now = current_time() + killed_processes: List[Process] = [] + while idle_workers: + if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME: + break + + process, idle_since = idle_workers.popleft() + process.kill() + workers.remove(process) + killed_processes.append(process) + + with CancelScope(shield=True): + for process in killed_processes: + await process.aclose() + + break + + workers.remove(process) + else: + command = [sys.executable, "-u", "-m", __name__] + process = await open_process( + command, stdin=subprocess.PIPE, stdout=subprocess.PIPE + ) + try: + stdin = cast(ByteSendStream, process.stdin) + buffered = BufferedByteReceiveStream( + cast(ByteReceiveStream, process.stdout) + ) + with fail_after(20): + message = await buffered.receive(6) + + if message != b"READY\n": + raise BrokenWorkerProcess( + f"Worker process returned unexpected response: {message!r}" + ) + + main_module_path = getattr(sys.modules["__main__"], "__file__", None) + pickled = pickle.dumps( + ("init", sys.path, main_module_path), + protocol=pickle.HIGHEST_PROTOCOL, + ) + await send_raw_command(pickled) + except (BrokenWorkerProcess, get_cancelled_exc_class()): + raise + except BaseException as exc: + process.kill() + raise BrokenWorkerProcess( + "Error during worker process initialization" + ) from exc + + workers.add(process) + + with CancelScope(shield=not cancellable): + try: + return cast(T_Retval, await send_raw_command(request)) + finally: + if process in workers: + idle_workers.append((process, current_time())) + + +def current_default_process_limiter() -> CapacityLimiter: + """ + Return the 
capacity limiter that is used by default to limit the number of worker processes. + + :return: a capacity limiter object + + """ + try: + return _default_process_limiter.get() + except LookupError: + limiter = CapacityLimiter(os.cpu_count() or 2) + _default_process_limiter.set(limiter) + return limiter + + +def process_worker() -> None: + # Redirect standard streams to os.devnull so that user code won't interfere with the + # parent-worker communication + stdin = sys.stdin + stdout = sys.stdout + sys.stdin = open(os.devnull) + sys.stdout = open(os.devnull, "w") + + stdout.buffer.write(b"READY\n") + while True: + retval = exception = None + try: + command, *args = pickle.load(stdin.buffer) + except EOFError: + return + except BaseException as exc: + exception = exc + else: + if command == "run": + func, args = args + try: + retval = func(*args) + except BaseException as exc: + exception = exc + elif command == "init": + main_module_path: Optional[str] + sys.path, main_module_path = args + del sys.modules["__main__"] + if main_module_path: + # Load the parent's main module but as __mp_main__ instead of __main__ + # (like multiprocessing does) to avoid infinite recursion + try: + spec = spec_from_file_location("__mp_main__", main_module_path) + if spec and spec.loader: + main = module_from_spec(spec) + spec.loader.exec_module(main) + sys.modules["__main__"] = main + except BaseException as exc: + exception = exc + + try: + if exception is not None: + status = b"EXCEPTION" + pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL) + else: + status = b"RETURN" + pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL) + except BaseException as exc: + exception = exc + status = b"EXCEPTION" + pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL) + + stdout.buffer.write(b"%s %d\n" % (status, len(pickled))) + stdout.buffer.write(pickled) + + # Respect SIGTERM + if isinstance(exception, SystemExit): + raise exception + + +if __name__ == "__main__": + process_worker() diff --git a/myenv/lib/python3.9/site-packages/anyio/to_thread.py b/myenv/lib/python3.9/site-packages/anyio/to_thread.py new file mode 100644 index 0000000..a2fd42f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/anyio/to_thread.py @@ -0,0 +1,65 @@ +from typing import Callable, Optional, TypeVar +from warnings import warn + +from ._core._eventloop import get_asynclib +from .abc import CapacityLimiter + +T_Retval = TypeVar("T_Retval") + + +async def run_sync( + func: Callable[..., T_Retval], + *args: object, + cancellable: bool = False, + limiter: Optional[CapacityLimiter] = None +) -> T_Retval: + """ + Call the given function with the given arguments in a worker thread. + + If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, + the thread will still run its course but its return value (or any raised exception) will be + ignored. + + :param func: a callable + :param args: positional arguments for the callable + :param cancellable: ``True`` to allow cancellation of the operation + :param limiter: capacity limiter to use to limit the total amount of threads running + (if omitted, the default limiter is used) + :return: an awaitable that yields the return value of the function. 
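For comparison, a minimal sketch of pushing a blocking call into a worker thread with this function::

    import time
    import anyio
    from anyio import to_thread

    async def main() -> None:
        # time.sleep() would block the event loop; run it in a worker thread instead
        await to_thread.run_sync(time.sleep, 1)
        print("slept without blocking the event loop")

    anyio.run(main)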
+ + """ + return await get_asynclib().run_sync_in_worker_thread( + func, *args, cancellable=cancellable, limiter=limiter + ) + + +async def run_sync_in_worker_thread( + func: Callable[..., T_Retval], + *args: object, + cancellable: bool = False, + limiter: Optional[CapacityLimiter] = None +) -> T_Retval: + warn( + "run_sync_in_worker_thread() has been deprecated, use anyio.to_thread.run_sync() instead", + DeprecationWarning, + ) + return await run_sync(func, *args, cancellable=cancellable, limiter=limiter) + + +def current_default_thread_limiter() -> CapacityLimiter: + """ + Return the capacity limiter that is used by default to limit the number of concurrent threads. + + :return: a capacity limiter object + + """ + return get_asynclib().current_default_thread_limiter() + + +def current_default_worker_thread_limiter() -> CapacityLimiter: + warn( + "current_default_worker_thread_limiter() has been deprecated, " + "use anyio.to_thread.current_default_thread_limiter() instead", + DeprecationWarning, + ) + return current_default_thread_limiter() diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/LICENSE new file mode 100644 index 0000000..5f4f225 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) Django Software Foundation and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of Django nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/METADATA b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/METADATA new file mode 100644 index 0000000..48f0b33 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/METADATA @@ -0,0 +1,245 @@ +Metadata-Version: 2.1 +Name: asgiref +Version: 3.5.2 +Summary: ASGI specs, helper code, and adapters +Home-page: https://github.com/django/asgiref/ +Author: Django Software Foundation +Author-email: foundation@djangoproject.com +License: BSD +Project-URL: Documentation, https://asgi.readthedocs.io/ +Project-URL: Further Documentation, https://docs.djangoproject.com/en/stable/topics/async/#async-adapter-functions +Project-URL: Changelog, https://github.com/django/asgiref/blob/master/CHANGELOG.txt +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Python: >=3.7 +License-File: LICENSE +Requires-Dist: typing-extensions ; python_version < "3.8" +Provides-Extra: tests +Requires-Dist: pytest ; extra == 'tests' +Requires-Dist: pytest-asyncio ; extra == 'tests' +Requires-Dist: mypy (>=0.800) ; extra == 'tests' + +asgiref +======= + +.. image:: https://api.travis-ci.org/django/asgiref.svg + :target: https://travis-ci.org/django/asgiref + +.. image:: https://img.shields.io/pypi/v/asgiref.svg + :target: https://pypi.python.org/pypi/asgiref + +ASGI is a standard for Python asynchronous web apps and servers to communicate +with each other, and positioned as an asynchronous successor to WSGI. You can +read more at https://asgi.readthedocs.io/en/latest/ + +This package includes ASGI base libraries, such as: + +* Sync-to-async and async-to-sync function wrappers, ``asgiref.sync`` +* Server base classes, ``asgiref.server`` +* A WSGI-to-ASGI adapter, in ``asgiref.wsgi`` + + +Function wrappers +----------------- + +These allow you to wrap or decorate async or sync functions to call them from +the other style (so you can call async functions from a synchronous thread, +or vice-versa). + +In particular: + +* AsyncToSync lets a synchronous subthread stop and wait while the async + function is called on the main thread's event loop, and then control is + returned to the thread when the async function is finished. + +* SyncToAsync lets async code call a synchronous function, which is run in + a threadpool and control returned to the async coroutine when the synchronous + function completes. + +The idea is to make it easier to call synchronous APIs from async code and +asynchronous APIs from synchronous code so it's easier to transition code from +one style to the other. In the case of Channels, we wrap the (synchronous) +Django view system with SyncToAsync to allow it to run inside the (asynchronous) +ASGI server. + +Note that exactly what threads things run in is very specific, and aimed to +keep maximum compatibility with old synchronous code. See +"Synchronous code & Threads" below for a full explanation. 
By default, +``sync_to_async`` will run all synchronous code in the program in the same +thread for safety reasons; you can disable this for more performance with +``@sync_to_async(thread_sensitive=False)``, but make sure that your code does +not rely on anything bound to threads (like database connections) when you do. + + +Threadlocal replacement +----------------------- + +This is a drop-in replacement for ``threading.local`` that works with both +threads and asyncio Tasks. Even better, it will proxy values through from a +task-local context to a thread-local context when you use ``sync_to_async`` +to run things in a threadpool, and vice-versa for ``async_to_sync``. + +If you instead want true thread- and task-safety, you can set +``thread_critical`` on the Local object to ensure this instead. + + +Server base classes +------------------- + +Includes a ``StatelessServer`` class which provides all the hard work of +writing a stateless server (as in, does not handle direct incoming sockets +but instead consumes external streams or sockets to work out what is happening). + +An example of such a server would be a chatbot server that connects out to +a central chat server and provides a "connection scope" per user chatting to +it. There's only one actual connection, but the server has to separate things +into several scopes for easier writing of the code. + +You can see an example of this being used in `frequensgi `_. + + +WSGI-to-ASGI adapter +-------------------- + +Allows you to wrap a WSGI application so it appears as a valid ASGI application. + +Simply wrap it around your WSGI application like so:: + + asgi_application = WsgiToAsgi(wsgi_application) + +The WSGI application will be run in a synchronous threadpool, and the wrapped +ASGI application will be one that accepts ``http`` class messages. + +Please note that not all extended features of WSGI may be supported (such as +file handles for incoming POST bodies). + + +Dependencies +------------ + +``asgiref`` requires Python 3.7 or higher. + + +Contributing +------------ + +Please refer to the +`main Channels contributing docs `_. + + +Testing +''''''' + +To run tests, make sure you have installed the ``tests`` extra with the package:: + + cd asgiref/ + pip install -e .[tests] + pytest + + +Building the documentation +'''''''''''''''''''''''''' + +The documentation uses `Sphinx `_:: + + cd asgiref/docs/ + pip install sphinx + +To build the docs, you can use the default tools:: + + sphinx-build -b html . _build/html # or `make html`, if you've got make set up + cd _build/html + python -m http.server + +...or you can use ``sphinx-autobuild`` to run a server and rebuild/reload +your documentation changes automatically:: + + pip install sphinx-autobuild + sphinx-autobuild . _build/html + + +Releasing +''''''''' + +To release, first add details to CHANGELOG.txt and update the version number in ``asgiref/__init__.py``. + +Then, build and push the packages:: + + python -m build + twine upload dist/* + rm -r build/ dist/ + + +Implementation Details +---------------------- + +Synchronous code & threads +'''''''''''''''''''''''''' + +The ``asgiref.sync`` module provides two wrappers that let you go between +asynchronous and synchronous code at will, while taking care of the rough edges +for you. + +Unfortunately, the rough edges are numerous, and the code has to work especially +hard to keep things in the same thread as much as possible. 
Notably, the +restrictions we are working with are: + +* All synchronous code called through ``SyncToAsync`` and marked with + ``thread_sensitive`` should run in the same thread as each other (and if the + outer layer of the program is synchronous, the main thread) + +* If a thread already has a running async loop, ``AsyncToSync`` can't run things + on that loop if it's blocked on synchronous code that is above you in the + call stack. + +The first compromise you get to might be that ``thread_sensitive`` code should +just run in the same thread and not spawn in a sub-thread, fulfilling the first +restriction, but that immediately runs you into the second restriction. + +The only real solution is to essentially have a variant of ThreadPoolExecutor +that executes any ``thread_sensitive`` code on the outermost synchronous +thread - either the main thread, or a single spawned subthread. + +This means you now have two basic states: + +* If the outermost layer of your program is synchronous, then all async code + run through ``AsyncToSync`` will run in a per-call event loop in arbitrary + sub-threads, while all ``thread_sensitive`` code will run in the main thread. + +* If the outermost layer of your program is asynchronous, then all async code + runs on the main thread's event loop, and all ``thread_sensitive`` synchronous + code will run in a single shared sub-thread. + +Crucially, this means that in both cases there is a thread which is a shared +resource that all ``thread_sensitive`` code must run on, and there is a chance +that this thread is currently blocked on its own ``AsyncToSync`` call. Thus, +``AsyncToSync`` needs to act as an executor for thread code while it's blocking. + +The ``CurrentThreadExecutor`` class provides this functionality; rather than +simply waiting on a Future, you can call its ``run_until_future`` method and +it will run submitted code until that Future is done. This means that code +inside the call can then run code on your thread. + + +Maintenance and Security +------------------------ + +To report security issues, please contact security@djangoproject.com. For GPG +signatures and more security process information, see +https://docs.djangoproject.com/en/dev/internals/security/. + +To report bugs or request new features, please open a new GitHub issue. + +This repository is part of the Channels project. For the shepherd and maintenance team, please see the +`main Channels readme `_. 
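The README above describes the ``sync_to_async`` / ``async_to_sync`` wrappers and the ``thread_sensitive`` flag, but never shows the two directions side by side. A short sketch of the documented calling pattern, assuming asgiref is installed (the ``blocking_query`` and ``fetch_page`` helpers are invented for illustration and are not part of this package)::

    import asyncio
    import time

    from asgiref.sync import async_to_sync, sync_to_async


    def blocking_query() -> str:
        # Hypothetical synchronous function (e.g. a database call).
        time.sleep(0.1)
        return "row"


    async def fetch_page() -> str:
        # Hypothetical coroutine.
        await asyncio.sleep(0.1)
        return "<html>"


    async def async_view() -> str:
        # Call sync code from async code; thread_sensitive=True (the default)
        # keeps it on the single shared sync thread described above.
        return await sync_to_async(blocking_query, thread_sensitive=True)()


    def sync_entrypoint() -> str:
        # Call async code from sync code; async_to_sync runs the coroutine on
        # an event loop and blocks the calling thread until it finishes.
        return async_to_sync(fetch_page)()


    print(asyncio.run(async_view()))
    print(sync_entrypoint())
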
diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/RECORD b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/RECORD new file mode 100644 index 0000000..83e51b9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/RECORD @@ -0,0 +1,17 @@ +asgiref/__init__.py,sha256=LtYJ5AVwuiAlsrJUQwzHZMrGMIRn7cuIoIt4OznYy6c,22 +asgiref/compatibility.py,sha256=MVH2bEdiCMMVTLbE-1V6KiU7q4LwqzP7PIufeXa-njM,1598 +asgiref/current_thread_executor.py,sha256=oeH8zv2tTmcbpxdUmOSMzbEXzeY5nJzIMFvzprE95gA,2801 +asgiref/local.py,sha256=nx5RqVFLYgUJVaxzApuQUW7dd9y21sruMYdgISoRs1k,4854 +asgiref/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +asgiref/server.py,sha256=egTQhZo1k4G0F7SSBQNp_VOekpGcjBJZU2kkCoiGC_M,6005 +asgiref/sync.py,sha256=3P813NHl3EHPMtzPEjaBelmjV_JUw97zYbtx-MmLUiw,20185 +asgiref/testing.py,sha256=3byNRV7Oto_Fg8Z-fErQJ3yGf7OQlcUexbN_cDQugzQ,3119 +asgiref/timeout.py,sha256=5Ekbmn3X1HPR55qgx-hPJMPEu_-YoivHqNhFEitiSYE,3440 +asgiref/typing.py,sha256=MZ7vbJY1F7EQqo9gL9pMSFRMw9b_SQrQQsnvlJQ2iP4,5603 +asgiref/wsgi.py,sha256=-L0eo_uK_dq7EPjv1meW1BRGytURaO9NPESxnJc9CtA,6575 +asgiref-3.5.2.dist-info/LICENSE,sha256=uEZBXRtRTpwd_xSiLeuQbXlLxUbKYSn5UKGM0JHipmk,1552 +asgiref-3.5.2.dist-info/METADATA,sha256=3JU5Zw-j9qCKPcuf3cJZ5dVispB_b7UXU0fnQVp9DDA,9143 +asgiref-3.5.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +asgiref-3.5.2.dist-info/top_level.txt,sha256=bokQjCzwwERhdBiPdvYEZa4cHxT4NCeAffQNUqJ8ssg,8 +asgiref-3.5.2.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +asgiref-3.5.2.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/top_level.txt new file mode 100644 index 0000000..ddf99d3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref-3.5.2.dist-info/top_level.txt @@ -0,0 +1 @@ +asgiref diff --git a/myenv/lib/python3.9/site-packages/asgiref/__init__.py b/myenv/lib/python3.9/site-packages/asgiref/__init__.py new file mode 100644 index 0000000..dae42b1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/__init__.py @@ -0,0 +1 @@ +__version__ = "3.5.2" diff --git a/myenv/lib/python3.9/site-packages/asgiref/compatibility.py b/myenv/lib/python3.9/site-packages/asgiref/compatibility.py new file mode 100644 index 0000000..eccaee0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/compatibility.py @@ -0,0 +1,47 @@ +import asyncio +import inspect + + +def is_double_callable(application): + """ + Tests to see if an application is a legacy-style (double-callable) application. + """ + # Look for a hint on the object first + if getattr(application, "_asgi_single_callable", False): + return False + if getattr(application, "_asgi_double_callable", False): + return True + # Uninstanted classes are double-callable + if inspect.isclass(application): + return True + # Instanted classes depend on their __call__ + if hasattr(application, "__call__"): + # We only check to see if its __call__ is a coroutine function - + # if it's not, it still might be a coroutine function itself. 
+ if asyncio.iscoroutinefunction(application.__call__): + return False + # Non-classes we just check directly + return not asyncio.iscoroutinefunction(application) + + +def double_to_single_callable(application): + """ + Transforms a double-callable ASGI application into a single-callable one. + """ + + async def new_application(scope, receive, send): + instance = application(scope) + return await instance(receive, send) + + return new_application + + +def guarantee_single_callable(application): + """ + Takes either a single- or double-callable application and always returns it + in single-callable style. Use this to add backwards compatibility for ASGI + 2.0 applications to your server/test harness/etc. + """ + if is_double_callable(application): + application = double_to_single_callable(application) + return application diff --git a/myenv/lib/python3.9/site-packages/asgiref/current_thread_executor.py b/myenv/lib/python3.9/site-packages/asgiref/current_thread_executor.py new file mode 100644 index 0000000..a7898f8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/current_thread_executor.py @@ -0,0 +1,81 @@ +import queue +import threading +from concurrent.futures import Executor, Future + + +class _WorkItem: + """ + Represents an item needing to be run in the executor. + Copied from ThreadPoolExecutor (but it's private, so we're not going to rely on importing it) + """ + + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException as exc: + self.future.set_exception(exc) + # Break a reference cycle with the exception 'exc' + self = None + else: + self.future.set_result(result) + + +class CurrentThreadExecutor(Executor): + """ + An Executor that actually runs code in the thread it is instantiated in. + Passed to other threads running async code, so they can run sync code in + the thread they came from. + """ + + def __init__(self): + self._work_thread = threading.current_thread() + self._work_queue = queue.Queue() + self._broken = False + + def run_until_future(self, future): + """ + Runs the code in the work queue until a result is available from the future. + Should be run from the thread the executor is initialised in. + """ + # Check we're in the right thread + if threading.current_thread() != self._work_thread: + raise RuntimeError( + "You cannot run CurrentThreadExecutor from a different thread" + ) + future.add_done_callback(self._work_queue.put) + # Keep getting and running work items until we get the future we're waiting for + # back via the future's done callback. 
+ try: + while True: + # Get a work item and run it + work_item = self._work_queue.get() + if work_item is future: + return + work_item.run() + del work_item + finally: + self._broken = True + + def submit(self, fn, *args, **kwargs): + # Check they're not submitting from the same thread + if threading.current_thread() == self._work_thread: + raise RuntimeError( + "You cannot submit onto CurrentThreadExecutor from its own thread" + ) + # Check they're not too late or the executor errored + if self._broken: + raise RuntimeError("CurrentThreadExecutor already quit or is broken") + # Add to work queue + f = Future() + work_item = _WorkItem(f, fn, args, kwargs) + self._work_queue.put(work_item) + # Return the future + return f diff --git a/myenv/lib/python3.9/site-packages/asgiref/local.py b/myenv/lib/python3.9/site-packages/asgiref/local.py new file mode 100644 index 0000000..17314d4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/local.py @@ -0,0 +1,120 @@ +import random +import string +import sys +import threading +import weakref + + +class Local: + """ + A drop-in replacement for threading.locals that also works with asyncio + Tasks (via the current_task asyncio method), and passes locals through + sync_to_async and async_to_sync. + + Specifically: + - Locals work per-coroutine on any thread not spawned using asgiref + - Locals work per-thread on any thread not spawned using asgiref + - Locals are shared with the parent coroutine when using sync_to_async + - Locals are shared with the parent thread when using async_to_sync + (and if that thread was launched using sync_to_async, with its parent + coroutine as well, with this working for indefinite levels of nesting) + + Set thread_critical to True to not allow locals to pass from an async Task + to a thread it spawns. This is needed for code that truly needs + thread-safety, as opposed to things used for helpful context (e.g. sqlite + does not like being called from a different thread to the one it is from). + Thread-critical code will still be differentiated per-Task within a thread + as it is expected it does not like concurrent access. + + This doesn't use contextvars as it needs to support 3.6. Once it can support + 3.7 only, we can then reimplement the storage more nicely. + """ + + def __init__(self, thread_critical: bool = False) -> None: + self._thread_critical = thread_critical + self._thread_lock = threading.RLock() + self._context_refs: "weakref.WeakSet[object]" = weakref.WeakSet() + # Random suffixes stop accidental reuse between different Locals, + # though we try to force deletion as well. + self._attr_name = "_asgiref_local_impl_{}_{}".format( + id(self), + "".join(random.choice(string.ascii_letters) for i in range(8)), + ) + + def _get_context_id(self): + """ + Get the ID we should use for looking up variables + """ + # Prevent a circular reference + from .sync import AsyncToSync, SyncToAsync + + # First, pull the current task if we can + context_id = SyncToAsync.get_current_task() + context_is_async = True + # OK, let's try for a thread ID + if context_id is None: + context_id = threading.current_thread() + context_is_async = False + # If we're thread-critical, we stop here, as we can't share contexts. 
+ if self._thread_critical: + return context_id + # Now, take those and see if we can resolve them through the launch maps + for i in range(sys.getrecursionlimit()): + try: + if context_is_async: + # Tasks have a source thread in AsyncToSync + context_id = AsyncToSync.launch_map[context_id] + context_is_async = False + else: + # Threads have a source task in SyncToAsync + context_id = SyncToAsync.launch_map[context_id] + context_is_async = True + except KeyError: + break + else: + # Catch infinite loops (they happen if you are screwing around + # with AsyncToSync implementations) + raise RuntimeError("Infinite launch_map loops") + return context_id + + def _get_storage(self): + context_obj = self._get_context_id() + if not hasattr(context_obj, self._attr_name): + setattr(context_obj, self._attr_name, {}) + self._context_refs.add(context_obj) + return getattr(context_obj, self._attr_name) + + def __del__(self): + try: + for context_obj in self._context_refs: + try: + delattr(context_obj, self._attr_name) + except AttributeError: + pass + except TypeError: + # WeakSet.__iter__ can crash when interpreter is shutting down due + # to _IterationGuard being None. + pass + + def __getattr__(self, key): + with self._thread_lock: + storage = self._get_storage() + if key in storage: + return storage[key] + else: + raise AttributeError(f"{self!r} object has no attribute {key!r}") + + def __setattr__(self, key, value): + if key in ("_context_refs", "_thread_critical", "_thread_lock", "_attr_name"): + return super().__setattr__(key, value) + with self._thread_lock: + storage = self._get_storage() + storage[key] = value + + def __delattr__(self, key): + with self._thread_lock: + storage = self._get_storage() + if key in storage: + del storage[key] + else: + raise AttributeError(f"{self!r} object has no attribute {key!r}") diff --git a/myenv/lib/python3.9/site-packages/asgiref/py.typed b/myenv/lib/python3.9/site-packages/asgiref/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/asgiref/server.py b/myenv/lib/python3.9/site-packages/asgiref/server.py new file mode 100644 index 0000000..43c28c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/server.py @@ -0,0 +1,157 @@ +import asyncio +import logging +import time +import traceback + +from .compatibility import guarantee_single_callable + +logger = logging.getLogger(__name__) + + +class StatelessServer: + """ + Base server class that handles basic concepts like application instance + creation/pooling, exception handling, and similar, for stateless protocols + (i.e. ones without actual incoming connections to the process) + + Your code should override the handle() method, doing whatever it needs to, + and calling get_or_create_application_instance with a unique `scope_id` + and `scope` for the scope it wants to get. + + If an application instance is found with the same `scope_id`, you are + given its input queue, otherwise one is made for you with the scope provided + and you are given that fresh new input queue. Either way, you should do + something like: + + input_queue = self.get_or_create_application_instance( + "user-123456", + {"type": "testprotocol", "user_id": "123456", "username": "andrew"}, + ) + input_queue.put_nowait(message) + + If you try and create an application instance and there are already + `max_application` instances, the oldest/least recently used one will be + reclaimed and shut down to make space. 
+ + Application coroutines that error will be found periodically (every 100ms + by default) and have their exceptions printed to the console. Override + application_exception() if you want to do more when this happens. + + If you override run(), make sure you handle things like launching the + application checker. + """ + + application_checker_interval = 0.1 + + def __init__(self, application, max_applications=1000): + # Parameters + self.application = application + self.max_applications = max_applications + # Initialisation + self.application_instances = {} + + ### Mainloop and handling + + def run(self): + """ + Runs the asyncio event loop with our handler loop. + """ + event_loop = asyncio.get_event_loop() + asyncio.ensure_future(self.application_checker()) + try: + event_loop.run_until_complete(self.handle()) + except KeyboardInterrupt: + logger.info("Exiting due to Ctrl-C/interrupt") + + async def handle(self): + raise NotImplementedError("You must implement handle()") + + async def application_send(self, scope, message): + """ + Receives outbound sends from applications and handles them. + """ + raise NotImplementedError("You must implement application_send()") + + ### Application instance management + + def get_or_create_application_instance(self, scope_id, scope): + """ + Creates an application instance and returns its queue. + """ + if scope_id in self.application_instances: + self.application_instances[scope_id]["last_used"] = time.time() + return self.application_instances[scope_id]["input_queue"] + # See if we need to delete an old one + while len(self.application_instances) > self.max_applications: + self.delete_oldest_application_instance() + # Make an instance of the application + input_queue = asyncio.Queue() + application_instance = guarantee_single_callable(self.application) + # Run it, and stash the future for later checking + future = asyncio.ensure_future( + application_instance( + scope=scope, + receive=input_queue.get, + send=lambda message: self.application_send(scope, message), + ), + ) + self.application_instances[scope_id] = { + "input_queue": input_queue, + "future": future, + "scope": scope, + "last_used": time.time(), + } + return input_queue + + def delete_oldest_application_instance(self): + """ + Finds and deletes the oldest application instance + """ + oldest_time = min( + details["last_used"] for details in self.application_instances.values() + ) + for scope_id, details in self.application_instances.items(): + if details["last_used"] == oldest_time: + self.delete_application_instance(scope_id) + # Return to make sure we only delete one in case two have + # the same oldest time + return + + def delete_application_instance(self, scope_id): + """ + Removes an application instance (makes sure its task is stopped, + then removes it from the current set) + """ + details = self.application_instances[scope_id] + del self.application_instances[scope_id] + if not details["future"].done(): + details["future"].cancel() + + async def application_checker(self): + """ + Goes through the set of current application instance Futures and cleans up + any that are done/prints exceptions for any that errored. 
+ """ + while True: + await asyncio.sleep(self.application_checker_interval) + for scope_id, details in list(self.application_instances.items()): + if details["future"].done(): + exception = details["future"].exception() + if exception: + await self.application_exception(exception, details) + try: + del self.application_instances[scope_id] + except KeyError: + # Exception handling might have already got here before us. That's fine. + pass + + async def application_exception(self, exception, application_details): + """ + Called whenever an application coroutine has an exception. + """ + logging.error( + "Exception inside application: %s\n%s%s", + exception, + "".join(traceback.format_tb(exception.__traceback__)), + f" {exception}", + ) diff --git a/myenv/lib/python3.9/site-packages/asgiref/sync.py b/myenv/lib/python3.9/site-packages/asgiref/sync.py new file mode 100644 index 0000000..6247043 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/sync.py @@ -0,0 +1,532 @@ +import asyncio +import asyncio.coroutines +import contextvars +import functools +import inspect +import os +import sys +import threading +import warnings +import weakref +from concurrent.futures import Future, ThreadPoolExecutor +from typing import Any, Callable, Dict, Optional, overload + +from .current_thread_executor import CurrentThreadExecutor +from .local import Local + + +def _restore_context(context): + # Check for changes in contextvars, and set them to the current + # context for downstream consumers + for cvar in context: + try: + if cvar.get() != context.get(cvar): + cvar.set(context.get(cvar)) + except LookupError: + cvar.set(context.get(cvar)) + + +def _iscoroutinefunction_or_partial(func: Any) -> bool: + # Python < 3.8 does not correctly determine partially wrapped + # coroutine functions are coroutine functions, hence the need for + # this to exist. Code taken from CPython. + if sys.version_info >= (3, 8): + return asyncio.iscoroutinefunction(func) + else: + while inspect.ismethod(func): + func = func.__func__ + while isinstance(func, functools.partial): + func = func.func + + return asyncio.iscoroutinefunction(func) + + +class ThreadSensitiveContext: + """Async context manager to manage context for thread sensitive mode + + This context manager controls which thread pool executor is used when in + thread sensitive mode. By default, a single thread pool executor is shared + within a process. + + In Python 3.7+, the ThreadSensitiveContext() context manager may be used to + specify a thread pool per context. + + This context manager is re-entrant, so only the outer-most call to + ThreadSensitiveContext will set the context. + + Usage: + + >>> import time + >>> async with ThreadSensitiveContext(): + ... await sync_to_async(time.sleep, 1)() + """ + + def __init__(self): + self.token = None + + async def __aenter__(self): + try: + SyncToAsync.thread_sensitive_context.get() + except LookupError: + self.token = SyncToAsync.thread_sensitive_context.set(self) + + return self + + async def __aexit__(self, exc, value, tb): + if not self.token: + return + + executor = SyncToAsync.context_to_thread_executor.pop(self, None) + if executor: + executor.shutdown() + SyncToAsync.thread_sensitive_context.reset(self.token) + + +class AsyncToSync: + """ + Utility class which turns an awaitable that only works on the thread with + the event loop into a synchronous callable that works in a subthread. + + If the call stack contains an async loop, the code runs there. + Otherwise, the code runs in a new loop in a new thread. 
+ + Either way, this thread then pauses and waits to run any thread_sensitive + code called from further down the call stack using SyncToAsync, before + finally exiting once the async task returns. + """ + + # Maps launched Tasks to the threads that launched them (for locals impl) + launch_map: "Dict[asyncio.Task[object], threading.Thread]" = {} + + # Keeps track of which CurrentThreadExecutor to use. This uses an asgiref + # Local, not a threadlocal, so that tasks can work out what their parent used. + executors = Local() + + # When we can't find a CurrentThreadExecutor from the context, such as + # inside create_task, we'll look it up here from the running event loop. + loop_thread_executors: "Dict[asyncio.AbstractEventLoop, CurrentThreadExecutor]" = {} + + def __init__(self, awaitable, force_new_loop=False): + if not callable(awaitable) or ( + not _iscoroutinefunction_or_partial(awaitable) + and not _iscoroutinefunction_or_partial( + getattr(awaitable, "__call__", awaitable) + ) + ): + # Python does not have very reliable detection of async functions + # (lots of false negatives) so this is just a warning. + warnings.warn( + "async_to_sync was passed a non-async-marked callable", stacklevel=2 + ) + self.awaitable = awaitable + try: + self.__self__ = self.awaitable.__self__ + except AttributeError: + pass + if force_new_loop: + # They have asked that we always run in a new sub-loop. + self.main_event_loop = None + else: + try: + self.main_event_loop = asyncio.get_running_loop() + except RuntimeError: + # There's no event loop in this thread. Look for the threadlocal if + # we're inside SyncToAsync + main_event_loop_pid = getattr( + SyncToAsync.threadlocal, "main_event_loop_pid", None + ) + # We make sure the parent loop is from the same process - if + # they've forked, this is not going to be valid any more (#194) + if main_event_loop_pid and main_event_loop_pid == os.getpid(): + self.main_event_loop = getattr( + SyncToAsync.threadlocal, "main_event_loop", None + ) + else: + self.main_event_loop = None + + def __call__(self, *args, **kwargs): + # You can't call AsyncToSync from a thread with a running event loop + try: + event_loop = asyncio.get_running_loop() + except RuntimeError: + pass + else: + if event_loop.is_running(): + raise RuntimeError( + "You cannot use AsyncToSync in the same thread as an async event loop - " + "just await the async function directly." + ) + + # Wrapping context in list so it can be reassigned from within + # `main_wrap`. + context = [contextvars.copy_context()] + + # Make a future for the return information + call_result = Future() + # Get the source thread + source_thread = threading.current_thread() + # Make a CurrentThreadExecutor we'll use to idle in this thread - we + # need one for every sync frame, even if there's one above us in the + # same thread. + if hasattr(self.executors, "current"): + old_current_executor = self.executors.current + else: + old_current_executor = None + current_executor = CurrentThreadExecutor() + self.executors.current = current_executor + loop = None + # Use call_soon_threadsafe to schedule a synchronous callback on the + # main event loop's thread if it's there, otherwise make a new loop + # in this thread. + try: + awaitable = self.main_wrap( + args, kwargs, call_result, source_thread, sys.exc_info(), context + ) + + if not (self.main_event_loop and self.main_event_loop.is_running()): + # Make our own event loop - in a new thread - and run inside that. 
+ loop = asyncio.new_event_loop() + self.loop_thread_executors[loop] = current_executor + loop_executor = ThreadPoolExecutor(max_workers=1) + loop_future = loop_executor.submit( + self._run_event_loop, loop, awaitable + ) + if current_executor: + # Run the CurrentThreadExecutor until the future is done + current_executor.run_until_future(loop_future) + # Wait for future and/or allow for exception propagation + loop_future.result() + else: + # Call it inside the existing loop + self.main_event_loop.call_soon_threadsafe( + self.main_event_loop.create_task, awaitable + ) + if current_executor: + # Run the CurrentThreadExecutor until the future is done + current_executor.run_until_future(call_result) + finally: + # Clean up any executor we were running + if loop is not None: + del self.loop_thread_executors[loop] + if hasattr(self.executors, "current"): + del self.executors.current + if old_current_executor: + self.executors.current = old_current_executor + _restore_context(context[0]) + + # Wait for results from the future. + return call_result.result() + + def _run_event_loop(self, loop, coro): + """ + Runs the given event loop (designed to be called in a thread). + """ + asyncio.set_event_loop(loop) + try: + loop.run_until_complete(coro) + finally: + try: + # mimic asyncio.run() behavior + # cancel unexhausted async generators + tasks = asyncio.all_tasks(loop) + for task in tasks: + task.cancel() + + async def gather(): + await asyncio.gather(*tasks, return_exceptions=True) + + loop.run_until_complete(gather()) + for task in tasks: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler( + { + "message": "unhandled exception during loop shutdown", + "exception": task.exception(), + "task": task, + } + ) + if hasattr(loop, "shutdown_asyncgens"): + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + loop.close() + asyncio.set_event_loop(self.main_event_loop) + + def __get__(self, parent, objtype): + """ + Include self for methods + """ + func = functools.partial(self.__call__, parent) + return functools.update_wrapper(func, self.awaitable) + + async def main_wrap( + self, args, kwargs, call_result, source_thread, exc_info, context + ): + """ + Wraps the awaitable with something that puts the result into the + result/exception future. + """ + if context is not None: + _restore_context(context[0]) + + current_task = SyncToAsync.get_current_task() + self.launch_map[current_task] = source_thread + try: + # If we have an exception, run the function inside the except block + # after raising it so exc_info is correctly populated. + if exc_info[1]: + try: + raise exc_info[1] + except BaseException: + result = await self.awaitable(*args, **kwargs) + else: + result = await self.awaitable(*args, **kwargs) + except BaseException as e: + call_result.set_exception(e) + else: + call_result.set_result(result) + finally: + del self.launch_map[current_task] + + context[0] = contextvars.copy_context() + + +class SyncToAsync: + """ + Utility class which turns a synchronous callable into an awaitable that + runs in a threadpool. It also sets a threadlocal inside the thread so + calls to AsyncToSync can escape it. + + If thread_sensitive is passed, the code will run in the same thread as any + outer code. This is needed for underlying Python code that is not + threadsafe (for example, code which handles SQLite database connections). + + If the outermost program is async (i.e. 
SyncToAsync is outermost), then + this will be a dedicated single sub-thread that all sync code runs in, + one after the other. If the outermost program is sync (i.e. AsyncToSync is + outermost), this will just be the main thread. This is achieved by idling + with a CurrentThreadExecutor while AsyncToSync is blocking its sync parent, + rather than just blocking. + + If executor is passed in, that will be used instead of the loop's default executor. + In order to pass in an executor, thread_sensitive must be set to False, otherwise + a TypeError will be raised. + """ + + # If they've set ASGI_THREADS, update the default asyncio executor for now + if "ASGI_THREADS" in os.environ: + # We use get_event_loop here - not get_running_loop - as this will + # be run at import time, and we want to update the main thread's loop. + loop = asyncio.get_event_loop() + loop.set_default_executor( + ThreadPoolExecutor(max_workers=int(os.environ["ASGI_THREADS"])) + ) + + # Maps launched threads to the coroutines that spawned them + launch_map: "Dict[threading.Thread, asyncio.Task[object]]" = {} + + # Storage for main event loop references + threadlocal = threading.local() + + # Single-thread executor for thread-sensitive code + single_thread_executor = ThreadPoolExecutor(max_workers=1) + + # Maintain a contextvar for the current execution context. Optionally used + # for thread sensitive mode. + thread_sensitive_context: "contextvars.ContextVar[str]" = contextvars.ContextVar( + "thread_sensitive_context" + ) + + # Contextvar that is used to detect if the single thread executor + # would be awaited on while already being used in the same context + deadlock_context: "contextvars.ContextVar[bool]" = contextvars.ContextVar( + "deadlock_context" + ) + + # Maintaining a weak reference to the context ensures that thread pools are + # erased once the context goes out of scope. This terminates the thread pool. 
+ context_to_thread_executor: "weakref.WeakKeyDictionary[object, ThreadPoolExecutor]" = ( + weakref.WeakKeyDictionary() + ) + + def __init__( + self, + func: Callable[..., Any], + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, + ) -> None: + if ( + not callable(func) + or _iscoroutinefunction_or_partial(func) + or _iscoroutinefunction_or_partial(getattr(func, "__call__", func)) + ): + raise TypeError("sync_to_async can only be applied to sync functions.") + self.func = func + functools.update_wrapper(self, func) + self._thread_sensitive = thread_sensitive + self._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore + if thread_sensitive and executor is not None: + raise TypeError("executor must not be set when thread_sensitive is True") + self._executor = executor + try: + self.__self__ = func.__self__ # type: ignore + except AttributeError: + pass + + async def __call__(self, *args, **kwargs): + loop = asyncio.get_running_loop() + + # Work out what thread to run the code in + if self._thread_sensitive: + if hasattr(AsyncToSync.executors, "current"): + # If we have a parent sync thread above somewhere, use that + executor = AsyncToSync.executors.current + elif self.thread_sensitive_context and self.thread_sensitive_context.get( + None + ): + # If we have a way of retrieving the current context, attempt + # to use a per-context thread pool executor + thread_sensitive_context = self.thread_sensitive_context.get() + + if thread_sensitive_context in self.context_to_thread_executor: + # Re-use thread executor in current context + executor = self.context_to_thread_executor[thread_sensitive_context] + else: + # Create new thread executor in current context + executor = ThreadPoolExecutor(max_workers=1) + self.context_to_thread_executor[thread_sensitive_context] = executor + elif loop in AsyncToSync.loop_thread_executors: + # Re-use thread executor for running loop + executor = AsyncToSync.loop_thread_executors[loop] + elif self.deadlock_context and self.deadlock_context.get(False): + raise RuntimeError( + "Single thread executor already being used, would deadlock" + ) + else: + # Otherwise, we run it in a fixed single thread + executor = self.single_thread_executor + if self.deadlock_context: + self.deadlock_context.set(True) + else: + # Use the passed in executor, or the loop's default if it is None + executor = self._executor + + context = contextvars.copy_context() + child = functools.partial(self.func, *args, **kwargs) + func = context.run + args = (child,) + kwargs = {} + + try: + # Run the code in the right thread + future = loop.run_in_executor( + executor, + functools.partial( + self.thread_handler, + loop, + self.get_current_task(), + sys.exc_info(), + func, + *args, + **kwargs, + ), + ) + ret = await asyncio.wait_for(future, timeout=None) + + finally: + _restore_context(context) + if self.deadlock_context: + self.deadlock_context.set(False) + + return ret + + def __get__(self, parent, objtype): + """ + Include self for methods + """ + return functools.partial(self.__call__, parent) + + def thread_handler(self, loop, source_task, exc_info, func, *args, **kwargs): + """ + Wraps the sync application with exception handling. 
+ """ + # Set the threadlocal for AsyncToSync + self.threadlocal.main_event_loop = loop + self.threadlocal.main_event_loop_pid = os.getpid() + # Set the task mapping (used for the locals module) + current_thread = threading.current_thread() + if AsyncToSync.launch_map.get(source_task) == current_thread: + # Our parent task was launched from this same thread, so don't make + # a launch map entry - let it shortcut over us! (and stop infinite loops) + parent_set = False + else: + self.launch_map[current_thread] = source_task + parent_set = True + # Run the function + try: + # If we have an exception, run the function inside the except block + # after raising it so exc_info is correctly populated. + if exc_info[1]: + try: + raise exc_info[1] + except BaseException: + return func(*args, **kwargs) + else: + return func(*args, **kwargs) + finally: + # Only delete the launch_map parent if we set it, otherwise it is + # from someone else. + if parent_set: + del self.launch_map[current_thread] + + @staticmethod + def get_current_task(): + """ + Implementation of asyncio.current_task() + that returns None if there is no task. + """ + try: + return asyncio.current_task() + except RuntimeError: + return None + + +# Lowercase aliases (and decorator friendliness) +async_to_sync = AsyncToSync + + +@overload +def sync_to_async( + func: None = None, + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, +) -> Callable[[Callable[..., Any]], SyncToAsync]: + ... + + +@overload +def sync_to_async( + func: Callable[..., Any], + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, +) -> SyncToAsync: + ... + + +def sync_to_async( + func=None, + thread_sensitive=True, + executor=None, +): + if func is None: + return lambda f: SyncToAsync( + f, + thread_sensitive=thread_sensitive, + executor=executor, + ) + return SyncToAsync( + func, + thread_sensitive=thread_sensitive, + executor=executor, + ) diff --git a/myenv/lib/python3.9/site-packages/asgiref/testing.py b/myenv/lib/python3.9/site-packages/asgiref/testing.py new file mode 100644 index 0000000..6624317 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/testing.py @@ -0,0 +1,97 @@ +import asyncio +import time + +from .compatibility import guarantee_single_callable +from .timeout import timeout as async_timeout + + +class ApplicationCommunicator: + """ + Runs an ASGI application in a test mode, allowing sending of + messages to it and retrieval of messages it sends. + """ + + def __init__(self, application, scope): + self.application = guarantee_single_callable(application) + self.scope = scope + self.input_queue = asyncio.Queue() + self.output_queue = asyncio.Queue() + self.future = asyncio.ensure_future( + self.application(scope, self.input_queue.get, self.output_queue.put) + ) + + async def wait(self, timeout=1): + """ + Waits for the application to stop itself and returns any exceptions. 
+ """ + try: + async with async_timeout(timeout): + try: + await self.future + self.future.result() + except asyncio.CancelledError: + pass + finally: + if not self.future.done(): + self.future.cancel() + try: + await self.future + except asyncio.CancelledError: + pass + + def stop(self, exceptions=True): + if not self.future.done(): + self.future.cancel() + elif exceptions: + # Give a chance to raise any exceptions + self.future.result() + + def __del__(self): + # Clean up on deletion + try: + self.stop(exceptions=False) + except RuntimeError: + # Event loop already stopped + pass + + async def send_input(self, message): + """ + Sends a single message to the application + """ + # Give it the message + await self.input_queue.put(message) + + async def receive_output(self, timeout=1): + """ + Receives a single message from the application, with optional timeout. + """ + # Make sure there's not an exception to raise from the task + if self.future.done(): + self.future.result() + # Wait and receive the message + try: + async with async_timeout(timeout): + return await self.output_queue.get() + except asyncio.TimeoutError as e: + # See if we have another error to raise inside + if self.future.done(): + self.future.result() + else: + self.future.cancel() + try: + await self.future + except asyncio.CancelledError: + pass + raise e + + async def receive_nothing(self, timeout=0.1, interval=0.01): + """ + Checks that there is no message to receive in the given time. + """ + # `interval` has precedence over `timeout` + start = time.monotonic() + while time.monotonic() - start < timeout: + if not self.output_queue.empty(): + return False + await asyncio.sleep(interval) + return self.output_queue.empty() diff --git a/myenv/lib/python3.9/site-packages/asgiref/timeout.py b/myenv/lib/python3.9/site-packages/asgiref/timeout.py new file mode 100644 index 0000000..65932d1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/timeout.py @@ -0,0 +1,112 @@ +# This code is originally sourced from the aio-libs project "async_timeout", +# under the Apache 2.0 license. You may see the original project at +# https://github.com/aio-libs/async-timeout + +# It is vendored here to reduce chain-dependencies on this library, and +# modified slightly to remove some features we don't use. + + +import asyncio +from types import TracebackType +from typing import Any, Optional, Type + + +class timeout: + """timeout context manager. + + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... 
await r.text() + + + timeout - value in seconds or None to disable timeout logic + loop - asyncio compatible event loop + """ + + def __init__( + self, + timeout: Optional[float], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + self._timeout = timeout + if loop is None: + loop = asyncio.get_event_loop() + self._loop = loop + self._task = None # type: Optional[asyncio.Task[Any]] + self._cancelled = False + self._cancel_handler = None # type: Optional[asyncio.Handle] + self._cancel_at = None # type: Optional[float] + + def __enter__(self) -> "timeout": + return self._do_enter() + + def __exit__( + self, + exc_type: Type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + async def __aenter__(self) -> "timeout": + return self._do_enter() + + async def __aexit__( + self, + exc_type: Type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> None: + self._do_exit(exc_type) + + @property + def expired(self) -> bool: + return self._cancelled + + @property + def remaining(self) -> Optional[float]: + if self._cancel_at is not None: + return max(self._cancel_at - self._loop.time(), 0.0) + else: + return None + + def _do_enter(self) -> "timeout": + # Support Tornado 5- without timeout + # Details: https://github.com/python/asyncio/issues/392 + if self._timeout is None: + return self + + self._task = asyncio.current_task(self._loop) + if self._task is None: + raise RuntimeError( + "Timeout context manager should be used " "inside a task" + ) + + if self._timeout <= 0: + self._loop.call_soon(self._cancel_task) + return self + + self._cancel_at = self._loop.time() + self._timeout + self._cancel_handler = self._loop.call_at(self._cancel_at, self._cancel_task) + return self + + def _do_exit(self, exc_type: Type[BaseException]) -> None: + if exc_type is asyncio.CancelledError and self._cancelled: + self._cancel_handler = None + self._task = None + raise asyncio.TimeoutError + if self._timeout is not None and self._cancel_handler is not None: + self._cancel_handler.cancel() + self._cancel_handler = None + self._task = None + return None + + def _cancel_task(self) -> None: + if self._task is not None: + self._task.cancel() + self._cancelled = True diff --git a/myenv/lib/python3.9/site-packages/asgiref/typing.py b/myenv/lib/python3.9/site-packages/asgiref/typing.py new file mode 100644 index 0000000..c7d7576 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/typing.py @@ -0,0 +1,242 @@ +import sys +from typing import Awaitable, Callable, Dict, Iterable, Optional, Tuple, Type, Union + +if sys.version_info >= (3, 8): + from typing import Literal, Protocol, TypedDict +else: + from typing_extensions import Literal, Protocol, TypedDict + +__all__ = ( + "ASGIVersions", + "HTTPScope", + "WebSocketScope", + "LifespanScope", + "WWWScope", + "Scope", + "HTTPRequestEvent", + "HTTPResponseStartEvent", + "HTTPResponseBodyEvent", + "HTTPServerPushEvent", + "HTTPDisconnectEvent", + "WebSocketConnectEvent", + "WebSocketAcceptEvent", + "WebSocketReceiveEvent", + "WebSocketSendEvent", + "WebSocketResponseStartEvent", + "WebSocketResponseBodyEvent", + "WebSocketDisconnectEvent", + "WebSocketCloseEvent", + "LifespanStartupEvent", + "LifespanShutdownEvent", + "LifespanStartupCompleteEvent", + "LifespanStartupFailedEvent", + "LifespanShutdownCompleteEvent", + "LifespanShutdownFailedEvent", + "ASGIReceiveEvent", + "ASGISendEvent", + "ASGIReceiveCallable", + "ASGISendCallable", + "ASGI2Protocol", + 
"ASGI2Application", + "ASGI3Application", + "ASGIApplication", +) + + +class ASGIVersions(TypedDict): + spec_version: str + version: Union[Literal["2.0"], Literal["3.0"]] + + +class HTTPScope(TypedDict): + type: Literal["http"] + asgi: ASGIVersions + http_version: str + method: str + scheme: str + path: str + raw_path: bytes + query_string: bytes + root_path: str + headers: Iterable[Tuple[bytes, bytes]] + client: Optional[Tuple[str, int]] + server: Optional[Tuple[str, Optional[int]]] + extensions: Optional[Dict[str, Dict[object, object]]] + + +class WebSocketScope(TypedDict): + type: Literal["websocket"] + asgi: ASGIVersions + http_version: str + scheme: str + path: str + raw_path: bytes + query_string: bytes + root_path: str + headers: Iterable[Tuple[bytes, bytes]] + client: Optional[Tuple[str, int]] + server: Optional[Tuple[str, Optional[int]]] + subprotocols: Iterable[str] + extensions: Optional[Dict[str, Dict[object, object]]] + + +class LifespanScope(TypedDict): + type: Literal["lifespan"] + asgi: ASGIVersions + + +WWWScope = Union[HTTPScope, WebSocketScope] +Scope = Union[HTTPScope, WebSocketScope, LifespanScope] + + +class HTTPRequestEvent(TypedDict): + type: Literal["http.request"] + body: bytes + more_body: bool + + +class HTTPResponseStartEvent(TypedDict): + type: Literal["http.response.start"] + status: int + headers: Iterable[Tuple[bytes, bytes]] + + +class HTTPResponseBodyEvent(TypedDict): + type: Literal["http.response.body"] + body: bytes + more_body: bool + + +class HTTPServerPushEvent(TypedDict): + type: Literal["http.response.push"] + path: str + headers: Iterable[Tuple[bytes, bytes]] + + +class HTTPDisconnectEvent(TypedDict): + type: Literal["http.disconnect"] + + +class WebSocketConnectEvent(TypedDict): + type: Literal["websocket.connect"] + + +class WebSocketAcceptEvent(TypedDict): + type: Literal["websocket.accept"] + subprotocol: Optional[str] + headers: Iterable[Tuple[bytes, bytes]] + + +class WebSocketReceiveEvent(TypedDict): + type: Literal["websocket.receive"] + bytes: Optional[bytes] + text: Optional[str] + + +class WebSocketSendEvent(TypedDict): + type: Literal["websocket.send"] + bytes: Optional[bytes] + text: Optional[str] + + +class WebSocketResponseStartEvent(TypedDict): + type: Literal["websocket.http.response.start"] + status: int + headers: Iterable[Tuple[bytes, bytes]] + + +class WebSocketResponseBodyEvent(TypedDict): + type: Literal["websocket.http.response.body"] + body: bytes + more_body: bool + + +class WebSocketDisconnectEvent(TypedDict): + type: Literal["websocket.disconnect"] + code: int + + +class WebSocketCloseEvent(TypedDict): + type: Literal["websocket.close"] + code: int + reason: Optional[str] + + +class LifespanStartupEvent(TypedDict): + type: Literal["lifespan.startup"] + + +class LifespanShutdownEvent(TypedDict): + type: Literal["lifespan.shutdown"] + + +class LifespanStartupCompleteEvent(TypedDict): + type: Literal["lifespan.startup.complete"] + + +class LifespanStartupFailedEvent(TypedDict): + type: Literal["lifespan.startup.failed"] + message: str + + +class LifespanShutdownCompleteEvent(TypedDict): + type: Literal["lifespan.shutdown.complete"] + + +class LifespanShutdownFailedEvent(TypedDict): + type: Literal["lifespan.shutdown.failed"] + message: str + + +ASGIReceiveEvent = Union[ + HTTPRequestEvent, + HTTPDisconnectEvent, + WebSocketConnectEvent, + WebSocketReceiveEvent, + WebSocketDisconnectEvent, + LifespanStartupEvent, + LifespanShutdownEvent, +] + + +ASGISendEvent = Union[ + HTTPResponseStartEvent, + HTTPResponseBodyEvent, + 
HTTPServerPushEvent, + HTTPDisconnectEvent, + WebSocketAcceptEvent, + WebSocketSendEvent, + WebSocketResponseStartEvent, + WebSocketResponseBodyEvent, + WebSocketCloseEvent, + LifespanStartupCompleteEvent, + LifespanStartupFailedEvent, + LifespanShutdownCompleteEvent, + LifespanShutdownFailedEvent, +] + + +ASGIReceiveCallable = Callable[[], Awaitable[ASGIReceiveEvent]] +ASGISendCallable = Callable[[ASGISendEvent], Awaitable[None]] + + +class ASGI2Protocol(Protocol): + def __init__(self, scope: Scope) -> None: + ... + + async def __call__( + self, receive: ASGIReceiveCallable, send: ASGISendCallable + ) -> None: + ... + + +ASGI2Application = Type[ASGI2Protocol] +ASGI3Application = Callable[ + [ + Scope, + ASGIReceiveCallable, + ASGISendCallable, + ], + Awaitable[None], +] +ASGIApplication = Union[ASGI2Application, ASGI3Application] diff --git a/myenv/lib/python3.9/site-packages/asgiref/wsgi.py b/myenv/lib/python3.9/site-packages/asgiref/wsgi.py new file mode 100644 index 0000000..40fba20 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/asgiref/wsgi.py @@ -0,0 +1,162 @@ +from io import BytesIO +from tempfile import SpooledTemporaryFile + +from asgiref.sync import AsyncToSync, sync_to_async + + +class WsgiToAsgi: + """ + Wraps a WSGI application to make it into an ASGI application. + """ + + def __init__(self, wsgi_application): + self.wsgi_application = wsgi_application + + async def __call__(self, scope, receive, send): + """ + ASGI application instantiation point. + We return a new WsgiToAsgiInstance here with the WSGI app + and the scope, ready to respond when it is __call__ed. + """ + await WsgiToAsgiInstance(self.wsgi_application)(scope, receive, send) + + +class WsgiToAsgiInstance: + """ + Per-socket instance of a wrapped WSGI application + """ + + def __init__(self, wsgi_application): + self.wsgi_application = wsgi_application + self.response_started = False + self.response_content_length = None + + async def __call__(self, scope, receive, send): + if scope["type"] != "http": + raise ValueError("WSGI wrapper received a non-HTTP scope") + self.scope = scope + with SpooledTemporaryFile(max_size=65536) as body: + # Alright, wait for the http.request messages + while True: + message = await receive() + if message["type"] != "http.request": + raise ValueError("WSGI wrapper received a non-HTTP-request message") + body.write(message.get("body", b"")) + if not message.get("more_body"): + break + body.seek(0) + # Wrap send so it can be called from the subthread + self.sync_send = AsyncToSync(send) + # Call the WSGI app + await self.run_wsgi_app(body) + + def build_environ(self, scope, body): + """ + Builds a scope and request body into a WSGI environ object. 
+ """ + environ = { + "REQUEST_METHOD": scope["method"], + "SCRIPT_NAME": scope.get("root_path", "").encode("utf8").decode("latin1"), + "PATH_INFO": scope["path"].encode("utf8").decode("latin1"), + "QUERY_STRING": scope["query_string"].decode("ascii"), + "SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"], + "wsgi.version": (1, 0), + "wsgi.url_scheme": scope.get("scheme", "http"), + "wsgi.input": body, + "wsgi.errors": BytesIO(), + "wsgi.multithread": True, + "wsgi.multiprocess": True, + "wsgi.run_once": False, + } + # Get server name and port - required in WSGI, not in ASGI + if "server" in scope: + environ["SERVER_NAME"] = scope["server"][0] + environ["SERVER_PORT"] = str(scope["server"][1]) + else: + environ["SERVER_NAME"] = "localhost" + environ["SERVER_PORT"] = "80" + + if "client" in scope: + environ["REMOTE_ADDR"] = scope["client"][0] + + # Go through headers and make them into environ entries + for name, value in self.scope.get("headers", []): + name = name.decode("latin1") + if name == "content-length": + corrected_name = "CONTENT_LENGTH" + elif name == "content-type": + corrected_name = "CONTENT_TYPE" + else: + corrected_name = "HTTP_%s" % name.upper().replace("-", "_") + # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case + value = value.decode("latin1") + if corrected_name in environ: + value = environ[corrected_name] + "," + value + environ[corrected_name] = value + return environ + + def start_response(self, status, response_headers, exc_info=None): + """ + WSGI start_response callable. + """ + # Don't allow re-calling once response has begun + if self.response_started: + raise exc_info[1].with_traceback(exc_info[2]) + # Don't allow re-calling without exc_info + if hasattr(self, "response_start") and exc_info is None: + raise ValueError( + "You cannot call start_response a second time without exc_info" + ) + # Extract status code + status_code, _ = status.split(" ", 1) + status_code = int(status_code) + # Extract headers + headers = [ + (name.lower().encode("ascii"), value.encode("ascii")) + for name, value in response_headers + ] + # Extract content-length + self.response_content_length = None + for name, value in response_headers: + if name.lower() == "content-length": + self.response_content_length = int(value) + # Build and send response start message. + self.response_start = { + "type": "http.response.start", + "status": status_code, + "headers": headers, + } + + @sync_to_async + def run_wsgi_app(self, body): + """ + Called in a subthread to run the WSGI app. We encapsulate like + this so that the start_response callable is called in the same thread. 
+ """ + # Translate the scope and incoming request body into a WSGI environ + environ = self.build_environ(self.scope, body) + # Run the WSGI app + bytes_sent = 0 + for output in self.wsgi_application(environ, self.start_response): + # If this is the first response, include the response headers + if not self.response_started: + self.response_started = True + self.sync_send(self.response_start) + # If the application supplies a Content-Length header + if self.response_content_length is not None: + # The server should not transmit more bytes to the client than the header allows + bytes_allowed = self.response_content_length - bytes_sent + if len(output) > bytes_allowed: + output = output[:bytes_allowed] + self.sync_send( + {"type": "http.response.body", "body": output, "more_body": True} + ) + bytes_sent += len(output) + # The server should stop iterating over the response when enough data has been sent + if bytes_sent == self.response_content_length: + break + # Close connection + if not self.response_started: + self.response_started = True + self.sync_send(self.response_start) + self.sync_send({"type": "http.response.body"}) diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/CONTRIBUTORS.txt b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/CONTRIBUTORS.txt new file mode 100644 index 0000000..0fb15ad --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/CONTRIBUTORS.txt @@ -0,0 +1,175 @@ +# This file is autocompleted by 'contributors-txt', +# using the configuration in 'script/.contributors_aliases.json'. +# Do not add new persons manually and only add information without +# using '-' as the line first character. +# Please verify that your change are stable if you modify manually. + +Ex-maintainers +-------------- +- Claudiu Popa +- Sylvain Thénault +- Torsten Marek + + +Maintainers +----------- +- Pierre Sassoulas +- Hippo91 +- Marc Mueller <30130371+cdce8p@users.noreply.github.com> +- Daniël van Noord <13665637+DanielNoord@users.noreply.github.com> +- Bryce Guinta +- Ceridwen +- Łukasz Rogalski +- Florian Bruhin +- Ashley Whetter +- Jacob Walls +- Dimitri Prybysh +- Areveny + + +Contributors +------------ +- LOGILAB S.A. (Paris, FRANCE) +- Nick Drozd +- Andrew Haigh +- David Liu +- Eevee (Alex Munroe) +- David Gilman +- Julien Jehannet +- Calen Pennington +- Phil Schaf +- Hugo van Kemenade +- Alex Hall +- Tushar Sadhwani <86737547+tushar-deepsource@users.noreply.github.com> +- Tim Martin +- Raphael Gaschignard +- Radosław Ganczarek +- Paligot Gérard +- Ioana Tagirta +- Derek Gustafson +- David Shea +- Daniel Harding +- Ville Skyttä +- Rene Zhang +- Philip Lorenz +- Mario Corchero +- Marien Zwart +- FELD Boris +- Enji Cooper +- doranid +- brendanator +- Tomas Gavenciak +- Thomas Hisch +- Stefan Scherfke +- Sergei Lebedev <185856+superbobry@users.noreply.github.com> +- Ram Rachum +- Peter Pentchev +- Peter Kolbus +- Omer Katz +- Moises Lopez +- Keichi Takahashi +- Kavins Singh +- Karthikeyan Singaravelan +- Joshua Cannon +- John Vandenberg +- Jacob Bogdanov +- Google, Inc. 
+- David Euresti +- David Cain +- Anthony Sottile +- Alexander Shadchin +- wgehalo +- tristanlatr <19967168+tristanlatr@users.noreply.github.com> +- rr- +- raylu +- mathieui +- markmcclain +- ioanatia +- grayjk +- Zbigniew Jędrzejewski-Szmek +- Zac Hatfield-Dodds +- Vilnis Termanis +- Valentin Valls +- Uilian Ries +- Tomas Novak +- Thirumal Venkat +- SupImDos <62866982+SupImDos@users.noreply.github.com> +- Stanislav Levin +- Simon Hewitt +- Serhiy Storchaka +- Roy Wright +- Robin Jarry +- René Fritze <47802+renefritze@users.noreply.github.com> +- Redoubts +- Philipp Hörist +- Peter de Blanc +- Peter Talley +- Ovidiu Sabou +- Nicolas Noirbent +- Neil Girdhar +- Michał Masłowski +- Michael K +- Mateusz Bysiek +- Mark Byrne <31762852+mbyrnepr2@users.noreply.github.com> +- Leandro T. C. Melo +- Konrad Weihmann +- Kian Meng, Ang +- Kai Mueller <15907922+kasium@users.noreply.github.com> +- Jörg Thalheim +- Jonathan Striebel +- John Belmonte +- Jeff Widman +- Jeff Quast +- Jarrad Hope +- Jared Garst +- Jakub Wilk +- Iva Miholic +- Ionel Maries Cristian +- HoverHell +- HQupgradeHQ <18361586+HQupgradeHQ@users.noreply.github.com> +- Grygorii Iermolenko +- Gregory P. Smith +- Giuseppe Scrivano +- Frédéric Chapoton +- Francis Charette Migneault +- Felix Mölder +- Federico Bond +- DudeNr33 <3929834+DudeNr33@users.noreply.github.com> +- Dmitry Shachnev +- Denis Laxalde +- David Poirier +- Dave Hirschfeld +- Dave Baum +- Daniel Martin +- Daniel Colascione +- Damien Baty +- Craig Franklin +- Colin Kennedy +- Cole Robinson +- Christoph Reiter +- Chris Philip +- BioGeek +- Bianca Power <30207144+biancapower@users.noreply.github.com> +- Benjamin Elven <25181435+S3ntinelX@users.noreply.github.com> +- Becker Awqatty +- BasPH +- Azeem Bande-Ali +- Aru Sahni +- Artsiom Kaval +- Anubhav <35621759+anubh-v@users.noreply.github.com> +- Antoine Boellinger +- Alphadelta14 +- Alexander Presnyakov +- Ahmed Azzaoui + +Co-Author +--------- +The following persons were credited manually but did not commit themselves +under this name, or we did not manage to find their commits in the history. + +- François Mockers +- platings +- carl +- alain lefroy +- Mark Gius +- jarradhope diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/LICENSE new file mode 100644 index 0000000..182e0fb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/LICENSE @@ -0,0 +1,508 @@ + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. 
By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations +below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. 
The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it +becomes a de-facto standard. To achieve this, non-free programs must +be allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control +compilation and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. 
To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. 
+ + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at least + three years, to give the same user the materials specified in + Subsection 6a, above, for a charge no more than the cost of + performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. 
+ + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply, and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. 
If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License +may add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms +of the ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
+It is safest to attach them to the start of each source file to most +effectively convey the exclusion of warranty; and each file should +have at least the "copyright" line and a pointer to where the full +notice is found. + + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or +your school, if any, to sign a "copyright disclaimer" for the library, +if necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James + Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/METADATA b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/METADATA new file mode 100644 index 0000000..4096b32 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/METADATA @@ -0,0 +1,129 @@ +Metadata-Version: 2.1 +Name: astroid +Version: 2.11.7 +Summary: An abstract syntax tree for Python with inference support. 
+Home-page: https://github.com/PyCQA/astroid +Author: Python Code Quality Authority +Author-email: code-quality@python.org +License: LGPL-2.1-or-later +Project-URL: Bug tracker, https://github.com/PyCQA/astroid/issues +Project-URL: Discord server, https://discord.gg/Egy6P8AMB5 +Keywords: static code analysis,python,abstract syntax tree +Classifier: Development Status :: 6 - Mature +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Requires-Python: >=3.6.2 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: CONTRIBUTORS.txt +Requires-Dist: lazy-object-proxy (>=1.4.0) +Requires-Dist: wrapt (<2,>=1.11) +Requires-Dist: setuptools (>=20.0) +Requires-Dist: typed-ast (<2.0,>=1.4.0) ; implementation_name == "cpython" and python_version < "3.8" +Requires-Dist: typing-extensions (>=3.10) ; python_version < "3.10" + +Astroid +======= + +.. image:: https://coveralls.io/repos/github/PyCQA/astroid/badge.svg?branch=main + :target: https://coveralls.io/github/PyCQA/astroid?branch=main + :alt: Coverage badge from coveralls.io + +.. image:: https://readthedocs.org/projects/astroid/badge/?version=latest + :target: http://astroid.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + +.. image:: https://results.pre-commit.ci/badge/github/PyCQA/astroid/main.svg + :target: https://results.pre-commit.ci/latest/github/PyCQA/astroid/main + :alt: pre-commit.ci status + +.. |tidelift_logo| image:: https://raw.githubusercontent.com/PyCQA/astroid/main/doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White.png + :width: 75 + :height: 60 + :alt: Tidelift + +.. list-table:: + :widths: 10 100 + + * - |tidelift_logo| + - Professional support for astroid is available as part of the + `Tidelift Subscription`_. Tidelift gives software development teams a single source for + purchasing and maintaining their software, with professional grade assurances + from the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-astroid?utm_source=pypi-astroid&utm_medium=referral&utm_campaign=readme + + + +What's this? +------------ + +The aim of this module is to provide a common base representation of +python source code. It is currently the library powering pylint's capabilities. + +It provides a compatible representation which comes from the `_ast` +module. It rebuilds the tree generated by the builtin _ast module by +recursively walking down the AST and building an extended ast. 
The new +node classes have additional methods and attributes for different +usages. They include some support for static inference and local name +scopes. Furthermore, astroid can also build partial trees by inspecting living +objects. + + +Installation +------------ + +Extract the tarball, jump into the created directory and run:: + + pip install . + + +If you want to do an editable installation, you can run:: + + pip install -e . + + +If you have any questions, please mail the code-quality@python.org +mailing list for support. See +http://mail.python.org/mailman/listinfo/code-quality for subscription +information and archives. + +Documentation +------------- +http://astroid.readthedocs.io/en/latest/ + + +Python Versions +--------------- + +astroid 2.0 is currently available for Python 3 only. If you want Python 2 +support, use an older version of astroid (though note that these versions +are no longer supported). + +Test +---- + +Tests are in the 'test' subdirectory. To launch the whole tests suite, you can use +either `tox` or `pytest`:: + + tox + pytest astroid diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/RECORD b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/RECORD new file mode 100644 index 0000000..8ded6ee --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/RECORD @@ -0,0 +1,101 @@ +astroid/__init__.py,sha256=PZRnmbvNyyztzoep3TNq10RnlnWgtWC3d-ntvmXettQ,5202 +astroid/__pkginfo__.py,sha256=B7AH_irTjoDqJDLb8H2GFcWt80is9hliU-R1XmszI3A,274 +astroid/_ast.py,sha256=GHCb1UhQYAdYq4FIWj4grPMEkCkGzgPtq9I-3546C1g,3713 +astroid/arguments.py,sha256=ioNM-KU0UQQw0Ji3cplijDRKMo3wKRYofr8JG071IlM,12772 +astroid/astroid_manager.py,sha256=0kHpLsw88YEdjEf11pHZV0Ru7u1sM3TkjKE64VJ6MAU,568 +astroid/bases.py,sha256=bL5597ATkiEYWvNlSLCATArt7Egd8234_ffq7YLNimc,20003 +astroid/builder.py,sha256=tRGbSQbB8B6hLNg347ylD5__1Awqf2DHgMj1tSt-y5Y,17317 +astroid/const.py,sha256=fLUo3VgP8jFaL9gw1HqHpsp1YdMfSksQXuJzvRqyghQ,868 +astroid/context.py,sha256=YPPvoNA8pHirJEyX3DHbKLDMALsNKPnNAuDlcnf4gSc,5688 +astroid/decorators.py,sha256=1opjj_aJdpKZfsVqT3_Ix6uCtVci4Lv9jvMHfP3xcfM,9503 +astroid/exceptions.py,sha256=9M4aj1kwg4kGzK4Q51eLOaYMQESsql1I4928s1oUC-c,8538 +astroid/filter_statements.py,sha256=1_xfnZHCrNyCKtwDwiVXhHXM977jXmRJ5VeXk44q7R0,9644 +astroid/helpers.py,sha256=hcAXP11C3a7J0jwOYudwCHTgajq7RcRAPTHmKpcqzSw,10276 +astroid/inference.py,sha256=jaiatB037N2KlCjcY62psVdY_LuLz1OlM9esZs9yawA,36351 +astroid/inference_tip.py,sha256=6fChETD0XKh7khW6DDXEgnnsShlHIAhRjYIWbxl8juY,2969 +astroid/manager.py,sha256=ekgbq49QWWPk4p-2lladUnHhjwv-xcnamx0hspHzWME,13558 +astroid/mixins.py,sha256=uLpgScOzCa1pIHk3D3aDQqT2gFEWvJA63ZJemClmI7s,5510 +astroid/modutils.py,sha256=psXAsy-YDA2k_8imNaN_wylQuc8mglgdgIYMEyhpu08,20773 +astroid/node_classes.py,sha256=OnYVG9E21mFM7R-xXO_Bn8Bn1UFYwM4GSwLDg8JwwLM,1802 +astroid/objects.py,sha256=G-MjRjqi8w-fmC1WmHvpC8tiBdhG7FpPrSV2fBaSFgc,11769 +astroid/protocols.py,sha256=ezS37nv-11NHq4Kn1nvRoWIu-GJbbwctvrxL0AJp6fE,30095 +astroid/raw_building.py,sha256=0PXzpGytJaQPqNA3T0V3K-FrOniCRHkGuxFImFf4TLc,18027 +astroid/rebuilder.py,sha256=MPgqHUf8p-B5aWr0P8L407_zBdXPJg0F8j2s3Q55mj4,79052 +astroid/scoped_nodes.py,sha256=rELeS8rKUxNcJZxHlKERD3im6HGKae4wbcx51VCiZmI,933 +astroid/test_utils.py,sha256=tRsndWUiEwr29S7Bf6Ej5x-Ooph2byfCTEf-pheLhjM,2397 +astroid/transforms.py,sha256=Gnm2oXDE7SrSNsh6Ks6e3ycqSWWNlNljhXTxkZvW4wg,3264 +astroid/typing.py,sha256=hEiny9JQuETF5eAyXrYE7wG8c_DO_4mc39027WRXrqM,737 
+astroid/util.py,sha256=F5bbf33-dgaEaeSbkdNYnRBJSGJ39eWJFrZOT7q2EPk,4256 +astroid/brain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astroid/brain/brain_argparse.py,sha256=WdKRrkd4--tdZtJzNI1nJAmA_-gdkz8KNguPUTe6jis,1441 +astroid/brain/brain_attrs.py,sha256=dwMHH4ou5jNq33hlXVW3e9QYB-BC_os41RgG2QNyMR4,2853 +astroid/brain/brain_boto3.py,sha256=-Ntjxs3fneXCwYWBMxjeKS6gKRYf609nNAdf_Fv5LQs,997 +astroid/brain/brain_builtin_inference.py,sha256=DLYc0S72TN7nEft1tunET5psSk_W5kThtop2QGnYWro,30630 +astroid/brain/brain_collections.py,sha256=3xGVDVZIG5RTydq-lda7GWViXqN506L1wDb6eKvIUIE,4307 +astroid/brain/brain_crypt.py,sha256=q-4C5kA8qEwk822Fi4KkbyVz5zq8UG2DUAKGFyXkpVI,1044 +astroid/brain/brain_ctypes.py,sha256=Zl6XWUMx852uwIGi8EeLywmhDOqzpRnWaPjD5hVshjA,2655 +astroid/brain/brain_curses.py,sha256=XXyCeLASFFksO2zSbrZgB_AImGBlIGZ7Pgjdm04x-jg,3477 +astroid/brain/brain_dataclasses.py,sha256=8PI0XKyeJO0sE9RMQUAHxSKgM_Ss4cm1A29AH8b_NaE,14992 +astroid/brain/brain_dateutil.py,sha256=ajM8Xkn6WrrPGQBleEM_XIb0SnAozIfaHpDbG8e5vyo,766 +astroid/brain/brain_fstrings.py,sha256=fxeEYe37aEHg5zTuAR3J1QP5fUNJLY-0ATB7jAxVof4,2175 +astroid/brain/brain_functools.py,sha256=I6SDzNjBGFqB33nAA9icvnL0hCa4CJhcI46CWESBQKw,5842 +astroid/brain/brain_gi.py,sha256=q_gfHJKL9IHGk0TnC7kvDISSVNMkCl-mn7ed1t-U1H8,7540 +astroid/brain/brain_hashlib.py,sha256=0wd-TK0BsMiAwVhrfZAm6_ACxRJYxix0WHw4ItZgnfo,2189 +astroid/brain/brain_http.py,sha256=OgqOSkOqctysaPycz9SOmF3NxpJ4OPg5nZ-PqYwUgow,10602 +astroid/brain/brain_hypothesis.py,sha256=ZypkmriFkll04Hkx_4wZH6xe1myMqjtmcOcr70kldvw,1725 +astroid/brain/brain_io.py,sha256=Rh84qonOdMIS_Z3zEFGvmyObPs4vjO3WzYV5V7LYTEM,1517 +astroid/brain/brain_mechanize.py,sha256=EyBHLek513PHyCE7NDW-dlkJ8gdJbjxwLMtKMbE0YGA,2530 +astroid/brain/brain_multiprocessing.py,sha256=5LlHw5efq01VUV-vUWpH8u7vHhk3SPp1tf4N-BZSmzU,3181 +astroid/brain/brain_namedtuple_enum.py,sha256=hzj09nO4N61wy-5TrYtzeL08aGZRSzdYwVOOnigNGYM,20516 +astroid/brain/brain_nose.py,sha256=DCUwKzR14Se5tF-s54AMwFoRvhsHH_nf6agrh966VpQ,2320 +astroid/brain/brain_numpy_core_fromnumeric.py,sha256=3UFRtK5U51AVQNxawf9ElhFv2tizhj5RfyCSnPT3u54,732 +astroid/brain/brain_numpy_core_function_base.py,sha256=_k-WRQ_x0QEHhvr9IwuTLnpGoCYOVO0GAH7e3DyoHNQ,1298 +astroid/brain/brain_numpy_core_multiarray.py,sha256=YPY78fKkr0pFimDu9hW1o9kytivAeylNsgbVvXs7xV8,4210 +astroid/brain/brain_numpy_core_numeric.py,sha256=VAc51Ud8QDujsh29e-RQK5SBcKW_jzjZIFmntCslCKU,1629 +astroid/brain/brain_numpy_core_numerictypes.py,sha256=HVJ_paQwjh2V8qz0Juei6b2ZRm_dmDJodiDKfAcE-mE,8546 +astroid/brain/brain_numpy_core_umath.py,sha256=2EhXmtiw8wQcdUtbnlLLTjHAOHpQ1c9BXHdc6kHJGbA,4893 +astroid/brain/brain_numpy_ma.py,sha256=Wb4CZAo6WzprymYTxWrF_IlhbKPzcJ_cmw5npSeq1mo,805 +astroid/brain/brain_numpy_ndarray.py,sha256=dAH8wFR7sjQF3pcUINlp6WkBQy4tSBTU_m6Emb0E9mE,8882 +astroid/brain/brain_numpy_random_mtrand.py,sha256=Wrpm9SX8kH0Xsk9b3qpR0rhs-MEb0I5aN4E-66wkLkM,3436 +astroid/brain/brain_numpy_utils.py,sha256=BOH-BYMYUrTLAsBf8VFumKDwcH_82AsCjtOqZbnqMWU,2543 +astroid/brain/brain_pkg_resources.py,sha256=W3Q4G6mYTYtJY9iWQv3vEO8_lpmLFAin9aQLtBRLxAc,2200 +astroid/brain/brain_pytest.py,sha256=suTDBHCJ_p4F7TGXjwVJAFtiAP-VlkFE2gncXjLRQKM,2223 +astroid/brain/brain_qt.py,sha256=dE30a8e7aeQlMss5Nn1OobZpdhn-nzn3ylOllbSlMxI,2505 +astroid/brain/brain_random.py,sha256=CqjCq2IIVspVcMGk43cn_RB7y5iHLaMjCP4mCWf-3tE,2692 +astroid/brain/brain_re.py,sha256=NzMzyUvRTMI3LqMFqMFEWAdzIxGcKSVjZnP55s5cDXk,2669 +astroid/brain/brain_responses.py,sha256=qI7BWS_fWS5c_PcMnLgqABJRlwn8j9IRlq-XOdKnAWs,1869 
+astroid/brain/brain_scipy_signal.py,sha256=Mi93D6-j94tqhqDHCbabVs3_eDAhti88CL1_rOvnG_I,2276 +astroid/brain/brain_signal.py,sha256=MJCkmWmUTVEqVKwQfE8FzVh-x0XLzJpBdVjjeJynQbg,3859 +astroid/brain/brain_six.py,sha256=JNtmuokvVod0-mwSb4yU0Zhm_Qts4daSp__yvU3RnFM,7569 +astroid/brain/brain_sqlalchemy.py,sha256=onKAxgKY-GYLe2v6PVK3APBn_z8txokEZfDcXt33GuE,1009 +astroid/brain/brain_ssl.py,sha256=gUTdzoUVlA8ehtwCjyJ3o6omG1PqoBPs2cXDDDmGI3M,3578 +astroid/brain/brain_subprocess.py,sha256=mBC5ulUUt5JcPiZu1ZAeGDn_Y3vAgqMOrsz9HGwVAWo,3768 +astroid/brain/brain_threading.py,sha256=N7F9UUe9lsDYtySrC1pKn_etxYmJ13tWzfom5q7l54s,855 +astroid/brain/brain_type.py,sha256=4mQWbJBqwviXx2Rdul_3Gu4FVTXxMGD4vHxKHm43CjQ,2472 +astroid/brain/brain_typing.py,sha256=87RT9F5EJYxyj2-__L6IgxEUBsGszrKBWNsi3Mhaxyo,14380 +astroid/brain/brain_unittest.py,sha256=O8acdDUPDb21ytB8EJ-3UCU6cIZ1pegs-EiPvKzEutw,1136 +astroid/brain/brain_uuid.py,sha256=fztFKPbRbrZzbvrzp5LCvYU4cE_WLmfVo0ws_Uc9FSw,649 +astroid/brain/helpers.py,sha256=OQLzWcxOg1K8eOxXHKOKUxF1HkpNyZQCjWyEvqnSHdk,718 +astroid/interpreter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astroid/interpreter/dunder_lookup.py,sha256=cDcopHXRrJRxPZ19698HwO5p5gIIdc4posLG3VHF6Bo,2139 +astroid/interpreter/objectmodel.py,sha256=TP7RewfIrYGyFrIHuJC_ig9KgS6teMa5di5gDsIZjqc,27766 +astroid/interpreter/_import/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astroid/interpreter/_import/spec.py,sha256=WGAlH5e5xNaRgx22usKO13z3eD5wRD2kDMwgGWOBfvo,13329 +astroid/interpreter/_import/util.py,sha256=Rl7gWekrSdm3Nk3F2oVFmutA4HlSepHA5PAqEg44eOw,529 +astroid/nodes/__init__.py,sha256=8fyupLEs0pKqbWu_3iMQcOlC7QQYHmiLv7P52eT2fqE,4836 +astroid/nodes/as_string.py,sha256=_JYz5qTcbGqZUZ40sc_rllJzp7S4n_6kqN9XZYuspC8,23797 +astroid/nodes/const.py,sha256=iyQn_v-wEzK6uXc-dEMoliPVSRy0NZ8T3XmvvPXLuP4,797 +astroid/nodes/node_classes.py,sha256=N2gWXwBO8m8KWYINaoBsGKEr5mnbzoMUOvGdxMouePo,164847 +astroid/nodes/node_ng.py,sha256=CcKOGv6eu_hHMbM71ZU1Hm1yeIEiIMeTEqDZOtQL-YI,28103 +astroid/nodes/utils.py,sha256=Uy6xoownLyWwDtuZ5qUpjbq19P2pUIcj0J04khbH5HY,423 +astroid/nodes/scoped_nodes/__init__.py,sha256=S8pX4zg73KzuCIawKmdgj-BuvXAh29atqUm23z0zI6U,1218 +astroid/nodes/scoped_nodes/mixin.py,sha256=9_dyepx-vfwvhyxLd5S6nSnbdRvTPfYhWdwztdhHYfM,5819 +astroid/nodes/scoped_nodes/scoped_nodes.py,sha256=Oh9dJoTuweiSrp4umO048gedlLs1axaFc8kdlpNu_2Y,105931 +astroid/nodes/scoped_nodes/utils.py,sha256=k7XSCSLC-S5L-puGQM4mv0e4N5-qMfo7UhDO3dswhcQ,1112 +astroid-2.11.7.dist-info/CONTRIBUTORS.txt,sha256=2I_w7LagIB19_12quxx9uYsoY0OoPPtZ9fYzVxSK_io,6858 +astroid-2.11.7.dist-info/LICENSE,sha256=_qFr2p5zTeoNnI2fW5CYeO9BcWJjVDKWCf_889tCyyQ,26516 +astroid-2.11.7.dist-info/METADATA,sha256=MBjTQH1_yiEYoFgYWhTQb9_3Vu75UMQqnrPirG_rO-Q,4681 +astroid-2.11.7.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +astroid-2.11.7.dist-info/top_level.txt,sha256=HsdW4O2x7ZXRj6k-agi3RaQybGLobI3VSE-jt4vQUXM,8 +astroid-2.11.7.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +astroid-2.11.7.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/top_level.txt 
b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/top_level.txt new file mode 100644 index 0000000..450d4fe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid-2.11.7.dist-info/top_level.txt @@ -0,0 +1 @@ +astroid diff --git a/myenv/lib/python3.9/site-packages/astroid/__init__.py b/myenv/lib/python3.9/site-packages/astroid/__init__.py new file mode 100644 index 0000000..14a61c2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/__init__.py @@ -0,0 +1,200 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Python Abstract Syntax Tree New Generation + +The aim of this module is to provide a common base representation of +python source code for projects such as pychecker, pyreverse, +pylint... Well, actually the development of this library is essentially +governed by pylint's needs. + +It mimics the class defined in the python's _ast module with some +additional methods and attributes. New nodes instances are not fully +compatible with python's _ast. + +Instance attributes are added by a +builder object, which can either generate extended ast (let's call +them astroid ;) by visiting an existent ast tree or by inspecting living +object. Methods are added by monkey patching ast classes. + +Main modules are: + +* nodes and scoped_nodes for more information about methods and + attributes added to different node classes + +* the manager contains a high level object to get astroid trees from + source files and living objects. It maintains a cache of previously + constructed tree for quick access + +* builder contains the class responsible to build astroid trees +""" + +import functools +import tokenize +from importlib import import_module +from pathlib import Path + +# isort: off +# We have an isort: off on '__version__' because the packaging need to access +# the version before the dependencies are installed (in particular 'wrapt' +# that is imported in astroid.inference) +from astroid.__pkginfo__ import __version__, version +from astroid.nodes import node_classes, scoped_nodes + +# isort: on + +from astroid import inference, raw_building +from astroid.astroid_manager import MANAGER +from astroid.bases import BaseInstance, BoundMethod, Instance, UnboundMethod +from astroid.brain.helpers import register_module_extender +from astroid.builder import extract_node, parse +from astroid.const import PY310_PLUS, Context, Del, Load, Store +from astroid.exceptions import ( + AstroidBuildingError, + AstroidBuildingException, + AstroidError, + AstroidImportError, + AstroidIndexError, + AstroidSyntaxError, + AstroidTypeError, + AstroidValueError, + AttributeInferenceError, + BinaryOperationError, + DuplicateBasesError, + InconsistentMroError, + InferenceError, + InferenceOverwriteError, + MroError, + NameInferenceError, + NoDefault, + NotFoundError, + OperationError, + ParentMissingError, + ResolveError, + StatementMissing, + SuperArgumentTypeError, + SuperError, + TooManyLevelsError, + UnaryOperationError, + UnresolvableName, + UseInferenceDefault, +) +from astroid.inference_tip import _inference_tip_cached, inference_tip +from astroid.objects import ExceptionInstance + +# isort: off +# It's impossible to import from astroid.nodes with a wildcard, because +# there is a cyclic import that prevent creating an __all__ in astroid/nodes +# and we need astroid/scoped_nodes and astroid/node_classes 
to work. So +# importing with a wildcard would clash with astroid/nodes/scoped_nodes +# and astroid/nodes/node_classes. +from astroid.nodes import ( # pylint: disable=redefined-builtin (Ellipsis) + CONST_CLS, + AnnAssign, + Arguments, + Assert, + Assign, + AssignAttr, + AssignName, + AsyncFor, + AsyncFunctionDef, + AsyncWith, + Attribute, + AugAssign, + Await, + BinOp, + BoolOp, + Break, + Call, + ClassDef, + Compare, + Comprehension, + ComprehensionScope, + Const, + Continue, + Decorators, + DelAttr, + Delete, + DelName, + Dict, + DictComp, + DictUnpack, + Ellipsis, + EmptyNode, + EvaluatedObject, + ExceptHandler, + Expr, + ExtSlice, + For, + FormattedValue, + FunctionDef, + GeneratorExp, + Global, + If, + IfExp, + Import, + ImportFrom, + Index, + JoinedStr, + Keyword, + Lambda, + List, + ListComp, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchMapping, + MatchOr, + MatchSequence, + MatchSingleton, + MatchStar, + MatchValue, + Module, + Name, + NamedExpr, + NodeNG, + Nonlocal, + Pass, + Raise, + Return, + Set, + SetComp, + Slice, + Starred, + Subscript, + TryExcept, + TryFinally, + Tuple, + UnaryOp, + Unknown, + While, + With, + Yield, + YieldFrom, + are_exclusive, + builtin_lookup, + unpack_infer, + function_to_method, +) + +# isort: on + +from astroid.util import Uninferable + +# Performance hack for tokenize. See https://bugs.python.org/issue43014 +# Adapted from https://github.com/PyCQA/pycodestyle/pull/993 +if ( + not PY310_PLUS + and callable(getattr(tokenize, "_compile", None)) + and getattr(tokenize._compile, "__wrapped__", None) is None # type: ignore[attr-defined] +): + tokenize._compile = functools.lru_cache()(tokenize._compile) # type: ignore[attr-defined] + +# load brain plugins +ASTROID_INSTALL_DIRECTORY = Path(__file__).parent +BRAIN_MODULES_DIRECTORY = ASTROID_INSTALL_DIRECTORY / "brain" +for module in BRAIN_MODULES_DIRECTORY.iterdir(): + if module.suffix == ".py": + import_module(f"astroid.brain.{module.stem}") diff --git a/myenv/lib/python3.9/site-packages/astroid/__pkginfo__.py b/myenv/lib/python3.9/site-packages/astroid/__pkginfo__.py new file mode 100644 index 0000000..459314b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/__pkginfo__.py @@ -0,0 +1,6 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +__version__ = "2.11.7" +version = __version__ diff --git a/myenv/lib/python3.9/site-packages/astroid/_ast.py b/myenv/lib/python3.9/site-packages/astroid/_ast.py new file mode 100644 index 0000000..1c4da43 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/_ast.py @@ -0,0 +1,130 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import ast +import sys +import types +from collections import namedtuple +from functools import partial +from typing import Dict, Optional + +from astroid.const import PY38_PLUS, Context + +if sys.version_info >= (3, 8): + # On Python 3.8, typed_ast was merged back into `ast` + _ast_py3: Optional[types.ModuleType] = ast +else: + try: + import typed_ast.ast3 as _ast_py3 + except ImportError: + _ast_py3 = None + +FunctionType = namedtuple("FunctionType", ["argtypes", "returns"]) + + +class ParserModule( + namedtuple( + "ParserModule", + [ + "module", + 
"unary_op_classes", + "cmp_op_classes", + "bool_op_classes", + "bin_op_classes", + "context_classes", + ], + ) +): + def parse(self, string: str, type_comments=True): + if self.module is _ast_py3: + if PY38_PLUS: + parse_func = partial(self.module.parse, type_comments=type_comments) + else: + parse_func = partial( + self.module.parse, feature_version=sys.version_info.minor + ) + else: + parse_func = self.module.parse + return parse_func(string) + + +def parse_function_type_comment(type_comment: str) -> Optional[FunctionType]: + """Given a correct type comment, obtain a FunctionType object""" + if _ast_py3 is None: + return None + + func_type = _ast_py3.parse(type_comment, "", "func_type") # type: ignore[attr-defined] + return FunctionType(argtypes=func_type.argtypes, returns=func_type.returns) + + +def get_parser_module(type_comments=True) -> ParserModule: + parser_module = ast + if type_comments and _ast_py3: + parser_module = _ast_py3 + + unary_op_classes = _unary_operators_from_module(parser_module) + cmp_op_classes = _compare_operators_from_module(parser_module) + bool_op_classes = _bool_operators_from_module(parser_module) + bin_op_classes = _binary_operators_from_module(parser_module) + context_classes = _contexts_from_module(parser_module) + + return ParserModule( + parser_module, + unary_op_classes, + cmp_op_classes, + bool_op_classes, + bin_op_classes, + context_classes, + ) + + +def _unary_operators_from_module(module): + return {module.UAdd: "+", module.USub: "-", module.Not: "not", module.Invert: "~"} + + +def _binary_operators_from_module(module): + binary_operators = { + module.Add: "+", + module.BitAnd: "&", + module.BitOr: "|", + module.BitXor: "^", + module.Div: "/", + module.FloorDiv: "//", + module.MatMult: "@", + module.Mod: "%", + module.Mult: "*", + module.Pow: "**", + module.Sub: "-", + module.LShift: "<<", + module.RShift: ">>", + } + return binary_operators + + +def _bool_operators_from_module(module): + return {module.And: "and", module.Or: "or"} + + +def _compare_operators_from_module(module): + return { + module.Eq: "==", + module.Gt: ">", + module.GtE: ">=", + module.In: "in", + module.Is: "is", + module.IsNot: "is not", + module.Lt: "<", + module.LtE: "<=", + module.NotEq: "!=", + module.NotIn: "not in", + } + + +def _contexts_from_module(module) -> Dict[ast.expr_context, Context]: + return { + module.Load: Context.Load, + module.Store: Context.Store, + module.Del: Context.Del, + module.Param: Context.Store, + } diff --git a/myenv/lib/python3.9/site-packages/astroid/arguments.py b/myenv/lib/python3.9/site-packages/astroid/arguments.py new file mode 100644 index 0000000..40061d0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/arguments.py @@ -0,0 +1,306 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from typing import Optional, Set + +from astroid import nodes +from astroid.bases import Instance +from astroid.context import CallContext, InferenceContext +from astroid.exceptions import InferenceError, NoDefault +from astroid.util import Uninferable + + +class CallSite: + """Class for understanding arguments passed into a call site + + It needs a call context, which contains the arguments and the + keyword arguments that were passed into a given call site. 
+ In order to infer what an argument represents, call :meth:`infer_argument` + with the corresponding function node and the argument name. + + :param callcontext: + An instance of :class:`astroid.context.CallContext`, that holds + the arguments for the call site. + :param argument_context_map: + Additional contexts per node, passed in from :attr:`astroid.context.Context.extra_context` + :param context: + An instance of :class:`astroid.context.Context`. + """ + + def __init__( + self, callcontext: CallContext, argument_context_map=None, context=None + ): + if argument_context_map is None: + argument_context_map = {} + self.argument_context_map = argument_context_map + args = callcontext.args + keywords = callcontext.keywords + self.duplicated_keywords: Set[str] = set() + self._unpacked_args = self._unpack_args(args, context=context) + self._unpacked_kwargs = self._unpack_keywords(keywords, context=context) + + self.positional_arguments = [ + arg for arg in self._unpacked_args if arg is not Uninferable + ] + self.keyword_arguments = { + key: value + for key, value in self._unpacked_kwargs.items() + if value is not Uninferable + } + + @classmethod + def from_call(cls, call_node, context: Optional[InferenceContext] = None): + """Get a CallSite object from the given Call node. + + context will be used to force a single inference path. + """ + + # Determine the callcontext from the given `context` object if any. + context = context or InferenceContext() + callcontext = CallContext(call_node.args, call_node.keywords) + return cls(callcontext, context=context) + + def has_invalid_arguments(self): + """Check if in the current CallSite were passed *invalid* arguments + + This can mean multiple things. For instance, if an unpacking + of an invalid object was passed, then this method will return True. + Other cases can be when the arguments can't be inferred by astroid, + for example, by passing objects which aren't known statically. + """ + return len(self.positional_arguments) != len(self._unpacked_args) + + def has_invalid_keywords(self): + """Check if in the current CallSite were passed *invalid* keyword arguments + + For instance, unpacking a dictionary with integer keys is invalid + (**{1:2}), because the keys must be strings, which will make this + method to return True. Other cases where this might return True if + objects which can't be inferred were passed. + """ + return len(self.keyword_arguments) != len(self._unpacked_kwargs) + + def _unpack_keywords(self, keywords, context=None): + values = {} + context = context or InferenceContext() + context.extra_context = self.argument_context_map + for name, value in keywords: + if name is None: + # Then it's an unpacking operation (**) + try: + inferred = next(value.infer(context=context)) + except InferenceError: + values[name] = Uninferable + continue + except StopIteration: + continue + + if not isinstance(inferred, nodes.Dict): + # Not something we can work with. 
+ values[name] = Uninferable + continue + + for dict_key, dict_value in inferred.items: + try: + dict_key = next(dict_key.infer(context=context)) + except InferenceError: + values[name] = Uninferable + continue + except StopIteration: + continue + if not isinstance(dict_key, nodes.Const): + values[name] = Uninferable + continue + if not isinstance(dict_key.value, str): + values[name] = Uninferable + continue + if dict_key.value in values: + # The name is already in the dictionary + values[dict_key.value] = Uninferable + self.duplicated_keywords.add(dict_key.value) + continue + values[dict_key.value] = dict_value + else: + values[name] = value + return values + + def _unpack_args(self, args, context=None): + values = [] + context = context or InferenceContext() + context.extra_context = self.argument_context_map + for arg in args: + if isinstance(arg, nodes.Starred): + try: + inferred = next(arg.value.infer(context=context)) + except InferenceError: + values.append(Uninferable) + continue + except StopIteration: + continue + + if inferred is Uninferable: + values.append(Uninferable) + continue + if not hasattr(inferred, "elts"): + values.append(Uninferable) + continue + values.extend(inferred.elts) + else: + values.append(arg) + return values + + def infer_argument(self, funcnode, name, context): + """infer a function argument value according to the call context + + Arguments: + funcnode: The function being called. + name: The name of the argument whose value is being inferred. + context: Inference context object + """ + if name in self.duplicated_keywords: + raise InferenceError( + "The arguments passed to {func!r} " " have duplicate keywords.", + call_site=self, + func=funcnode, + arg=name, + context=context, + ) + + # Look into the keywords first, maybe it's already there. + try: + return self.keyword_arguments[name].infer(context) + except KeyError: + pass + + # Too many arguments given and no variable arguments. + if len(self.positional_arguments) > len(funcnode.args.args): + if not funcnode.args.vararg and not funcnode.args.posonlyargs: + raise InferenceError( + "Too many positional arguments " + "passed to {func!r} that does " + "not have *args.", + call_site=self, + func=funcnode, + arg=name, + context=context, + ) + + positional = self.positional_arguments[: len(funcnode.args.args)] + vararg = self.positional_arguments[len(funcnode.args.args) :] + argindex = funcnode.args.find_argname(name)[0] + kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs} + kwargs = { + key: value + for key, value in self.keyword_arguments.items() + if key not in kwonlyargs + } + # If there are too few positionals compared to + # what the function expects to receive, check to see + # if the missing positional arguments were passed + # as keyword arguments and if so, place them into the + # positional args list. + if len(positional) < len(funcnode.args.args): + for func_arg in funcnode.args.args: + if func_arg.name in kwargs: + arg = kwargs.pop(func_arg.name) + positional.append(arg) + + if argindex is not None: + boundnode = getattr(context, "boundnode", None) + # 2. first argument of instance/class method + if argindex == 0 and funcnode.type in {"method", "classmethod"}: + # context.boundnode is None when an instance method is called with + # the class, e.g. MyClass.method(obj, ...). In this case, self + # is the first argument. + if boundnode is None and funcnode.type == "method" and positional: + return positional[0].infer(context=context) + if boundnode is None: + # XXX can do better ? 
+ boundnode = funcnode.parent.frame(future=True) + + if isinstance(boundnode, nodes.ClassDef): + # Verify that we're accessing a method + # of the metaclass through a class, as in + # `cls.metaclass_method`. In this case, the + # first argument is always the class. + method_scope = funcnode.parent.scope() + if method_scope is boundnode.metaclass(): + return iter((boundnode,)) + + if funcnode.type == "method": + if not isinstance(boundnode, Instance): + boundnode = boundnode.instantiate_class() + return iter((boundnode,)) + if funcnode.type == "classmethod": + return iter((boundnode,)) + # if we have a method, extract one position + # from the index, so we'll take in account + # the extra parameter represented by `self` or `cls` + if funcnode.type in {"method", "classmethod"} and boundnode: + argindex -= 1 + # 2. search arg index + try: + return self.positional_arguments[argindex].infer(context) + except IndexError: + pass + + if funcnode.args.kwarg == name: + # It wants all the keywords that were passed into + # the call site. + if self.has_invalid_keywords(): + raise InferenceError( + "Inference failed to find values for all keyword arguments " + "to {func!r}: {unpacked_kwargs!r} doesn't correspond to " + "{keyword_arguments!r}.", + keyword_arguments=self.keyword_arguments, + unpacked_kwargs=self._unpacked_kwargs, + call_site=self, + func=funcnode, + arg=name, + context=context, + ) + kwarg = nodes.Dict( + lineno=funcnode.args.lineno, + col_offset=funcnode.args.col_offset, + parent=funcnode.args, + ) + kwarg.postinit( + [(nodes.const_factory(key), value) for key, value in kwargs.items()] + ) + return iter((kwarg,)) + if funcnode.args.vararg == name: + # It wants all the args that were passed into + # the call site. + if self.has_invalid_arguments(): + raise InferenceError( + "Inference failed to find values for all positional " + "arguments to {func!r}: {unpacked_args!r} doesn't " + "correspond to {positional_arguments!r}.", + positional_arguments=self.positional_arguments, + unpacked_args=self._unpacked_args, + call_site=self, + func=funcnode, + arg=name, + context=context, + ) + args = nodes.Tuple( + lineno=funcnode.args.lineno, + col_offset=funcnode.args.col_offset, + parent=funcnode.args, + ) + args.postinit(vararg) + return iter((args,)) + + # Check if it's a default parameter. + try: + return funcnode.args.default_value(name).infer(context) + except NoDefault: + pass + raise InferenceError( + "No value found for argument {arg} to {func!r}", + call_site=self, + func=funcnode, + arg=name, + context=context, + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/astroid_manager.py b/myenv/lib/python3.9/site-packages/astroid/astroid_manager.py new file mode 100644 index 0000000..da51de7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/astroid_manager.py @@ -0,0 +1,15 @@ +""" +This file contain the global astroid MANAGER, to prevent circular import that happened +when the only possibility to import it was from astroid.__init__.py. + +This AstroidManager is a singleton/borg so it's possible to instantiate an +AstroidManager() directly. 
+""" + +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.manager import AstroidManager + +MANAGER = AstroidManager() diff --git a/myenv/lib/python3.9/site-packages/astroid/bases.py b/myenv/lib/python3.9/site-packages/astroid/bases.py new file mode 100644 index 0000000..5adaf51 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/bases.py @@ -0,0 +1,577 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""This module contains base classes and functions for the nodes and some +inference utils. +""" + +import collections + +from astroid import decorators +from astroid.const import PY310_PLUS +from astroid.context import ( + CallContext, + InferenceContext, + bind_context_to_node, + copy_context, +) +from astroid.exceptions import ( + AstroidTypeError, + AttributeInferenceError, + InferenceError, + NameInferenceError, +) +from astroid.util import Uninferable, lazy_descriptor, lazy_import + +objectmodel = lazy_import("interpreter.objectmodel") +helpers = lazy_import("helpers") +manager = lazy_import("manager") + + +# TODO: check if needs special treatment +BOOL_SPECIAL_METHOD = "__bool__" +BUILTINS = "builtins" # TODO Remove in 2.8 + +PROPERTIES = {"builtins.property", "abc.abstractproperty"} +if PY310_PLUS: + PROPERTIES.add("enum.property") + +# List of possible property names. We use this list in order +# to see if a method is a property or not. This should be +# pretty reliable and fast, the alternative being to check each +# decorator to see if its a real property-like descriptor, which +# can be too complicated. +# Also, these aren't qualified, because each project can +# define them, we shouldn't expect to know every possible +# property-like decorator! +POSSIBLE_PROPERTIES = { + "cached_property", + "cachedproperty", + "lazyproperty", + "lazy_property", + "reify", + "lazyattribute", + "lazy_attribute", + "LazyProperty", + "lazy", + "cache_readonly", + "DynamicClassAttribute", +} + + +def _is_property(meth, context=None): + decoratornames = meth.decoratornames(context=context) + if PROPERTIES.intersection(decoratornames): + return True + stripped = { + name.split(".")[-1] for name in decoratornames if name is not Uninferable + } + if any(name in stripped for name in POSSIBLE_PROPERTIES): + return True + + # Lookup for subclasses of *property* + if not meth.decorators: + return False + for decorator in meth.decorators.nodes or (): + inferred = helpers.safe_infer(decorator, context=context) + if inferred is None or inferred is Uninferable: + continue + if inferred.__class__.__name__ == "ClassDef": + for base_class in inferred.bases: + if base_class.__class__.__name__ != "Name": + continue + module, _ = base_class.lookup(base_class.name) + if module.name == "builtins" and base_class.name == "property": + return True + + return False + + +class Proxy: + """a simple proxy object + + Note: + + Subclasses of this object will need a custom __getattr__ + if new instance attributes are created. 
See the Const class + """ + + _proxied = None # proxied object may be set by class or by instance + + def __init__(self, proxied=None): + if proxied is not None: + self._proxied = proxied + + def __getattr__(self, name): + if name == "_proxied": + return self.__class__._proxied + if name in self.__dict__: + return self.__dict__[name] + return getattr(self._proxied, name) + + def infer(self, context=None): + yield self + + +def _infer_stmts(stmts, context, frame=None): + """Return an iterator on statements inferred by each statement in *stmts*.""" + inferred = False + if context is not None: + name = context.lookupname + context = context.clone() + else: + name = None + context = InferenceContext() + + for stmt in stmts: + if stmt is Uninferable: + yield stmt + inferred = True + continue + context.lookupname = stmt._infer_name(frame, name) + try: + for inf in stmt.infer(context=context): + yield inf + inferred = True + except NameInferenceError: + continue + except InferenceError: + yield Uninferable + inferred = True + if not inferred: + raise InferenceError( + "Inference failed for all members of {stmts!r}.", + stmts=stmts, + frame=frame, + context=context, + ) + + +def _infer_method_result_truth(instance, method_name, context): + # Get the method from the instance and try to infer + # its return's truth value. + meth = next(instance.igetattr(method_name, context=context), None) + if meth and hasattr(meth, "infer_call_result"): + if not meth.callable(): + return Uninferable + try: + context.callcontext = CallContext(args=[], callee=meth) + for value in meth.infer_call_result(instance, context=context): + if value is Uninferable: + return value + try: + inferred = next(value.infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + return inferred.bool_value() + except InferenceError: + pass + return Uninferable + + +class BaseInstance(Proxy): + """An instance base class, which provides lookup methods for potential instances.""" + + special_attributes = None + + def display_type(self): + return "Instance of" + + def getattr(self, name, context=None, lookupclass=True): + try: + values = self._proxied.instance_attr(name, context) + except AttributeInferenceError as exc: + if self.special_attributes and name in self.special_attributes: + return [self.special_attributes.lookup(name)] + + if lookupclass: + # Class attributes not available through the instance + # unless they are explicitly defined. + return self._proxied.getattr(name, context, class_context=False) + + raise AttributeInferenceError( + target=self, attribute=name, context=context + ) from exc + # since we've no context information, return matching class members as + # well + if lookupclass: + try: + return values + self._proxied.getattr( + name, context, class_context=False + ) + except AttributeInferenceError: + pass + return values + + def igetattr(self, name, context=None): + """inferred getattr""" + if not context: + context = InferenceContext() + try: + context.lookupname = name + # avoid recursively inferring the same attr on the same class + if context.push(self._proxied): + raise InferenceError( + message="Cannot infer the same attribute again", + node=self, + context=context, + ) + + # XXX frame should be self._proxied, or not ? 
+ get_attr = self.getattr(name, context, lookupclass=False) + yield from _infer_stmts( + self._wrap_attr(get_attr, context), context, frame=self + ) + except AttributeInferenceError: + try: + # fallback to class.igetattr since it has some logic to handle + # descriptors + # But only if the _proxied is the Class. + if self._proxied.__class__.__name__ != "ClassDef": + raise + attrs = self._proxied.igetattr(name, context, class_context=False) + yield from self._wrap_attr(attrs, context) + except AttributeInferenceError as error: + raise InferenceError(**vars(error)) from error + + def _wrap_attr(self, attrs, context=None): + """wrap bound methods of attrs in a InstanceMethod proxies""" + for attr in attrs: + if isinstance(attr, UnboundMethod): + if _is_property(attr): + yield from attr.infer_call_result(self, context) + else: + yield BoundMethod(attr, self) + elif hasattr(attr, "name") and attr.name == "": + if attr.args.arguments and attr.args.arguments[0].name == "self": + yield BoundMethod(attr, self) + continue + yield attr + else: + yield attr + + def infer_call_result(self, caller, context=None): + """infer what a class instance is returning when called""" + context = bind_context_to_node(context, self) + inferred = False + for node in self._proxied.igetattr("__call__", context): + if node is Uninferable or not node.callable(): + continue + for res in node.infer_call_result(caller, context): + inferred = True + yield res + if not inferred: + raise InferenceError(node=self, caller=caller, context=context) + + +class Instance(BaseInstance): + """A special node representing a class instance.""" + + # pylint: disable=unnecessary-lambda + special_attributes = lazy_descriptor(lambda: objectmodel.InstanceModel()) + + def __repr__(self): + return "".format( + self._proxied.root().name, self._proxied.name, id(self) + ) + + def __str__(self): + return f"Instance of {self._proxied.root().name}.{self._proxied.name}" + + def callable(self): + try: + self._proxied.getattr("__call__", class_context=False) + return True + except AttributeInferenceError: + return False + + def pytype(self): + return self._proxied.qname() + + def display_type(self): + return "Instance of" + + def bool_value(self, context=None): + """Infer the truth value for an Instance + + The truth value of an instance is determined by these conditions: + + * if it implements __bool__ on Python 3 or __nonzero__ + on Python 2, then its bool value will be determined by + calling this special method and checking its result. + * when this method is not defined, __len__() is called, if it + is defined, and the object is considered true if its result is + nonzero. If a class defines neither __len__() nor __bool__(), + all its instances are considered true. + """ + context = context or InferenceContext() + context.boundnode = self + + try: + result = _infer_method_result_truth(self, BOOL_SPECIAL_METHOD, context) + except (InferenceError, AttributeInferenceError): + # Fallback to __len__. + try: + result = _infer_method_result_truth(self, "__len__", context) + except (AttributeInferenceError, InferenceError): + return True + return result + + def getitem(self, index, context=None): + # TODO: Rewrap index to Const for this case + new_context = bind_context_to_node(context, self) + if not context: + context = new_context + method = next(self.igetattr("__getitem__", context=context), None) + # Create a new CallContext for providing index as an argument. 
+ new_context.callcontext = CallContext(args=[index], callee=method) + if not isinstance(method, BoundMethod): + raise InferenceError( + "Could not find __getitem__ for {node!r}.", node=self, context=context + ) + if len(method.args.arguments) != 2: # (self, index) + raise AstroidTypeError( + "__getitem__ for {node!r} does not have correct signature", + node=self, + context=context, + ) + return next(method.infer_call_result(self, new_context), None) + + +class UnboundMethod(Proxy): + """a special node representing a method not bound to an instance""" + + # pylint: disable=unnecessary-lambda + special_attributes = lazy_descriptor(lambda: objectmodel.UnboundMethodModel()) + + def __repr__(self): + frame = self._proxied.parent.frame(future=True) + return "<{} {} of {} at 0x{}".format( + self.__class__.__name__, self._proxied.name, frame.qname(), id(self) + ) + + def implicit_parameters(self): + return 0 + + def is_bound(self): + return False + + def getattr(self, name, context=None): + if name in self.special_attributes: + return [self.special_attributes.lookup(name)] + return self._proxied.getattr(name, context) + + def igetattr(self, name, context=None): + if name in self.special_attributes: + return iter((self.special_attributes.lookup(name),)) + return self._proxied.igetattr(name, context) + + def infer_call_result(self, caller, context): + """ + The boundnode of the regular context with a function called + on ``object.__new__`` will be of type ``object``, + which is incorrect for the argument in general. + If no context is given the ``object.__new__`` call argument will + correctly inferred except when inside a call that requires + the additional context (such as a classmethod) of the boundnode + to determine which class the method was called from + """ + + # If we're unbound method __new__ of builtin object, the result is an + # instance of the class given as first argument. + if ( + self._proxied.name == "__new__" + and self._proxied.parent.frame(future=True).qname() == "builtins.object" + ): + if caller.args: + node_context = context.extra_context.get(caller.args[0]) + infer = caller.args[0].infer(context=node_context) + else: + infer = [] + return (Instance(x) if x is not Uninferable else x for x in infer) + return self._proxied.infer_call_result(caller, context) + + def bool_value(self, context=None): + return True + + +class BoundMethod(UnboundMethod): + """a special node representing a method bound to an instance""" + + # pylint: disable=unnecessary-lambda + special_attributes = lazy_descriptor(lambda: objectmodel.BoundMethodModel()) + + def __init__(self, proxy, bound): + super().__init__(proxy) + self.bound = bound + + def implicit_parameters(self): + if self.name == "__new__": + # __new__ acts as a classmethod but the class argument is not implicit. + return 0 + return 1 + + def is_bound(self): + return True + + def _infer_type_new_call(self, caller, context): + """Try to infer what type.__new__(mcs, name, bases, attrs) returns. + + In order for such call to be valid, the metaclass needs to be + a subtype of ``type``, the name needs to be a string, the bases + needs to be a tuple of classes + """ + # pylint: disable=import-outside-toplevel; circular import + from astroid.nodes import Pass + + # Verify the metaclass + try: + mcs = next(caller.args[0].infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + if mcs.__class__.__name__ != "ClassDef": + # Not a valid first argument. 
+ return None + if not mcs.is_subtype_of("builtins.type"): + # Not a valid metaclass. + return None + + # Verify the name + try: + name = next(caller.args[1].infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + if name.__class__.__name__ != "Const": + # Not a valid name, needs to be a const. + return None + if not isinstance(name.value, str): + # Needs to be a string. + return None + + # Verify the bases + try: + bases = next(caller.args[2].infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + if bases.__class__.__name__ != "Tuple": + # Needs to be a tuple. + return None + try: + inferred_bases = [next(elt.infer(context=context)) for elt in bases.elts] + except StopIteration as e: + raise InferenceError(context=context) from e + if any(base.__class__.__name__ != "ClassDef" for base in inferred_bases): + # All the bases needs to be Classes + return None + + # Verify the attributes. + try: + attrs = next(caller.args[3].infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + if attrs.__class__.__name__ != "Dict": + # Needs to be a dictionary. + return None + cls_locals = collections.defaultdict(list) + for key, value in attrs.items: + try: + key = next(key.infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + try: + value = next(value.infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context) from e + # Ignore non string keys + if key.__class__.__name__ == "Const" and isinstance(key.value, str): + cls_locals[key.value].append(value) + + # Build the class from now. + cls = mcs.__class__( + name=name.value, + lineno=caller.lineno, + col_offset=caller.col_offset, + parent=caller, + ) + empty = Pass() + cls.postinit( + bases=bases.elts, + body=[empty], + decorators=[], + newstyle=True, + metaclass=mcs, + keywords=[], + ) + cls.locals = cls_locals + return cls + + def infer_call_result(self, caller, context=None): + context = bind_context_to_node(context, self.bound) + if ( + self.bound.__class__.__name__ == "ClassDef" + and self.bound.name == "type" + and self.name == "__new__" + and len(caller.args) == 4 + ): + # Check if we have a ``type.__new__(mcs, name, bases, attrs)`` call. + new_cls = self._infer_type_new_call(caller, context) + if new_cls: + return iter((new_cls,)) + + return super().infer_call_result(caller, context) + + def bool_value(self, context=None): + return True + + +class Generator(BaseInstance): + """a special node representing a generator. + + Proxied class is set once for all in raw_building. 
+ """ + + special_attributes = lazy_descriptor(objectmodel.GeneratorModel) + + def __init__(self, parent=None, generator_initial_context=None): + super().__init__() + self.parent = parent + self._call_context = copy_context(generator_initial_context) + + @decorators.cached + def infer_yield_types(self): + yield from self.parent.infer_yield_result(self._call_context) + + def callable(self): + return False + + def pytype(self): + return "builtins.generator" + + def display_type(self): + return "Generator" + + def bool_value(self, context=None): + return True + + def __repr__(self): + return f"" + + def __str__(self): + return f"Generator({self._proxied.name})" + + +class AsyncGenerator(Generator): + """Special node representing an async generator""" + + def pytype(self): + return "builtins.async_generator" + + def display_type(self): + return "AsyncGenerator" + + def __repr__(self): + return f"" + + def __str__(self): + return f"AsyncGenerator({self._proxied.name})" diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/__init__.py b/myenv/lib/python3.9/site-packages/astroid/brain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_argparse.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_argparse.py new file mode 100644 index 0000000..ea97179 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_argparse.py @@ -0,0 +1,41 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid import arguments, inference_tip, nodes +from astroid.exceptions import UseInferenceDefault +from astroid.manager import AstroidManager + + +def infer_namespace(node, context=None): + callsite = arguments.CallSite.from_call(node, context=context) + if not callsite.keyword_arguments: + # Cannot make sense of it. 
+ raise UseInferenceDefault() + + class_node = nodes.ClassDef("Namespace") + # Set parent manually until ClassDef constructor fixed: + # https://github.com/PyCQA/astroid/issues/1490 + class_node.parent = node.parent + for attr in set(callsite.keyword_arguments): + fake_node = nodes.EmptyNode() + fake_node.parent = class_node + fake_node.attrname = attr + class_node.instance_attrs[attr] = [fake_node] + return iter((class_node.instantiate_class(),)) + + +def _looks_like_namespace(node): + func = node.func + if isinstance(func, nodes.Attribute): + return ( + func.attrname == "Namespace" + and isinstance(func.expr, nodes.Name) + and func.expr.name == "argparse" + ) + return False + + +AstroidManager().register_transform( + nodes.Call, inference_tip(infer_namespace), _looks_like_namespace +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_attrs.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_attrs.py new file mode 100644 index 0000000..32b8ce0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_attrs.py @@ -0,0 +1,84 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hook for the attrs library + +Without this hook pylint reports unsupported-assignment-operation +for attrs classes +""" +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import AnnAssign, Assign, AssignName, Call, Unknown +from astroid.nodes.scoped_nodes import ClassDef + +ATTRIB_NAMES = frozenset( + ("attr.ib", "attrib", "attr.attrib", "attr.field", "attrs.field", "field") +) +ATTRS_NAMES = frozenset( + ( + "attr.s", + "attrs", + "attr.attrs", + "attr.attributes", + "attr.define", + "attr.mutable", + "attr.frozen", + "attrs.define", + "attrs.mutable", + "attrs.frozen", + ) +) + + +def is_decorated_with_attrs(node, decorator_names=ATTRS_NAMES): + """Return True if a decorated node has + an attr decorator applied.""" + if not node.decorators: + return False + for decorator_attribute in node.decorators.nodes: + if isinstance(decorator_attribute, Call): # decorator with arguments + decorator_attribute = decorator_attribute.func + if decorator_attribute.as_string() in decorator_names: + return True + return False + + +def attr_attributes_transform(node: ClassDef) -> None: + """Given that the ClassNode has an attr decorator, + rewrite class attributes as instance attributes + """ + # Astroid can't infer this attribute properly + # Prevents https://github.com/PyCQA/pylint/issues/1884 + node.locals["__attrs_attrs__"] = [Unknown(parent=node)] + + for cdef_body_node in node.body: + if not isinstance(cdef_body_node, (Assign, AnnAssign)): + continue + if isinstance(cdef_body_node.value, Call): + if cdef_body_node.value.func.as_string() not in ATTRIB_NAMES: + continue + else: + continue + targets = ( + cdef_body_node.targets + if hasattr(cdef_body_node, "targets") + else [cdef_body_node.target] + ) + for target in targets: + rhs_node = Unknown( + lineno=cdef_body_node.lineno, + col_offset=cdef_body_node.col_offset, + parent=cdef_body_node, + ) + if isinstance(target, AssignName): + # Could be a subscript if the code analysed is + # i = Optional[str] = "" + # See https://github.com/PyCQA/pylint/issues/4439 + node.locals[target.name] = [rhs_node] + node.instance_attrs[target.name] = [rhs_node] + + +AstroidManager().register_transform( + ClassDef, attr_attributes_transform, 
is_decorated_with_attrs +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_boto3.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_boto3.py new file mode 100644 index 0000000..54faa64 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_boto3.py @@ -0,0 +1,30 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for understanding boto3.ServiceRequest()""" +from astroid import extract_node +from astroid.manager import AstroidManager +from astroid.nodes.scoped_nodes import ClassDef + +BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource" + + +def service_request_transform(node): + """Transform ServiceResource to look like dynamic classes""" + code = """ + def __getattr__(self, attr): + return 0 + """ + func_getattr = extract_node(code) + node.locals["__getattr__"] = [func_getattr] + return node + + +def _looks_like_boto3_service_request(node): + return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME + + +AstroidManager().register_transform( + ClassDef, service_request_transform, _looks_like_boto3_service_request +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_builtin_inference.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_builtin_inference.py new file mode 100644 index 0000000..5d7040a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_builtin_inference.py @@ -0,0 +1,922 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for various builtins.""" + +from functools import partial +from typing import Optional + +from astroid import arguments, helpers, inference_tip, nodes, objects, util +from astroid.builder import AstroidBuilder +from astroid.context import InferenceContext +from astroid.exceptions import ( + AstroidTypeError, + AttributeInferenceError, + InferenceError, + MroError, + UseInferenceDefault, +) +from astroid.manager import AstroidManager +from astroid.nodes import scoped_nodes + +OBJECT_DUNDER_NEW = "object.__new__" + +STR_CLASS = """ +class whatever(object): + def join(self, iterable): + return {rvalue} + def replace(self, old, new, count=None): + return {rvalue} + def format(self, *args, **kwargs): + return {rvalue} + def encode(self, encoding='ascii', errors=None): + return b'' + def decode(self, encoding='ascii', errors=None): + return u'' + def capitalize(self): + return {rvalue} + def title(self): + return {rvalue} + def lower(self): + return {rvalue} + def upper(self): + return {rvalue} + def swapcase(self): + return {rvalue} + def index(self, sub, start=None, end=None): + return 0 + def find(self, sub, start=None, end=None): + return 0 + def count(self, sub, start=None, end=None): + return 0 + def strip(self, chars=None): + return {rvalue} + def lstrip(self, chars=None): + return {rvalue} + def rstrip(self, chars=None): + return {rvalue} + def rjust(self, width, fillchar=None): + return {rvalue} + def center(self, width, fillchar=None): + return {rvalue} + def ljust(self, width, fillchar=None): + return {rvalue} +""" + + +BYTES_CLASS = """ +class whatever(object): + def join(self, iterable): + return {rvalue} + def replace(self, old, new, 
count=None): + return {rvalue} + def decode(self, encoding='ascii', errors=None): + return u'' + def capitalize(self): + return {rvalue} + def title(self): + return {rvalue} + def lower(self): + return {rvalue} + def upper(self): + return {rvalue} + def swapcase(self): + return {rvalue} + def index(self, sub, start=None, end=None): + return 0 + def find(self, sub, start=None, end=None): + return 0 + def count(self, sub, start=None, end=None): + return 0 + def strip(self, chars=None): + return {rvalue} + def lstrip(self, chars=None): + return {rvalue} + def rstrip(self, chars=None): + return {rvalue} + def rjust(self, width, fillchar=None): + return {rvalue} + def center(self, width, fillchar=None): + return {rvalue} + def ljust(self, width, fillchar=None): + return {rvalue} +""" + + +def _extend_string_class(class_node, code, rvalue): + """function to extend builtin str/unicode class""" + code = code.format(rvalue=rvalue) + fake = AstroidBuilder(AstroidManager()).string_build(code)["whatever"] + for method in fake.mymethods(): + method.parent = class_node + method.lineno = None + method.col_offset = None + if "__class__" in method.locals: + method.locals["__class__"] = [class_node] + class_node.locals[method.name] = [method] + method.parent = class_node + + +def _extend_builtins(class_transforms): + builtin_ast = AstroidManager().builtins_module + for class_name, transform in class_transforms.items(): + transform(builtin_ast[class_name]) + + +_extend_builtins( + { + "bytes": partial(_extend_string_class, code=BYTES_CLASS, rvalue="b''"), + "str": partial(_extend_string_class, code=STR_CLASS, rvalue="''"), + } +) + + +def _builtin_filter_predicate(node, builtin_name): + if ( + builtin_name == "type" + and node.root().name == "re" + and isinstance(node.func, nodes.Name) + and node.func.name == "type" + and isinstance(node.parent, nodes.Assign) + and len(node.parent.targets) == 1 + and isinstance(node.parent.targets[0], nodes.AssignName) + and node.parent.targets[0].name in {"Pattern", "Match"} + ): + # Handle re.Pattern and re.Match in brain_re + # Match these patterns from stdlib/re.py + # ```py + # Pattern = type(...) + # Match = type(...) + # ``` + return False + if isinstance(node.func, nodes.Name) and node.func.name == builtin_name: + return True + if isinstance(node.func, nodes.Attribute): + return ( + node.func.attrname == "fromkeys" + and isinstance(node.func.expr, nodes.Name) + and node.func.expr.name == "dict" + ) + return False + + +def register_builtin_transform(transform, builtin_name): + """Register a new transform function for the given *builtin_name*. + + The transform function must accept two parameters, a node and + an optional context. + """ + + def _transform_wrapper(node, context=None): + result = transform(node, context=context) + if result: + if not result.parent: + # Let the transformation function determine + # the parent for its result. Otherwise, + # we set it to be the node we transformed from. 
+ result.parent = node + + if result.lineno is None: + result.lineno = node.lineno + # Can be a 'Module' see https://github.com/PyCQA/pylint/issues/4671 + # We don't have a regression test on this one: tread carefully + if hasattr(result, "col_offset") and result.col_offset is None: + result.col_offset = node.col_offset + return iter([result]) + + AstroidManager().register_transform( + nodes.Call, + inference_tip(_transform_wrapper), + partial(_builtin_filter_predicate, builtin_name=builtin_name), + ) + + +def _container_generic_inference(node, context, node_type, transform): + args = node.args + if not args: + return node_type() + if len(node.args) > 1: + raise UseInferenceDefault() + + (arg,) = args + transformed = transform(arg) + if not transformed: + try: + inferred = next(arg.infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + if inferred is util.Uninferable: + raise UseInferenceDefault + transformed = transform(inferred) + if not transformed or transformed is util.Uninferable: + raise UseInferenceDefault + return transformed + + +def _container_generic_transform( # pylint: disable=inconsistent-return-statements + arg, context, klass, iterables, build_elts +): + if isinstance(arg, klass): + return arg + if isinstance(arg, iterables): + if all(isinstance(elt, nodes.Const) for elt in arg.elts): + elts = [elt.value for elt in arg.elts] + else: + # TODO: Does not handle deduplication for sets. + elts = [] + for element in arg.elts: + if not element: + continue + inferred = helpers.safe_infer(element, context=context) + if inferred: + evaluated_object = nodes.EvaluatedObject( + original=element, value=inferred + ) + elts.append(evaluated_object) + elif isinstance(arg, nodes.Dict): + # Dicts need to have consts as strings already. 
+ if not all(isinstance(elt[0], nodes.Const) for elt in arg.items): + raise UseInferenceDefault() + elts = [item[0].value for item in arg.items] + elif isinstance(arg, nodes.Const) and isinstance(arg.value, (str, bytes)): + elts = arg.value + else: + return + return klass.from_elements(elts=build_elts(elts)) + + +def _infer_builtin_container( + node, context, klass=None, iterables=None, build_elts=None +): + transform_func = partial( + _container_generic_transform, + context=context, + klass=klass, + iterables=iterables, + build_elts=build_elts, + ) + + return _container_generic_inference(node, context, klass, transform_func) + + +# pylint: disable=invalid-name +infer_tuple = partial( + _infer_builtin_container, + klass=nodes.Tuple, + iterables=( + nodes.List, + nodes.Set, + objects.FrozenSet, + objects.DictItems, + objects.DictKeys, + objects.DictValues, + ), + build_elts=tuple, +) + +infer_list = partial( + _infer_builtin_container, + klass=nodes.List, + iterables=( + nodes.Tuple, + nodes.Set, + objects.FrozenSet, + objects.DictItems, + objects.DictKeys, + objects.DictValues, + ), + build_elts=list, +) + +infer_set = partial( + _infer_builtin_container, + klass=nodes.Set, + iterables=(nodes.List, nodes.Tuple, objects.FrozenSet, objects.DictKeys), + build_elts=set, +) + +infer_frozenset = partial( + _infer_builtin_container, + klass=objects.FrozenSet, + iterables=(nodes.List, nodes.Tuple, nodes.Set, objects.FrozenSet, objects.DictKeys), + build_elts=frozenset, +) + + +def _get_elts(arg, context): + def is_iterable(n): + return isinstance(n, (nodes.List, nodes.Tuple, nodes.Set)) + + try: + inferred = next(arg.infer(context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + if isinstance(inferred, nodes.Dict): + items = inferred.items + elif is_iterable(inferred): + items = [] + for elt in inferred.elts: + # If an item is not a pair of two items, + # then fallback to the default inference. + # Also, take in consideration only hashable items, + # tuples and consts. We are choosing Names as well. + if not is_iterable(elt): + raise UseInferenceDefault() + if len(elt.elts) != 2: + raise UseInferenceDefault() + if not isinstance(elt.elts[0], (nodes.Tuple, nodes.Const, nodes.Name)): + raise UseInferenceDefault() + items.append(tuple(elt.elts)) + else: + raise UseInferenceDefault() + return items + + +def infer_dict(node, context=None): + """Try to infer a dict call to a Dict node. + + The function treats the following cases: + + * dict() + * dict(mapping) + * dict(iterable) + * dict(iterable, **kwargs) + * dict(mapping, **kwargs) + * dict(**kwargs) + + If a case can't be inferred, we'll fallback to default inference. 
+ """ + call = arguments.CallSite.from_call(node, context=context) + if call.has_invalid_arguments() or call.has_invalid_keywords(): + raise UseInferenceDefault + + args = call.positional_arguments + kwargs = list(call.keyword_arguments.items()) + + if not args and not kwargs: + # dict() + return nodes.Dict() + if kwargs and not args: + # dict(a=1, b=2, c=4) + items = [(nodes.Const(key), value) for key, value in kwargs] + elif len(args) == 1 and kwargs: + # dict(some_iterable, b=2, c=4) + elts = _get_elts(args[0], context) + keys = [(nodes.Const(key), value) for key, value in kwargs] + items = elts + keys + elif len(args) == 1: + items = _get_elts(args[0], context) + else: + raise UseInferenceDefault() + value = nodes.Dict( + col_offset=node.col_offset, lineno=node.lineno, parent=node.parent + ) + value.postinit(items) + return value + + +def infer_super(node, context=None): + """Understand super calls. + + There are some restrictions for what can be understood: + + * unbounded super (one argument form) is not understood. + + * if the super call is not inside a function (classmethod or method), + then the default inference will be used. + + * if the super arguments can't be inferred, the default inference + will be used. + """ + if len(node.args) == 1: + # Ignore unbounded super. + raise UseInferenceDefault + + scope = node.scope() + if not isinstance(scope, nodes.FunctionDef): + # Ignore non-method uses of super. + raise UseInferenceDefault + if scope.type not in ("classmethod", "method"): + # Not interested in staticmethods. + raise UseInferenceDefault + + cls = scoped_nodes.get_wrapping_class(scope) + if not node.args: + mro_pointer = cls + # In we are in a classmethod, the interpreter will fill + # automatically the class as the second argument, not an instance. + if scope.type == "classmethod": + mro_type = cls + else: + mro_type = cls.instantiate_class() + else: + try: + mro_pointer = next(node.args[0].infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + try: + mro_type = next(node.args[1].infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if mro_pointer is util.Uninferable or mro_type is util.Uninferable: + # No way we could understand this. + raise UseInferenceDefault + + super_obj = objects.Super( + mro_pointer=mro_pointer, mro_type=mro_type, self_class=cls, scope=scope + ) + super_obj.parent = node + return super_obj + + +def _infer_getattr_args(node, context): + if len(node.args) not in (2, 3): + # Not a valid getattr call. + raise UseInferenceDefault + + try: + obj = next(node.args[0].infer(context=context)) + attr = next(node.args[1].infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if obj is util.Uninferable or attr is util.Uninferable: + # If one of the arguments is something we can't infer, + # then also make the result of the getattr call something + # which is unknown. + return util.Uninferable, util.Uninferable + + is_string = isinstance(attr, nodes.Const) and isinstance(attr.value, str) + if not is_string: + raise UseInferenceDefault + + return obj, attr.value + + +def infer_getattr(node, context=None): + """Understand getattr calls + + If one of the arguments is an Uninferable object, then the + result will be an Uninferable object. Otherwise, the normal attribute + lookup will be done. 
+ """ + obj, attr = _infer_getattr_args(node, context) + if ( + obj is util.Uninferable + or attr is util.Uninferable + or not hasattr(obj, "igetattr") + ): + return util.Uninferable + + try: + return next(obj.igetattr(attr, context=context)) + except (StopIteration, InferenceError, AttributeInferenceError): + if len(node.args) == 3: + # Try to infer the default and return it instead. + try: + return next(node.args[2].infer(context=context)) + except (StopIteration, InferenceError) as exc: + raise UseInferenceDefault from exc + + raise UseInferenceDefault + + +def infer_hasattr(node, context=None): + """Understand hasattr calls + + This always guarantees three possible outcomes for calling + hasattr: Const(False) when we are sure that the object + doesn't have the intended attribute, Const(True) when + we know that the object has the attribute and Uninferable + when we are unsure of the outcome of the function call. + """ + try: + obj, attr = _infer_getattr_args(node, context) + if ( + obj is util.Uninferable + or attr is util.Uninferable + or not hasattr(obj, "getattr") + ): + return util.Uninferable + obj.getattr(attr, context=context) + except UseInferenceDefault: + # Can't infer something from this function call. + return util.Uninferable + except AttributeInferenceError: + # Doesn't have it. + return nodes.Const(False) + return nodes.Const(True) + + +def infer_callable(node, context=None): + """Understand callable calls + + This follows Python's semantics, where an object + is callable if it provides an attribute __call__, + even though that attribute is something which can't be + called. + """ + if len(node.args) != 1: + # Invalid callable call. + raise UseInferenceDefault + + argument = node.args[0] + try: + inferred = next(argument.infer(context=context)) + except (InferenceError, StopIteration): + return util.Uninferable + if inferred is util.Uninferable: + return util.Uninferable + return nodes.Const(inferred.callable()) + + +def infer_property( + node: nodes.Call, context: Optional[InferenceContext] = None +) -> objects.Property: + """Understand `property` class + + This only infers the output of `property` + call, not the arguments themselves. + """ + if len(node.args) < 1: + # Invalid property call. + raise UseInferenceDefault + + getter = node.args[0] + try: + inferred = next(getter.infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if not isinstance(inferred, (nodes.FunctionDef, nodes.Lambda)): + raise UseInferenceDefault + + prop_func = objects.Property( + function=inferred, + name=inferred.name, + lineno=node.lineno, + parent=node, + col_offset=node.col_offset, + ) + prop_func.postinit( + body=[], + args=inferred.args, + doc_node=getattr(inferred, "doc_node", None), + ) + return prop_func + + +def infer_bool(node, context=None): + """Understand bool calls.""" + if len(node.args) > 1: + # Invalid bool call. 
+ raise UseInferenceDefault + + if not node.args: + return nodes.Const(False) + + argument = node.args[0] + try: + inferred = next(argument.infer(context=context)) + except (InferenceError, StopIteration): + return util.Uninferable + if inferred is util.Uninferable: + return util.Uninferable + + bool_value = inferred.bool_value(context=context) + if bool_value is util.Uninferable: + return util.Uninferable + return nodes.Const(bool_value) + + +def infer_type(node, context=None): + """Understand the one-argument form of *type*.""" + if len(node.args) != 1: + raise UseInferenceDefault + + return helpers.object_type(node.args[0], context) + + +def infer_slice(node, context=None): + """Understand `slice` calls.""" + args = node.args + if not 0 < len(args) <= 3: + raise UseInferenceDefault + + infer_func = partial(helpers.safe_infer, context=context) + args = [infer_func(arg) for arg in args] + for arg in args: + if not arg or arg is util.Uninferable: + raise UseInferenceDefault + if not isinstance(arg, nodes.Const): + raise UseInferenceDefault + if not isinstance(arg.value, (type(None), int)): + raise UseInferenceDefault + + if len(args) < 3: + # Make sure we have 3 arguments. + args.extend([None] * (3 - len(args))) + + slice_node = nodes.Slice( + lineno=node.lineno, col_offset=node.col_offset, parent=node.parent + ) + slice_node.postinit(*args) + return slice_node + + +def _infer_object__new__decorator(node, context=None): + # Instantiate class immediately + # since that's what @object.__new__ does + return iter((node.instantiate_class(),)) + + +def _infer_object__new__decorator_check(node): + """Predicate before inference_tip + + Check if the given ClassDef has an @object.__new__ decorator + """ + if not node.decorators: + return False + + for decorator in node.decorators.nodes: + if isinstance(decorator, nodes.Attribute): + if decorator.as_string() == OBJECT_DUNDER_NEW: + return True + return False + + +def infer_issubclass(callnode, context=None): + """Infer issubclass() calls + + :param nodes.Call callnode: an `issubclass` call + :param InferenceContext context: the context for the inference + :rtype nodes.Const: Boolean Const value of the `issubclass` call + :raises UseInferenceDefault: If the node cannot be inferred + """ + call = arguments.CallSite.from_call(callnode, context=context) + if call.keyword_arguments: + # issubclass doesn't support keyword arguments + raise UseInferenceDefault("TypeError: issubclass() takes no keyword arguments") + if len(call.positional_arguments) != 2: + raise UseInferenceDefault( + f"Expected two arguments, got {len(call.positional_arguments)}" + ) + # The left hand argument is the obj to be checked + obj_node, class_or_tuple_node = call.positional_arguments + + try: + obj_type = next(obj_node.infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + if not isinstance(obj_type, nodes.ClassDef): + raise UseInferenceDefault("TypeError: arg 1 must be class") + + # The right hand argument is the class(es) that the given + # object is to be checked against. 
+ try: + class_container = _class_or_tuple_to_container( + class_or_tuple_node, context=context + ) + except InferenceError as exc: + raise UseInferenceDefault from exc + try: + issubclass_bool = helpers.object_issubclass(obj_type, class_container, context) + except AstroidTypeError as exc: + raise UseInferenceDefault("TypeError: " + str(exc)) from exc + except MroError as exc: + raise UseInferenceDefault from exc + return nodes.Const(issubclass_bool) + + +def infer_isinstance(callnode, context=None): + """Infer isinstance calls + + :param nodes.Call callnode: an isinstance call + :param InferenceContext context: context for call + (currently unused but is a common interface for inference) + :rtype nodes.Const: Boolean Const value of isinstance call + + :raises UseInferenceDefault: If the node cannot be inferred + """ + call = arguments.CallSite.from_call(callnode, context=context) + if call.keyword_arguments: + # isinstance doesn't support keyword arguments + raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments") + if len(call.positional_arguments) != 2: + raise UseInferenceDefault( + f"Expected two arguments, got {len(call.positional_arguments)}" + ) + # The left hand argument is the obj to be checked + obj_node, class_or_tuple_node = call.positional_arguments + # The right hand argument is the class(es) that the given + # obj is to be check is an instance of + try: + class_container = _class_or_tuple_to_container( + class_or_tuple_node, context=context + ) + except InferenceError as exc: + raise UseInferenceDefault from exc + try: + isinstance_bool = helpers.object_isinstance(obj_node, class_container, context) + except AstroidTypeError as exc: + raise UseInferenceDefault("TypeError: " + str(exc)) from exc + except MroError as exc: + raise UseInferenceDefault from exc + if isinstance_bool is util.Uninferable: + raise UseInferenceDefault + return nodes.Const(isinstance_bool) + + +def _class_or_tuple_to_container(node, context=None): + # Move inferences results into container + # to simplify later logic + # raises InferenceError if any of the inferences fall through + try: + node_infer = next(node.infer(context=context)) + except StopIteration as e: + raise InferenceError(node=node, context=context) from e + # arg2 MUST be a type or a TUPLE of types + # for isinstance + if isinstance(node_infer, nodes.Tuple): + try: + class_container = [ + next(node.infer(context=context)) for node in node_infer.elts + ] + except StopIteration as e: + raise InferenceError(node=node, context=context) from e + class_container = [ + klass_node for klass_node in class_container if klass_node is not None + ] + else: + class_container = [node_infer] + return class_container + + +def infer_len(node, context=None): + """Infer length calls + + :param nodes.Call node: len call to infer + :param context.InferenceContext: node context + :rtype nodes.Const: a Const node with the inferred length, if possible + """ + call = arguments.CallSite.from_call(node, context=context) + if call.keyword_arguments: + raise UseInferenceDefault("TypeError: len() must take no keyword arguments") + if len(call.positional_arguments) != 1: + raise UseInferenceDefault( + "TypeError: len() must take exactly one argument " + "({len}) given".format(len=len(call.positional_arguments)) + ) + [argument_node] = call.positional_arguments + + try: + return nodes.Const(helpers.object_len(argument_node, context=context)) + except (AstroidTypeError, InferenceError) as exc: + raise UseInferenceDefault(str(exc)) from exc + + 
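For orientation on what these builtin-call transforms buy a consumer of the library, here is a minimal sketch of exercising them through astroid's public inference API. It is illustrative only and not part of the patched files; it assumes astroid is importable and that the inference tips defined in this module are registered, as is done further down via register_builtin_transform.

import astroid

# len(...) and isinstance(...) calls are folded into Const nodes by infer_len / infer_isinstance.
length = next(astroid.extract_node("len([1, 2, 3])").infer())
check = next(astroid.extract_node("isinstance(1, int)").infer())
print(length.value, check.value)  # 3 True

# dict(a=1, b=2) is rewritten into an astroid nodes.Dict by infer_dict.
mapping = next(astroid.extract_node("dict(a=1, b=2)").infer())
print(type(mapping).__name__)  # Dict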
+def infer_str(node, context=None): + """Infer str() calls + + :param nodes.Call node: str() call to infer + :param context.InferenceContext: node context + :rtype nodes.Const: a Const containing an empty string + """ + call = arguments.CallSite.from_call(node, context=context) + if call.keyword_arguments: + raise UseInferenceDefault("TypeError: str() must take no keyword arguments") + try: + return nodes.Const("") + except (AstroidTypeError, InferenceError) as exc: + raise UseInferenceDefault(str(exc)) from exc + + +def infer_int(node, context=None): + """Infer int() calls + + :param nodes.Call node: int() call to infer + :param context.InferenceContext: node context + :rtype nodes.Const: a Const containing the integer value of the int() call + """ + call = arguments.CallSite.from_call(node, context=context) + if call.keyword_arguments: + raise UseInferenceDefault("TypeError: int() must take no keyword arguments") + + if call.positional_arguments: + try: + first_value = next(call.positional_arguments[0].infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault(str(exc)) from exc + + if first_value is util.Uninferable: + raise UseInferenceDefault + + if isinstance(first_value, nodes.Const) and isinstance( + first_value.value, (int, str) + ): + try: + actual_value = int(first_value.value) + except ValueError: + return nodes.Const(0) + return nodes.Const(actual_value) + + return nodes.Const(0) + + +def infer_dict_fromkeys(node, context=None): + """Infer dict.fromkeys + + :param nodes.Call node: dict.fromkeys() call to infer + :param context.InferenceContext context: node context + :rtype nodes.Dict: + a Dictionary containing the values that astroid was able to infer. + In case the inference failed for any reason, an empty dictionary + will be inferred instead. 
+ """ + + def _build_dict_with_elements(elements): + new_node = nodes.Dict( + col_offset=node.col_offset, lineno=node.lineno, parent=node.parent + ) + new_node.postinit(elements) + return new_node + + call = arguments.CallSite.from_call(node, context=context) + if call.keyword_arguments: + raise UseInferenceDefault("TypeError: int() must take no keyword arguments") + if len(call.positional_arguments) not in {1, 2}: + raise UseInferenceDefault( + "TypeError: Needs between 1 and 2 positional arguments" + ) + + default = nodes.Const(None) + values = call.positional_arguments[0] + try: + inferred_values = next(values.infer(context=context)) + except (InferenceError, StopIteration): + return _build_dict_with_elements([]) + if inferred_values is util.Uninferable: + return _build_dict_with_elements([]) + + # Limit to a couple of potential values, as this can become pretty complicated + accepted_iterable_elements = (nodes.Const,) + if isinstance(inferred_values, (nodes.List, nodes.Set, nodes.Tuple)): + elements = inferred_values.elts + for element in elements: + if not isinstance(element, accepted_iterable_elements): + # Fallback to an empty dict + return _build_dict_with_elements([]) + + elements_with_value = [(element, default) for element in elements] + return _build_dict_with_elements(elements_with_value) + if isinstance(inferred_values, nodes.Const) and isinstance( + inferred_values.value, (str, bytes) + ): + elements = [ + (nodes.Const(element), default) for element in inferred_values.value + ] + return _build_dict_with_elements(elements) + if isinstance(inferred_values, nodes.Dict): + keys = inferred_values.itered() + for key in keys: + if not isinstance(key, accepted_iterable_elements): + # Fallback to an empty dict + return _build_dict_with_elements([]) + + elements_with_value = [(element, default) for element in keys] + return _build_dict_with_elements(elements_with_value) + + # Fallback to an empty dictionary + return _build_dict_with_elements([]) + + +# Builtins inference +register_builtin_transform(infer_bool, "bool") +register_builtin_transform(infer_super, "super") +register_builtin_transform(infer_callable, "callable") +register_builtin_transform(infer_property, "property") +register_builtin_transform(infer_getattr, "getattr") +register_builtin_transform(infer_hasattr, "hasattr") +register_builtin_transform(infer_tuple, "tuple") +register_builtin_transform(infer_set, "set") +register_builtin_transform(infer_list, "list") +register_builtin_transform(infer_dict, "dict") +register_builtin_transform(infer_frozenset, "frozenset") +register_builtin_transform(infer_type, "type") +register_builtin_transform(infer_slice, "slice") +register_builtin_transform(infer_isinstance, "isinstance") +register_builtin_transform(infer_issubclass, "issubclass") +register_builtin_transform(infer_len, "len") +register_builtin_transform(infer_str, "str") +register_builtin_transform(infer_int, "int") +register_builtin_transform(infer_dict_fromkeys, "dict.fromkeys") + + +# Infer object.__new__ calls +AstroidManager().register_transform( + nodes.ClassDef, + inference_tip(_infer_object__new__decorator), + _infer_object__new__decorator_check, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_collections.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_collections.py new file mode 100644 index 0000000..43304ec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_collections.py @@ -0,0 +1,123 @@ +# Licensed under the LGPL: 
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import extract_node, parse +from astroid.const import PY39_PLUS +from astroid.exceptions import AttributeInferenceError +from astroid.manager import AstroidManager +from astroid.nodes.scoped_nodes import ClassDef + + +def _collections_transform(): + return parse( + """ + class defaultdict(dict): + default_factory = None + def __missing__(self, key): pass + def __getitem__(self, key): return default_factory + + """ + + _deque_mock() + + _ordered_dict_mock() + ) + + +def _deque_mock(): + base_deque_class = """ + class deque(object): + maxlen = 0 + def __init__(self, iterable=None, maxlen=None): + self.iterable = iterable or [] + def append(self, x): pass + def appendleft(self, x): pass + def clear(self): pass + def count(self, x): return 0 + def extend(self, iterable): pass + def extendleft(self, iterable): pass + def pop(self): return self.iterable[0] + def popleft(self): return self.iterable[0] + def remove(self, value): pass + def reverse(self): return reversed(self.iterable) + def rotate(self, n=1): return self + def __iter__(self): return self + def __reversed__(self): return self.iterable[::-1] + def __getitem__(self, index): return self.iterable[index] + def __setitem__(self, index, value): pass + def __delitem__(self, index): pass + def __bool__(self): return bool(self.iterable) + def __nonzero__(self): return bool(self.iterable) + def __contains__(self, o): return o in self.iterable + def __len__(self): return len(self.iterable) + def __copy__(self): return deque(self.iterable) + def copy(self): return deque(self.iterable) + def index(self, x, start=0, end=0): return 0 + def insert(self, i, x): pass + def __add__(self, other): pass + def __iadd__(self, other): pass + def __mul__(self, other): pass + def __imul__(self, other): pass + def __rmul__(self, other): pass""" + if PY39_PLUS: + base_deque_class += """ + @classmethod + def __class_getitem__(self, item): return cls""" + return base_deque_class + + +def _ordered_dict_mock(): + base_ordered_dict_class = """ + class OrderedDict(dict): + def __reversed__(self): return self[::-1] + def move_to_end(self, key, last=False): pass""" + if PY39_PLUS: + base_ordered_dict_class += """ + @classmethod + def __class_getitem__(cls, item): return cls""" + return base_ordered_dict_class + + +register_module_extender(AstroidManager(), "collections", _collections_transform) + + +def _looks_like_subscriptable(node: ClassDef) -> bool: + """ + Returns True if the node corresponds to a ClassDef of the Collections.abc module that + supports subscripting + + :param node: ClassDef node + """ + if node.qname().startswith("_collections") or node.qname().startswith( + "collections" + ): + try: + node.getattr("__class_getitem__") + return True + except AttributeInferenceError: + pass + return False + + +CLASS_GET_ITEM_TEMPLATE = """ +@classmethod +def __class_getitem__(cls, item): + return cls +""" + + +def easy_class_getitem_inference(node, context=None): + # Here __class_getitem__ exists but is quite a mess to infer thus + # put an easy inference tip + func_to_add = extract_node(CLASS_GET_ITEM_TEMPLATE) + node.locals["__class_getitem__"] = [func_to_add] + + +if PY39_PLUS: + # Starting with Python39 some objects of the collection module are subscriptable + # thanks to the 
__class_getitem__ method but the way it is implemented in + # _collection_abc makes it difficult to infer. (We would have to handle AssignName inference in the + # getitem method of the ClassDef class) Instead we put here a mock of the __class_getitem__ method + AstroidManager().register_transform( + ClassDef, easy_class_getitem_inference, _looks_like_subscriptable + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_crypt.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_crypt.py new file mode 100644 index 0000000..45c3055 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_crypt.py @@ -0,0 +1,28 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.const import PY37_PLUS +from astroid.manager import AstroidManager + +if PY37_PLUS: + # Since Python 3.7 Hashing Methods are added + # dynamically to globals() + + def _re_transform(): + return parse( + """ + from collections import namedtuple + _Method = namedtuple('_Method', 'name ident salt_chars total_size') + + METHOD_SHA512 = _Method('SHA512', '6', 16, 106) + METHOD_SHA256 = _Method('SHA256', '5', 16, 63) + METHOD_BLOWFISH = _Method('BLOWFISH', 2, 'b', 22) + METHOD_MD5 = _Method('MD5', '1', 8, 34) + METHOD_CRYPT = _Method('CRYPT', None, 2, 13) + """ + ) + + register_module_extender(AstroidManager(), "crypt", _re_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_ctypes.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_ctypes.py new file mode 100644 index 0000000..323b19c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_ctypes.py @@ -0,0 +1,82 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hooks for ctypes module. + +Inside the ctypes module, the value class is defined inside +the C coded module _ctypes. +Thus astroid doesn't know that the value member is a builtin type +among float, int, bytes or str. +""" +import sys + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def enrich_ctypes_redefined_types(): + """ + For each ctypes redefined types, overload 'value' and '_type_' members definition. + Overloading 'value' is mandatory otherwise astroid cannot infer the correct type for it. + Overloading '_type_' is necessary because the class definition made here replaces the original + one, in which '_type_' member is defined. Luckily those original class definitions are very short + and contain only the '_type_' member definition. 
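+
+    With this transform in place, astroid should be able to infer, for
+    example (illustrative)::
+
+        import ctypes
+        x = ctypes.c_int(42)
+        x.value    # inferred as an int instance instead of Uninferable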
+ """ + c_class_to_type = ( + ("c_byte", "int", "b"), + ("c_char", "bytes", "c"), + ("c_double", "float", "d"), + ("c_float", "float", "f"), + ("c_int", "int", "i"), + ("c_int16", "int", "h"), + ("c_int32", "int", "i"), + ("c_int64", "int", "l"), + ("c_int8", "int", "b"), + ("c_long", "int", "l"), + ("c_longdouble", "float", "g"), + ("c_longlong", "int", "l"), + ("c_short", "int", "h"), + ("c_size_t", "int", "L"), + ("c_ssize_t", "int", "l"), + ("c_ubyte", "int", "B"), + ("c_uint", "int", "I"), + ("c_uint16", "int", "H"), + ("c_uint32", "int", "I"), + ("c_uint64", "int", "L"), + ("c_uint8", "int", "B"), + ("c_ulong", "int", "L"), + ("c_ulonglong", "int", "L"), + ("c_ushort", "int", "H"), + ("c_wchar", "str", "u"), + ) + + src = [ + """ +from _ctypes import _SimpleCData + +class c_bool(_SimpleCData): + def __init__(self, value): + self.value = True + self._type_ = '?' + """ + ] + + for c_type, builtin_type, type_code in c_class_to_type: + src.append( + f""" +class {c_type}(_SimpleCData): + def __init__(self, value): + self.value = {builtin_type}(value) + self._type_ = '{type_code}' + """ + ) + + return parse("\n".join(src)) + + +if not hasattr(sys, "pypy_version_info"): + # No need of this module in pypy where everything is written in python + register_module_extender(AstroidManager(), "ctypes", enrich_ctypes_redefined_types) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_curses.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_curses.py new file mode 100644 index 0000000..66cd5b2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_curses.py @@ -0,0 +1,183 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def _curses_transform(): + return parse( + """ + A_ALTCHARSET = 1 + A_BLINK = 1 + A_BOLD = 1 + A_DIM = 1 + A_INVIS = 1 + A_ITALIC = 1 + A_NORMAL = 1 + A_PROTECT = 1 + A_REVERSE = 1 + A_STANDOUT = 1 + A_UNDERLINE = 1 + A_HORIZONTAL = 1 + A_LEFT = 1 + A_LOW = 1 + A_RIGHT = 1 + A_TOP = 1 + A_VERTICAL = 1 + A_CHARTEXT = 1 + A_ATTRIBUTES = 1 + A_CHARTEXT = 1 + A_COLOR = 1 + KEY_MIN = 1 + KEY_BREAK = 1 + KEY_DOWN = 1 + KEY_UP = 1 + KEY_LEFT = 1 + KEY_RIGHT = 1 + KEY_HOME = 1 + KEY_BACKSPACE = 1 + KEY_F0 = 1 + KEY_Fn = 1 + KEY_DL = 1 + KEY_IL = 1 + KEY_DC = 1 + KEY_IC = 1 + KEY_EIC = 1 + KEY_CLEAR = 1 + KEY_EOS = 1 + KEY_EOL = 1 + KEY_SF = 1 + KEY_SR = 1 + KEY_NPAGE = 1 + KEY_PPAGE = 1 + KEY_STAB = 1 + KEY_CTAB = 1 + KEY_CATAB = 1 + KEY_ENTER = 1 + KEY_SRESET = 1 + KEY_RESET = 1 + KEY_PRINT = 1 + KEY_LL = 1 + KEY_A1 = 1 + KEY_A3 = 1 + KEY_B2 = 1 + KEY_C1 = 1 + KEY_C3 = 1 + KEY_BTAB = 1 + KEY_BEG = 1 + KEY_CANCEL = 1 + KEY_CLOSE = 1 + KEY_COMMAND = 1 + KEY_COPY = 1 + KEY_CREATE = 1 + KEY_END = 1 + KEY_EXIT = 1 + KEY_FIND = 1 + KEY_HELP = 1 + KEY_MARK = 1 + KEY_MESSAGE = 1 + KEY_MOVE = 1 + KEY_NEXT = 1 + KEY_OPEN = 1 + KEY_OPTIONS = 1 + KEY_PREVIOUS = 1 + KEY_REDO = 1 + KEY_REFERENCE = 1 + KEY_REFRESH = 1 + KEY_REPLACE = 1 + KEY_RESTART = 1 + KEY_RESUME = 1 + KEY_SAVE = 1 + KEY_SBEG = 1 + KEY_SCANCEL = 1 + KEY_SCOMMAND = 1 + KEY_SCOPY = 1 + KEY_SCREATE = 1 + KEY_SDC = 1 + KEY_SDL = 1 + KEY_SELECT = 1 + KEY_SEND = 1 + KEY_SEOL = 1 + KEY_SEXIT = 1 + KEY_SFIND = 1 + KEY_SHELP = 1 + KEY_SHOME = 1 + KEY_SIC = 1 + KEY_SLEFT = 1 + 
KEY_SMESSAGE = 1 + KEY_SMOVE = 1 + KEY_SNEXT = 1 + KEY_SOPTIONS = 1 + KEY_SPREVIOUS = 1 + KEY_SPRINT = 1 + KEY_SREDO = 1 + KEY_SREPLACE = 1 + KEY_SRIGHT = 1 + KEY_SRSUME = 1 + KEY_SSAVE = 1 + KEY_SSUSPEND = 1 + KEY_SUNDO = 1 + KEY_SUSPEND = 1 + KEY_UNDO = 1 + KEY_MOUSE = 1 + KEY_RESIZE = 1 + KEY_MAX = 1 + ACS_BBSS = 1 + ACS_BLOCK = 1 + ACS_BOARD = 1 + ACS_BSBS = 1 + ACS_BSSB = 1 + ACS_BSSS = 1 + ACS_BTEE = 1 + ACS_BULLET = 1 + ACS_CKBOARD = 1 + ACS_DARROW = 1 + ACS_DEGREE = 1 + ACS_DIAMOND = 1 + ACS_GEQUAL = 1 + ACS_HLINE = 1 + ACS_LANTERN = 1 + ACS_LARROW = 1 + ACS_LEQUAL = 1 + ACS_LLCORNER = 1 + ACS_LRCORNER = 1 + ACS_LTEE = 1 + ACS_NEQUAL = 1 + ACS_PI = 1 + ACS_PLMINUS = 1 + ACS_PLUS = 1 + ACS_RARROW = 1 + ACS_RTEE = 1 + ACS_S1 = 1 + ACS_S3 = 1 + ACS_S7 = 1 + ACS_S9 = 1 + ACS_SBBS = 1 + ACS_SBSB = 1 + ACS_SBSS = 1 + ACS_SSBB = 1 + ACS_SSBS = 1 + ACS_SSSB = 1 + ACS_SSSS = 1 + ACS_STERLING = 1 + ACS_TTEE = 1 + ACS_UARROW = 1 + ACS_ULCORNER = 1 + ACS_URCORNER = 1 + ACS_VLINE = 1 + COLOR_BLACK = 1 + COLOR_BLUE = 1 + COLOR_CYAN = 1 + COLOR_GREEN = 1 + COLOR_MAGENTA = 1 + COLOR_RED = 1 + COLOR_WHITE = 1 + COLOR_YELLOW = 1 + """ + ) + + +register_module_extender(AstroidManager(), "curses", _curses_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_dataclasses.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_dataclasses.py new file mode 100644 index 0000000..769d9ee --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_dataclasses.py @@ -0,0 +1,467 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hook for the dataclasses library + +Support built-in dataclasses, pydantic.dataclasses, and marshmallow_dataclass-annotated +dataclasses. 
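+
+For example (illustrative), a class such as::
+
+    @dataclass
+    class Point:
+        x: int
+        y: int = 0
+
+is rewritten so that checkers see a synthesized
+``__init__(self, x: int, y: int = 0)`` and can infer the attribute types.
+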
References: +- https://docs.python.org/3/library/dataclasses.html +- https://pydantic-docs.helpmanual.io/usage/dataclasses/ +- https://lovasoa.github.io/marshmallow_dataclass/ + +""" +import sys +from typing import FrozenSet, Generator, List, Optional, Tuple, Union + +from astroid import context, inference_tip +from astroid.builder import parse +from astroid.const import PY37_PLUS, PY39_PLUS +from astroid.exceptions import ( + AstroidSyntaxError, + InferenceError, + MroError, + UseInferenceDefault, +) +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import ( + AnnAssign, + Assign, + AssignName, + Attribute, + Call, + Name, + NodeNG, + Subscript, + Unknown, +) +from astroid.nodes.scoped_nodes import ClassDef, FunctionDef +from astroid.util import Uninferable + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_FieldDefaultReturn = Union[ + None, Tuple[Literal["default"], NodeNG], Tuple[Literal["default_factory"], Call] +] + +DATACLASSES_DECORATORS = frozenset(("dataclass",)) +FIELD_NAME = "field" +DATACLASS_MODULES = frozenset( + ("dataclasses", "marshmallow_dataclass", "pydantic.dataclasses") +) +DEFAULT_FACTORY = "_HAS_DEFAULT_FACTORY" # based on typing.py + + +def is_decorated_with_dataclass(node, decorator_names=DATACLASSES_DECORATORS): + """Return True if a decorated node has a `dataclass` decorator applied.""" + if not isinstance(node, ClassDef) or not node.decorators: + return False + + return any( + _looks_like_dataclass_decorator(decorator_attribute, decorator_names) + for decorator_attribute in node.decorators.nodes + ) + + +def dataclass_transform(node: ClassDef) -> None: + """Rewrite a dataclass to be easily understood by pylint""" + node.is_dataclass = True + + for assign_node in _get_dataclass_attributes(node): + name = assign_node.target.name + + rhs_node = Unknown( + lineno=assign_node.lineno, + col_offset=assign_node.col_offset, + parent=assign_node, + ) + rhs_node = AstroidManager().visit_transforms(rhs_node) + node.instance_attrs[name] = [rhs_node] + + if not _check_generate_dataclass_init(node): + return + + try: + reversed_mro = list(reversed(node.mro())) + except MroError: + reversed_mro = [node] + + field_assigns = {} + field_order = [] + for klass in (k for k in reversed_mro if is_decorated_with_dataclass(k)): + for assign_node in _get_dataclass_attributes(klass, init=True): + name = assign_node.target.name + if name not in field_assigns: + field_order.append(name) + field_assigns[name] = assign_node + + init_str = _generate_dataclass_init([field_assigns[name] for name in field_order]) + try: + init_node = parse(init_str)["__init__"] + except AstroidSyntaxError: + pass + else: + init_node.parent = node + init_node.lineno, init_node.col_offset = None, None + node.locals["__init__"] = [init_node] + + root = node.root() + if DEFAULT_FACTORY not in root.locals: + new_assign = parse(f"{DEFAULT_FACTORY} = object()").body[0] + new_assign.parent = root + root.locals[DEFAULT_FACTORY] = [new_assign.targets[0]] + + +def _get_dataclass_attributes(node: ClassDef, init: bool = False) -> Generator: + """Yield the AnnAssign nodes of dataclass attributes for the node. + + If init is True, also include InitVars, but exclude attributes from calls to + field where init=False. 
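+
+    For instance (illustrative), with init=True in::
+
+        @dataclass
+        class A:
+            x: int
+            y: ClassVar[int] = 0
+            z: int = field(init=False, default=3)
+
+    only the ``x`` assignment is yielded: ``y`` is a ClassVar and ``z``
+    opts out of the generated __init__ via ``init=False``.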
+ """ + for assign_node in node.body: + if not isinstance(assign_node, AnnAssign) or not isinstance( + assign_node.target, AssignName + ): + continue + + if _is_class_var(assign_node.annotation): # type: ignore[arg-type] # annotation is never None + continue + + if init: + value = assign_node.value + if ( + isinstance(value, Call) + and _looks_like_dataclass_field_call(value, check_scope=False) + and any( + keyword.arg == "init" + and not keyword.value.bool_value() # type: ignore[union-attr] # value is never None + for keyword in value.keywords + ) + ): + continue + elif _is_init_var(assign_node.annotation): # type: ignore[arg-type] # annotation is never None + continue + + yield assign_node + + +def _check_generate_dataclass_init(node: ClassDef) -> bool: + """Return True if we should generate an __init__ method for node. + + This is True when: + - node doesn't define its own __init__ method + - the dataclass decorator was called *without* the keyword argument init=False + """ + if "__init__" in node.locals: + return False + + found = None + + for decorator_attribute in node.decorators.nodes: + if not isinstance(decorator_attribute, Call): + continue + + if _looks_like_dataclass_decorator(decorator_attribute): + found = decorator_attribute + + if found is None: + return True + + # Check for keyword arguments of the form init=False + return all( + keyword.arg != "init" + and keyword.value.bool_value() # type: ignore[union-attr] # value is never None + for keyword in found.keywords + ) + + +def _generate_dataclass_init(assigns: List[AnnAssign]) -> str: + """Return an init method for a dataclass given the targets.""" + target_names = [] + params = [] + assignments = [] + + for assign in assigns: + name, annotation, value = assign.target.name, assign.annotation, assign.value + target_names.append(name) + + if _is_init_var(annotation): # type: ignore[arg-type] # annotation is never None + init_var = True + if isinstance(annotation, Subscript): + annotation = annotation.slice + else: + # Cannot determine type annotation for parameter from InitVar + annotation = None + assignment_str = "" + else: + init_var = False + assignment_str = f"self.{name} = {name}" + + if annotation: + param_str = f"{name}: {annotation.as_string()}" + else: + param_str = name + + if value: + if isinstance(value, Call) and _looks_like_dataclass_field_call( + value, check_scope=False + ): + result = _get_field_default(value) + if result: + default_type, default_node = result + if default_type == "default": + param_str += f" = {default_node.as_string()}" + elif default_type == "default_factory": + param_str += f" = {DEFAULT_FACTORY}" + assignment_str = ( + f"self.{name} = {default_node.as_string()} " + f"if {name} is {DEFAULT_FACTORY} else {name}" + ) + else: + param_str += f" = {value.as_string()}" + + params.append(param_str) + if not init_var: + assignments.append(assignment_str) + + params_string = ", ".join(["self"] + params) + assignments_string = "\n ".join(assignments) if assignments else "pass" + return f"def __init__({params_string}) -> None:\n {assignments_string}" + + +def infer_dataclass_attribute( + node: Unknown, ctx: Optional[context.InferenceContext] = None +) -> Generator: + """Inference tip for an Unknown node that was dynamically generated to + represent a dataclass attribute. + + In the case that a default value is provided, that is inferred first. + Then, an Instance of the annotated class is yielded. 
+ """ + assign = node.parent + if not isinstance(assign, AnnAssign): + yield Uninferable + return + + annotation, value = assign.annotation, assign.value + if value is not None: + yield from value.infer(context=ctx) + if annotation is not None: + yield from _infer_instance_from_annotation(annotation, ctx=ctx) + else: + yield Uninferable + + +def infer_dataclass_field_call( + node: Call, ctx: Optional[context.InferenceContext] = None +) -> Generator: + """Inference tip for dataclass field calls.""" + if not isinstance(node.parent, (AnnAssign, Assign)): + raise UseInferenceDefault + result = _get_field_default(node) + if not result: + yield Uninferable + else: + default_type, default = result + if default_type == "default": + yield from default.infer(context=ctx) + else: + new_call = parse(default.as_string()).body[0].value + new_call.parent = node.parent + yield from new_call.infer(context=ctx) + + +def _looks_like_dataclass_decorator( + node: NodeNG, decorator_names: FrozenSet[str] = DATACLASSES_DECORATORS +) -> bool: + """Return True if node looks like a dataclass decorator. + + Uses inference to lookup the value of the node, and if that fails, + matches against specific names. + """ + if isinstance(node, Call): # decorator with arguments + node = node.func + try: + inferred = next(node.infer()) + except (InferenceError, StopIteration): + inferred = Uninferable + + if inferred is Uninferable: + if isinstance(node, Name): + return node.name in decorator_names + if isinstance(node, Attribute): + return node.attrname in decorator_names + + return False + + return ( + isinstance(inferred, FunctionDef) + and inferred.name in decorator_names + and inferred.root().name in DATACLASS_MODULES + ) + + +def _looks_like_dataclass_attribute(node: Unknown) -> bool: + """Return True if node was dynamically generated as the child of an AnnAssign + statement. + """ + parent = node.parent + if not parent: + return False + + scope = parent.scope() + return ( + isinstance(parent, AnnAssign) + and isinstance(scope, ClassDef) + and is_decorated_with_dataclass(scope) + ) + + +def _looks_like_dataclass_field_call(node: Call, check_scope: bool = True) -> bool: + """Return True if node is calling dataclasses field or Field + from an AnnAssign statement directly in the body of a ClassDef. + + If check_scope is False, skips checking the statement and body. + """ + if check_scope: + stmt = node.statement(future=True) + scope = stmt.scope() + if not ( + isinstance(stmt, AnnAssign) + and stmt.value is not None + and isinstance(scope, ClassDef) + and is_decorated_with_dataclass(scope) + ): + return False + + try: + inferred = next(node.func.infer()) + except (InferenceError, StopIteration): + return False + + if not isinstance(inferred, FunctionDef): + return False + + return inferred.name == FIELD_NAME and inferred.root().name in DATACLASS_MODULES + + +def _get_field_default(field_call: Call) -> _FieldDefaultReturn: + """Return a the default value of a field call, and the corresponding keyword argument name. + + field(default=...) results in the ... node + field(default_factory=...) results in a Call node with func ... and no arguments + + If neither or both arguments are present, return ("", None) instead, + indicating that there is not a valid default value. 
+ """ + default, default_factory = None, None + for keyword in field_call.keywords: + if keyword.arg == "default": + default = keyword.value + elif keyword.arg == "default_factory": + default_factory = keyword.value + + if default is not None and default_factory is None: + return "default", default + + if default is None and default_factory is not None: + new_call = Call( + lineno=field_call.lineno, + col_offset=field_call.col_offset, + parent=field_call.parent, + ) + new_call.postinit(func=default_factory) + return "default_factory", new_call + + return None + + +def _is_class_var(node: NodeNG) -> bool: + """Return True if node is a ClassVar, with or without subscripting.""" + if PY39_PLUS: + try: + inferred = next(node.infer()) + except (InferenceError, StopIteration): + return False + + return getattr(inferred, "name", "") == "ClassVar" + + # Before Python 3.9, inference returns typing._SpecialForm instead of ClassVar. + # Our backup is to inspect the node's structure. + return isinstance(node, Subscript) and ( + isinstance(node.value, Name) + and node.value.name == "ClassVar" + or isinstance(node.value, Attribute) + and node.value.attrname == "ClassVar" + ) + + +def _is_init_var(node: NodeNG) -> bool: + """Return True if node is an InitVar, with or without subscripting.""" + try: + inferred = next(node.infer()) + except (InferenceError, StopIteration): + return False + + return getattr(inferred, "name", "") == "InitVar" + + +# Allowed typing classes for which we support inferring instances +_INFERABLE_TYPING_TYPES = frozenset( + ( + "Dict", + "FrozenSet", + "List", + "Set", + "Tuple", + ) +) + + +def _infer_instance_from_annotation( + node: NodeNG, ctx: Optional[context.InferenceContext] = None +) -> Generator: + """Infer an instance corresponding to the type annotation represented by node. + + Currently has limited support for the typing module. 
+ """ + klass = None + try: + klass = next(node.infer(context=ctx)) + except (InferenceError, StopIteration): + yield Uninferable + if not isinstance(klass, ClassDef): + yield Uninferable + elif klass.root().name in { + "typing", + "_collections_abc", + "", + }: # "" because of synthetic nodes in brain_typing.py + if klass.name in _INFERABLE_TYPING_TYPES: + yield klass.instantiate_class() + else: + yield Uninferable + else: + yield klass.instantiate_class() + + +if PY37_PLUS: + AstroidManager().register_transform( + ClassDef, dataclass_transform, is_decorated_with_dataclass + ) + + AstroidManager().register_transform( + Call, + inference_tip(infer_dataclass_field_call, raise_on_overwrite=True), + _looks_like_dataclass_field_call, + ) + + AstroidManager().register_transform( + Unknown, + inference_tip(infer_dataclass_attribute, raise_on_overwrite=True), + _looks_like_dataclass_attribute, + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_dateutil.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_dateutil.py new file mode 100644 index 0000000..0d27135 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_dateutil.py @@ -0,0 +1,26 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for dateutil""" + +import textwrap + +from astroid.brain.helpers import register_module_extender +from astroid.builder import AstroidBuilder +from astroid.manager import AstroidManager + + +def dateutil_transform(): + return AstroidBuilder(AstroidManager()).string_build( + textwrap.dedent( + """ + import datetime + def parse(timestr, parserinfo=None, **kwargs): + return datetime.datetime() + """ + ) + ) + + +register_module_extender(AstroidManager(), "dateutil.parser", dateutil_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_fstrings.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_fstrings.py new file mode 100644 index 0000000..db7dd95 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_fstrings.py @@ -0,0 +1,48 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import collections.abc + +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import FormattedValue + + +def _clone_node_with_lineno(node, parent, lineno): + cls = node.__class__ + other_fields = node._other_fields + _astroid_fields = node._astroid_fields + init_params = {"lineno": lineno, "col_offset": node.col_offset, "parent": parent} + postinit_params = {param: getattr(node, param) for param in _astroid_fields} + if other_fields: + init_params.update({param: getattr(node, param) for param in other_fields}) + new_node = cls(**init_params) + if hasattr(node, "postinit") and _astroid_fields: + for param, child in postinit_params.items(): + if child and not isinstance(child, collections.abc.Sequence): + cloned_child = _clone_node_with_lineno( + node=child, lineno=new_node.lineno, parent=new_node + ) + postinit_params[param] = cloned_child + new_node.postinit(**postinit_params) + return new_node + + +def _transform_formatted_value(node): # pylint: disable=inconsistent-return-statements + if node.value and node.value.lineno == 1: + if 
node.lineno != node.value.lineno: + new_node = FormattedValue( + lineno=node.lineno, col_offset=node.col_offset, parent=node.parent + ) + new_value = _clone_node_with_lineno( + node=node.value, lineno=node.lineno, parent=new_node + ) + new_node.postinit(value=new_value, format_spec=node.format_spec) + return new_node + + +# TODO: this fix tries to *patch* http://bugs.python.org/issue29051 +# The problem is that FormattedValue.value, which is a Name node, +# has wrong line numbers, usually 1. This creates problems for pylint, +# which expects correct line numbers for things such as message control. +AstroidManager().register_transform(FormattedValue, _transform_formatted_value) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_functools.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_functools.py new file mode 100644 index 0000000..63333dc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_functools.py @@ -0,0 +1,159 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for understanding functools library module.""" +from functools import partial +from itertools import chain +from typing import Iterator, Optional + +from astroid import BoundMethod, arguments, extract_node, helpers, nodes, objects +from astroid.context import InferenceContext +from astroid.exceptions import InferenceError, UseInferenceDefault +from astroid.inference_tip import inference_tip +from astroid.interpreter import objectmodel +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import AssignName, Attribute, Call, Name +from astroid.nodes.scoped_nodes import FunctionDef +from astroid.util import Uninferable + +LRU_CACHE = "functools.lru_cache" + + +class LruWrappedModel(objectmodel.FunctionModel): + """Special attribute model for functions decorated with functools.lru_cache. + + The said decorators patches at decoration time some functions onto + the decorated function. + """ + + @property + def attr___wrapped__(self): + return self._instance + + @property + def attr_cache_info(self): + cache_info = extract_node( + """ + from functools import _CacheInfo + _CacheInfo(0, 0, 0, 0) + """ + ) + + class CacheInfoBoundMethod(BoundMethod): + def infer_call_result(self, caller, context=None): + yield helpers.safe_infer(cache_info) + + return CacheInfoBoundMethod(proxy=self._instance, bound=self._instance) + + @property + def attr_cache_clear(self): + node = extract_node("""def cache_clear(self): pass""") + return BoundMethod(proxy=node, bound=self._instance.parent.scope()) + + +def _transform_lru_cache(node, context=None) -> None: + # TODO: this is not ideal, since the node should be immutable, + # but due to https://github.com/PyCQA/astroid/issues/354, + # there's not much we can do now. + # Replacing the node would work partially, because, + # in pylint, the old node would still be available, leading + # to spurious false positives. 
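+    # Illustrative effect: after this transform, given
+    #
+    #     @functools.lru_cache()
+    #     def f(): ...
+    #
+    # astroid resolves f.cache_info(), f.cache_clear and f.__wrapped__
+    # instead of flagging them as missing attributes.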
+ node.special_attributes = LruWrappedModel()(node) + + +def _functools_partial_inference( + node: nodes.Call, context: Optional[InferenceContext] = None +) -> Iterator[objects.PartialFunction]: + call = arguments.CallSite.from_call(node, context=context) + number_of_positional = len(call.positional_arguments) + if number_of_positional < 1: + raise UseInferenceDefault("functools.partial takes at least one argument") + if number_of_positional == 1 and not call.keyword_arguments: + raise UseInferenceDefault( + "functools.partial needs at least to have some filled arguments" + ) + + partial_function = call.positional_arguments[0] + try: + inferred_wrapped_function = next(partial_function.infer(context=context)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + if inferred_wrapped_function is Uninferable: + raise UseInferenceDefault("Cannot infer the wrapped function") + if not isinstance(inferred_wrapped_function, FunctionDef): + raise UseInferenceDefault("The wrapped function is not a function") + + # Determine if the passed keywords into the callsite are supported + # by the wrapped function. + if not inferred_wrapped_function.args: + function_parameters = [] + else: + function_parameters = chain( + inferred_wrapped_function.args.args or (), + inferred_wrapped_function.args.posonlyargs or (), + inferred_wrapped_function.args.kwonlyargs or (), + ) + parameter_names = { + param.name for param in function_parameters if isinstance(param, AssignName) + } + if set(call.keyword_arguments) - parameter_names: + raise UseInferenceDefault("wrapped function received unknown parameters") + + partial_function = objects.PartialFunction( + call, + name=inferred_wrapped_function.name, + lineno=inferred_wrapped_function.lineno, + col_offset=inferred_wrapped_function.col_offset, + parent=node.parent, + ) + partial_function.postinit( + args=inferred_wrapped_function.args, + body=inferred_wrapped_function.body, + decorators=inferred_wrapped_function.decorators, + returns=inferred_wrapped_function.returns, + type_comment_returns=inferred_wrapped_function.type_comment_returns, + type_comment_args=inferred_wrapped_function.type_comment_args, + doc_node=inferred_wrapped_function.doc_node, + ) + return iter((partial_function,)) + + +def _looks_like_lru_cache(node): + """Check if the given function node is decorated with lru_cache.""" + if not node.decorators: + return False + for decorator in node.decorators.nodes: + if not isinstance(decorator, Call): + continue + if _looks_like_functools_member(decorator, "lru_cache"): + return True + return False + + +def _looks_like_functools_member(node, member) -> bool: + """Check if the given Call node is a functools.partial call""" + if isinstance(node.func, Name): + return node.func.name == member + if isinstance(node.func, Attribute): + return ( + node.func.attrname == member + and isinstance(node.func.expr, Name) + and node.func.expr.name == "functools" + ) + return False + + +_looks_like_partial = partial(_looks_like_functools_member, member="partial") + + +AstroidManager().register_transform( + FunctionDef, _transform_lru_cache, _looks_like_lru_cache +) + + +AstroidManager().register_transform( + Call, + inference_tip(_functools_partial_inference), + _looks_like_partial, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_gi.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_gi.py new file mode 100644 index 0000000..5728e2d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_gi.py @@ 
-0,0 +1,250 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the Python 2 GObject introspection bindings. + +Helps with understanding everything imported from 'gi.repository' +""" + +# pylint:disable=import-error,import-outside-toplevel + +import inspect +import itertools +import re +import sys +import warnings + +from astroid import nodes +from astroid.builder import AstroidBuilder +from astroid.exceptions import AstroidBuildingError +from astroid.manager import AstroidManager + +_inspected_modules = {} + +_identifier_re = r"^[A-Za-z_]\w*$" + +_special_methods = frozenset( + { + "__lt__", + "__le__", + "__eq__", + "__ne__", + "__ge__", + "__gt__", + "__iter__", + "__getitem__", + "__setitem__", + "__delitem__", + "__len__", + "__bool__", + "__nonzero__", + "__next__", + "__str__", + "__len__", + "__contains__", + "__enter__", + "__exit__", + "__repr__", + "__getattr__", + "__setattr__", + "__delattr__", + "__del__", + "__hash__", + } +) + + +def _gi_build_stub(parent): + """ + Inspect the passed module recursively and build stubs for functions, + classes, etc. + """ + classes = {} + functions = {} + constants = {} + methods = {} + for name in dir(parent): + if name.startswith("__") and name not in _special_methods: + continue + + # Check if this is a valid name in python + if not re.match(_identifier_re, name): + continue + + try: + obj = getattr(parent, name) + except Exception: # pylint: disable=broad-except + # gi.module.IntrospectionModule.__getattr__() can raise all kinds of things + # like ValueError, TypeError, NotImplementedError, RepositoryError, etc + continue + + if inspect.isclass(obj): + classes[name] = obj + elif inspect.isfunction(obj) or inspect.isbuiltin(obj): + functions[name] = obj + elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): + methods[name] = obj + elif ( + str(obj).startswith(", ) + # Only accept function calls with two constant arguments + if len(node.args) != 2: + return False + + if not all(isinstance(arg, nodes.Const) for arg in node.args): + return False + + func = node.func + if isinstance(func, nodes.Attribute): + if func.attrname != "require_version": + return False + if isinstance(func.expr, nodes.Name) and func.expr.name == "gi": + return True + + return False + + if isinstance(func, nodes.Name): + return func.name == "require_version" + + return False + + +def _register_require_version(node): + # Load the gi.require_version locally + try: + import gi + + gi.require_version(node.args[0].value, node.args[1].value) + except Exception: # pylint:disable=broad-except + pass + + return node + + +AstroidManager().register_failed_import_hook(_import_gi_module) +AstroidManager().register_transform( + nodes.Call, _register_require_version, _looks_like_require_version +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_hashlib.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_hashlib.py new file mode 100644 index 0000000..b628361 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_hashlib.py @@ -0,0 +1,58 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender 
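+# Note (added): the transform below registers mock hash classes so that,
+# e.g., hashlib.md5().hexdigest() is inferred as a str and .digest() as bytes.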
+from astroid.builder import parse +from astroid.const import PY39_PLUS +from astroid.manager import AstroidManager + + +def _hashlib_transform(): + maybe_usedforsecurity = ", usedforsecurity=True" if PY39_PLUS else "" + signature = f"value=''{maybe_usedforsecurity}" + template = """ + class %(name)s(object): + def __init__(self, %(signature)s): pass + def digest(self): + return %(digest)s + def copy(self): + return self + def update(self, value): pass + def hexdigest(self): + return '' + @property + def name(self): + return %(name)r + @property + def block_size(self): + return 1 + @property + def digest_size(self): + return 1 + """ + algorithms_with_signature = dict.fromkeys( + ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"], signature + ) + blake2b_signature = f"data=b'', *, digest_size=64, key=b'', salt=b'', \ + person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \ + node_depth=0, inner_size=0, last_node=False{maybe_usedforsecurity}" + blake2s_signature = f"data=b'', *, digest_size=32, key=b'', salt=b'', \ + person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \ + node_depth=0, inner_size=0, last_node=False{maybe_usedforsecurity}" + new_algorithms = dict.fromkeys( + ["sha3_224", "sha3_256", "sha3_384", "sha3_512", "shake_128", "shake_256"], + signature, + ) + algorithms_with_signature.update(new_algorithms) + algorithms_with_signature.update( + {"blake2b": blake2b_signature, "blake2s": blake2s_signature} + ) + classes = "".join( + template % {"name": hashfunc, "digest": 'b""', "signature": signature} + for hashfunc, signature in algorithms_with_signature.items() + ) + return parse(classes) + + +register_module_extender(AstroidManager(), "hashlib", _hashlib_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_http.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_http.py new file mode 100644 index 0000000..acf07bd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_http.py @@ -0,0 +1,211 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid brain hints for some of the `http` module.""" +import textwrap + +from astroid.brain.helpers import register_module_extender +from astroid.builder import AstroidBuilder +from astroid.manager import AstroidManager + + +def _http_transform(): + code = textwrap.dedent( + """ + from collections import namedtuple + _HTTPStatus = namedtuple('_HTTPStatus', 'value phrase description') + + class HTTPStatus: + + @property + def phrase(self): + return "" + @property + def value(self): + return 0 + @property + def description(self): + return "" + + # informational + CONTINUE = _HTTPStatus(100, 'Continue', 'Request received, please continue') + SWITCHING_PROTOCOLS = _HTTPStatus(101, 'Switching Protocols', + 'Switching to new protocol; obey Upgrade header') + PROCESSING = _HTTPStatus(102, 'Processing', '') + OK = _HTTPStatus(200, 'OK', 'Request fulfilled, document follows') + CREATED = _HTTPStatus(201, 'Created', 'Document created, URL follows') + ACCEPTED = _HTTPStatus(202, 'Accepted', + 'Request accepted, processing continues off-line') + NON_AUTHORITATIVE_INFORMATION = _HTTPStatus(203, + 'Non-Authoritative Information', 'Request fulfilled from cache') + NO_CONTENT = _HTTPStatus(204, 'No Content', 'Request fulfilled, nothing follows') + RESET_CONTENT =_HTTPStatus(205, 'Reset Content', 'Clear input 
form for further input') + PARTIAL_CONTENT = _HTTPStatus(206, 'Partial Content', 'Partial content follows') + MULTI_STATUS = _HTTPStatus(207, 'Multi-Status', '') + ALREADY_REPORTED = _HTTPStatus(208, 'Already Reported', '') + IM_USED = _HTTPStatus(226, 'IM Used', '') + MULTIPLE_CHOICES = _HTTPStatus(300, 'Multiple Choices', + 'Object has several resources -- see URI list') + MOVED_PERMANENTLY = _HTTPStatus(301, 'Moved Permanently', + 'Object moved permanently -- see URI list') + FOUND = _HTTPStatus(302, 'Found', 'Object moved temporarily -- see URI list') + SEE_OTHER = _HTTPStatus(303, 'See Other', 'Object moved -- see Method and URL list') + NOT_MODIFIED = _HTTPStatus(304, 'Not Modified', + 'Document has not changed since given time') + USE_PROXY = _HTTPStatus(305, 'Use Proxy', + 'You must use proxy specified in Location to access this resource') + TEMPORARY_REDIRECT = _HTTPStatus(307, 'Temporary Redirect', + 'Object moved temporarily -- see URI list') + PERMANENT_REDIRECT = _HTTPStatus(308, 'Permanent Redirect', + 'Object moved permanently -- see URI list') + BAD_REQUEST = _HTTPStatus(400, 'Bad Request', + 'Bad request syntax or unsupported method') + UNAUTHORIZED = _HTTPStatus(401, 'Unauthorized', + 'No permission -- see authorization schemes') + PAYMENT_REQUIRED = _HTTPStatus(402, 'Payment Required', + 'No payment -- see charging schemes') + FORBIDDEN = _HTTPStatus(403, 'Forbidden', + 'Request forbidden -- authorization will not help') + NOT_FOUND = _HTTPStatus(404, 'Not Found', + 'Nothing matches the given URI') + METHOD_NOT_ALLOWED = _HTTPStatus(405, 'Method Not Allowed', + 'Specified method is invalid for this resource') + NOT_ACCEPTABLE = _HTTPStatus(406, 'Not Acceptable', + 'URI not available in preferred format') + PROXY_AUTHENTICATION_REQUIRED = _HTTPStatus(407, + 'Proxy Authentication Required', + 'You must authenticate with this proxy before proceeding') + REQUEST_TIMEOUT = _HTTPStatus(408, 'Request Timeout', + 'Request timed out; try again later') + CONFLICT = _HTTPStatus(409, 'Conflict', 'Request conflict') + GONE = _HTTPStatus(410, 'Gone', + 'URI no longer exists and has been permanently removed') + LENGTH_REQUIRED = _HTTPStatus(411, 'Length Required', + 'Client must specify Content-Length') + PRECONDITION_FAILED = _HTTPStatus(412, 'Precondition Failed', + 'Precondition in headers is false') + REQUEST_ENTITY_TOO_LARGE = _HTTPStatus(413, 'Request Entity Too Large', + 'Entity is too large') + REQUEST_URI_TOO_LONG = _HTTPStatus(414, 'Request-URI Too Long', + 'URI is too long') + UNSUPPORTED_MEDIA_TYPE = _HTTPStatus(415, 'Unsupported Media Type', + 'Entity body in unsupported format') + REQUESTED_RANGE_NOT_SATISFIABLE = _HTTPStatus(416, + 'Requested Range Not Satisfiable', + 'Cannot satisfy request range') + EXPECTATION_FAILED = _HTTPStatus(417, 'Expectation Failed', + 'Expect condition could not be satisfied') + MISDIRECTED_REQUEST = _HTTPStatus(421, 'Misdirected Request', + 'Server is not able to produce a response') + UNPROCESSABLE_ENTITY = _HTTPStatus(422, 'Unprocessable Entity') + LOCKED = _HTTPStatus(423, 'Locked') + FAILED_DEPENDENCY = _HTTPStatus(424, 'Failed Dependency') + UPGRADE_REQUIRED = _HTTPStatus(426, 'Upgrade Required') + PRECONDITION_REQUIRED = _HTTPStatus(428, 'Precondition Required', + 'The origin server requires the request to be conditional') + TOO_MANY_REQUESTS = _HTTPStatus(429, 'Too Many Requests', + 'The user has sent too many requests in ' + 'a given amount of time ("rate limiting")') + REQUEST_HEADER_FIELDS_TOO_LARGE = _HTTPStatus(431, + 'Request 
Header Fields Too Large', + 'The server is unwilling to process the request because its header ' + 'fields are too large') + UNAVAILABLE_FOR_LEGAL_REASONS = _HTTPStatus(451, + 'Unavailable For Legal Reasons', + 'The server is denying access to the ' + 'resource as a consequence of a legal demand') + INTERNAL_SERVER_ERROR = _HTTPStatus(500, 'Internal Server Error', + 'Server got itself in trouble') + NOT_IMPLEMENTED = _HTTPStatus(501, 'Not Implemented', + 'Server does not support this operation') + BAD_GATEWAY = _HTTPStatus(502, 'Bad Gateway', + 'Invalid responses from another server/proxy') + SERVICE_UNAVAILABLE = _HTTPStatus(503, 'Service Unavailable', + 'The server cannot process the request due to a high load') + GATEWAY_TIMEOUT = _HTTPStatus(504, 'Gateway Timeout', + 'The gateway server did not receive a timely response') + HTTP_VERSION_NOT_SUPPORTED = _HTTPStatus(505, 'HTTP Version Not Supported', + 'Cannot fulfill request') + VARIANT_ALSO_NEGOTIATES = _HTTPStatus(506, 'Variant Also Negotiates') + INSUFFICIENT_STORAGE = _HTTPStatus(507, 'Insufficient Storage') + LOOP_DETECTED = _HTTPStatus(508, 'Loop Detected') + NOT_EXTENDED = _HTTPStatus(510, 'Not Extended') + NETWORK_AUTHENTICATION_REQUIRED = _HTTPStatus(511, + 'Network Authentication Required', + 'The client needs to authenticate to gain network access') + """ + ) + return AstroidBuilder(AstroidManager()).string_build(code) + + +def _http_client_transform(): + return AstroidBuilder(AstroidManager()).string_build( + textwrap.dedent( + """ + from http import HTTPStatus + + CONTINUE = HTTPStatus.CONTINUE + SWITCHING_PROTOCOLS = HTTPStatus.SWITCHING_PROTOCOLS + PROCESSING = HTTPStatus.PROCESSING + OK = HTTPStatus.OK + CREATED = HTTPStatus.CREATED + ACCEPTED = HTTPStatus.ACCEPTED + NON_AUTHORITATIVE_INFORMATION = HTTPStatus.NON_AUTHORITATIVE_INFORMATION + NO_CONTENT = HTTPStatus.NO_CONTENT + RESET_CONTENT = HTTPStatus.RESET_CONTENT + PARTIAL_CONTENT = HTTPStatus.PARTIAL_CONTENT + MULTI_STATUS = HTTPStatus.MULTI_STATUS + ALREADY_REPORTED = HTTPStatus.ALREADY_REPORTED + IM_USED = HTTPStatus.IM_USED + MULTIPLE_CHOICES = HTTPStatus.MULTIPLE_CHOICES + MOVED_PERMANENTLY = HTTPStatus.MOVED_PERMANENTLY + FOUND = HTTPStatus.FOUND + SEE_OTHER = HTTPStatus.SEE_OTHER + NOT_MODIFIED = HTTPStatus.NOT_MODIFIED + USE_PROXY = HTTPStatus.USE_PROXY + TEMPORARY_REDIRECT = HTTPStatus.TEMPORARY_REDIRECT + PERMANENT_REDIRECT = HTTPStatus.PERMANENT_REDIRECT + BAD_REQUEST = HTTPStatus.BAD_REQUEST + UNAUTHORIZED = HTTPStatus.UNAUTHORIZED + PAYMENT_REQUIRED = HTTPStatus.PAYMENT_REQUIRED + FORBIDDEN = HTTPStatus.FORBIDDEN + NOT_FOUND = HTTPStatus.NOT_FOUND + METHOD_NOT_ALLOWED = HTTPStatus.METHOD_NOT_ALLOWED + NOT_ACCEPTABLE = HTTPStatus.NOT_ACCEPTABLE + PROXY_AUTHENTICATION_REQUIRED = HTTPStatus.PROXY_AUTHENTICATION_REQUIRED + REQUEST_TIMEOUT = HTTPStatus.REQUEST_TIMEOUT + CONFLICT = HTTPStatus.CONFLICT + GONE = HTTPStatus.GONE + LENGTH_REQUIRED = HTTPStatus.LENGTH_REQUIRED + PRECONDITION_FAILED = HTTPStatus.PRECONDITION_FAILED + REQUEST_ENTITY_TOO_LARGE = HTTPStatus.REQUEST_ENTITY_TOO_LARGE + REQUEST_URI_TOO_LONG = HTTPStatus.REQUEST_URI_TOO_LONG + UNSUPPORTED_MEDIA_TYPE = HTTPStatus.UNSUPPORTED_MEDIA_TYPE + REQUESTED_RANGE_NOT_SATISFIABLE = HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE + EXPECTATION_FAILED = HTTPStatus.EXPECTATION_FAILED + UNPROCESSABLE_ENTITY = HTTPStatus.UNPROCESSABLE_ENTITY + LOCKED = HTTPStatus.LOCKED + FAILED_DEPENDENCY = HTTPStatus.FAILED_DEPENDENCY + UPGRADE_REQUIRED = HTTPStatus.UPGRADE_REQUIRED + PRECONDITION_REQUIRED = 
HTTPStatus.PRECONDITION_REQUIRED + TOO_MANY_REQUESTS = HTTPStatus.TOO_MANY_REQUESTS + REQUEST_HEADER_FIELDS_TOO_LARGE = HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE + INTERNAL_SERVER_ERROR = HTTPStatus.INTERNAL_SERVER_ERROR + NOT_IMPLEMENTED = HTTPStatus.NOT_IMPLEMENTED + BAD_GATEWAY = HTTPStatus.BAD_GATEWAY + SERVICE_UNAVAILABLE = HTTPStatus.SERVICE_UNAVAILABLE + GATEWAY_TIMEOUT = HTTPStatus.GATEWAY_TIMEOUT + HTTP_VERSION_NOT_SUPPORTED = HTTPStatus.HTTP_VERSION_NOT_SUPPORTED + VARIANT_ALSO_NEGOTIATES = HTTPStatus.VARIANT_ALSO_NEGOTIATES + INSUFFICIENT_STORAGE = HTTPStatus.INSUFFICIENT_STORAGE + LOOP_DETECTED = HTTPStatus.LOOP_DETECTED + NOT_EXTENDED = HTTPStatus.NOT_EXTENDED + NETWORK_AUTHENTICATION_REQUIRED = HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED + """ + ) + ) + + +register_module_extender(AstroidManager(), "http", _http_transform) +register_module_extender(AstroidManager(), "http.client", _http_client_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_hypothesis.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_hypothesis.py new file mode 100644 index 0000000..dae8361 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_hypothesis.py @@ -0,0 +1,55 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hook for the Hypothesis library. + +Without this hook pylint reports no-value-for-parameter for use of strategies +defined using the `@hypothesis.strategies.composite` decorator. For example: + + from hypothesis import strategies as st + + @st.composite + def a_strategy(draw): + return draw(st.integers()) + + a_strategy() + +""" +from astroid.manager import AstroidManager +from astroid.nodes.scoped_nodes import FunctionDef + +COMPOSITE_NAMES = ( + "composite", + "st.composite", + "strategies.composite", + "hypothesis.strategies.composite", +) + + +def is_decorated_with_st_composite(node): + """Return True if a decorated node has @st.composite applied.""" + if node.decorators and node.args.args and node.args.args[0].name == "draw": + for decorator_attribute in node.decorators.nodes: + if decorator_attribute.as_string() in COMPOSITE_NAMES: + return True + return False + + +def remove_draw_parameter_from_composite_strategy(node): + """Given that the FunctionDef is decorated with @st.composite, remove the + first argument (`draw`) - it's always supplied by Hypothesis so we don't + need to emit the no-value-for-parameter lint. 
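+
+    E.g. (illustrative) ``def a_strategy(draw, n): ...`` is treated as
+    ``def a_strategy(n): ...``, so a call like ``a_strategy(1)`` checks
+    cleanly.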
+ """ + del node.args.args[0] + del node.args.annotations[0] + del node.args.type_comment_args[0] + return node + + +AstroidManager().register_transform( + node_class=FunctionDef, + transform=remove_draw_parameter_from_composite_strategy, + predicate=is_decorated_with_st_composite, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_io.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_io.py new file mode 100644 index 0000000..9957ce9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_io.py @@ -0,0 +1,41 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid brain hints for some of the _io C objects.""" +from astroid.manager import AstroidManager +from astroid.nodes import ClassDef + +BUFFERED = {"BufferedWriter", "BufferedReader"} +TextIOWrapper = "TextIOWrapper" +FileIO = "FileIO" +BufferedWriter = "BufferedWriter" + + +def _generic_io_transform(node, name, cls): + """Transform the given name, by adding the given *class* as a member of the node.""" + + io_module = AstroidManager().ast_from_module_name("_io") + attribute_object = io_module[cls] + instance = attribute_object.instantiate_class() + node.locals[name] = [instance] + + +def _transform_text_io_wrapper(node): + # This is not always correct, since it can vary with the type of the descriptor, + # being stdout, stderr or stdin. But we cannot get access to the name of the + # stream, which is why we are using the BufferedWriter class as a default + # value + return _generic_io_transform(node, name="buffer", cls=BufferedWriter) + + +def _transform_buffered(node): + return _generic_io_transform(node, name="raw", cls=FileIO) + + +AstroidManager().register_transform( + ClassDef, _transform_buffered, lambda node: node.name in BUFFERED +) +AstroidManager().register_transform( + ClassDef, _transform_text_io_wrapper, lambda node: node.name == TextIOWrapper +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_mechanize.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_mechanize.py new file mode 100644 index 0000000..4c86fd9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_mechanize.py @@ -0,0 +1,83 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import AstroidBuilder +from astroid.manager import AstroidManager + + +def mechanize_transform(): + return AstroidBuilder(AstroidManager()).string_build( + """ + +class Browser(object): + def __getattr__(self, name): + return None + def __getitem__(self, name): + return None + def __setitem__(self, name, val): + return None + def back(self, n=1): + return None + def clear_history(self): + return None + def click(self, *args, **kwds): + return None + def click_link(self, link=None, **kwds): + return None + def close(self): + return None + def encoding(self): + return None + def find_link(self, text=None, text_regex=None, name=None, name_regex=None, url=None, url_regex=None, tag=None, predicate=None, nr=0): + return None + def follow_link(self, link=None, **kwds): + return None + def forms(self): + return None + def geturl(self): + return None + def 
global_form(self): + return None + def links(self, **kwds): + return None + def open_local_file(self, filename): + return None + def open(self, url, data=None, timeout=None): + return None + def open_novisit(self, url, data=None, timeout=None): + return None + def open_local_file(self, filename): + return None + def reload(self): + return None + def response(self): + return None + def select_form(self, name=None, predicate=None, nr=None, **attrs): + return None + def set_cookie(self, cookie_string): + return None + def set_handle_referer(self, handle): + return None + def set_header(self, header, value=None): + return None + def set_html(self, html, url="http://example.com/"): + return None + def set_response(self, response): + return None + def set_simple_cookie(self, name, value, domain, path='/'): + return None + def submit(self, *args, **kwds): + return None + def title(self): + return None + def viewing_html(self): + return None + def visit_response(self, response, request=None): + return None +""" + ) + + +register_module_extender(AstroidManager(), "mechanize", mechanize_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_multiprocessing.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_multiprocessing.py new file mode 100644 index 0000000..fc98a06 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_multiprocessing.py @@ -0,0 +1,106 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.bases import BoundMethod +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.exceptions import InferenceError +from astroid.manager import AstroidManager +from astroid.nodes.scoped_nodes import FunctionDef + + +def _multiprocessing_transform(): + module = parse( + """ + from multiprocessing.managers import SyncManager + def Manager(): + return SyncManager() + """ + ) + # Multiprocessing uses a getattr lookup inside contexts, + # in order to get the attributes they need. Since it's extremely + # dynamic, we use this approach to fake it. + node = parse( + """ + from multiprocessing.context import DefaultContext, BaseContext + default = DefaultContext() + base = BaseContext() + """ + ) + try: + context = next(node["default"].infer()) + base = next(node["base"].infer()) + except (InferenceError, StopIteration): + return module + + for node in (context, base): + for key, value in node.locals.items(): + if key.startswith("_"): + continue + + value = value[0] + if isinstance(value, FunctionDef): + # We need to rebound this, since otherwise + # it will have an extra argument (self). 
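+                # (Illustrative: a context method such as Process is
+                # rebound so multiprocessing.Process(...) infers without
+                # a spurious missing 'self' argument.)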
+ value = BoundMethod(value, node) + module[key] = value + return module + + +def _multiprocessing_managers_transform(): + return parse( + """ + import array + import threading + import multiprocessing.pool as pool + import queue + + class Namespace(object): + pass + + class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + + def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + + class SyncManager(object): + Queue = JoinableQueue = queue.Queue + Event = threading.Event + RLock = threading.RLock + BoundedSemaphore = threading.BoundedSemaphore + Condition = threading.Condition + Barrier = threading.Barrier + Pool = pool.Pool + list = list + dict = dict + Value = Value + Array = Array + Namespace = Namespace + __enter__ = lambda self: self + __exit__ = lambda *args: args + + def start(self, initializer=None, initargs=None): + pass + def shutdown(self): + pass + """ + ) + + +register_module_extender( + AstroidManager(), "multiprocessing.managers", _multiprocessing_managers_transform +) +register_module_extender( + AstroidManager(), "multiprocessing", _multiprocessing_transform +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_namedtuple_enum.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_namedtuple_enum.py new file mode 100644 index 0000000..dbd9667 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_namedtuple_enum.py @@ -0,0 +1,574 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the Python standard library.""" + +import functools +import keyword +from textwrap import dedent +from typing import Iterator, List, Optional, Tuple + +import astroid +from astroid import arguments, inference_tip, nodes, util +from astroid.builder import AstroidBuilder, extract_node +from astroid.context import InferenceContext +from astroid.exceptions import ( + AstroidTypeError, + AstroidValueError, + InferenceError, + MroError, + UseInferenceDefault, +) +from astroid.manager import AstroidManager + +TYPING_NAMEDTUPLE_BASENAMES = {"NamedTuple", "typing.NamedTuple"} +ENUM_BASE_NAMES = { + "Enum", + "IntEnum", + "enum.Enum", + "enum.IntEnum", + "IntFlag", + "enum.IntFlag", +} + + +def _infer_first(node, context): + if node is util.Uninferable: + raise UseInferenceDefault + try: + value = next(node.infer(context=context)) + except StopIteration as exc: + raise InferenceError from exc + if value is util.Uninferable: + raise UseInferenceDefault() + return value + + +def _find_func_form_arguments(node, context): + def _extract_namedtuple_arg_or_keyword( # pylint: disable=inconsistent-return-statements + position, key_name=None + ): + if len(args) > position: + return _infer_first(args[position], context) + if key_name and key_name in found_keywords: + return _infer_first(found_keywords[key_name], context) + + args = node.args + keywords = node.keywords + found_keywords = ( + {keyword.arg: keyword.value for keyword in keywords} if keywords else {} + ) + + name = _extract_namedtuple_arg_or_keyword(position=0, key_name="typename") + names = 
_extract_namedtuple_arg_or_keyword(position=1, key_name="field_names") + if name and names: + return name.value, names + + raise UseInferenceDefault() + + +def infer_func_form( + node: nodes.Call, + base_type: nodes.NodeNG, + context: Optional[InferenceContext] = None, + enum: bool = False, +) -> Tuple[nodes.ClassDef, str, List[str]]: + """Specific inference function for namedtuple or Python 3 enum.""" + # node is a Call node, class name as first argument and generated class + # attributes as second argument + + # namedtuple or enums list of attributes can be a list of strings or a + # whitespace-separate string + try: + name, names = _find_func_form_arguments(node, context) + try: + attributes = names.value.replace(",", " ").split() + except AttributeError as exc: + # Handle attributes of NamedTuples + if not enum: + attributes = [ + _infer_first(const, context).value for const in names.elts + ] + + # Handle attributes of Enums + else: + # Enums supports either iterator of (name, value) pairs + # or mappings. + if hasattr(names, "items") and isinstance(names.items, list): + attributes = [ + _infer_first(const[0], context).value + for const in names.items + if isinstance(const[0], nodes.Const) + ] + elif hasattr(names, "elts"): + # Enums can support either ["a", "b", "c"] + # or [("a", 1), ("b", 2), ...], but they can't + # be mixed. + if all(isinstance(const, nodes.Tuple) for const in names.elts): + attributes = [ + _infer_first(const.elts[0], context).value + for const in names.elts + if isinstance(const, nodes.Tuple) + ] + else: + attributes = [ + _infer_first(const, context).value for const in names.elts + ] + else: + raise AttributeError from exc + if not attributes: + raise AttributeError from exc + except (AttributeError, InferenceError) as exc: + raise UseInferenceDefault from exc + + if not enum: + # namedtuple maps sys.intern(str()) over over field_names + attributes = [str(attr) for attr in attributes] + # XXX this should succeed *unless* __str__/__repr__ is incorrect or throws + # in which case we should not have inferred these values and raised earlier + attributes = [attr for attr in attributes if " " not in attr] + + # If we can't infer the name of the class, don't crash, up to this point + # we know it is a namedtuple anyway. 
+ name = name or "Uninferable" + # we want to return a Class node instance with proper attributes set + class_node = nodes.ClassDef(name) + # A typical ClassDef automatically adds its name to the parent scope, + # but doing so causes problems, so defer setting parent until after init + # see: https://github.com/PyCQA/pylint/issues/5982 + class_node.parent = node.parent + class_node.postinit( + # set base class=tuple + bases=[base_type], + body=[], + decorators=None, + ) + # XXX add __init__(*attributes) method + for attr in attributes: + fake_node = nodes.EmptyNode() + fake_node.parent = class_node + fake_node.attrname = attr + class_node.instance_attrs[attr] = [fake_node] + return class_node, name, attributes + + +def _has_namedtuple_base(node): + """Predicate for class inference tip + + :type node: ClassDef + :rtype: bool + """ + return set(node.basenames) & TYPING_NAMEDTUPLE_BASENAMES + + +def _looks_like(node, name): + func = node.func + if isinstance(func, nodes.Attribute): + return func.attrname == name + if isinstance(func, nodes.Name): + return func.name == name + return False + + +_looks_like_namedtuple = functools.partial(_looks_like, name="namedtuple") +_looks_like_enum = functools.partial(_looks_like, name="Enum") +_looks_like_typing_namedtuple = functools.partial(_looks_like, name="NamedTuple") + + +def infer_named_tuple( + node: nodes.Call, context: Optional[InferenceContext] = None +) -> Iterator[nodes.ClassDef]: + """Specific inference function for namedtuple Call node""" + tuple_base_name = nodes.Name(name="tuple", parent=node.root()) + class_node, name, attributes = infer_func_form( + node, tuple_base_name, context=context + ) + call_site = arguments.CallSite.from_call(node, context=context) + node = extract_node("import collections; collections.namedtuple") + try: + + func = next(node.infer()) + except StopIteration as e: + raise InferenceError(node=node) from e + try: + rename = next(call_site.infer_argument(func, "rename", context)).bool_value() + except (InferenceError, StopIteration): + rename = False + + try: + attributes = _check_namedtuple_attributes(name, attributes, rename) + except AstroidTypeError as exc: + raise UseInferenceDefault("TypeError: " + str(exc)) from exc + except AstroidValueError as exc: + raise UseInferenceDefault("ValueError: " + str(exc)) from exc + + replace_args = ", ".join(f"{arg}=None" for arg in attributes) + field_def = ( + " {name} = property(lambda self: self[{index:d}], " + "doc='Alias for field number {index:d}')" + ) + field_defs = "\n".join( + field_def.format(name=name, index=index) + for index, name in enumerate(attributes) + ) + fake = AstroidBuilder(AstroidManager()).string_build( + f""" +class {name}(tuple): + __slots__ = () + _fields = {attributes!r} + def _asdict(self): + return self.__dict__ + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + return new(cls, iterable) + def _replace(self, {replace_args}): + return self + def __getnewargs__(self): + return tuple(self) +{field_defs} + """ + ) + class_node.locals["_asdict"] = fake.body[0].locals["_asdict"] + class_node.locals["_make"] = fake.body[0].locals["_make"] + class_node.locals["_replace"] = fake.body[0].locals["_replace"] + class_node.locals["_fields"] = fake.body[0].locals["_fields"] + for attr in attributes: + class_node.locals[attr] = fake.body[0].locals[attr] + # we use UseInferenceDefault, we can't be a generator so return an iterator + return iter([class_node]) + + +def _get_renamed_namedtuple_attributes(field_names): + names = 
list(field_names) + seen = set() + for i, name in enumerate(field_names): + if ( + not all(c.isalnum() or c == "_" for c in name) + or keyword.iskeyword(name) + or not name + or name[0].isdigit() + or name.startswith("_") + or name in seen + ): + names[i] = "_%d" % i + seen.add(name) + return tuple(names) + + +def _check_namedtuple_attributes(typename, attributes, rename=False): + attributes = tuple(attributes) + if rename: + attributes = _get_renamed_namedtuple_attributes(attributes) + + # The following snippet is derived from the CPython Lib/collections/__init__.py sources + # + for name in (typename,) + attributes: + if not isinstance(name, str): + raise AstroidTypeError("Type names and field names must be strings") + if not name.isidentifier(): + raise AstroidValueError( + "Type names and field names must be valid" + f"identifiers: {name!r}" + ) + if keyword.iskeyword(name): + raise AstroidValueError( + f"Type names and field names cannot be a keyword: {name!r}" + ) + + seen = set() + for name in attributes: + if name.startswith("_") and not rename: + raise AstroidValueError( + f"Field names cannot start with an underscore: {name!r}" + ) + if name in seen: + raise AstroidValueError(f"Encountered duplicate field name: {name!r}") + seen.add(name) + # + + return attributes + + +def infer_enum(node, context=None): + """Specific inference function for enum Call node.""" + enum_meta = extract_node( + """ + class EnumMeta(object): + 'docstring' + def __call__(self, node): + class EnumAttribute(object): + name = '' + value = 0 + return EnumAttribute() + def __iter__(self): + class EnumAttribute(object): + name = '' + value = 0 + return [EnumAttribute()] + def __reversed__(self): + class EnumAttribute(object): + name = '' + value = 0 + return (EnumAttribute, ) + def __next__(self): + return next(iter(self)) + def __getitem__(self, attr): + class Value(object): + @property + def name(self): + return '' + @property + def value(self): + return attr + + return Value() + __members__ = [''] + """ + ) + class_node = infer_func_form(node, enum_meta, context=context, enum=True)[0] + return iter([class_node.instantiate_class()]) + + +INT_FLAG_ADDITION_METHODS = """ + def __or__(self, other): + return {name}(self.value | other.value) + def __and__(self, other): + return {name}(self.value & other.value) + def __xor__(self, other): + return {name}(self.value ^ other.value) + def __add__(self, other): + return {name}(self.value + other.value) + def __div__(self, other): + return {name}(self.value / other.value) + def __invert__(self): + return {name}(~self.value) + def __mul__(self, other): + return {name}(self.value * other.value) +""" + + +def infer_enum_class(node): + """Specific inference for enums.""" + for basename in (b for cls in node.mro() for b in cls.basenames): + if basename not in ENUM_BASE_NAMES: + continue + if node.root().name == "enum": + # Skip if the class is directly from enum module. 
+ break + dunder_members = {} + target_names = set() + for local, values in node.locals.items(): + if any(not isinstance(value, nodes.AssignName) for value in values): + continue + + stmt = values[0].statement(future=True) + if isinstance(stmt, nodes.Assign): + if isinstance(stmt.targets[0], nodes.Tuple): + targets = stmt.targets[0].itered() + else: + targets = stmt.targets + elif isinstance(stmt, nodes.AnnAssign): + targets = [stmt.target] + else: + continue + + inferred_return_value = None + if isinstance(stmt, nodes.Assign): + if isinstance(stmt.value, nodes.Const): + if isinstance(stmt.value.value, str): + inferred_return_value = repr(stmt.value.value) + else: + inferred_return_value = stmt.value.value + else: + inferred_return_value = stmt.value.as_string() + + new_targets = [] + for target in targets: + if isinstance(target, nodes.Starred): + continue + target_names.add(target.name) + # Replace all the assignments with our mocked class. + classdef = dedent( + """ + class {name}({types}): + @property + def value(self): + return {return_value} + @property + def name(self): + return "{name}" + """.format( + name=target.name, + types=", ".join(node.basenames), + return_value=inferred_return_value, + ) + ) + if "IntFlag" in basename: + # Alright, we need to add some additional methods. + # Unfortunately we still can't infer the resulting objects as + # Enum members, but once we'll be able to do that, the following + # should result in some nice symbolic execution + classdef += INT_FLAG_ADDITION_METHODS.format(name=target.name) + + fake = AstroidBuilder( + AstroidManager(), apply_transforms=False + ).string_build(classdef)[target.name] + fake.parent = target.parent + for method in node.mymethods(): + fake.locals[method.name] = [method] + new_targets.append(fake.instantiate_class()) + dunder_members[local] = fake + node.locals[local] = new_targets + members = nodes.Dict(parent=node) + members.postinit( + [ + (nodes.Const(k, parent=members), nodes.Name(v.name, parent=members)) + for k, v in dunder_members.items() + ] + ) + node.locals["__members__"] = [members] + # The enum.Enum class itself defines two @DynamicClassAttribute data-descriptors + # "name" and "value" (which we override in the mocked class for each enum member + # above). When dealing with inference of an arbitrary instance of the enum + # class, e.g. in a method defined in the class body like: + # class SomeEnum(enum.Enum): + # def method(self): + # self.name # <- here + # In the absence of an enum member called "name" or "value", these attributes + # should resolve to the descriptor on that particular instance, i.e. enum member. + # For "value", we have no idea what that should be, but for "name", we at least + # know that it should be a string, so infer that as a guess. 
+ if "name" not in target_names: + code = dedent( + """ + @property + def name(self): + return '' + """ + ) + name_dynamicclassattr = AstroidBuilder(AstroidManager()).string_build(code)[ + "name" + ] + node.locals["name"] = [name_dynamicclassattr] + break + return node + + +def infer_typing_namedtuple_class(class_node, context=None): + """Infer a subclass of typing.NamedTuple""" + # Check if it has the corresponding bases + annassigns_fields = [ + annassign.target.name + for annassign in class_node.body + if isinstance(annassign, nodes.AnnAssign) + ] + code = dedent( + """ + from collections import namedtuple + namedtuple({typename!r}, {fields!r}) + """ + ).format(typename=class_node.name, fields=",".join(annassigns_fields)) + node = extract_node(code) + try: + generated_class_node = next(infer_named_tuple(node, context)) + except StopIteration as e: + raise InferenceError(node=node, context=context) from e + for method in class_node.mymethods(): + generated_class_node.locals[method.name] = [method] + + for body_node in class_node.body: + if isinstance(body_node, nodes.Assign): + for target in body_node.targets: + attr = target.name + generated_class_node.locals[attr] = class_node.locals[attr] + elif isinstance(body_node, nodes.ClassDef): + generated_class_node.locals[body_node.name] = [body_node] + + return iter((generated_class_node,)) + + +def infer_typing_namedtuple_function(node, context=None): + """ + Starting with python3.9, NamedTuple is a function of the typing module. + The class NamedTuple is build dynamically through a call to `type` during + initialization of the `_NamedTuple` variable. + """ + klass = extract_node( + """ + from typing import _NamedTuple + _NamedTuple + """ + ) + return klass.infer(context) + + +def infer_typing_namedtuple( + node: nodes.Call, context: Optional[InferenceContext] = None +) -> Iterator[nodes.ClassDef]: + """Infer a typing.NamedTuple(...) call.""" + # This is essentially a namedtuple with different arguments + # so we extract the args and infer a named tuple. 
+ try: + func = next(node.func.infer()) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if func.qname() != "typing.NamedTuple": + raise UseInferenceDefault + + if len(node.args) != 2: + raise UseInferenceDefault + + if not isinstance(node.args[1], (nodes.List, nodes.Tuple)): + raise UseInferenceDefault + + names = [] + for elt in node.args[1].elts: + if not isinstance(elt, (nodes.List, nodes.Tuple)): + raise UseInferenceDefault + if len(elt.elts) != 2: + raise UseInferenceDefault + names.append(elt.elts[0].as_string()) + + typename = node.args[0].as_string() + if names: + field_names = f"({','.join(names)},)" + else: + field_names = "''" + node = extract_node(f"namedtuple({typename}, {field_names})") + return infer_named_tuple(node, context) + + +def _is_enum_subclass(cls: astroid.ClassDef) -> bool: + """Return whether cls is a subclass of an Enum.""" + try: + return any( + klass.name in ENUM_BASE_NAMES + and getattr(klass.root(), "name", None) == "enum" + for klass in cls.mro() + ) + except MroError: + return False + + +AstroidManager().register_transform( + nodes.Call, inference_tip(infer_named_tuple), _looks_like_namedtuple +) +AstroidManager().register_transform( + nodes.Call, inference_tip(infer_enum), _looks_like_enum +) +AstroidManager().register_transform( + nodes.ClassDef, infer_enum_class, predicate=_is_enum_subclass +) +AstroidManager().register_transform( + nodes.ClassDef, inference_tip(infer_typing_namedtuple_class), _has_namedtuple_base +) +AstroidManager().register_transform( + nodes.FunctionDef, + inference_tip(infer_typing_namedtuple_function), + lambda node: node.name == "NamedTuple" + and getattr(node.root(), "name", None) == "typing", +) +AstroidManager().register_transform( + nodes.Call, inference_tip(infer_typing_namedtuple), _looks_like_typing_namedtuple +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_nose.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_nose.py new file mode 100644 index 0000000..38e2229 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_nose.py @@ -0,0 +1,79 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Hooks for nose library.""" + +import re +import textwrap + +import astroid.builder +from astroid.brain.helpers import register_module_extender +from astroid.exceptions import InferenceError +from astroid.manager import AstroidManager + +_BUILDER = astroid.builder.AstroidBuilder(AstroidManager()) + + +CAPITALS = re.compile("([A-Z])") + + +def _pep8(name, caps=CAPITALS): + return caps.sub(lambda m: "_" + m.groups()[0].lower(), name) + + +def _nose_tools_functions(): + """Get an iterator of names and bound methods.""" + module = _BUILDER.string_build( + textwrap.dedent( + """ + import unittest + + class Test(unittest.TestCase): + pass + a = Test() + """ + ) + ) + try: + case = next(module["a"].infer()) + except (InferenceError, StopIteration): + return + for method in case.methods(): + if method.name.startswith("assert") and "_" not in method.name: + pep8_name = _pep8(method.name) + yield pep8_name, astroid.BoundMethod(method, case) + if method.name == "assertEqual": + # nose also exports assert_equals. 
+ yield "assert_equals", astroid.BoundMethod(method, case) + + +def _nose_tools_transform(node): + for method_name, method in _nose_tools_functions(): + node.locals[method_name] = [method] + + +def _nose_tools_trivial_transform(): + """Custom transform for the nose.tools module.""" + stub = _BUILDER.string_build("""__all__ = []""") + all_entries = ["ok_", "eq_"] + + for pep8_name, method in _nose_tools_functions(): + all_entries.append(pep8_name) + stub[pep8_name] = method + + # Update the __all__ variable, since nose.tools + # does this manually with .append. + all_assign = stub["__all__"].parent + all_object = astroid.List(all_entries) + all_object.parent = all_assign + all_assign.value = all_object + return stub + + +register_module_extender( + AstroidManager(), "nose.tools.trivial", _nose_tools_trivial_transform +) +AstroidManager().register_transform( + astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools" +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py new file mode 100644 index 0000000..19d4822 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py @@ -0,0 +1,22 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy.core.fromnumeric module.""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def numpy_core_fromnumeric_transform(): + return parse( + """ + def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None): + return numpy.ndarray([0, 0]) + """ + ) + + +register_module_extender( + AstroidManager(), "numpy.core.fromnumeric", numpy_core_fromnumeric_transform +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_function_base.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_function_base.py new file mode 100644 index 0000000..31d53cb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_function_base.py @@ -0,0 +1,29 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy.core.function_base module.""" + +import functools + +from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member +from astroid.inference_tip import inference_tip +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import Attribute + +METHODS_TO_BE_INFERRED = { + "linspace": """def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + return numpy.ndarray([0, 0])""", + "logspace": """def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): + return numpy.ndarray([0, 0])""", + "geomspace": """def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + return numpy.ndarray([0, 0])""", +} + +for func_name, func_src in METHODS_TO_BE_INFERRED.items(): + inference_function = functools.partial(infer_numpy_member, func_src) + AstroidManager().register_transform( + Attribute, + inference_tip(inference_function), + 
functools.partial(looks_like_numpy_member, func_name), + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_multiarray.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_multiarray.py new file mode 100644 index 0000000..487ec47 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_multiarray.py @@ -0,0 +1,95 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy.core.multiarray module.""" + +import functools + +from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.inference_tip import inference_tip +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import Attribute, Name + + +def numpy_core_multiarray_transform(): + return parse( + """ + # different functions defined in multiarray.py + def inner(a, b): + return numpy.ndarray([0, 0]) + + def vdot(a, b): + return numpy.ndarray([0, 0]) + """ + ) + + +register_module_extender( + AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform +) + + +METHODS_TO_BE_INFERRED = { + "array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0): + return numpy.ndarray([0, 0])""", + "dot": """def dot(a, b, out=None): + return numpy.ndarray([0, 0])""", + "empty_like": """def empty_like(a, dtype=None, order='K', subok=True): + return numpy.ndarray((0, 0))""", + "concatenate": """def concatenate(arrays, axis=None, out=None): + return numpy.ndarray((0, 0))""", + "where": """def where(condition, x=None, y=None): + return numpy.ndarray([0, 0])""", + "empty": """def empty(shape, dtype=float, order='C'): + return numpy.ndarray([0, 0])""", + "bincount": """def bincount(x, weights=None, minlength=0): + return numpy.ndarray([0, 0])""", + "busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None): + return numpy.ndarray([0, 0])""", + "busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None): + return numpy.ndarray([0, 0])""", + "can_cast": """def can_cast(from_, to, casting='safe'): + return True""", + "copyto": """def copyto(dst, src, casting='same_kind', where=True): + return None""", + "datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'): + return numpy.ndarray([0, 0])""", + "is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None): + return numpy.ndarray([0, 0])""", + "lexsort": """def lexsort(keys, axis=-1): + return numpy.ndarray([0, 0])""", + "may_share_memory": """def may_share_memory(a, b, max_work=None): + return True""", + # Not yet available because dtype is not yet present in those brains + # "min_scalar_type": """def min_scalar_type(a): + # return numpy.dtype('int16')""", + "packbits": """def packbits(a, axis=None, bitorder='big'): + return numpy.ndarray([0, 0])""", + # Not yet available because dtype is not yet present in those brains + # "result_type": """def result_type(*arrays_and_dtypes): + # return numpy.dtype('int16')""", + "shares_memory": """def shares_memory(a, b, max_work=None): + return True""", + "unpackbits": """def unpackbits(a, 
axis=None, count=None, bitorder='big'): + return numpy.ndarray([0, 0])""", + "unravel_index": """def unravel_index(indices, shape, order='C'): + return (numpy.ndarray([0, 0]),)""", + "zeros": """def zeros(shape, dtype=float, order='C'): + return numpy.ndarray([0, 0])""", +} + +for method_name, function_src in METHODS_TO_BE_INFERRED.items(): + inference_function = functools.partial(infer_numpy_member, function_src) + AstroidManager().register_transform( + Attribute, + inference_tip(inference_function), + functools.partial(looks_like_numpy_member, method_name), + ) + AstroidManager().register_transform( + Name, + inference_tip(inference_function), + functools.partial(looks_like_numpy_member, method_name), + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numeric.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numeric.py new file mode 100644 index 0000000..140d81a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numeric.py @@ -0,0 +1,46 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy.core.numeric module.""" + +import functools + +from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.inference_tip import inference_tip +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import Attribute + + +def numpy_core_numeric_transform(): + return parse( + """ + # different functions defined in numeric.py + import numpy + def zeros_like(a, dtype=None, order='K', subok=True, shape=None): return numpy.ndarray((0, 0)) + def ones_like(a, dtype=None, order='K', subok=True, shape=None): return numpy.ndarray((0, 0)) + def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): return numpy.ndarray((0, 0)) + """ + ) + + +register_module_extender( + AstroidManager(), "numpy.core.numeric", numpy_core_numeric_transform +) + + +METHODS_TO_BE_INFERRED = { + "ones": """def ones(shape, dtype=None, order='C'): + return numpy.ndarray([0, 0])""" +} + + +for method_name, function_src in METHODS_TO_BE_INFERRED.items(): + inference_function = functools.partial(infer_numpy_member, function_src) + AstroidManager().register_transform( + Attribute, + inference_tip(inference_function), + functools.partial(looks_like_numpy_member, method_name), + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numerictypes.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numerictypes.py new file mode 100644 index 0000000..245296e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_numerictypes.py @@ -0,0 +1,263 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +# TODO(hippo91) : correct the methods signature. 
+ +"""Astroid hooks for numpy.core.numerictypes module.""" +from astroid.brain.brain_numpy_utils import numpy_supports_type_hints +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def numpy_core_numerictypes_transform(): + # TODO: Uniformize the generic API with the ndarray one. + # According to numpy doc the generic object should expose + # the same API than ndarray. This has been done here partially + # through the astype method. + generic_src = """ + class generic(object): + def __init__(self, value): + self.T = np.ndarray([0, 0]) + self.base = None + self.data = None + self.dtype = None + self.flags = None + # Should be a numpy.flatiter instance but not available for now + # Putting an array instead so that iteration and indexing are authorized + self.flat = np.ndarray([0, 0]) + self.imag = None + self.itemsize = None + self.nbytes = None + self.ndim = None + self.real = None + self.size = None + self.strides = None + + def all(self): return uninferable + def any(self): return uninferable + def argmax(self): return uninferable + def argmin(self): return uninferable + def argsort(self): return uninferable + def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): return np.ndarray([0, 0]) + def base(self): return uninferable + def byteswap(self): return uninferable + def choose(self): return uninferable + def clip(self): return uninferable + def compress(self): return uninferable + def conj(self): return uninferable + def conjugate(self): return uninferable + def copy(self): return uninferable + def cumprod(self): return uninferable + def cumsum(self): return uninferable + def data(self): return uninferable + def diagonal(self): return uninferable + def dtype(self): return uninferable + def dump(self): return uninferable + def dumps(self): return uninferable + def fill(self): return uninferable + def flags(self): return uninferable + def flat(self): return uninferable + def flatten(self): return uninferable + def getfield(self): return uninferable + def imag(self): return uninferable + def item(self): return uninferable + def itemset(self): return uninferable + def itemsize(self): return uninferable + def max(self): return uninferable + def mean(self): return uninferable + def min(self): return uninferable + def nbytes(self): return uninferable + def ndim(self): return uninferable + def newbyteorder(self): return uninferable + def nonzero(self): return uninferable + def prod(self): return uninferable + def ptp(self): return uninferable + def put(self): return uninferable + def ravel(self): return uninferable + def real(self): return uninferable + def repeat(self): return uninferable + def reshape(self): return uninferable + def resize(self): return uninferable + def round(self): return uninferable + def searchsorted(self): return uninferable + def setfield(self): return uninferable + def setflags(self): return uninferable + def shape(self): return uninferable + def size(self): return uninferable + def sort(self): return uninferable + def squeeze(self): return uninferable + def std(self): return uninferable + def strides(self): return uninferable + def sum(self): return uninferable + def swapaxes(self): return uninferable + def take(self): return uninferable + def tobytes(self): return uninferable + def tofile(self): return uninferable + def tolist(self): return uninferable + def tostring(self): return uninferable + def trace(self): return uninferable + def transpose(self): 
return uninferable + def var(self): return uninferable + def view(self): return uninferable + """ + if numpy_supports_type_hints(): + generic_src += """ + @classmethod + def __class_getitem__(cls, value): + return cls + """ + return parse( + generic_src + + """ + class dtype(object): + def __init__(self, obj, align=False, copy=False): + self.alignment = None + self.base = None + self.byteorder = None + self.char = None + self.descr = None + self.fields = None + self.flags = None + self.hasobject = None + self.isalignedstruct = None + self.isbuiltin = None + self.isnative = None + self.itemsize = None + self.kind = None + self.metadata = None + self.name = None + self.names = None + self.num = None + self.shape = None + self.str = None + self.subdtype = None + self.type = None + + def newbyteorder(self, new_order='S'): return uninferable + def __neg__(self): return uninferable + + class busdaycalendar(object): + def __init__(self, weekmask='1111100', holidays=None): + self.holidays = None + self.weekmask = None + + class flexible(generic): pass + class bool_(generic): pass + class number(generic): + def __neg__(self): return uninferable + class datetime64(generic): + def __init__(self, nb, unit=None): pass + + + class void(flexible): + def __init__(self, *args, **kwargs): + self.base = None + self.dtype = None + self.flags = None + def getfield(self): return uninferable + def setfield(self): return uninferable + + + class character(flexible): pass + + + class integer(number): + def __init__(self, value): + self.denominator = None + self.numerator = None + + + class inexact(number): pass + + + class str_(str, character): + def maketrans(self, x, y=None, z=None): return uninferable + + + class bytes_(bytes, character): + def fromhex(self, string): return uninferable + def maketrans(self, frm, to): return uninferable + + + class signedinteger(integer): pass + + + class unsignedinteger(integer): pass + + + class complexfloating(inexact): pass + + + class floating(inexact): pass + + + class float64(floating, float): + def fromhex(self, string): return uninferable + + + class uint64(unsignedinteger): pass + class complex64(complexfloating): pass + class int16(signedinteger): pass + class float96(floating): pass + class int8(signedinteger): pass + class uint32(unsignedinteger): pass + class uint8(unsignedinteger): pass + class _typedict(dict): pass + class complex192(complexfloating): pass + class timedelta64(signedinteger): + def __init__(self, nb, unit=None): pass + class int32(signedinteger): pass + class uint16(unsignedinteger): pass + class float32(floating): pass + class complex128(complexfloating, complex): pass + class float16(floating): pass + class int64(signedinteger): pass + + buffer_type = memoryview + bool8 = bool_ + byte = int8 + bytes0 = bytes_ + cdouble = complex128 + cfloat = complex128 + clongdouble = complex192 + clongfloat = complex192 + complex_ = complex128 + csingle = complex64 + double = float64 + float_ = float64 + half = float16 + int0 = int32 + int_ = int32 + intc = int32 + intp = int32 + long = int32 + longcomplex = complex192 + longdouble = float96 + longfloat = float96 + longlong = int64 + object0 = object_ + object_ = object_ + short = int16 + single = float32 + singlecomplex = complex64 + str0 = str_ + string_ = bytes_ + ubyte = uint8 + uint = uint32 + uint0 = uint32 + uintc = uint32 + uintp = uint32 + ulonglong = uint64 + unicode = str_ + unicode_ = str_ + ushort = uint16 + void0 = void + """ + ) + + +register_module_extender( + AstroidManager(), 
"numpy.core.numerictypes", numpy_core_numerictypes_transform +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_umath.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_umath.py new file mode 100644 index 0000000..42dfdfa --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_core_umath.py @@ -0,0 +1,154 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +# Note: starting with version 1.18 numpy module has `__getattr__` method which prevent +# `pylint` to emit `no-member` message for all numpy's attributes. (see pylint's module +# typecheck in `_emit_no_member` function) + +"""Astroid hooks for numpy.core.umath module.""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def numpy_core_umath_transform(): + ufunc_optional_keyword_arguments = ( + """out=None, where=True, casting='same_kind', order='K', """ + """dtype=None, subok=True""" + ) + return parse( + """ + class FakeUfunc: + def __init__(self): + self.__doc__ = str() + self.__name__ = str() + self.nin = 0 + self.nout = 0 + self.nargs = 0 + self.ntypes = 0 + self.types = None + self.identity = None + self.signature = None + + @classmethod + def reduce(cls, a, axis=None, dtype=None, out=None): + return numpy.ndarray([0, 0]) + + @classmethod + def accumulate(cls, array, axis=None, dtype=None, out=None): + return numpy.ndarray([0, 0]) + + @classmethod + def reduceat(cls, a, indices, axis=None, dtype=None, out=None): + return numpy.ndarray([0, 0]) + + @classmethod + def outer(cls, A, B, **kwargs): + return numpy.ndarray([0, 0]) + + @classmethod + def at(cls, a, indices, b=None): + return numpy.ndarray([0, 0]) + + class FakeUfuncOneArg(FakeUfunc): + def __call__(self, x, {opt_args:s}): + return numpy.ndarray([0, 0]) + + class FakeUfuncOneArgBis(FakeUfunc): + def __call__(self, x, {opt_args:s}): + return numpy.ndarray([0, 0]), numpy.ndarray([0, 0]) + + class FakeUfuncTwoArgs(FakeUfunc): + def __call__(self, x1, x2, {opt_args:s}): + return numpy.ndarray([0, 0]) + + # Constants + e = 2.718281828459045 + euler_gamma = 0.5772156649015329 + + # One arg functions with optional kwargs + arccos = FakeUfuncOneArg() + arccosh = FakeUfuncOneArg() + arcsin = FakeUfuncOneArg() + arcsinh = FakeUfuncOneArg() + arctan = FakeUfuncOneArg() + arctanh = FakeUfuncOneArg() + cbrt = FakeUfuncOneArg() + conj = FakeUfuncOneArg() + conjugate = FakeUfuncOneArg() + cosh = FakeUfuncOneArg() + deg2rad = FakeUfuncOneArg() + degrees = FakeUfuncOneArg() + exp2 = FakeUfuncOneArg() + expm1 = FakeUfuncOneArg() + fabs = FakeUfuncOneArg() + frexp = FakeUfuncOneArgBis() + isfinite = FakeUfuncOneArg() + isinf = FakeUfuncOneArg() + log = FakeUfuncOneArg() + log1p = FakeUfuncOneArg() + log2 = FakeUfuncOneArg() + logical_not = FakeUfuncOneArg() + modf = FakeUfuncOneArgBis() + negative = FakeUfuncOneArg() + positive = FakeUfuncOneArg() + rad2deg = FakeUfuncOneArg() + radians = FakeUfuncOneArg() + reciprocal = FakeUfuncOneArg() + rint = FakeUfuncOneArg() + sign = FakeUfuncOneArg() + signbit = FakeUfuncOneArg() + sinh = FakeUfuncOneArg() + spacing = FakeUfuncOneArg() + square = FakeUfuncOneArg() + tan = FakeUfuncOneArg() + tanh = FakeUfuncOneArg() + trunc = FakeUfuncOneArg() + + # Two args functions with optional kwargs + add = 
FakeUfuncTwoArgs() + bitwise_and = FakeUfuncTwoArgs() + bitwise_or = FakeUfuncTwoArgs() + bitwise_xor = FakeUfuncTwoArgs() + copysign = FakeUfuncTwoArgs() + divide = FakeUfuncTwoArgs() + divmod = FakeUfuncTwoArgs() + equal = FakeUfuncTwoArgs() + float_power = FakeUfuncTwoArgs() + floor_divide = FakeUfuncTwoArgs() + fmax = FakeUfuncTwoArgs() + fmin = FakeUfuncTwoArgs() + fmod = FakeUfuncTwoArgs() + greater = FakeUfuncTwoArgs() + gcd = FakeUfuncTwoArgs() + hypot = FakeUfuncTwoArgs() + heaviside = FakeUfuncTwoArgs() + lcm = FakeUfuncTwoArgs() + ldexp = FakeUfuncTwoArgs() + left_shift = FakeUfuncTwoArgs() + less = FakeUfuncTwoArgs() + logaddexp = FakeUfuncTwoArgs() + logaddexp2 = FakeUfuncTwoArgs() + logical_and = FakeUfuncTwoArgs() + logical_or = FakeUfuncTwoArgs() + logical_xor = FakeUfuncTwoArgs() + maximum = FakeUfuncTwoArgs() + minimum = FakeUfuncTwoArgs() + multiply = FakeUfuncTwoArgs() + nextafter = FakeUfuncTwoArgs() + not_equal = FakeUfuncTwoArgs() + power = FakeUfuncTwoArgs() + remainder = FakeUfuncTwoArgs() + right_shift = FakeUfuncTwoArgs() + subtract = FakeUfuncTwoArgs() + true_divide = FakeUfuncTwoArgs() + """.format( + opt_args=ufunc_optional_keyword_arguments + ) + ) + + +register_module_extender( + AstroidManager(), "numpy.core.umath", numpy_core_umath_transform +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ma.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ma.py new file mode 100644 index 0000000..241665c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ma.py @@ -0,0 +1,28 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy ma module""" + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def numpy_ma_transform(): + """ + Infer the call of the masked_where function + + :param node: node to infer + :param context: inference context + """ + return parse( + """ + import numpy.ma + def masked_where(condition, a, copy=True): + return numpy.ma.masked_array(a, mask=[]) + """ + ) + + +register_module_extender(AstroidManager(), "numpy.ma", numpy_ma_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ndarray.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ndarray.py new file mode 100644 index 0000000..f9b611e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_ndarray.py @@ -0,0 +1,159 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for numpy ndarray class.""" +from astroid.brain.brain_numpy_utils import numpy_supports_type_hints +from astroid.builder import extract_node +from astroid.inference_tip import inference_tip +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import Attribute + + +def infer_numpy_ndarray(node, context=None): + ndarray = """ + class ndarray(object): + def __init__(self, shape, dtype=float, buffer=None, offset=0, + strides=None, order=None): + self.T = numpy.ndarray([0, 0]) + self.base = None + self.ctypes = None + self.data = None + self.dtype = None + self.flags = None + # Should 
be a numpy.flatiter instance but not available for now + # Putting an array instead so that iteration and indexing are authorized + self.flat = np.ndarray([0, 0]) + self.imag = np.ndarray([0, 0]) + self.itemsize = None + self.nbytes = None + self.ndim = None + self.real = np.ndarray([0, 0]) + self.shape = numpy.ndarray([0, 0]) + self.size = None + self.strides = None + + def __abs__(self): return numpy.ndarray([0, 0]) + def __add__(self, value): return numpy.ndarray([0, 0]) + def __and__(self, value): return numpy.ndarray([0, 0]) + def __array__(self, dtype=None): return numpy.ndarray([0, 0]) + def __array_wrap__(self, obj): return numpy.ndarray([0, 0]) + def __contains__(self, key): return True + def __copy__(self): return numpy.ndarray([0, 0]) + def __deepcopy__(self, memo): return numpy.ndarray([0, 0]) + def __divmod__(self, value): return (numpy.ndarray([0, 0]), numpy.ndarray([0, 0])) + def __eq__(self, value): return numpy.ndarray([0, 0]) + def __float__(self): return 0. + def __floordiv__(self): return numpy.ndarray([0, 0]) + def __ge__(self, value): return numpy.ndarray([0, 0]) + def __getitem__(self, key): return uninferable + def __gt__(self, value): return numpy.ndarray([0, 0]) + def __iadd__(self, value): return numpy.ndarray([0, 0]) + def __iand__(self, value): return numpy.ndarray([0, 0]) + def __ifloordiv__(self, value): return numpy.ndarray([0, 0]) + def __ilshift__(self, value): return numpy.ndarray([0, 0]) + def __imod__(self, value): return numpy.ndarray([0, 0]) + def __imul__(self, value): return numpy.ndarray([0, 0]) + def __int__(self): return 0 + def __invert__(self): return numpy.ndarray([0, 0]) + def __ior__(self, value): return numpy.ndarray([0, 0]) + def __ipow__(self, value): return numpy.ndarray([0, 0]) + def __irshift__(self, value): return numpy.ndarray([0, 0]) + def __isub__(self, value): return numpy.ndarray([0, 0]) + def __itruediv__(self, value): return numpy.ndarray([0, 0]) + def __ixor__(self, value): return numpy.ndarray([0, 0]) + def __le__(self, value): return numpy.ndarray([0, 0]) + def __len__(self): return 1 + def __lshift__(self, value): return numpy.ndarray([0, 0]) + def __lt__(self, value): return numpy.ndarray([0, 0]) + def __matmul__(self, value): return numpy.ndarray([0, 0]) + def __mod__(self, value): return numpy.ndarray([0, 0]) + def __mul__(self, value): return numpy.ndarray([0, 0]) + def __ne__(self, value): return numpy.ndarray([0, 0]) + def __neg__(self): return numpy.ndarray([0, 0]) + def __or__(self, value): return numpy.ndarray([0, 0]) + def __pos__(self): return numpy.ndarray([0, 0]) + def __pow__(self): return numpy.ndarray([0, 0]) + def __repr__(self): return str() + def __rshift__(self): return numpy.ndarray([0, 0]) + def __setitem__(self, key, value): return uninferable + def __str__(self): return str() + def __sub__(self, value): return numpy.ndarray([0, 0]) + def __truediv__(self, value): return numpy.ndarray([0, 0]) + def __xor__(self, value): return numpy.ndarray([0, 0]) + def all(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def any(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def argmax(self, axis=None, out=None): return np.ndarray([0, 0]) + def argmin(self, axis=None, out=None): return np.ndarray([0, 0]) + def argpartition(self, kth, axis=-1, kind='introselect', order=None): return np.ndarray([0, 0]) + def argsort(self, axis=-1, kind='quicksort', order=None): return np.ndarray([0, 0]) + def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): return 
np.ndarray([0, 0]) + def byteswap(self, inplace=False): return np.ndarray([0, 0]) + def choose(self, choices, out=None, mode='raise'): return np.ndarray([0, 0]) + def clip(self, min=None, max=None, out=None): return np.ndarray([0, 0]) + def compress(self, condition, axis=None, out=None): return np.ndarray([0, 0]) + def conj(self): return np.ndarray([0, 0]) + def conjugate(self): return np.ndarray([0, 0]) + def copy(self, order='C'): return np.ndarray([0, 0]) + def cumprod(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0]) + def cumsum(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0]) + def diagonal(self, offset=0, axis1=0, axis2=1): return np.ndarray([0, 0]) + def dot(self, b, out=None): return np.ndarray([0, 0]) + def dump(self, file): return None + def dumps(self): return str() + def fill(self, value): return None + def flatten(self, order='C'): return np.ndarray([0, 0]) + def getfield(self, dtype, offset=0): return np.ndarray([0, 0]) + def item(self, *args): return uninferable + def itemset(self, *args): return None + def max(self, axis=None, out=None): return np.ndarray([0, 0]) + def mean(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def min(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def newbyteorder(self, new_order='S'): return np.ndarray([0, 0]) + def nonzero(self): return (1,) + def partition(self, kth, axis=-1, kind='introselect', order=None): return None + def prod(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def ptp(self, axis=None, out=None): return np.ndarray([0, 0]) + def put(self, indices, values, mode='raise'): return None + def ravel(self, order='C'): return np.ndarray([0, 0]) + def repeat(self, repeats, axis=None): return np.ndarray([0, 0]) + def reshape(self, shape, order='C'): return np.ndarray([0, 0]) + def resize(self, new_shape, refcheck=True): return None + def round(self, decimals=0, out=None): return np.ndarray([0, 0]) + def searchsorted(self, v, side='left', sorter=None): return np.ndarray([0, 0]) + def setfield(self, val, dtype, offset=0): return None + def setflags(self, write=None, align=None, uic=None): return None + def sort(self, axis=-1, kind='quicksort', order=None): return None + def squeeze(self, axis=None): return np.ndarray([0, 0]) + def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0]) + def sum(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) + def swapaxes(self, axis1, axis2): return np.ndarray([0, 0]) + def take(self, indices, axis=None, out=None, mode='raise'): return np.ndarray([0, 0]) + def tobytes(self, order='C'): return b'' + def tofile(self, fid, sep="", format="%s"): return None + def tolist(self, ): return [] + def tostring(self, order='C'): return b'' + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return np.ndarray([0, 0]) + def transpose(self, *axes): return np.ndarray([0, 0]) + def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0]) + def view(self, dtype=None, type=None): return np.ndarray([0, 0]) + """ + if numpy_supports_type_hints(): + ndarray += """ + @classmethod + def __class_getitem__(cls, value): + return cls + """ + node = extract_node(ndarray) + return node.infer(context=context) + + +def _looks_like_numpy_ndarray(node): + return isinstance(node, Attribute) and node.attrname == "ndarray" + + +AstroidManager().register_transform( + Attribute, + 
inference_tip(infer_numpy_ndarray), + _looks_like_numpy_ndarray, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_random_mtrand.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_random_mtrand.py new file mode 100644 index 0000000..b1f0d45 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_random_mtrand.py @@ -0,0 +1,71 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +# TODO(hippo91) : correct the functions return types +"""Astroid hooks for numpy.random.mtrand module.""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def numpy_random_mtrand_transform(): + return parse( + """ + def beta(a, b, size=None): return uninferable + def binomial(n, p, size=None): return uninferable + def bytes(length): return uninferable + def chisquare(df, size=None): return uninferable + def choice(a, size=None, replace=True, p=None): return uninferable + def dirichlet(alpha, size=None): return uninferable + def exponential(scale=1.0, size=None): return uninferable + def f(dfnum, dfden, size=None): return uninferable + def gamma(shape, scale=1.0, size=None): return uninferable + def geometric(p, size=None): return uninferable + def get_state(): return uninferable + def gumbel(loc=0.0, scale=1.0, size=None): return uninferable + def hypergeometric(ngood, nbad, nsample, size=None): return uninferable + def laplace(loc=0.0, scale=1.0, size=None): return uninferable + def logistic(loc=0.0, scale=1.0, size=None): return uninferable + def lognormal(mean=0.0, sigma=1.0, size=None): return uninferable + def logseries(p, size=None): return uninferable + def multinomial(n, pvals, size=None): return uninferable + def multivariate_normal(mean, cov, size=None): return uninferable + def negative_binomial(n, p, size=None): return uninferable + def noncentral_chisquare(df, nonc, size=None): return uninferable + def noncentral_f(dfnum, dfden, nonc, size=None): return uninferable + def normal(loc=0.0, scale=1.0, size=None): return uninferable + def pareto(a, size=None): return uninferable + def permutation(x): return uninferable + def poisson(lam=1.0, size=None): return uninferable + def power(a, size=None): return uninferable + def rand(*args): return uninferable + def randint(low, high=None, size=None, dtype='l'): + import numpy + return numpy.ndarray((1,1)) + def randn(*args): return uninferable + def random(size=None): return uninferable + def random_integers(low, high=None, size=None): return uninferable + def random_sample(size=None): return uninferable + def rayleigh(scale=1.0, size=None): return uninferable + def seed(seed=None): return uninferable + def set_state(state): return uninferable + def shuffle(x): return uninferable + def standard_cauchy(size=None): return uninferable + def standard_exponential(size=None): return uninferable + def standard_gamma(shape, size=None): return uninferable + def standard_normal(size=None): return uninferable + def standard_t(df, size=None): return uninferable + def triangular(left, mode, right, size=None): return uninferable + def uniform(low=0.0, high=1.0, size=None): return uninferable + def vonmises(mu, kappa, size=None): return uninferable + def wald(mean, scale, size=None): return uninferable + def weibull(a, size=None): return 
uninferable + def zipf(a, size=None): return uninferable + """ + ) + + +register_module_extender( + AstroidManager(), "numpy.random.mtrand", numpy_random_mtrand_transform +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_utils.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_utils.py new file mode 100644 index 0000000..c32d6d6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_numpy_utils.py @@ -0,0 +1,85 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Different utilities for the numpy brains""" +from typing import Tuple + +from astroid.builder import extract_node +from astroid.nodes.node_classes import Attribute, Import, Name, NodeNG + +# Class subscript is available in numpy starting with version 1.20.0 +NUMPY_VERSION_TYPE_HINTS_SUPPORT = ("1", "20", "0") + + +def numpy_supports_type_hints() -> bool: + """ + Returns True if numpy supports type hints + """ + np_ver = _get_numpy_version() + return np_ver and np_ver > NUMPY_VERSION_TYPE_HINTS_SUPPORT + + +def _get_numpy_version() -> Tuple[str, str, str]: + """ + Return the numpy version number if numpy can be imported. Otherwise returns + ('0', '0', '0') + """ + try: + import numpy # pylint: disable=import-outside-toplevel + + return tuple(numpy.version.version.split(".")) + except ImportError: + return ("0", "0", "0") + + +def infer_numpy_member(src, node, context=None): + node = extract_node(src) + return node.infer(context=context) + + +def _is_a_numpy_module(node: Name) -> bool: + """ + Returns True if the node is a representation of a numpy module. + + For example in : + import numpy as np + x = np.linspace(1, 2) + The node is a representation of the numpy module. + + :param node: node to test + :return: True if the node is a representation of the numpy module. + """ + module_nickname = node.name + potential_import_target = [ + x for x in node.lookup(module_nickname)[1] if isinstance(x, Import) + ] + return any( + ("numpy", module_nickname) in target.names or ("numpy", None) in target.names + for target in potential_import_target + ) + + +def looks_like_numpy_member(member_name: str, node: NodeNG) -> bool: + """ + Returns True if the node is a member of numpy whose + name is member_name. 
+ + :param member_name: name of the member + :param node: node to test + :return: True if the node is a member of numpy + """ + if ( + isinstance(node, Attribute) + and node.attrname == member_name + and isinstance(node.expr, Name) + and _is_a_numpy_module(node.expr) + ): + return True + if ( + isinstance(node, Name) + and node.name == member_name + and node.root().name.startswith("numpy") + ): + return True + return False diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_pkg_resources.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_pkg_resources.py new file mode 100644 index 0000000..689dd74 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_pkg_resources.py @@ -0,0 +1,70 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid import parse +from astroid.brain.helpers import register_module_extender +from astroid.manager import AstroidManager + + +def pkg_resources_transform(): + return parse( + """ +def require(*requirements): + return pkg_resources.working_set.require(*requirements) + +def run_script(requires, script_name): + return pkg_resources.working_set.run_script(requires, script_name) + +def iter_entry_points(group, name=None): + return pkg_resources.working_set.iter_entry_points(group, name) + +def resource_exists(package_or_requirement, resource_name): + return get_provider(package_or_requirement).has_resource(resource_name) + +def resource_isdir(package_or_requirement, resource_name): + return get_provider(package_or_requirement).resource_isdir( + resource_name) + +def resource_filename(package_or_requirement, resource_name): + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name) + +def resource_stream(package_or_requirement, resource_name): + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name) + +def resource_string(package_or_requirement, resource_name): + return get_provider(package_or_requirement).get_resource_string( + self, resource_name) + +def resource_listdir(package_or_requirement, resource_name): + return get_provider(package_or_requirement).resource_listdir( + resource_name) + +def extraction_error(): + pass + +def get_cache_path(archive_name, names=()): + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + return target_path + +def postprocess(tempname, filename): + pass + +def set_extraction_path(path): + pass + +def cleanup_resources(force=False): + pass + +def get_distribution(dist): + return Distribution(dist) + +_namespace_packages = {} +""" + ) + + +register_module_extender(AstroidManager(), "pkg_resources", pkg_resources_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_pytest.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_pytest.py new file mode 100644 index 0000000..78c9779 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_pytest.py @@ -0,0 +1,83 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for pytest.""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import 
AstroidBuilder +from astroid.manager import AstroidManager + + +def pytest_transform(): + return AstroidBuilder(AstroidManager()).string_build( + """ + +try: + import _pytest.mark + import _pytest.recwarn + import _pytest.runner + import _pytest.python + import _pytest.skipping + import _pytest.assertion +except ImportError: + pass +else: + deprecated_call = _pytest.recwarn.deprecated_call + warns = _pytest.recwarn.warns + + exit = _pytest.runner.exit + fail = _pytest.runner.fail + skip = _pytest.runner.skip + importorskip = _pytest.runner.importorskip + + xfail = _pytest.skipping.xfail + mark = _pytest.mark.MarkGenerator() + raises = _pytest.python.raises + + # New in pytest 3.0 + try: + approx = _pytest.python.approx + register_assert_rewrite = _pytest.assertion.register_assert_rewrite + except AttributeError: + pass + + +# Moved in pytest 3.0 + +try: + import _pytest.freeze_support + freeze_includes = _pytest.freeze_support.freeze_includes +except ImportError: + try: + import _pytest.genscript + freeze_includes = _pytest.genscript.freeze_includes + except ImportError: + pass + +try: + import _pytest.debugging + set_trace = _pytest.debugging.pytestPDB().set_trace +except ImportError: + try: + import _pytest.pdb + set_trace = _pytest.pdb.pytestPDB().set_trace + except ImportError: + pass + +try: + import _pytest.fixtures + fixture = _pytest.fixtures.fixture + yield_fixture = _pytest.fixtures.yield_fixture +except ImportError: + try: + import _pytest.python + fixture = _pytest.python.fixture + yield_fixture = _pytest.python.yield_fixture + except ImportError: + pass +""" + ) + + +register_module_extender(AstroidManager(), "pytest", pytest_transform) +register_module_extender(AstroidManager(), "py.test", pytest_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_qt.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_qt.py new file mode 100644 index 0000000..6b97bf6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_qt.py @@ -0,0 +1,82 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the PyQT library.""" + +from astroid import nodes, parse +from astroid.brain.helpers import register_module_extender +from astroid.builder import AstroidBuilder +from astroid.manager import AstroidManager + + +def _looks_like_signal(node, signal_name="pyqtSignal"): + if "__class__" in node.instance_attrs: + try: + cls = node.instance_attrs["__class__"][0] + return cls.name == signal_name + except AttributeError: + # return False if the cls does not have a name attribute + pass + return False + + +def transform_pyqt_signal(node: nodes.FunctionDef) -> None: + module = parse( + """ + _UNSET = object() + + class pyqtSignal(object): + def connect(self, slot, type=None, no_receiver_check=False): + pass + def disconnect(self, slot=_UNSET): + pass + def emit(self, *args): + pass + """ + ) + signal_cls: nodes.ClassDef = module["pyqtSignal"] + node.instance_attrs["emit"] = [signal_cls["emit"]] + node.instance_attrs["disconnect"] = [signal_cls["disconnect"]] + node.instance_attrs["connect"] = [signal_cls["connect"]] + + +def transform_pyside_signal(node: nodes.FunctionDef) -> None: + module = parse( + """ + class NotPySideSignal(object): + def connect(self, receiver, type=None): + pass + def disconnect(self, receiver): + pass + def emit(self, *args): + pass + """ 
+ ) + signal_cls: nodes.ClassDef = module["NotPySideSignal"] + node.instance_attrs["connect"] = [signal_cls["connect"]] + node.instance_attrs["disconnect"] = [signal_cls["disconnect"]] + node.instance_attrs["emit"] = [signal_cls["emit"]] + + +def pyqt4_qtcore_transform(): + return AstroidBuilder(AstroidManager()).string_build( + """ + +def SIGNAL(signal_name): pass + +class QObject(object): + def emit(self, signal): pass +""" + ) + + +register_module_extender(AstroidManager(), "PyQt4.QtCore", pyqt4_qtcore_transform) +AstroidManager().register_transform( + nodes.FunctionDef, transform_pyqt_signal, _looks_like_signal +) +AstroidManager().register_transform( + nodes.ClassDef, + transform_pyside_signal, + lambda node: node.qname() in {"PySide.QtCore.Signal", "PySide2.QtCore.Signal"}, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_random.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_random.py new file mode 100644 index 0000000..e66aa81 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_random.py @@ -0,0 +1,87 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import random + +from astroid import helpers +from astroid.exceptions import UseInferenceDefault +from astroid.inference_tip import inference_tip +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import ( + Attribute, + Call, + Const, + EvaluatedObject, + List, + Name, + Set, + Tuple, +) + +ACCEPTED_ITERABLES_FOR_SAMPLE = (List, Set, Tuple) + + +def _clone_node_with_lineno(node, parent, lineno): + if isinstance(node, EvaluatedObject): + node = node.original + cls = node.__class__ + other_fields = node._other_fields + _astroid_fields = node._astroid_fields + init_params = {"lineno": lineno, "col_offset": node.col_offset, "parent": parent} + postinit_params = {param: getattr(node, param) for param in _astroid_fields} + if other_fields: + init_params.update({param: getattr(node, param) for param in other_fields}) + new_node = cls(**init_params) + if hasattr(node, "postinit") and _astroid_fields: + new_node.postinit(**postinit_params) + return new_node + + +def infer_random_sample(node, context=None): + if len(node.args) != 2: + raise UseInferenceDefault + + length = node.args[1] + if not isinstance(length, Const): + raise UseInferenceDefault + if not isinstance(length.value, int): + raise UseInferenceDefault + + inferred_sequence = helpers.safe_infer(node.args[0], context=context) + if not inferred_sequence: + raise UseInferenceDefault + + if not isinstance(inferred_sequence, ACCEPTED_ITERABLES_FOR_SAMPLE): + raise UseInferenceDefault + + if length.value > len(inferred_sequence.elts): + # In this case, this will raise a ValueError + raise UseInferenceDefault + + try: + elts = random.sample(inferred_sequence.elts, length.value) + except ValueError as exc: + raise UseInferenceDefault from exc + + new_node = List(lineno=node.lineno, col_offset=node.col_offset, parent=node.scope()) + new_elts = [ + _clone_node_with_lineno(elt, parent=new_node, lineno=new_node.lineno) + for elt in elts + ] + new_node.postinit(new_elts) + return iter((new_node,)) + + +def _looks_like_random_sample(node): + func = node.func + if isinstance(func, Attribute): + return func.attrname == "sample" + if isinstance(func, Name): + return func.name == "sample" + return False + + 
+AstroidManager().register_transform( + Call, inference_tip(infer_random_sample), _looks_like_random_sample +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_re.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_re.py new file mode 100644 index 0000000..0dd346a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_re.py @@ -0,0 +1,90 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from typing import Optional + +from astroid import context, inference_tip, nodes +from astroid.brain.helpers import register_module_extender +from astroid.builder import _extract_single_node, parse +from astroid.const import PY37_PLUS, PY39_PLUS +from astroid.manager import AstroidManager + + +def _re_transform(): + # Since Python 3.6 there is the RegexFlag enum + # where every entry will be exposed via updating globals() + return parse( + """ + import sre_compile + ASCII = sre_compile.SRE_FLAG_ASCII + IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE + LOCALE = sre_compile.SRE_FLAG_LOCALE + UNICODE = sre_compile.SRE_FLAG_UNICODE + MULTILINE = sre_compile.SRE_FLAG_MULTILINE + DOTALL = sre_compile.SRE_FLAG_DOTALL + VERBOSE = sre_compile.SRE_FLAG_VERBOSE + A = ASCII + I = IGNORECASE + L = LOCALE + U = UNICODE + M = MULTILINE + S = DOTALL + X = VERBOSE + TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE + T = TEMPLATE + DEBUG = sre_compile.SRE_FLAG_DEBUG + """ + ) + + +register_module_extender(AstroidManager(), "re", _re_transform) + + +CLASS_GETITEM_TEMPLATE = """ +@classmethod +def __class_getitem__(cls, item): + return cls +""" + + +def _looks_like_pattern_or_match(node: nodes.Call) -> bool: + """Check for re.Pattern or re.Match call in stdlib. + + Match these patterns from stdlib/re.py + ```py + Pattern = type(...) + Match = type(...) + ``` + """ + return ( + node.root().name == "re" + and isinstance(node.func, nodes.Name) + and node.func.name == "type" + and isinstance(node.parent, nodes.Assign) + and len(node.parent.targets) == 1 + and isinstance(node.parent.targets[0], nodes.AssignName) + and node.parent.targets[0].name in {"Pattern", "Match"} + ) + + +def infer_pattern_match( + node: nodes.Call, ctx: Optional[context.InferenceContext] = None +): + """Infer re.Pattern and re.Match as classes. For PY39+ add `__class_getitem__`.""" + class_def = nodes.ClassDef( + name=node.parent.targets[0].name, + lineno=node.lineno, + col_offset=node.col_offset, + parent=node.parent, + ) + if PY39_PLUS: + func_to_add = _extract_single_node(CLASS_GETITEM_TEMPLATE) + class_def.locals["__class_getitem__"] = [func_to_add] + return iter([class_def]) + + +if PY37_PLUS: + AstroidManager().register_transform( + nodes.Call, inference_tip(infer_pattern_match), _looks_like_pattern_or_match + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_responses.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_responses.py new file mode 100644 index 0000000..0fb0e42 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_responses.py @@ -0,0 +1,79 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hooks for responses. 
+ +It might need to be manually updated from the public methods of +:class:`responses.RequestsMock`. + +See: https://github.com/getsentry/responses/blob/master/responses.py + +""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def responses_funcs(): + return parse( + """ + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" + response_callback = None + + def reset(): + return + + def add( + method=None, # method or ``Response`` + url=None, + body="", + adding_headers=None, + *args, + **kwargs + ): + return + + def add_passthru(prefix): + return + + def remove(method_or_response=None, url=None): + return + + def replace(method_or_response=None, url=None, body="", *args, **kwargs): + return + + def add_callback( + method, url, callback, match_querystring=False, content_type="text/plain" + ): + return + + calls = [] + + def __enter__(): + return + + def __exit__(type, value, traceback): + success = type is None + return success + + def activate(func): + return func + + def start(): + return + + def stop(allow_assert=True): + return + """ + ) + + +register_module_extender(AstroidManager(), "responses", responses_funcs) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_scipy_signal.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_scipy_signal.py new file mode 100644 index 0000000..578022f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_scipy_signal.py @@ -0,0 +1,88 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for scipy.signal module.""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def scipy_signal(): + return parse( + """ + # different functions defined in scipy.signals + + def barthann(M, sym=True): + return numpy.ndarray([0]) + + def bartlett(M, sym=True): + return numpy.ndarray([0]) + + def blackman(M, sym=True): + return numpy.ndarray([0]) + + def blackmanharris(M, sym=True): + return numpy.ndarray([0]) + + def bohman(M, sym=True): + return numpy.ndarray([0]) + + def boxcar(M, sym=True): + return numpy.ndarray([0]) + + def chebwin(M, at, sym=True): + return numpy.ndarray([0]) + + def cosine(M, sym=True): + return numpy.ndarray([0]) + + def exponential(M, center=None, tau=1.0, sym=True): + return numpy.ndarray([0]) + + def flattop(M, sym=True): + return numpy.ndarray([0]) + + def gaussian(M, std, sym=True): + return numpy.ndarray([0]) + + def general_gaussian(M, p, sig, sym=True): + return numpy.ndarray([0]) + + def hamming(M, sym=True): + return numpy.ndarray([0]) + + def hann(M, sym=True): + return numpy.ndarray([0]) + + def hanning(M, sym=True): + return numpy.ndarray([0]) + + def impulse2(system, X0=None, T=None, N=None, **kwargs): + return numpy.ndarray([0]), numpy.ndarray([0]) + + def kaiser(M, beta, sym=True): + return numpy.ndarray([0]) + + def nuttall(M, sym=True): + return numpy.ndarray([0]) + + def parzen(M, sym=True): + return numpy.ndarray([0]) + + def slepian(M, width, sym=True): + return numpy.ndarray([0]) + + def step2(system, X0=None, T=None, N=None, **kwargs): + return numpy.ndarray([0]), numpy.ndarray([0]) + + def triang(M, sym=True): + return 
numpy.ndarray([0]) + + def tukey(M, alpha=0.5, sym=True): + return numpy.ndarray([0]) + """ + ) + + +register_module_extender(AstroidManager(), "scipy.signal", scipy_signal) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_signal.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_signal.py new file mode 100644 index 0000000..5eee7f6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_signal.py @@ -0,0 +1,119 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the signal library. + +The signal module generates the 'Signals', 'Handlers' and 'Sigmasks' IntEnums +dynamically using the IntEnum._convert() classmethod, which modifies the module +globals. Astroid is unable to handle this type of code. + +Without these hooks, the following are erroneously triggered by Pylint: + * E1101: Module 'signal' has no 'Signals' member (no-member) + * E1101: Module 'signal' has no 'Handlers' member (no-member) + * E1101: Module 'signal' has no 'Sigmasks' member (no-member) + +These enums are defined slightly differently depending on the user's operating +system and platform. These platform differences should follow the current +Python typeshed stdlib `signal.pyi` stub file, available at: + +* https://github.com/python/typeshed/blob/master/stdlib/signal.pyi + +Note that the enum.auto() values defined here for the Signals, Handlers and +Sigmasks IntEnums are just dummy integer values, and do not correspond to the +actual standard signal numbers - which may vary depending on the system. +""" + + +import sys + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def _signals_enums_transform(): + """Generates the AST for 'Signals', 'Handlers' and 'Sigmasks' IntEnums.""" + return parse(_signals_enum() + _handlers_enum() + _sigmasks_enum()) + + +def _signals_enum(): + """Generates the source code for the Signals int enum.""" + signals_enum = """ + import enum + class Signals(enum.IntEnum): + SIGABRT = enum.auto() + SIGEMT = enum.auto() + SIGFPE = enum.auto() + SIGILL = enum.auto() + SIGINFO = enum.auto() + SIGINT = enum.auto() + SIGSEGV = enum.auto() + SIGTERM = enum.auto() + """ + if sys.platform != "win32": + signals_enum += """ + SIGALRM = enum.auto() + SIGBUS = enum.auto() + SIGCHLD = enum.auto() + SIGCONT = enum.auto() + SIGHUP = enum.auto() + SIGIO = enum.auto() + SIGIOT = enum.auto() + SIGKILL = enum.auto() + SIGPIPE = enum.auto() + SIGPROF = enum.auto() + SIGQUIT = enum.auto() + SIGSTOP = enum.auto() + SIGSYS = enum.auto() + SIGTRAP = enum.auto() + SIGTSTP = enum.auto() + SIGTTIN = enum.auto() + SIGTTOU = enum.auto() + SIGURG = enum.auto() + SIGUSR1 = enum.auto() + SIGUSR2 = enum.auto() + SIGVTALRM = enum.auto() + SIGWINCH = enum.auto() + SIGXCPU = enum.auto() + SIGXFSZ = enum.auto() + """ + if sys.platform == "win32": + signals_enum += """ + SIGBREAK = enum.auto() + """ + if sys.platform not in ("darwin", "win32"): + signals_enum += """ + SIGCLD = enum.auto() + SIGPOLL = enum.auto() + SIGPWR = enum.auto() + SIGRTMAX = enum.auto() + SIGRTMIN = enum.auto() + """ + return signals_enum + + +def _handlers_enum(): + """Generates the source code for the Handlers int enum.""" + return """ + import enum + class Handlers(enum.IntEnum): + SIG_DFL = enum.auto() + SIG_IGN = 
enum.auto() + """ + + +def _sigmasks_enum(): + """Generates the source code for the Sigmasks int enum.""" + if sys.platform != "win32": + return """ + import enum + class Sigmasks(enum.IntEnum): + SIG_BLOCK = enum.auto() + SIG_UNBLOCK = enum.auto() + SIG_SETMASK = enum.auto() + """ + return "" + + +register_module_extender(AstroidManager(), "signal", _signals_enums_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_six.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_six.py new file mode 100644 index 0000000..022fcf2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_six.py @@ -0,0 +1,239 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for six module.""" + +from textwrap import dedent + +from astroid import nodes +from astroid.brain.helpers import register_module_extender +from astroid.builder import AstroidBuilder +from astroid.exceptions import ( + AstroidBuildingError, + AttributeInferenceError, + InferenceError, +) +from astroid.manager import AstroidManager + +SIX_ADD_METACLASS = "six.add_metaclass" +SIX_WITH_METACLASS = "six.with_metaclass" + + +def default_predicate(line): + return line.strip() + + +def _indent(text, prefix, predicate=default_predicate): + """Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + """ + + def prefixed_lines(): + for line in text.splitlines(True): + yield prefix + line if predicate(line) else line + + return "".join(prefixed_lines()) + + +_IMPORTS = """ +import _io +cStringIO = _io.StringIO +filter = filter +from itertools import filterfalse +input = input +from sys import intern +map = map +range = range +from importlib import reload +reload_module = lambda module: reload(module) +from functools import reduce +from shlex import quote as shlex_quote +from io import StringIO +from collections import UserDict, UserList, UserString +xrange = range +zip = zip +from itertools import zip_longest +import builtins +import configparser +import copyreg +import _dummy_thread +import http.cookiejar as http_cookiejar +import http.cookies as http_cookies +import html.entities as html_entities +import html.parser as html_parser +import http.client as http_client +import http.server as http_server +BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server +import pickle as cPickle +import queue +import reprlib +import socketserver +import _thread +import winreg +import xmlrpc.server as xmlrpc_server +import xmlrpc.client as xmlrpc_client +import urllib.robotparser as urllib_robotparser +import email.mime.multipart as email_mime_multipart +import email.mime.nonmultipart as email_mime_nonmultipart +import email.mime.text as email_mime_text +import email.mime.base as email_mime_base +import urllib.parse as urllib_parse +import urllib.error as urllib_error +import tkinter +import tkinter.dialog as tkinter_dialog +import tkinter.filedialog as tkinter_filedialog +import tkinter.scrolledtext as tkinter_scrolledtext +import tkinter.simpledialog as tkinder_simpledialog +import tkinter.tix as tkinter_tix +import tkinter.ttk as tkinter_ttk +import 
tkinter.constants as tkinter_constants +import tkinter.dnd as tkinter_dnd +import tkinter.colorchooser as tkinter_colorchooser +import tkinter.commondialog as tkinter_commondialog +import tkinter.filedialog as tkinter_tkfiledialog +import tkinter.font as tkinter_font +import tkinter.messagebox as tkinter_messagebox +import urllib +import urllib.request as urllib_request +import urllib.robotparser as urllib_robotparser +import urllib.parse as urllib_parse +import urllib.error as urllib_error +""" + + +def six_moves_transform(): + code = dedent( + """ + class Moves(object): + {} + moves = Moves() + """ + ).format(_indent(_IMPORTS, " ")) + module = AstroidBuilder(AstroidManager()).string_build(code) + module.name = "six.moves" + return module + + +def _six_fail_hook(modname): + """Fix six.moves imports due to the dynamic nature of this + class. + + Construct a pseudo-module which contains all the necessary imports + for six + + :param modname: Name of failed module + :type modname: str + + :return: An astroid module + :rtype: nodes.Module + """ + + attribute_of = modname != "six.moves" and modname.startswith("six.moves") + if modname != "six.moves" and not attribute_of: + raise AstroidBuildingError(modname=modname) + module = AstroidBuilder(AstroidManager()).string_build(_IMPORTS) + module.name = "six.moves" + if attribute_of: + # Facilitate import of submodules in Moves + start_index = len(module.name) + attribute = modname[start_index:].lstrip(".").replace(".", "_") + try: + import_attr = module.getattr(attribute)[0] + except AttributeInferenceError as exc: + raise AstroidBuildingError(modname=modname) from exc + if isinstance(import_attr, nodes.Import): + submodule = AstroidManager().ast_from_module_name(import_attr.names[0][0]) + return submodule + # Let dummy submodule imports pass through + # This will cause an Uninferable result, which is okay + return module + + +def _looks_like_decorated_with_six_add_metaclass(node): + if not node.decorators: + return False + + for decorator in node.decorators.nodes: + if not isinstance(decorator, nodes.Call): + continue + if decorator.func.as_string() == SIX_ADD_METACLASS: + return True + return False + + +def transform_six_add_metaclass(node): # pylint: disable=inconsistent-return-statements + """Check if the given class node is decorated with *six.add_metaclass* + + If so, inject its argument as the metaclass of the underlying class. 
+ """ + if not node.decorators: + return + + for decorator in node.decorators.nodes: + if not isinstance(decorator, nodes.Call): + continue + + try: + func = next(decorator.func.infer()) + except (InferenceError, StopIteration): + continue + if func.qname() == SIX_ADD_METACLASS and decorator.args: + metaclass = decorator.args[0] + node._metaclass = metaclass + return node + return + + +def _looks_like_nested_from_six_with_metaclass(node): + if len(node.bases) != 1: + return False + base = node.bases[0] + if not isinstance(base, nodes.Call): + return False + try: + if hasattr(base.func, "expr"): + # format when explicit 'six.with_metaclass' is used + mod = base.func.expr.name + func = base.func.attrname + func = f"{mod}.{func}" + else: + # format when 'with_metaclass' is used directly (local import from six) + # check reference module to avoid 'with_metaclass' name clashes + mod = base.parent.parent + import_from = mod.locals["with_metaclass"][0] + func = f"{import_from.modname}.{base.func.name}" + except (AttributeError, KeyError, IndexError): + return False + return func == SIX_WITH_METACLASS + + +def transform_six_with_metaclass(node): + """Check if the given class node is defined with *six.with_metaclass* + + If so, inject its argument as the metaclass of the underlying class. + """ + call = node.bases[0] + node._metaclass = call.args[0] + return node + + +register_module_extender(AstroidManager(), "six", six_moves_transform) +register_module_extender( + AstroidManager(), "requests.packages.urllib3.packages.six", six_moves_transform +) +AstroidManager().register_failed_import_hook(_six_fail_hook) +AstroidManager().register_transform( + nodes.ClassDef, + transform_six_add_metaclass, + _looks_like_decorated_with_six_add_metaclass, +) +AstroidManager().register_transform( + nodes.ClassDef, + transform_six_with_metaclass, + _looks_like_nested_from_six_with_metaclass, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_sqlalchemy.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_sqlalchemy.py new file mode 100644 index 0000000..f3695de --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_sqlalchemy.py @@ -0,0 +1,39 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def _session_transform(): + return parse( + """ + from sqlalchemy.orm.session import Session + + class sessionmaker: + def __init__( + self, + bind=None, + class_=Session, + autoflush=True, + autocommit=False, + expire_on_commit=True, + info=None, + **kw + ): + return + + def __call__(self, **local_kw): + return Session() + + def configure(self, **new_kw): + return + + return Session() + """ + ) + + +register_module_extender(AstroidManager(), "sqlalchemy.orm.session", _session_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_ssl.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_ssl.py new file mode 100644 index 0000000..6ca0d5a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_ssl.py @@ -0,0 +1,71 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) 
https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the ssl library.""" + +from astroid import parse +from astroid.brain.helpers import register_module_extender +from astroid.manager import AstroidManager + + +def ssl_transform(): + return parse( + """ + from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION + from _ssl import _SSLContext, MemoryBIO + from _ssl import ( + SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, + SSLSyscallError, SSLEOFError, + ) + from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED + from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj + from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes + try: + from _ssl import RAND_egd + except ImportError: + # LibreSSL does not provide RAND_egd + pass + from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE, + OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3, + OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2, + OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE) + + from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE, + ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE, + ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE, + ALERT_DESCRIPTION_BAD_RECORD_MAC, + ALERT_DESCRIPTION_CERTIFICATE_EXPIRED, + ALERT_DESCRIPTION_CERTIFICATE_REVOKED, + ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN, + ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE, + ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR, + ALERT_DESCRIPTION_DECOMPRESSION_FAILURE, + ALERT_DESCRIPTION_DECRYPT_ERROR, + ALERT_DESCRIPTION_HANDSHAKE_FAILURE, + ALERT_DESCRIPTION_ILLEGAL_PARAMETER, + ALERT_DESCRIPTION_INSUFFICIENT_SECURITY, + ALERT_DESCRIPTION_INTERNAL_ERROR, + ALERT_DESCRIPTION_NO_RENEGOTIATION, + ALERT_DESCRIPTION_PROTOCOL_VERSION, + ALERT_DESCRIPTION_RECORD_OVERFLOW, + ALERT_DESCRIPTION_UNEXPECTED_MESSAGE, + ALERT_DESCRIPTION_UNKNOWN_CA, + ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY, + ALERT_DESCRIPTION_UNRECOGNIZED_NAME, + ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE, + ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION, + ALERT_DESCRIPTION_USER_CANCELLED) + from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL, + SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ, + SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN) + from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT + from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN + from _ssl import _OPENSSL_API_VERSION + from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2 + from _ssl import PROTOCOL_TLS, PROTOCOL_TLS_CLIENT, PROTOCOL_TLS_SERVER + """ + ) + + +register_module_extender(AstroidManager(), "ssl", ssl_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_subprocess.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_subprocess.py new file mode 100644 index 0000000..f296ab4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_subprocess.py @@ -0,0 +1,131 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import textwrap + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.const import PY37_PLUS, PY39_PLUS, PY310_PLUS, PY311_PLUS +from astroid.manager import AstroidManager + + +def 
_subprocess_transform(): + communicate = (bytes("string", "ascii"), bytes("string", "ascii")) + communicate_signature = "def communicate(self, input=None, timeout=None)" + args = """\ + self, args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None, + preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None, + universal_newlines=None, startupinfo=None, creationflags=0, restore_signals=True, + start_new_session=False, pass_fds=(), *, encoding=None, errors=None, text=None""" + + if PY39_PLUS: + args += ", user=None, group=None, extra_groups=None, umask=-1" + if PY310_PLUS: + args += ", pipesize=-1" + if PY311_PLUS: + args += ", process_group=None" + + init = f""" + def __init__({args}): + pass""" + wait_signature = "def wait(self, timeout=None)" + ctx_manager = """ + def __enter__(self): return self + def __exit__(self, *args): pass + """ + py3_args = "args = []" + + if PY37_PLUS: + check_output_signature = """ + check_output( + args, *, + stdin=None, + stderr=None, + shell=False, + cwd=None, + encoding=None, + errors=None, + universal_newlines=False, + timeout=None, + env=None, + text=None, + restore_signals=True, + preexec_fn=None, + pass_fds=(), + input=None, + bufsize=0, + executable=None, + close_fds=False, + startupinfo=None, + creationflags=0, + start_new_session=False + ): + """.strip() + else: + check_output_signature = """ + check_output( + args, *, + stdin=None, + stderr=None, + shell=False, + cwd=None, + encoding=None, + errors=None, + universal_newlines=False, + timeout=None, + env=None, + restore_signals=True, + preexec_fn=None, + pass_fds=(), + input=None, + bufsize=0, + executable=None, + close_fds=False, + startupinfo=None, + creationflags=0, + start_new_session=False + ): + """.strip() + + code = textwrap.dedent( + f""" + def {check_output_signature} + if universal_newlines: + return "" + return b"" + + class Popen(object): + returncode = pid = 0 + stdin = stdout = stderr = file() + {py3_args} + + {communicate_signature}: + return {communicate!r} + {wait_signature}: + return self.returncode + def poll(self): + return self.returncode + def send_signal(self, signal): + pass + def terminate(self): + pass + def kill(self): + pass + {ctx_manager} + """ + ) + if PY39_PLUS: + code += """ + @classmethod + def __class_getitem__(cls, item): + pass + """ + + init_lines = textwrap.dedent(init).splitlines() + indented_init = "\n".join(" " * 4 + line for line in init_lines) + code += indented_init + return parse(code) + + +register_module_extender(AstroidManager(), "subprocess", _subprocess_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_threading.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_threading.py new file mode 100644 index 0000000..a85055d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_threading.py @@ -0,0 +1,31 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.manager import AstroidManager + + +def _thread_transform(): + return parse( + """ + class lock(object): + def acquire(self, blocking=True, timeout=-1): + return False + def release(self): + pass + def __enter__(self): + return True + def __exit__(self, *args): + pass + def locked(self): + return False + + def Lock(): + return lock() + """ + 
) + + +register_module_extender(AstroidManager(), "threading", _thread_transform) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_type.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_type.py new file mode 100644 index 0000000..f9c3ff4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_type.py @@ -0,0 +1,69 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Astroid hooks for type support. + +Starting from python3.9, type object behaves as it had __class_getitem__ method. +However it was not possible to simply add this method inside type's body, otherwise +all types would also have this method. In this case it would have been possible +to write str[int]. +Guido Van Rossum proposed a hack to handle this in the interpreter: +https://github.com/python/cpython/blob/67e394562d67cbcd0ac8114e5439494e7645b8f5/Objects/abstract.c#L181-L184 + +This brain follows the same logic. It is no wise to add permanently the __class_getitem__ method +to the type object. Instead we choose to add it only in the case of a subscript node +which inside name node is type. +Doing this type[int] is allowed whereas str[int] is not. + +Thanks to Lukasz Langa for fruitful discussion. +""" + +from astroid import extract_node, inference_tip, nodes +from astroid.const import PY39_PLUS +from astroid.exceptions import UseInferenceDefault +from astroid.manager import AstroidManager + + +def _looks_like_type_subscript(node): + """ + Try to figure out if a Name node is used inside a type related subscript + + :param node: node to check + :type node: astroid.nodes.node_classes.NodeNG + :return: true if the node is a Name node inside a type related subscript + :rtype: bool + """ + if isinstance(node, nodes.Name) and isinstance(node.parent, nodes.Subscript): + return node.name == "type" + return False + + +def infer_type_sub(node, context=None): + """ + Infer a type[...] 
subscript + + :param node: node to infer + :type node: astroid.nodes.node_classes.NodeNG + :param context: inference context + :type context: astroid.context.InferenceContext + :return: the inferred node + :rtype: nodes.NodeNG + """ + node_scope, _ = node.scope().lookup("type") + if not isinstance(node_scope, nodes.Module) or node_scope.qname() != "builtins": + raise UseInferenceDefault() + class_src = """ + class type: + def __class_getitem__(cls, key): + return cls + """ + node = extract_node(class_src) + return node.infer(context=context) + + +if PY39_PLUS: + AstroidManager().register_transform( + nodes.Name, inference_tip(infer_type_sub), _looks_like_type_subscript + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_typing.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_typing.py new file mode 100644 index 0000000..6077773 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_typing.py @@ -0,0 +1,433 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for typing.py support.""" +import typing +from functools import partial + +from astroid import context, extract_node, inference_tip +from astroid.builder import _extract_single_node +from astroid.const import PY37_PLUS, PY38_PLUS, PY39_PLUS +from astroid.exceptions import ( + AttributeInferenceError, + InferenceError, + UseInferenceDefault, +) +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import ( + Assign, + AssignName, + Attribute, + Call, + Const, + JoinedStr, + Name, + NodeNG, + Subscript, + Tuple, +) +from astroid.nodes.scoped_nodes import ClassDef, FunctionDef +from astroid.util import Uninferable + +TYPING_NAMEDTUPLE_BASENAMES = {"NamedTuple", "typing.NamedTuple"} +TYPING_TYPEVARS = {"TypeVar", "NewType"} +TYPING_TYPEVARS_QUALIFIED = {"typing.TypeVar", "typing.NewType"} +TYPING_TYPE_TEMPLATE = """ +class Meta(type): + def __getitem__(self, item): + return self + + @property + def __args__(self): + return () + +class {0}(metaclass=Meta): + pass +""" +TYPING_MEMBERS = set(getattr(typing, "__all__", [])) + +TYPING_ALIAS = frozenset( + ( + "typing.Hashable", + "typing.Awaitable", + "typing.Coroutine", + "typing.AsyncIterable", + "typing.AsyncIterator", + "typing.Iterable", + "typing.Iterator", + "typing.Reversible", + "typing.Sized", + "typing.Container", + "typing.Collection", + "typing.Callable", + "typing.AbstractSet", + "typing.MutableSet", + "typing.Mapping", + "typing.MutableMapping", + "typing.Sequence", + "typing.MutableSequence", + "typing.ByteString", + "typing.Tuple", + "typing.List", + "typing.Deque", + "typing.Set", + "typing.FrozenSet", + "typing.MappingView", + "typing.KeysView", + "typing.ItemsView", + "typing.ValuesView", + "typing.ContextManager", + "typing.AsyncContextManager", + "typing.Dict", + "typing.DefaultDict", + "typing.OrderedDict", + "typing.Counter", + "typing.ChainMap", + "typing.Generator", + "typing.AsyncGenerator", + "typing.Type", + "typing.Pattern", + "typing.Match", + ) +) + +CLASS_GETITEM_TEMPLATE = """ +@classmethod +def __class_getitem__(cls, item): + return cls +""" + + +def looks_like_typing_typevar_or_newtype(node): + func = node.func + if isinstance(func, Attribute): + return func.attrname in TYPING_TYPEVARS + if isinstance(func, Name): + return func.name in TYPING_TYPEVARS + return False + + +def 
infer_typing_typevar_or_newtype(node, context_itton=None): + """Infer a typing.TypeVar(...) or typing.NewType(...) call""" + try: + func = next(node.func.infer(context=context_itton)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if func.qname() not in TYPING_TYPEVARS_QUALIFIED: + raise UseInferenceDefault + if not node.args: + raise UseInferenceDefault + # Cannot infer from a dynamic class name (f-string) + if isinstance(node.args[0], JoinedStr): + raise UseInferenceDefault + + typename = node.args[0].as_string().strip("'") + node = extract_node(TYPING_TYPE_TEMPLATE.format(typename)) + return node.infer(context=context_itton) + + +def _looks_like_typing_subscript(node): + """Try to figure out if a Subscript node *might* be a typing-related subscript""" + if isinstance(node, Name): + return node.name in TYPING_MEMBERS + if isinstance(node, Attribute): + return node.attrname in TYPING_MEMBERS + if isinstance(node, Subscript): + return _looks_like_typing_subscript(node.value) + return False + + +def infer_typing_attr( + node: Subscript, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[ClassDef]: + """Infer a typing.X[...] subscript""" + try: + value = next(node.value.infer()) # type: ignore[union-attr] # value shouldn't be None for Subscript. + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + + if ( + not value.qname().startswith("typing.") + or PY37_PLUS + and value.qname() in TYPING_ALIAS + ): + # If typing subscript belongs to an alias + # (PY37+) handle it separately. + raise UseInferenceDefault + + if ( + PY37_PLUS + and isinstance(value, ClassDef) + and value.qname() + in {"typing.Generic", "typing.Annotated", "typing_extensions.Annotated"} + ): + # With PY37+ typing.Generic and typing.Annotated (PY39) are subscriptable + # through __class_getitem__. Since astroid can't easily + # infer the native methods, replace them for an easy inference tip + func_to_add = _extract_single_node(CLASS_GETITEM_TEMPLATE) + value.locals["__class_getitem__"] = [func_to_add] + if ( + isinstance(node.parent, ClassDef) + and node in node.parent.bases + and getattr(node.parent, "__cache", None) + ): + # node.parent.slots is evaluated and cached before the inference tip + # is first applied. 
Remove the last result to allow a recalculation of slots + cache = node.parent.__cache # type: ignore[attr-defined] # Unrecognized getattr + if cache.get(node.parent.slots) is not None: + del cache[node.parent.slots] + return iter([value]) + + node = extract_node(TYPING_TYPE_TEMPLATE.format(value.qname().split(".")[-1])) + return node.infer(context=ctx) + + +def _looks_like_typedDict( # pylint: disable=invalid-name + node: typing.Union[FunctionDef, ClassDef], +) -> bool: + """Check if node is TypedDict FunctionDef.""" + return node.qname() in {"typing.TypedDict", "typing_extensions.TypedDict"} + + +def infer_old_typedDict( # pylint: disable=invalid-name + node: ClassDef, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[ClassDef]: + func_to_add = _extract_single_node("dict") + node.locals["__call__"] = [func_to_add] + return iter([node]) + + +def infer_typedDict( # pylint: disable=invalid-name + node: FunctionDef, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[ClassDef]: + """Replace TypedDict FunctionDef with ClassDef.""" + class_def = ClassDef( + name="TypedDict", + lineno=node.lineno, + col_offset=node.col_offset, + parent=node.parent, + ) + class_def.postinit(bases=[extract_node("dict")], body=[], decorators=None) + func_to_add = _extract_single_node("dict") + class_def.locals["__call__"] = [func_to_add] + return iter([class_def]) + + +def _looks_like_typing_alias(node: Call) -> bool: + """ + Returns True if the node corresponds to a call to _alias function. + For example : + + MutableSet = _alias(collections.abc.MutableSet, T) + + :param node: call node + """ + return ( + isinstance(node.func, Name) + and node.func.name == "_alias" + and ( + # _alias function works also for builtins object such as list and dict + isinstance(node.args[0], (Attribute, Name)) + ) + ) + + +def _forbid_class_getitem_access(node: ClassDef) -> None: + """ + Disable the access to __class_getitem__ method for the node in parameters + """ + + def full_raiser(origin_func, attr, *args, **kwargs): + """ + Raises an AttributeInferenceError in case of access to __class_getitem__ method. + Otherwise just call origin_func. + """ + if attr == "__class_getitem__": + raise AttributeInferenceError("__class_getitem__ access is not allowed") + return origin_func(attr, *args, **kwargs) + + try: + node.getattr("__class_getitem__") + # If we are here, then we are sure to modify object that do have __class_getitem__ method (which origin is one the + # protocol defined in collections module) whereas the typing module consider it should not + # We do not want __class_getitem__ to be found in the classdef + partial_raiser = partial(full_raiser, node.getattr) + node.getattr = partial_raiser + except AttributeInferenceError: + pass + + +def infer_typing_alias( + node: Call, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[ClassDef]: + """ + Infers the call to _alias function + Insert ClassDef, with same name as aliased class, + in mro to simulate _GenericAlias. 
+ + :param node: call node + :param context: inference context + """ + if ( + not isinstance(node.parent, Assign) + or not len(node.parent.targets) == 1 + or not isinstance(node.parent.targets[0], AssignName) + ): + raise UseInferenceDefault + try: + res = next(node.args[0].infer(context=ctx)) + except StopIteration as e: + raise InferenceError(node=node.args[0], context=context) from e + + assign_name = node.parent.targets[0] + + class_def = ClassDef( + name=assign_name.name, + lineno=assign_name.lineno, + col_offset=assign_name.col_offset, + parent=node.parent, + ) + if res != Uninferable and isinstance(res, ClassDef): + # Only add `res` as base if it's a `ClassDef` + # This isn't the case for `typing.Pattern` and `typing.Match` + class_def.postinit(bases=[res], body=[], decorators=None) + + maybe_type_var = node.args[1] + if ( + not PY39_PLUS + and not (isinstance(maybe_type_var, Tuple) and not maybe_type_var.elts) + or PY39_PLUS + and isinstance(maybe_type_var, Const) + and maybe_type_var.value > 0 + ): + # If typing alias is subscriptable, add `__class_getitem__` to ClassDef + func_to_add = _extract_single_node(CLASS_GETITEM_TEMPLATE) + class_def.locals["__class_getitem__"] = [func_to_add] + else: + # If not, make sure that `__class_getitem__` access is forbidden. + # This is an issue in cases where the aliased class implements it, + # but the typing alias isn't subscriptable. E.g., `typing.ByteString` for PY39+ + _forbid_class_getitem_access(class_def) + return iter([class_def]) + + +def _looks_like_special_alias(node: Call) -> bool: + """Return True if call is for Tuple or Callable alias. + + In PY37 and PY38 the call is to '_VariadicGenericAlias' with 'tuple' as + first argument. In PY39+ it is replaced by a call to '_TupleType'. + + PY37: Tuple = _VariadicGenericAlias(tuple, (), inst=False, special=True) + PY39: Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') + + + PY37: Callable = _VariadicGenericAlias(collections.abc.Callable, (), special=True) + PY39: Callable = _CallableType(collections.abc.Callable, 2) + """ + return isinstance(node.func, Name) and ( + not PY39_PLUS + and node.func.name == "_VariadicGenericAlias" + and ( + isinstance(node.args[0], Name) + and node.args[0].name == "tuple" + or isinstance(node.args[0], Attribute) + and node.args[0].as_string() == "collections.abc.Callable" + ) + or PY39_PLUS + and ( + node.func.name == "_TupleType" + and isinstance(node.args[0], Name) + and node.args[0].name == "tuple" + or node.func.name == "_CallableType" + and isinstance(node.args[0], Attribute) + and node.args[0].as_string() == "collections.abc.Callable" + ) + ) + + +def infer_special_alias( + node: Call, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[ClassDef]: + """Infer call to tuple alias as new subscriptable class typing.Tuple.""" + if not ( + isinstance(node.parent, Assign) + and len(node.parent.targets) == 1 + and isinstance(node.parent.targets[0], AssignName) + ): + raise UseInferenceDefault + try: + res = next(node.args[0].infer(context=ctx)) + except StopIteration as e: + raise InferenceError(node=node.args[0], context=context) from e + + assign_name = node.parent.targets[0] + class_def = ClassDef( + name=assign_name.name, + parent=node.parent, + ) + class_def.postinit(bases=[res], body=[], decorators=None) + func_to_add = _extract_single_node(CLASS_GETITEM_TEMPLATE) + class_def.locals["__class_getitem__"] = [func_to_add] + return iter([class_def]) + + +def _looks_like_typing_cast(node: Call) -> bool: + return isinstance(node, 
Call) and ( + isinstance(node.func, Name) + and node.func.name == "cast" + or isinstance(node.func, Attribute) + and node.func.attrname == "cast" + ) + + +def infer_typing_cast( + node: Call, ctx: typing.Optional[context.InferenceContext] = None +) -> typing.Iterator[NodeNG]: + """Infer call to cast() returning same type as casted-from var""" + if not isinstance(node.func, (Name, Attribute)): + raise UseInferenceDefault + + try: + func = next(node.func.infer(context=ctx)) + except (InferenceError, StopIteration) as exc: + raise UseInferenceDefault from exc + if ( + not isinstance(func, FunctionDef) + or func.qname() != "typing.cast" + or len(node.args) != 2 + ): + raise UseInferenceDefault + + return node.args[1].infer(context=ctx) + + +AstroidManager().register_transform( + Call, + inference_tip(infer_typing_typevar_or_newtype), + looks_like_typing_typevar_or_newtype, +) +AstroidManager().register_transform( + Subscript, inference_tip(infer_typing_attr), _looks_like_typing_subscript +) +AstroidManager().register_transform( + Call, inference_tip(infer_typing_cast), _looks_like_typing_cast +) + +if PY39_PLUS: + AstroidManager().register_transform( + FunctionDef, inference_tip(infer_typedDict), _looks_like_typedDict + ) +elif PY38_PLUS: + AstroidManager().register_transform( + ClassDef, inference_tip(infer_old_typedDict), _looks_like_typedDict + ) + +if PY37_PLUS: + AstroidManager().register_transform( + Call, inference_tip(infer_typing_alias), _looks_like_typing_alias + ) + AstroidManager().register_transform( + Call, inference_tip(infer_special_alias), _looks_like_special_alias + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_unittest.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_unittest.py new file mode 100644 index 0000000..b34e1cf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_unittest.py @@ -0,0 +1,31 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for unittest module""" +from astroid.brain.helpers import register_module_extender +from astroid.builder import parse +from astroid.const import PY38_PLUS +from astroid.manager import AstroidManager + + +def IsolatedAsyncioTestCaseImport(): + """ + In the unittest package, the IsolatedAsyncioTestCase class is imported lazily, i.e only + when the __getattr__ method of the unittest module is called with 'IsolatedAsyncioTestCase' as + argument. Thus the IsolatedAsyncioTestCase is not imported statically (during import time). + This function mocks a classical static import of the IsolatedAsyncioTestCase. 
+ + (see https://github.com/PyCQA/pylint/issues/4060) + """ + return parse( + """ + from .async_case import IsolatedAsyncioTestCase + """ + ) + + +if PY38_PLUS: + register_module_extender( + AstroidManager(), "unittest", IsolatedAsyncioTestCaseImport + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/brain_uuid.py b/myenv/lib/python3.9/site-packages/astroid/brain/brain_uuid.py new file mode 100644 index 0000000..f6ba888 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/brain_uuid.py @@ -0,0 +1,18 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Astroid hooks for the UUID module.""" +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import Const +from astroid.nodes.scoped_nodes import ClassDef + + +def _patch_uuid_class(node): + # The .int member is patched using __dict__ + node.locals["int"] = [Const(0, parent=node)] + + +AstroidManager().register_transform( + ClassDef, _patch_uuid_class, lambda node: node.qname() == "uuid.UUID" +) diff --git a/myenv/lib/python3.9/site-packages/astroid/brain/helpers.py b/myenv/lib/python3.9/site-packages/astroid/brain/helpers.py new file mode 100644 index 0000000..d74f595 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/brain/helpers.py @@ -0,0 +1,17 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from astroid.nodes.scoped_nodes import Module + + +def register_module_extender(manager, module_name, get_extension_mod): + def transform(node): + extension_module = get_extension_mod() + for name, objs in extension_module.locals.items(): + node.locals[name] = objs + for obj in objs: + if obj.parent is extension_module: + obj.parent = node + + manager.register_transform(Module, transform, lambda n: n.name == module_name) diff --git a/myenv/lib/python3.9/site-packages/astroid/builder.py b/myenv/lib/python3.9/site-packages/astroid/builder.py new file mode 100644 index 0000000..1cdb963 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/builder.py @@ -0,0 +1,464 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""The AstroidBuilder makes astroid from living object and / or from _ast + +The builder is not thread safe and can't be used to parse different sources +at the same time. +""" +import os +import textwrap +import types +from tokenize import detect_encoding +from typing import List, Optional, Tuple, Union + +from astroid import bases, modutils, nodes, raw_building, rebuilder, util +from astroid._ast import get_parser_module +from astroid.exceptions import AstroidBuildingError, AstroidSyntaxError, InferenceError +from astroid.manager import AstroidManager +from astroid.nodes.node_classes import NodeNG + +objects = util.lazy_import("objects") + +# The name of the transient function that is used to +# wrap expressions to be extracted when calling +# extract_node. +_TRANSIENT_FUNCTION = "__" + +# The comment used to select a statement to be extracted +# when calling extract_node. 
+_STATEMENT_SELECTOR = "#@" +MISPLACED_TYPE_ANNOTATION_ERROR = "misplaced type annotation" + + +def open_source_file(filename): + # pylint: disable=consider-using-with + with open(filename, "rb") as byte_stream: + encoding = detect_encoding(byte_stream.readline)[0] + stream = open(filename, newline=None, encoding=encoding) + data = stream.read() + return stream, encoding, data + + +def _can_assign_attr(node, attrname): + try: + slots = node.slots() + except NotImplementedError: + pass + else: + if slots and attrname not in {slot.value for slot in slots}: + return False + return node.qname() != "builtins.object" + + +class AstroidBuilder(raw_building.InspectBuilder): + """Class for building an astroid tree from source code or from a live module. + + The param *manager* specifies the manager class which should be used. + If no manager is given, then the default one will be used. The + param *apply_transforms* determines if the transforms should be + applied after the tree was built from source or from a live object, + by default being True. + """ + + # pylint: disable=redefined-outer-name + def __init__(self, manager=None, apply_transforms=True): + super().__init__(manager) + self._apply_transforms = apply_transforms + + def module_build( + self, module: types.ModuleType, modname: Optional[str] = None + ) -> nodes.Module: + """Build an astroid from a living module instance.""" + node = None + path = getattr(module, "__file__", None) + loader = getattr(module, "__loader__", None) + # Prefer the loader to get the source rather than assuming we have a + # filesystem to read the source file from ourselves. + if loader: + modname = modname or module.__name__ + source = loader.get_source(modname) + if source: + node = self.string_build(source, modname, path=path) + if node is None and path is not None: + path_, ext = os.path.splitext(modutils._path_from_filename(path)) + if ext in {".py", ".pyc", ".pyo"} and os.path.exists(path_ + ".py"): + node = self.file_build(path_ + ".py", modname) + if node is None: + # this is a built-in module + # get a partial representation by introspection + node = self.inspect_build(module, modname=modname, path=path) + if self._apply_transforms: + # We have to handle transformation by ourselves since the + # rebuilder isn't called for builtin nodes + node = self._manager.visit_transforms(node) + return node + + def file_build(self, path, modname=None): + """Build astroid from a source code file (i.e. 
from an ast) + + *path* is expected to be a python source file + """ + try: + stream, encoding, data = open_source_file(path) + except OSError as exc: + raise AstroidBuildingError( + "Unable to load file {path}:\n{error}", + modname=modname, + path=path, + error=exc, + ) from exc + except (SyntaxError, LookupError) as exc: + raise AstroidSyntaxError( + "Python 3 encoding specification error or unknown encoding:\n" + "{error}", + modname=modname, + path=path, + error=exc, + ) from exc + except UnicodeError as exc: # wrong encoding + # detect_encoding returns utf-8 if no encoding specified + raise AstroidBuildingError( + "Wrong or no encoding specified for {filename}.", filename=path + ) from exc + with stream: + # get module name if necessary + if modname is None: + try: + modname = ".".join(modutils.modpath_from_file(path)) + except ImportError: + modname = os.path.splitext(os.path.basename(path))[0] + # build astroid representation + module, builder = self._data_build(data, modname, path) + return self._post_build(module, builder, encoding) + + def string_build(self, data, modname="", path=None): + """Build astroid from source code string.""" + module, builder = self._data_build(data, modname, path) + module.file_bytes = data.encode("utf-8") + return self._post_build(module, builder, "utf-8") + + def _post_build( + self, module: nodes.Module, builder: rebuilder.TreeRebuilder, encoding: str + ) -> nodes.Module: + """Handles encoding and delayed nodes after a module has been built""" + module.file_encoding = encoding + self._manager.cache_module(module) + # post tree building steps after we stored the module in the cache: + for from_node in builder._import_from_nodes: + if from_node.modname == "__future__": + for symbol, _ in from_node.names: + module.future_imports.add(symbol) + self.add_from_names_to_locals(from_node) + # handle delayed assattr nodes + for delayed in builder._delayed_assattr: + self.delayed_assattr(delayed) + + # Visit the transforms + if self._apply_transforms: + module = self._manager.visit_transforms(module) + return module + + def _data_build( + self, data: str, modname, path + ) -> Tuple[nodes.Module, rebuilder.TreeRebuilder]: + """Build tree node from data and add some informations""" + try: + node, parser_module = _parse_string(data, type_comments=True) + except (TypeError, ValueError, SyntaxError) as exc: + raise AstroidSyntaxError( + "Parsing Python code failed:\n{error}", + source=data, + modname=modname, + path=path, + error=exc, + ) from exc + + if path is not None: + node_file = os.path.abspath(path) + else: + node_file = "" + if modname.endswith(".__init__"): + modname = modname[:-9] + package = True + else: + package = ( + path is not None + and os.path.splitext(os.path.basename(path))[0] == "__init__" + ) + builder = rebuilder.TreeRebuilder(self._manager, parser_module, data) + module = builder.visit_module(node, modname, node_file, package) + return module, builder + + def add_from_names_to_locals(self, node): + """Store imported names to the locals + + Resort the locals if coming from a delayed node + """ + + def _key_func(node): + return node.fromlineno + + def sort_locals(my_list): + my_list.sort(key=_key_func) + + for (name, asname) in node.names: + if name == "*": + try: + imported = node.do_import_module() + except AstroidBuildingError: + continue + for name in imported.public_names(): + node.parent.set_local(name, node) + sort_locals(node.parent.scope().locals[name]) + else: + node.parent.set_local(asname or name, node) + 
sort_locals(node.parent.scope().locals[asname or name]) + + def delayed_assattr(self, node): + """Visit a AssAttr node + + This adds name to locals and handle members definition. + """ + try: + frame = node.frame(future=True) + for inferred in node.expr.infer(): + if inferred is util.Uninferable: + continue + try: + cls = inferred.__class__ + if cls is bases.Instance or cls is objects.ExceptionInstance: + inferred = inferred._proxied + iattrs = inferred.instance_attrs + if not _can_assign_attr(inferred, node.attrname): + continue + elif isinstance(inferred, bases.Instance): + # Const, Tuple or other containers that inherit from + # `Instance` + continue + elif inferred.is_function: + iattrs = inferred.instance_attrs + else: + iattrs = inferred.locals + except AttributeError: + # XXX log error + continue + values = iattrs.setdefault(node.attrname, []) + if node in values: + continue + # get assign in __init__ first XXX useful ? + if ( + frame.name == "__init__" + and values + and values[0].frame(future=True).name != "__init__" + ): + values.insert(0, node) + else: + values.append(node) + except InferenceError: + pass + + +def build_namespace_package_module(name: str, path: List[str]) -> nodes.Module: + return nodes.Module(name, path=path, package=True) + + +def parse(code, module_name="", path=None, apply_transforms=True): + """Parses a source string in order to obtain an astroid AST from it + + :param str code: The code for the module. + :param str module_name: The name for the module, if any + :param str path: The path for the module + :param bool apply_transforms: + Apply the transforms for the give code. Use it if you + don't want the default transforms to be applied. + """ + code = textwrap.dedent(code) + builder = AstroidBuilder( + manager=AstroidManager(), apply_transforms=apply_transforms + ) + return builder.string_build(code, modname=module_name, path=path) + + +def _extract_expressions(node): + """Find expressions in a call to _TRANSIENT_FUNCTION and extract them. + + The function walks the AST recursively to search for expressions that + are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an + expression, it completely removes the function call node from the tree, + replacing it by the wrapped expression inside the parent. + + :param node: An astroid node. + :type node: astroid.bases.NodeNG + :yields: The sequence of wrapped expressions on the modified tree + expression can be found. + """ + if ( + isinstance(node, nodes.Call) + and isinstance(node.func, nodes.Name) + and node.func.name == _TRANSIENT_FUNCTION + ): + real_expr = node.args[0] + real_expr.parent = node.parent + # Search for node in all _astng_fields (the fields checked when + # get_children is called) of its parent. Some of those fields may + # be lists or tuples, in which case the elements need to be checked. + # When we find it, replace it by real_expr, so that the AST looks + # like no call to _TRANSIENT_FUNCTION ever took place. + for name in node.parent._astroid_fields: + child = getattr(node.parent, name) + if isinstance(child, (list, tuple)): + for idx, compound_child in enumerate(child): + if compound_child is node: + child[idx] = real_expr + elif child is node: + setattr(node.parent, name, real_expr) + yield real_expr + else: + for child in node.get_children(): + yield from _extract_expressions(child) + + +def _find_statement_by_line(node, line): + """Extracts the statement on a specific line from an AST. 
+ + If the line number of node matches line, it will be returned; + otherwise its children are iterated and the function is called + recursively. + + :param node: An astroid node. + :type node: astroid.bases.NodeNG + :param line: The line number of the statement to extract. + :type line: int + :returns: The statement on the line, or None if no statement for the line + can be found. + :rtype: astroid.bases.NodeNG or None + """ + if isinstance(node, (nodes.ClassDef, nodes.FunctionDef, nodes.MatchCase)): + # This is an inaccuracy in the AST: the nodes that can be + # decorated do not carry explicit information on which line + # the actual definition (class/def), but .fromline seems to + # be close enough. + node_line = node.fromlineno + else: + node_line = node.lineno + + if node_line == line: + return node + + for child in node.get_children(): + result = _find_statement_by_line(child, line) + if result: + return result + + return None + + +def extract_node(code: str, module_name: str = "") -> Union[NodeNG, List[NodeNG]]: + """Parses some Python code as a module and extracts a designated AST node. + + Statements: + To extract one or more statement nodes, append #@ to the end of the line + + Examples: + >>> def x(): + >>> def y(): + >>> return 1 #@ + + The return statement will be extracted. + + >>> class X(object): + >>> def meth(self): #@ + >>> pass + + The function object 'meth' will be extracted. + + Expressions: + To extract arbitrary expressions, surround them with the fake + function call __(...). After parsing, the surrounded expression + will be returned and the whole AST (accessible via the returned + node's parent attribute) will look like the function call was + never there in the first place. + + Examples: + >>> a = __(1) + + The const node will be extracted. + + >>> def x(d=__(foo.bar)): pass + + The node containing the default argument will be extracted. + + >>> def foo(a, b): + >>> return 0 < __(len(a)) < b + + The node containing the function call 'len' will be extracted. + + If no statements or expressions are selected, the last toplevel + statement will be returned. + + If the selected statement is a discard statement, (i.e. an expression + turned into a statement), the wrapped expression is returned instead. + + For convenience, singleton lists are unpacked. + + :param str code: A piece of Python code that is parsed as + a module. Will be passed through textwrap.dedent first. + :param str module_name: The name of the module. + :returns: The designated node from the parse tree, or a list of nodes. + """ + + def _extract(node): + if isinstance(node, nodes.Expr): + return node.value + + return node + + requested_lines = [] + for idx, line in enumerate(code.splitlines()): + if line.strip().endswith(_STATEMENT_SELECTOR): + requested_lines.append(idx + 1) + + tree = parse(code, module_name=module_name) + if not tree.body: + raise ValueError("Empty tree, cannot extract from it") + + extracted = [] + if requested_lines: + extracted = [_find_statement_by_line(tree, line) for line in requested_lines] + + # Modifies the tree. 
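(A brief, hypothetical usage sketch for the extract_node helper documented above, relying only on the behaviour its docstring describes; the sample function is invented.)

from astroid import extract_node

return_stmt = extract_node(
    """
    def answer():
        return 6 * 7  #@
    """
)
# The trailing #@ marker selects the return statement.
print(return_stmt.as_string())  # return 6 * 7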
+ extracted.extend(_extract_expressions(tree)) + + if not extracted: + extracted.append(tree.body[-1]) + + extracted = [_extract(node) for node in extracted] + if len(extracted) == 1: + return extracted[0] + return extracted + + +def _extract_single_node(code: str, module_name: str = "") -> NodeNG: + """Call extract_node while making sure that only one value is returned.""" + ret = extract_node(code, module_name) + if isinstance(ret, list): + return ret[0] + return ret + + +def _parse_string(data, type_comments=True): + parser_module = get_parser_module(type_comments=type_comments) + try: + parsed = parser_module.parse(data + "\n", type_comments=type_comments) + except SyntaxError as exc: + # If the type annotations are misplaced for some reason, we do not want + # to fail the entire parsing of the file, so we need to retry the parsing without + # type comment support. + if exc.args[0] != MISPLACED_TYPE_ANNOTATION_ERROR or not type_comments: + raise + + parser_module = get_parser_module(type_comments=False) + parsed = parser_module.parse(data + "\n", type_comments=False) + return parsed, parser_module diff --git a/myenv/lib/python3.9/site-packages/astroid/const.py b/myenv/lib/python3.9/site-packages/astroid/const.py new file mode 100644 index 0000000..0cb2d09 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/const.py @@ -0,0 +1,32 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import enum +import sys + +PY36 = sys.version_info[:2] == (3, 6) +PY38 = sys.version_info[:2] == (3, 8) +PY37_PLUS = sys.version_info >= (3, 7) +PY38_PLUS = sys.version_info >= (3, 8) +PY39_PLUS = sys.version_info >= (3, 9) +PY310_PLUS = sys.version_info >= (3, 10) +PY311_PLUS = sys.version_info >= (3, 11) +BUILTINS = "builtins" # TODO Remove in 2.8 + +WIN32 = sys.platform == "win32" + +IS_PYPY = sys.implementation.name == "pypy" +IS_JYTHON = sys.implementation.name == "jython" + + +class Context(enum.Enum): + Load = 1 + Store = 2 + Del = 3 + + +# TODO Remove in 3.0 in favor of Context +Load = Context.Load +Store = Context.Store +Del = Context.Del diff --git a/myenv/lib/python3.9/site-packages/astroid/context.py b/myenv/lib/python3.9/site-packages/astroid/context.py new file mode 100644 index 0000000..a04996e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/context.py @@ -0,0 +1,201 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Various context related utilities, including inference and call contexts.""" +import contextlib +import pprint +from typing import TYPE_CHECKING, List, MutableMapping, Optional, Sequence, Tuple + +if TYPE_CHECKING: + from astroid.nodes.node_classes import Keyword, NodeNG + + +_INFERENCE_CACHE = {} + + +def _invalidate_cache(): + _INFERENCE_CACHE.clear() + + +class InferenceContext: + """Provide context for inference + + Store already inferred nodes to save time + Account for already visited nodes to stop infinite recursion + """ + + __slots__ = ( + "path", + "lookupname", + "callcontext", + "boundnode", + "extra_context", + "_nodes_inferred", + ) + + max_inferred = 100 + + def __init__(self, path=None, nodes_inferred=None): + if nodes_inferred is None: + self._nodes_inferred = [0] + else: + 
self._nodes_inferred = nodes_inferred + self.path = path or set() + """ + :type: set(tuple(NodeNG, optional(str))) + + Path of visited nodes and their lookupname + + Currently this key is ``(node, context.lookupname)`` + """ + self.lookupname = None + """ + :type: optional[str] + + The original name of the node + + e.g. + foo = 1 + The inference of 'foo' is nodes.Const(1) but the lookup name is 'foo' + """ + self.callcontext = None + """ + :type: optional[CallContext] + + The call arguments and keywords for the given context + """ + self.boundnode = None + """ + :type: optional[NodeNG] + + The bound node of the given context + + e.g. the bound node of object.__new__(cls) is the object node + """ + self.extra_context = {} + """ + :type: dict(NodeNG, Context) + + Context that needs to be passed down through call stacks + for call arguments + """ + + @property + def nodes_inferred(self): + """ + Number of nodes inferred in this context and all its clones/descendents + + Wrap inner value in a mutable cell to allow for mutating a class + variable in the presence of __slots__ + """ + return self._nodes_inferred[0] + + @nodes_inferred.setter + def nodes_inferred(self, value): + self._nodes_inferred[0] = value + + @property + def inferred( + self, + ) -> MutableMapping[ + Tuple["NodeNG", Optional[str], Optional[str], Optional[str]], Sequence["NodeNG"] + ]: + """ + Inferred node contexts to their mapped results + + Currently the key is ``(node, lookupname, callcontext, boundnode)`` + and the value is tuple of the inferred results + """ + return _INFERENCE_CACHE + + def push(self, node): + """Push node into inference path + + :return: True if node is already in context path else False + :rtype: bool + + Allows one to see if the given node has already + been looked at for this inference context""" + name = self.lookupname + if (node, name) in self.path: + return True + + self.path.add((node, name)) + return False + + def clone(self): + """Clone inference path + + For example, each side of a binary operation (BinOp) + starts with the same context but diverge as each side is inferred + so the InferenceContext will need be cloned""" + # XXX copy lookupname/callcontext ? 
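(A small, hypothetical sketch of threading an InferenceContext, as documented above, through a single inference call; the one-line module is invented.)

import astroid
from astroid.context import InferenceContext

module = astroid.parse("value = 40 + 2")
binop = module.body[0].value  # the 40 + 2 expression
ctx = InferenceContext()
print(next(binop.infer(context=ctx)))  # a Const node holding 42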
+ clone = InferenceContext(self.path.copy(), nodes_inferred=self._nodes_inferred) + clone.callcontext = self.callcontext + clone.boundnode = self.boundnode + clone.extra_context = self.extra_context + return clone + + @contextlib.contextmanager + def restore_path(self): + path = set(self.path) + yield + self.path = path + + def __str__(self): + state = ( + f"{field}={pprint.pformat(getattr(self, field), width=80 - len(field))}" + for field in self.__slots__ + ) + return "{}({})".format(type(self).__name__, ",\n ".join(state)) + + +class CallContext: + """Holds information for a call site.""" + + __slots__ = ("args", "keywords", "callee") + + def __init__( + self, + args: List["NodeNG"], + keywords: Optional[List["Keyword"]] = None, + callee: Optional["NodeNG"] = None, + ): + self.args = args # Call positional arguments + if keywords: + keywords = [(arg.arg, arg.value) for arg in keywords] + else: + keywords = [] + self.keywords = keywords # Call keyword arguments + self.callee = callee # Function being called + + +def copy_context(context: Optional[InferenceContext]) -> InferenceContext: + """Clone a context if given, or return a fresh contexxt""" + if context is not None: + return context.clone() + + return InferenceContext() + + +def bind_context_to_node(context, node): + """Give a context a boundnode + to retrieve the correct function name or attribute value + with from further inference. + + Do not use an existing context since the boundnode could then + be incorrectly propagated higher up in the call stack. + + :param context: Context to use + :type context: Optional(context) + + :param node: Node to do name lookups from + :type node NodeNG: + + :returns: A new context + :rtype: InferenceContext + """ + context = copy_context(context) + context.boundnode = node + return context diff --git a/myenv/lib/python3.9/site-packages/astroid/decorators.py b/myenv/lib/python3.9/site-packages/astroid/decorators.py new file mode 100644 index 0000000..ec16fea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/decorators.py @@ -0,0 +1,273 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" A few useful function/method decorators.""" + +import functools +import inspect +import sys +import warnings +from typing import Callable, TypeVar + +import wrapt + +from astroid import util +from astroid.context import InferenceContext +from astroid.exceptions import InferenceError + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +R = TypeVar("R") +P = ParamSpec("P") + + +@wrapt.decorator +def cached(func, instance, args, kwargs): + """Simple decorator to cache result of method calls without args.""" + cache = getattr(instance, "__cache", None) + if cache is None: + instance.__cache = cache = {} + try: + return cache[func] + except KeyError: + cache[func] = result = func(*args, **kwargs) + return result + + +# TODO: Remove when support for 3.7 is dropped +# TODO: astroid 3.0 -> move class behind sys.version_info < (3, 8) guard +class cachedproperty: + """Provides a cached property equivalent to the stacking of + @cached and @property, but more efficient. + + After first usage, the becomes part of the object's + __dict__. Doing: + + del obj. empties the cache. + + Idea taken from the pyramid_ framework and the mercurial_ project. + + .. 
_pyramid: http://pypi.python.org/pypi/pyramid + .. _mercurial: http://pypi.python.org/pypi/Mercurial + """ + + __slots__ = ("wrapped",) + + def __init__(self, wrapped): + if sys.version_info >= (3, 8): + warnings.warn( + "cachedproperty has been deprecated and will be removed in astroid 3.0 for Python 3.8+. " + "Use functools.cached_property instead.", + DeprecationWarning, + ) + try: + wrapped.__name__ + except AttributeError as exc: + raise TypeError(f"{wrapped} must have a __name__ attribute") from exc + self.wrapped = wrapped + + @property + def __doc__(self): + doc = getattr(self.wrapped, "__doc__", None) + return "%s" % ( + "\n%s" % doc if doc else "" + ) + + def __get__(self, inst, objtype=None): + if inst is None: + return self + val = self.wrapped(inst) + setattr(inst, self.wrapped.__name__, val) + return val + + +def path_wrapper(func): + """return the given infer function wrapped to handle the path + + Used to stop inference if the node has already been looked + at for a given `InferenceContext` to prevent infinite recursion + """ + + @functools.wraps(func) + def wrapped(node, context=None, _func=func, **kwargs): + """wrapper function handling context""" + if context is None: + context = InferenceContext() + if context.push(node): + return + + yielded = set() + + for res in _func(node, context, **kwargs): + # unproxy only true instance, not const, tuple, dict... + if res.__class__.__name__ == "Instance": + ares = res._proxied + else: + ares = res + if ares not in yielded: + yield res + yielded.add(ares) + + return wrapped + + +@wrapt.decorator +def yes_if_nothing_inferred(func, instance, args, kwargs): + generator = func(*args, **kwargs) + + try: + yield next(generator) + except StopIteration: + # generator is empty + yield util.Uninferable + return + + yield from generator + + +@wrapt.decorator +def raise_if_nothing_inferred(func, instance, args, kwargs): + generator = func(*args, **kwargs) + try: + yield next(generator) + except StopIteration as error: + # generator is empty + if error.args: + # pylint: disable=not-a-mapping + raise InferenceError(**error.args[0]) from error + raise InferenceError( + "StopIteration raised without any error information." + ) from error + + yield from generator + + +# Expensive decorators only used to emit Deprecation warnings. +# If no other than the default DeprecationWarning are enabled, +# fall back to passthrough implementations. +if util.check_warnings_filter(): + + def deprecate_default_argument_values( + astroid_version: str = "3.0", **arguments: str + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + """Decorator which emits a DeprecationWarning if any arguments specified + are None or not passed at all. + + Arguments should be a key-value mapping, with the key being the argument to check + and the value being a type annotation as string for the value of the argument. + + To improve performance, only used when DeprecationWarnings other than + the default one are enabled. 
+ """ + # Helpful links + # Decorator for DeprecationWarning: https://stackoverflow.com/a/49802489 + # Typing of stacked decorators: https://stackoverflow.com/a/68290080 + + def deco(func: Callable[P, R]) -> Callable[P, R]: + """Decorator function.""" + + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + """Emit DeprecationWarnings if conditions are met.""" + + keys = list(inspect.signature(func).parameters.keys()) + for arg, type_annotation in arguments.items(): + try: + index = keys.index(arg) + except ValueError: + raise Exception( + f"Can't find argument '{arg}' for '{args[0].__class__.__qualname__}'" + ) from None + if ( + # Check kwargs + # - if found, check it's not None + (arg in kwargs and kwargs[arg] is None) + # Check args + # - make sure not in kwargs + # - len(args) needs to be long enough, if too short + # arg can't be in args either + # - args[index] should not be None + or arg not in kwargs + and ( + index == -1 + or len(args) <= index + or (len(args) > index and args[index] is None) + ) + ): + warnings.warn( + f"'{arg}' will be a required argument for " + f"'{args[0].__class__.__qualname__}.{func.__name__}' in astroid {astroid_version} " + f"('{arg}' should be of type: '{type_annotation}')", + DeprecationWarning, + ) + return func(*args, **kwargs) + + return wrapper + + return deco + + def deprecate_arguments( + astroid_version: str = "3.0", **arguments: str + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + """Decorator which emits a DeprecationWarning if any arguments specified + are passed. + + Arguments should be a key-value mapping, with the key being the argument to check + and the value being a string that explains what to do instead of passing the argument. + + To improve performance, only used when DeprecationWarnings other than + the default one are enabled. 
+ """ + + def deco(func: Callable[P, R]) -> Callable[P, R]: + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + + keys = list(inspect.signature(func).parameters.keys()) + for arg, note in arguments.items(): + try: + index = keys.index(arg) + except ValueError: + raise Exception( + f"Can't find argument '{arg}' for '{args[0].__class__.__qualname__}'" + ) from None + if arg in kwargs or len(args) > index: + warnings.warn( + f"The argument '{arg}' for " + f"'{args[0].__class__.__qualname__}.{func.__name__}' is deprecated " + f"and will be removed in astroid {astroid_version} ({note})", + DeprecationWarning, + ) + return func(*args, **kwargs) + + return wrapper + + return deco + +else: + + def deprecate_default_argument_values( + astroid_version: str = "3.0", **arguments: str + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + """Passthrough decorator to improve performance if DeprecationWarnings are disabled.""" + + def deco(func: Callable[P, R]) -> Callable[P, R]: + """Decorator function.""" + return func + + return deco + + def deprecate_arguments( + astroid_version: str = "3.0", **arguments: str + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + """Passthrough decorator to improve performance if DeprecationWarnings are disabled.""" + + def deco(func: Callable[P, R]) -> Callable[P, R]: + """Decorator function.""" + return func + + return deco diff --git a/myenv/lib/python3.9/site-packages/astroid/exceptions.py b/myenv/lib/python3.9/site-packages/astroid/exceptions.py new file mode 100644 index 0000000..c3909b2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/exceptions.py @@ -0,0 +1,292 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""this module contains exceptions used in the astroid library +""" +from typing import TYPE_CHECKING + +from astroid import util + +if TYPE_CHECKING: + from astroid import nodes + +__all__ = ( + "AstroidBuildingError", + "AstroidBuildingException", + "AstroidError", + "AstroidImportError", + "AstroidIndexError", + "AstroidSyntaxError", + "AstroidTypeError", + "AstroidValueError", + "AttributeInferenceError", + "BinaryOperationError", + "DuplicateBasesError", + "InconsistentMroError", + "InferenceError", + "InferenceOverwriteError", + "MroError", + "NameInferenceError", + "NoDefault", + "NotFoundError", + "OperationError", + "ParentMissingError", + "ResolveError", + "StatementMissing", + "SuperArgumentTypeError", + "SuperError", + "TooManyLevelsError", + "UnaryOperationError", + "UnresolvableName", + "UseInferenceDefault", +) + + +class AstroidError(Exception): + """base exception class for all astroid related exceptions + + AstroidError and its subclasses are structured, intended to hold + objects representing state when the exception is thrown. Field + values are passed to the constructor as keyword-only arguments. + Each subclass has its own set of standard fields, but use your + best judgment to decide whether a specific exception instance + needs more or fewer fields for debugging. Field values may be + used to lazily generate the error message: self.message.format() + will be called with the field names and values supplied as keyword + arguments. 
+ """ + + def __init__(self, message="", **kws): + super().__init__(message) + self.message = message + for key, value in kws.items(): + setattr(self, key, value) + + def __str__(self): + return self.message.format(**vars(self)) + + +class AstroidBuildingError(AstroidError): + """exception class when we are unable to build an astroid representation + + Standard attributes: + modname: Name of the module that AST construction failed for. + error: Exception raised during construction. + """ + + def __init__(self, message="Failed to import module {modname}.", **kws): + super().__init__(message, **kws) + + +class AstroidImportError(AstroidBuildingError): + """Exception class used when a module can't be imported by astroid.""" + + +class TooManyLevelsError(AstroidImportError): + """Exception class which is raised when a relative import was beyond the top-level. + + Standard attributes: + level: The level which was attempted. + name: the name of the module on which the relative import was attempted. + """ + + level = None + name = None + + def __init__( + self, + message="Relative import with too many levels " "({level}) for module {name!r}", + **kws, + ): + super().__init__(message, **kws) + + +class AstroidSyntaxError(AstroidBuildingError): + """Exception class used when a module can't be parsed.""" + + +class NoDefault(AstroidError): + """raised by function's `default_value` method when an argument has + no default value + + Standard attributes: + func: Function node. + name: Name of argument without a default. + """ + + func = None + name = None + + def __init__(self, message="{func!r} has no default for {name!r}.", **kws): + super().__init__(message, **kws) + + +class ResolveError(AstroidError): + """Base class of astroid resolution/inference error. + + ResolveError is not intended to be raised. + + Standard attributes: + context: InferenceContext object. + """ + + context = None + + +class MroError(ResolveError): + """Error raised when there is a problem with method resolution of a class. + + Standard attributes: + mros: A sequence of sequences containing ClassDef nodes. + cls: ClassDef node whose MRO resolution failed. + context: InferenceContext object. + """ + + mros = () + cls = None + + def __str__(self): + mro_names = ", ".join(f"({', '.join(b.name for b in m)})" for m in self.mros) + return self.message.format(mros=mro_names, cls=self.cls) + + +class DuplicateBasesError(MroError): + """Error raised when there are duplicate bases in the same class bases.""" + + +class InconsistentMroError(MroError): + """Error raised when a class's MRO is inconsistent.""" + + +class SuperError(ResolveError): + """Error raised when there is a problem with a *super* call. + + Standard attributes: + *super_*: The Super instance that raised the exception. + context: InferenceContext object. + """ + + super_ = None + + def __str__(self): + return self.message.format(**vars(self.super_)) + + +class InferenceError(ResolveError): + """raised when we are unable to infer a node + + Standard attributes: + node: The node inference was called on. + context: InferenceContext object. + """ + + node = None + context = None + + def __init__(self, message="Inference failed for {node!r}.", **kws): + super().__init__(message, **kws) + + +# Why does this inherit from InferenceError rather than ResolveError? +# Changing it causes some inference tests to fail. +class NameInferenceError(InferenceError): + """Raised when a name lookup fails, corresponds to NameError. 
+ + Standard attributes: + name: The name for which lookup failed, as a string. + scope: The node representing the scope in which the lookup occurred. + context: InferenceContext object. + """ + + name = None + scope = None + + def __init__(self, message="{name!r} not found in {scope!r}.", **kws): + super().__init__(message, **kws) + + +class AttributeInferenceError(ResolveError): + """Raised when an attribute lookup fails, corresponds to AttributeError. + + Standard attributes: + target: The node for which lookup failed. + attribute: The attribute for which lookup failed, as a string. + context: InferenceContext object. + """ + + target = None + attribute = None + + def __init__(self, message="{attribute!r} not found on {target!r}.", **kws): + super().__init__(message, **kws) + + +class UseInferenceDefault(Exception): + """exception to be raised in custom inference function to indicate that it + should go back to the default behaviour + """ + + +class _NonDeducibleTypeHierarchy(Exception): + """Raised when is_subtype / is_supertype can't deduce the relation between two types.""" + + +class AstroidIndexError(AstroidError): + """Raised when an Indexable / Mapping does not have an index / key.""" + + +class AstroidTypeError(AstroidError): + """Raised when a TypeError would be expected in Python code.""" + + +class AstroidValueError(AstroidError): + """Raised when a ValueError would be expected in Python code.""" + + +class InferenceOverwriteError(AstroidError): + """Raised when an inference tip is overwritten + + Currently only used for debugging. + """ + + +class ParentMissingError(AstroidError): + """Raised when a node which is expected to have a parent attribute is missing one + + Standard attributes: + target: The node for which the parent lookup failed. + """ + + def __init__(self, target: "nodes.NodeNG") -> None: + self.target = target + super().__init__(message=f"Parent not found on {target!r}.") + + +class StatementMissing(ParentMissingError): + """Raised when a call to node.statement() does not return a node. This is because + a node in the chain does not have a parent attribute and therefore does not + return a node for statement(). + + Standard attributes: + target: The node for which the parent lookup failed. + """ + + def __init__(self, target: "nodes.NodeNG") -> None: + # pylint: disable-next=bad-super-call + # https://github.com/PyCQA/pylint/issues/2903 + # https://github.com/PyCQA/astroid/pull/1217#discussion_r744149027 + super(ParentMissingError, self).__init__( + message=f"Statement not found on {target!r}" + ) + + +# Backwards-compatibility aliases +OperationError = util.BadOperationMessage +UnaryOperationError = util.BadUnaryOperationMessage +BinaryOperationError = util.BadBinaryOperationMessage + +SuperArgumentTypeError = SuperError +UnresolvableName = NameInferenceError +NotFoundError = AttributeInferenceError +AstroidBuildingException = AstroidBuildingError diff --git a/myenv/lib/python3.9/site-packages/astroid/filter_statements.py b/myenv/lib/python3.9/site-packages/astroid/filter_statements.py new file mode 100644 index 0000000..86a63f3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/filter_statements.py @@ -0,0 +1,239 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""_filter_stmts and helper functions. This method gets used in LocalsDictnodes.NodeNG._scope_lookup. 
+It is not considered public. +""" + +from typing import List, Optional, Tuple + +from astroid import nodes + + +def _get_filtered_node_statements( + base_node: nodes.NodeNG, stmt_nodes: List[nodes.NodeNG] +) -> List[Tuple[nodes.NodeNG, nodes.Statement]]: + statements = [(node, node.statement(future=True)) for node in stmt_nodes] + # Next we check if we have ExceptHandlers that are parent + # of the underlying variable, in which case the last one survives + if len(statements) > 1 and all( + isinstance(stmt, nodes.ExceptHandler) for _, stmt in statements + ): + statements = [ + (node, stmt) for node, stmt in statements if stmt.parent_of(base_node) + ] + return statements + + +def _is_from_decorator(node): + """Return True if the given node is the child of a decorator""" + return any(isinstance(parent, nodes.Decorators) for parent in node.node_ancestors()) + + +def _get_if_statement_ancestor(node: nodes.NodeNG) -> Optional[nodes.If]: + """Return the first parent node that is an If node (or None)""" + for parent in node.node_ancestors(): + if isinstance(parent, nodes.If): + return parent + return None + + +def _filter_stmts(base_node: nodes.NodeNG, stmts, frame, offset): + """Filter the given list of statements to remove ignorable statements. + + If base_node is not a frame itself and the name is found in the inner + frame locals, statements will be filtered to remove ignorable + statements according to base_node's location. + + :param stmts: The statements to filter. + :type stmts: list(nodes.NodeNG) + + :param frame: The frame that all of the given statements belong to. + :type frame: nodes.NodeNG + + :param offset: The line offset to filter statements up to. + :type offset: int + + :returns: The filtered statements. + :rtype: list(nodes.NodeNG) + """ + # if offset == -1, my actual frame is not the inner frame but its parent + # + # class A(B): pass + # + # we need this to resolve B correctly + if offset == -1: + myframe = base_node.frame().parent.frame() + else: + myframe = base_node.frame() + # If the frame of this node is the same as the statement + # of this node, then the node is part of a class or + # a function definition and the frame of this node should be the + # the upper frame, not the frame of the definition. + # For more information why this is important, + # see Pylint issue #295. + # For example, for 'b', the statement is the same + # as the frame / scope: + # + # def test(b=1): + # ... 
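(A hypothetical, observable consequence of this statement filtering once it feeds name lookup: for straight-line reassignments only the most recent binding survives. The three-line snippet is invented.)

from astroid import extract_node

name_node = extract_node(
    """
    x = 1
    x = 2
    x  #@
    """
)
_, bindings = name_node.lookup("x")
print([b.parent.as_string() for b in bindings])  # ['x = 2'] -- the earlier assignment is dropped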
+ if ( + base_node.parent + and base_node.statement(future=True) is myframe + and myframe.parent + ): + myframe = myframe.parent.frame() + + mystmt: Optional[nodes.Statement] = None + if base_node.parent: + mystmt = base_node.statement(future=True) + + # line filtering if we are in the same frame + # + # take care node may be missing lineno information (this is the case for + # nodes inserted for living objects) + if myframe is frame and mystmt and mystmt.fromlineno is not None: + assert mystmt.fromlineno is not None, mystmt + mylineno = mystmt.fromlineno + offset + else: + # disabling lineno filtering + mylineno = 0 + + _stmts = [] + _stmt_parents = [] + statements = _get_filtered_node_statements(base_node, stmts) + for node, stmt in statements: + # line filtering is on and we have reached our location, break + if stmt.fromlineno and stmt.fromlineno > mylineno > 0: + break + # Ignore decorators with the same name as the + # decorated function + # Fixes issue #375 + if mystmt is stmt and _is_from_decorator(base_node): + continue + if node.has_base(base_node): + break + + if isinstance(node, nodes.EmptyNode): + # EmptyNode does not have assign_type(), so just add it and move on + _stmts.append(node) + continue + + assign_type = node.assign_type() + _stmts, done = assign_type._get_filtered_stmts(base_node, node, _stmts, mystmt) + if done: + break + + optional_assign = assign_type.optional_assign + if optional_assign and assign_type.parent_of(base_node): + # we are inside a loop, loop var assignment is hiding previous + # assignment + _stmts = [node] + _stmt_parents = [stmt.parent] + continue + + if isinstance(assign_type, nodes.NamedExpr): + # If the NamedExpr is in an if statement we do some basic control flow inference + if_parent = _get_if_statement_ancestor(assign_type) + if if_parent: + # If the if statement is within another if statement we append the node + # to possible statements + if _get_if_statement_ancestor(if_parent): + optional_assign = False + _stmts.append(node) + _stmt_parents.append(stmt.parent) + # If the if statement is first-level and not within an orelse block + # we know that it will be evaluated + elif not if_parent.is_orelse: + _stmts = [node] + _stmt_parents = [stmt.parent] + # Else we do not known enough about the control flow to be 100% certain + # and we append to possible statements + else: + _stmts.append(node) + _stmt_parents.append(stmt.parent) + else: + _stmts = [node] + _stmt_parents = [stmt.parent] + + # XXX comment various branches below!!! + try: + pindex = _stmt_parents.index(stmt.parent) + except ValueError: + pass + else: + # we got a parent index, this means the currently visited node + # is at the same block level as a previously visited node + if _stmts[pindex].assign_type().parent_of(assign_type): + # both statements are not at the same block level + continue + # if currently visited node is following previously considered + # assignment and both are not exclusive, we can drop the + # previous one. 
For instance in the following code :: + # + # if a: + # x = 1 + # else: + # x = 2 + # print x + # + # we can't remove neither x = 1 nor x = 2 when looking for 'x' + # of 'print x'; while in the following :: + # + # x = 1 + # x = 2 + # print x + # + # we can remove x = 1 when we see x = 2 + # + # moreover, on loop assignment types, assignment won't + # necessarily be done if the loop has no iteration, so we don't + # want to clear previous assignments if any (hence the test on + # optional_assign) + if not (optional_assign or nodes.are_exclusive(_stmts[pindex], node)): + del _stmt_parents[pindex] + del _stmts[pindex] + + # If base_node and node are exclusive, then we can ignore node + if nodes.are_exclusive(base_node, node): + continue + + # An AssignName node overrides previous assignments if: + # 1. node's statement always assigns + # 2. node and base_node are in the same block (i.e., has the same parent as base_node) + if isinstance(node, (nodes.NamedExpr, nodes.AssignName)): + if isinstance(stmt, nodes.ExceptHandler): + # If node's statement is an ExceptHandler, then it is the variable + # bound to the caught exception. If base_node is not contained within + # the exception handler block, node should override previous assignments; + # otherwise, node should be ignored, as an exception variable + # is local to the handler block. + if stmt.parent_of(base_node): + _stmts = [] + _stmt_parents = [] + else: + continue + elif not optional_assign and mystmt and stmt.parent is mystmt.parent: + _stmts = [] + _stmt_parents = [] + elif isinstance(node, nodes.DelName): + # Remove all previously stored assignments + _stmts = [] + _stmt_parents = [] + continue + # Add the new assignment + _stmts.append(node) + if isinstance(node, nodes.Arguments) or isinstance( + node.parent, nodes.Arguments + ): + # Special case for _stmt_parents when node is a function parameter; + # in this case, stmt is the enclosing FunctionDef, which is what we + # want to add to _stmt_parents, not stmt.parent. This case occurs when + # node is an Arguments node (representing varargs or kwargs parameter), + # and when node.parent is an Arguments node (other parameters). + # See issue #180. + _stmt_parents.append(stmt) + else: + _stmt_parents.append(stmt.parent) + return _stmts diff --git a/myenv/lib/python3.9/site-packages/astroid/helpers.py b/myenv/lib/python3.9/site-packages/astroid/helpers.py new file mode 100644 index 0000000..8462f87 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/helpers.py @@ -0,0 +1,304 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Various helper utilities. 
+""" + + +from astroid import bases, manager, nodes, raw_building, util +from astroid.context import CallContext, InferenceContext +from astroid.exceptions import ( + AstroidTypeError, + AttributeInferenceError, + InferenceError, + MroError, + _NonDeducibleTypeHierarchy, +) +from astroid.nodes import scoped_nodes + + +def _build_proxy_class(cls_name, builtins): + proxy = raw_building.build_class(cls_name) + proxy.parent = builtins + return proxy + + +def _function_type(function, builtins): + if isinstance(function, scoped_nodes.Lambda): + if function.root().name == "builtins": + cls_name = "builtin_function_or_method" + else: + cls_name = "function" + elif isinstance(function, bases.BoundMethod): + cls_name = "method" + elif isinstance(function, bases.UnboundMethod): + cls_name = "function" + return _build_proxy_class(cls_name, builtins) + + +def _object_type(node, context=None): + astroid_manager = manager.AstroidManager() + builtins = astroid_manager.builtins_module + context = context or InferenceContext() + + for inferred in node.infer(context=context): + if isinstance(inferred, scoped_nodes.ClassDef): + if inferred.newstyle: + metaclass = inferred.metaclass(context=context) + if metaclass: + yield metaclass + continue + yield builtins.getattr("type")[0] + elif isinstance(inferred, (scoped_nodes.Lambda, bases.UnboundMethod)): + yield _function_type(inferred, builtins) + elif isinstance(inferred, scoped_nodes.Module): + yield _build_proxy_class("module", builtins) + elif isinstance(inferred, nodes.Unknown): + raise InferenceError + else: + yield inferred._proxied + + +def object_type(node, context=None): + """Obtain the type of the given node + + This is used to implement the ``type`` builtin, which means that it's + used for inferring type calls, as well as used in a couple of other places + in the inference. + The node will be inferred first, so this function can support all + sorts of objects, as long as they support inference. 
+ """ + + try: + types = set(_object_type(node, context)) + except InferenceError: + return util.Uninferable + if len(types) > 1 or not types: + return util.Uninferable + return list(types)[0] + + +def _object_type_is_subclass(obj_type, class_or_seq, context=None): + if not isinstance(class_or_seq, (tuple, list)): + class_seq = (class_or_seq,) + else: + class_seq = class_or_seq + + if obj_type is util.Uninferable: + return util.Uninferable + + # Instances are not types + class_seq = [ + item if not isinstance(item, bases.Instance) else util.Uninferable + for item in class_seq + ] + # strict compatibility with issubclass + # issubclass(type, (object, 1)) evaluates to true + # issubclass(object, (1, type)) raises TypeError + for klass in class_seq: + if klass is util.Uninferable: + raise AstroidTypeError("arg 2 must be a type or tuple of types") + + for obj_subclass in obj_type.mro(): + if obj_subclass == klass: + return True + return False + + +def object_isinstance(node, class_or_seq, context=None): + """Check if a node 'isinstance' any node in class_or_seq + + :param node: A given node + :param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]] + :rtype: bool + + :raises AstroidTypeError: if the given ``classes_or_seq`` are not types + """ + obj_type = object_type(node, context) + if obj_type is util.Uninferable: + return util.Uninferable + return _object_type_is_subclass(obj_type, class_or_seq, context=context) + + +def object_issubclass(node, class_or_seq, context=None): + """Check if a type is a subclass of any node in class_or_seq + + :param node: A given node + :param class_or_seq: Union[Nodes.NodeNG, Sequence[nodes.NodeNG]] + :rtype: bool + + :raises AstroidTypeError: if the given ``classes_or_seq`` are not types + :raises AstroidError: if the type of the given node cannot be inferred + or its type's mro doesn't work + """ + if not isinstance(node, nodes.ClassDef): + raise TypeError(f"{node} needs to be a ClassDef node") + return _object_type_is_subclass(node, class_or_seq, context=context) + + +def safe_infer(node, context=None): + """Return the inferred value for the given node. + + Return None if inference failed or if there is some ambiguity (more than + one node has been inferred). + """ + try: + inferit = node.infer(context=context) + value = next(inferit) + except (InferenceError, StopIteration): + return None + try: + next(inferit) + return None # None if there is ambiguity on the inferred node + except InferenceError: + return None # there is some kind of ambiguity + except StopIteration: + return value + + +def has_known_bases(klass, context=None): + """Return true if all base classes of a class could be inferred.""" + try: + return klass._all_bases_known + except AttributeError: + pass + for base in klass.bases: + result = safe_infer(base, context=context) + # TODO: check for A->B->A->B pattern in class structure too? + if ( + not isinstance(result, scoped_nodes.ClassDef) + or result is klass + or not has_known_bases(result, context=context) + ): + klass._all_bases_known = False + return False + klass._all_bases_known = True + return True + + +def _type_check(type1, type2): + if not all(map(has_known_bases, (type1, type2))): + raise _NonDeducibleTypeHierarchy + + if not all([type1.newstyle, type2.newstyle]): + return False + try: + return type1 in type2.mro()[:-1] + except MroError as e: + # The MRO is invalid. 
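(A short, hypothetical sketch of the safe_infer helper defined above; both snippets are invented. A node is returned only when inference is unambiguous.)

from astroid import extract_node
from astroid.helpers import safe_infer

unambiguous = extract_node("1 + 1  #@")
ambiguous = extract_node(
    """
    if unknown_condition:
        x = 1
    else:
        x = 'one'
    x  #@
    """
)
print(safe_infer(unambiguous))  # a Const node holding 2
print(safe_infer(ambiguous))    # None -- two candidate values, so no single answer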
+ raise _NonDeducibleTypeHierarchy from e + + +def is_subtype(type1, type2): + """Check if *type1* is a subtype of *type2*.""" + return _type_check(type1=type2, type2=type1) + + +def is_supertype(type1, type2): + """Check if *type2* is a supertype of *type1*.""" + return _type_check(type1, type2) + + +def class_instance_as_index(node): + """Get the value as an index for the given instance. + + If an instance provides an __index__ method, then it can + be used in some scenarios where an integer is expected, + for instance when multiplying or subscripting a list. + """ + context = InferenceContext() + try: + for inferred in node.igetattr("__index__", context=context): + if not isinstance(inferred, bases.BoundMethod): + continue + + context.boundnode = node + context.callcontext = CallContext(args=[], callee=inferred) + for result in inferred.infer_call_result(node, context=context): + if isinstance(result, nodes.Const) and isinstance(result.value, int): + return result + except InferenceError: + pass + return None + + +def object_len(node, context=None): + """Infer length of given node object + + :param Union[nodes.ClassDef, nodes.Instance] node: + :param node: Node to infer length of + + :raises AstroidTypeError: If an invalid node is returned + from __len__ method or no __len__ method exists + :raises InferenceError: If the given node cannot be inferred + or if multiple nodes are inferred or if the code executed in python + would result in a infinite recursive check for length + :rtype int: Integer length of node + """ + # pylint: disable=import-outside-toplevel; circular import + from astroid.objects import FrozenSet + + inferred_node = safe_infer(node, context=context) + + # prevent self referential length calls from causing a recursion error + # see https://github.com/PyCQA/astroid/issues/777 + node_frame = node.frame(future=True) + if ( + isinstance(node_frame, scoped_nodes.FunctionDef) + and node_frame.name == "__len__" + and hasattr(inferred_node, "_proxied") + and inferred_node._proxied == node_frame.parent + ): + message = ( + "Self referential __len__ function will " + "cause a RecursionError on line {} of {}".format( + node.lineno, node.root().file + ) + ) + raise InferenceError(message) + + if inferred_node is None or inferred_node is util.Uninferable: + raise InferenceError(node=node) + if isinstance(inferred_node, nodes.Const) and isinstance( + inferred_node.value, (bytes, str) + ): + return len(inferred_node.value) + if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)): + return len(inferred_node.elts) + if isinstance(inferred_node, nodes.Dict): + return len(inferred_node.items) + + node_type = object_type(inferred_node, context=context) + if not node_type: + raise InferenceError(node=node) + + try: + len_call = next(node_type.igetattr("__len__", context=context)) + except StopIteration as e: + raise AstroidTypeError(str(e)) from e + except AttributeInferenceError as e: + raise AstroidTypeError( + f"object of type '{node_type.pytype()}' has no len()" + ) from e + + inferred = len_call.infer_call_result(node, context) + if inferred is util.Uninferable: + raise InferenceError(node=node, context=context) + result_of_len = next(inferred, None) + if ( + isinstance(result_of_len, nodes.Const) + and result_of_len.pytype() == "builtins.int" + ): + return result_of_len.value + if ( + result_of_len is None + or isinstance(result_of_len, bases.Instance) + and result_of_len.is_subtype_of("builtins.int") + ): + # Fake a result as we don't know the arguments of the 
instance call. + return 0 + raise AstroidTypeError( + f"'{result_of_len}' object cannot be interpreted as an integer" + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/inference.py b/myenv/lib/python3.9/site-packages/astroid/inference.py new file mode 100644 index 0000000..d220300 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/inference.py @@ -0,0 +1,1081 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""this module contains a set of functions to handle inference on astroid trees +""" + +import ast +import functools +import itertools +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + Iterable, + Iterator, + Optional, + Type, + TypeVar, + Union, +) + +import wrapt + +from astroid import bases, decorators, helpers, nodes, protocols, util +from astroid.context import ( + CallContext, + InferenceContext, + bind_context_to_node, + copy_context, +) +from astroid.exceptions import ( + AstroidBuildingError, + AstroidError, + AstroidIndexError, + AstroidTypeError, + AttributeInferenceError, + InferenceError, + NameInferenceError, + _NonDeducibleTypeHierarchy, +) +from astroid.interpreter import dunder_lookup +from astroid.manager import AstroidManager +from astroid.typing import InferenceErrorInfo + +if TYPE_CHECKING: + from astroid.objects import Property + +# Prevents circular imports +objects = util.lazy_import("objects") + + +_FunctionDefT = TypeVar("_FunctionDefT", bound=nodes.FunctionDef) + + +# .infer method ############################################################### + + +def infer_end(self, context=None): + """Inference's end for nodes that yield themselves on inference + + These are objects for which inference does not have any semantic, + such as Module or Consts. 
+ """ + yield self + + +# We add ignores to all these assignments in this file +# See https://github.com/python/mypy/issues/2427 +nodes.Module._infer = infer_end # type: ignore[assignment] +nodes.ClassDef._infer = infer_end # type: ignore[assignment] +nodes.Lambda._infer = infer_end # type: ignore[assignment] +nodes.Const._infer = infer_end # type: ignore[assignment] +nodes.Slice._infer = infer_end # type: ignore[assignment] + + +def _infer_sequence_helper(node, context=None): + """Infer all values based on _BaseContainer.elts""" + values = [] + + for elt in node.elts: + if isinstance(elt, nodes.Starred): + starred = helpers.safe_infer(elt.value, context) + if not starred: + raise InferenceError(node=node, context=context) + if not hasattr(starred, "elts"): + raise InferenceError(node=node, context=context) + values.extend(_infer_sequence_helper(starred)) + elif isinstance(elt, nodes.NamedExpr): + value = helpers.safe_infer(elt.value, context) + if not value: + raise InferenceError(node=node, context=context) + values.append(value) + else: + values.append(elt) + return values + + +@decorators.raise_if_nothing_inferred +def infer_sequence(self, context=None): + has_starred_named_expr = any( + isinstance(e, (nodes.Starred, nodes.NamedExpr)) for e in self.elts + ) + if has_starred_named_expr: + values = _infer_sequence_helper(self, context) + new_seq = type(self)( + lineno=self.lineno, col_offset=self.col_offset, parent=self.parent + ) + new_seq.postinit(values) + + yield new_seq + else: + yield self + + +nodes.List._infer = infer_sequence # type: ignore[assignment] +nodes.Tuple._infer = infer_sequence # type: ignore[assignment] +nodes.Set._infer = infer_sequence # type: ignore[assignment] + + +def infer_map(self, context=None): + if not any(isinstance(k, nodes.DictUnpack) for k, _ in self.items): + yield self + else: + items = _infer_map(self, context) + new_seq = type(self)(self.lineno, self.col_offset, self.parent) + new_seq.postinit(list(items.items())) + yield new_seq + + +def _update_with_replacement(lhs_dict, rhs_dict): + """Delete nodes that equate to duplicate keys + + Since an astroid node doesn't 'equal' another node with the same value, + this function uses the as_string method to make sure duplicate keys + don't get through + + Note that both the key and the value are astroid nodes + + Fixes issue with DictUnpack causing duplicte keys + in inferred Dict items + + :param dict(nodes.NodeNG, nodes.NodeNG) lhs_dict: Dictionary to 'merge' nodes into + :param dict(nodes.NodeNG, nodes.NodeNG) rhs_dict: Dictionary with nodes to pull from + :return dict(nodes.NodeNG, nodes.NodeNG): merged dictionary of nodes + """ + combined_dict = itertools.chain(lhs_dict.items(), rhs_dict.items()) + # Overwrite keys which have the same string values + string_map = {key.as_string(): (key, value) for key, value in combined_dict} + # Return to dictionary + return dict(string_map.values()) + + +def _infer_map(node, context): + """Infer all values based on Dict.items""" + values = {} + for name, value in node.items: + if isinstance(name, nodes.DictUnpack): + double_starred = helpers.safe_infer(value, context) + if not double_starred: + raise InferenceError + if not isinstance(double_starred, nodes.Dict): + raise InferenceError(node=node, context=context) + unpack_items = _infer_map(double_starred, context) + values = _update_with_replacement(values, unpack_items) + else: + key = helpers.safe_infer(name, context=context) + value = helpers.safe_infer(value, context=context) + if any(not elem for elem in (key, 
value)): + raise InferenceError(node=node, context=context) + values = _update_with_replacement(values, {key: value}) + return values + + +nodes.Dict._infer = infer_map # type: ignore[assignment] + + +def _higher_function_scope(node): + """Search for the first function which encloses the given + scope. This can be used for looking up in that function's + scope, in case looking up in a lower scope for a particular + name fails. + + :param node: A scope node. + :returns: + ``None``, if no parent function scope was found, + otherwise an instance of :class:`astroid.nodes.scoped_nodes.Function`, + which encloses the given node. + """ + current = node + while current.parent and not isinstance(current.parent, nodes.FunctionDef): + current = current.parent + if current and current.parent: + return current.parent + return None + + +def infer_name(self, context=None): + """infer a Name: use name lookup rules""" + frame, stmts = self.lookup(self.name) + if not stmts: + # Try to see if the name is enclosed in a nested function + # and use the higher (first function) scope for searching. + parent_function = _higher_function_scope(self.scope()) + if parent_function: + _, stmts = parent_function.lookup(self.name) + + if not stmts: + raise NameInferenceError( + name=self.name, scope=self.scope(), context=context + ) + context = copy_context(context) + context.lookupname = self.name + return bases._infer_stmts(stmts, context, frame) + + +# pylint: disable=no-value-for-parameter +nodes.Name._infer = decorators.raise_if_nothing_inferred( + decorators.path_wrapper(infer_name) +) +nodes.AssignName.infer_lhs = infer_name # won't work with a path wrapper + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_call(self, context=None): + """infer a Call node by trying to guess what the function returns""" + callcontext = copy_context(context) + callcontext.boundnode = None + if context is not None: + callcontext.extra_context = _populate_context_lookup(self, context.clone()) + + for callee in self.func.infer(context): + if callee is util.Uninferable: + yield callee + continue + try: + if hasattr(callee, "infer_call_result"): + callcontext.callcontext = CallContext( + args=self.args, keywords=self.keywords, callee=callee + ) + yield from callee.infer_call_result(caller=self, context=callcontext) + except InferenceError: + continue + return dict(node=self, context=context) + + +nodes.Call._infer = infer_call # type: ignore[assignment] + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_import(self, context=None, asname=True): + """infer an Import node: return the imported module/object""" + name = context.lookupname + if name is None: + raise InferenceError(node=self, context=context) + + try: + if asname: + yield self.do_import_module(self.real_name(name)) + else: + yield self.do_import_module(name) + except AstroidBuildingError as exc: + raise InferenceError(node=self, context=context) from exc + + +nodes.Import._infer = infer_import + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_import_from(self, context=None, asname=True): + """infer a ImportFrom node: return the imported module/object""" + name = context.lookupname + if name is None: + raise InferenceError(node=self, context=context) + if asname: + try: + name = self.real_name(name) + except AttributeInferenceError as exc: + # See https://github.com/PyCQA/pylint/issues/4692 + raise InferenceError(node=self, context=context) from exc + try: + module = self.do_import_module() + 
except AstroidBuildingError as exc: + raise InferenceError(node=self, context=context) from exc + + try: + context = copy_context(context) + context.lookupname = name + stmts = module.getattr(name, ignore_locals=module is self.root()) + return bases._infer_stmts(stmts, context) + except AttributeInferenceError as error: + raise InferenceError( + str(error), target=self, attribute=name, context=context + ) from error + + +nodes.ImportFrom._infer = infer_import_from # type: ignore[assignment] + + +def infer_attribute(self, context=None): + """infer an Attribute node by using getattr on the associated object""" + for owner in self.expr.infer(context): + if owner is util.Uninferable: + yield owner + continue + + if not context: + context = InferenceContext() + else: + context = copy_context(context) + + old_boundnode = context.boundnode + try: + context.boundnode = owner + yield from owner.igetattr(self.attrname, context) + except ( + AttributeInferenceError, + InferenceError, + AttributeError, + ): + pass + finally: + context.boundnode = old_boundnode + return dict(node=self, context=context) + + +nodes.Attribute._infer = decorators.raise_if_nothing_inferred( + decorators.path_wrapper(infer_attribute) +) +# won't work with a path wrapper +nodes.AssignAttr.infer_lhs = decorators.raise_if_nothing_inferred(infer_attribute) + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_global(self, context=None): + if context.lookupname is None: + raise InferenceError(node=self, context=context) + try: + return bases._infer_stmts(self.root().getattr(context.lookupname), context) + except AttributeInferenceError as error: + raise InferenceError( + str(error), target=self, attribute=context.lookupname, context=context + ) from error + + +nodes.Global._infer = infer_global # type: ignore[assignment] + + +_SUBSCRIPT_SENTINEL = object() + + +def infer_subscript(self, context=None): + """Inference for subscripts + + We're understanding if the index is a Const + or a slice, passing the result of inference + to the value's `getitem` method, which should + handle each supported index type accordingly. + """ + + found_one = False + for value in self.value.infer(context): + if value is util.Uninferable: + yield util.Uninferable + return None + for index in self.slice.infer(context): + if index is util.Uninferable: + yield util.Uninferable + return None + + # Try to deduce the index value. + index_value = _SUBSCRIPT_SENTINEL + if value.__class__ == bases.Instance: + index_value = index + elif index.__class__ == bases.Instance: + instance_as_index = helpers.class_instance_as_index(index) + if instance_as_index: + index_value = instance_as_index + else: + index_value = index + + if index_value is _SUBSCRIPT_SENTINEL: + raise InferenceError(node=self, context=context) + + try: + assigned = value.getitem(index_value, context) + except ( + AstroidTypeError, + AstroidIndexError, + AttributeInferenceError, + AttributeError, + ) as exc: + raise InferenceError(node=self, context=context) from exc + + # Prevent inferring if the inferred subscript + # is the same as the original subscripted object. 
+ if self is assigned or assigned is util.Uninferable: + yield util.Uninferable + return None + yield from assigned.infer(context) + found_one = True + + if found_one: + return dict(node=self, context=context) + return None + + +nodes.Subscript._infer = decorators.raise_if_nothing_inferred( # type: ignore[assignment] + decorators.path_wrapper(infer_subscript) +) +nodes.Subscript.infer_lhs = decorators.raise_if_nothing_inferred(infer_subscript) + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def _infer_boolop(self, context=None): + """Infer a boolean operation (and / or / not). + + The function will calculate the boolean operation + for all pairs generated through inference for each component + node. + """ + values = self.values + if self.op == "or": + predicate = operator.truth + else: + predicate = operator.not_ + + try: + values = [value.infer(context=context) for value in values] + except InferenceError: + yield util.Uninferable + return None + + for pair in itertools.product(*values): + if any(item is util.Uninferable for item in pair): + # Can't infer the final result, just yield Uninferable. + yield util.Uninferable + continue + + bool_values = [item.bool_value() for item in pair] + if any(item is util.Uninferable for item in bool_values): + # Can't infer the final result, just yield Uninferable. + yield util.Uninferable + continue + + # Since the boolean operations are short circuited operations, + # this code yields the first value for which the predicate is True + # and if no value respected the predicate, then the last value will + # be returned (or Uninferable if there was no last value). + # This is conforming to the semantics of `and` and `or`: + # 1 and 0 -> 1 + # 0 and 1 -> 0 + # 1 or 0 -> 1 + # 0 or 1 -> 1 + value = util.Uninferable + for value, bool_value in zip(pair, bool_values): + if predicate(bool_value): + yield value + break + else: + yield value + + return dict(node=self, context=context) + + +nodes.BoolOp._infer = _infer_boolop + + +# UnaryOp, BinOp and AugAssign inferences + + +def _filter_operation_errors(self, infer_callable, context, error): + for result in infer_callable(self, context): + if isinstance(result, error): + # For the sake of .infer(), we don't care about operation + # errors, which is the job of pylint. So return something + # which shows that we can't infer the result. + yield util.Uninferable + else: + yield result + + +def _infer_unaryop(self, context=None): + """Infer what an UnaryOp should return when evaluated.""" + for operand in self.operand.infer(context): + try: + yield operand.infer_unary_op(self.op) + except TypeError as exc: + # The operand doesn't support this operation. + yield util.BadUnaryOperationMessage(operand, self.op, exc) + except AttributeError as exc: + meth = protocols.UNARY_OP_METHOD[self.op] + if meth is None: + # `not node`. Determine node's boolean + # value and negate its result, unless it is + # Uninferable, which will be returned as is. + bool_value = operand.bool_value() + if bool_value is not util.Uninferable: + yield nodes.const_factory(not bool_value) + else: + yield util.Uninferable + else: + if not isinstance(operand, (bases.Instance, nodes.ClassDef)): + # The operation was used on something which + # doesn't support it. 
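+                    # (a module node, for instance), so report a bad unary operation instead.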
+ yield util.BadUnaryOperationMessage(operand, self.op, exc) + continue + + try: + try: + methods = dunder_lookup.lookup(operand, meth) + except AttributeInferenceError: + yield util.BadUnaryOperationMessage(operand, self.op, exc) + continue + + meth = methods[0] + inferred = next(meth.infer(context=context), None) + if inferred is util.Uninferable or not inferred.callable(): + continue + + context = copy_context(context) + context.boundnode = operand + context.callcontext = CallContext(args=[], callee=inferred) + + call_results = inferred.infer_call_result(self, context=context) + result = next(call_results, None) + if result is None: + # Failed to infer, return the same type. + yield operand + else: + yield result + except AttributeInferenceError as exc: + # The unary operation special method was not found. + yield util.BadUnaryOperationMessage(operand, self.op, exc) + except InferenceError: + yield util.Uninferable + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_unaryop(self, context=None): + """Infer what an UnaryOp should return when evaluated.""" + yield from _filter_operation_errors( + self, _infer_unaryop, context, util.BadUnaryOperationMessage + ) + return dict(node=self, context=context) + + +nodes.UnaryOp._infer_unaryop = _infer_unaryop +nodes.UnaryOp._infer = infer_unaryop + + +def _is_not_implemented(const): + """Check if the given const node is NotImplemented.""" + return isinstance(const, nodes.Const) and const.value is NotImplemented + + +def _invoke_binop_inference(instance, opnode, op, other, context, method_name): + """Invoke binary operation inference on the given instance.""" + methods = dunder_lookup.lookup(instance, method_name) + context = bind_context_to_node(context, instance) + method = methods[0] + context.callcontext.callee = method + try: + inferred = next(method.infer(context=context)) + except StopIteration as e: + raise InferenceError(node=method, context=context) from e + if inferred is util.Uninferable: + raise InferenceError + return instance.infer_binary_op(opnode, op, other, context, inferred) + + +def _aug_op(instance, opnode, op, other, context, reverse=False): + """Get an inference callable for an augmented binary operation.""" + method_name = protocols.AUGMENTED_OP_METHOD[op] + return functools.partial( + _invoke_binop_inference, + instance=instance, + op=op, + opnode=opnode, + other=other, + context=context, + method_name=method_name, + ) + + +def _bin_op(instance, opnode, op, other, context, reverse=False): + """Get an inference callable for a normal binary operation. + + If *reverse* is True, then the reflected method will be used instead. + """ + if reverse: + method_name = protocols.REFLECTED_BIN_OP_METHOD[op] + else: + method_name = protocols.BIN_OP_METHOD[op] + return functools.partial( + _invoke_binop_inference, + instance=instance, + op=op, + opnode=opnode, + other=other, + context=context, + method_name=method_name, + ) + + +def _get_binop_contexts(context, left, right): + """Get contexts for binary operations. + + This will return two inference contexts, the first one + for x.__op__(y), the other one for y.__rop__(x), where + only the arguments are inversed. + """ + # The order is important, since the first one should be + # left.__op__(right). 
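+    # The argument attached to each context is the *other* operand, hence the (right, left) order below.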
+ for arg in (right, left): + new_context = context.clone() + new_context.callcontext = CallContext(args=[arg]) + new_context.boundnode = None + yield new_context + + +def _same_type(type1, type2): + """Check if type1 is the same as type2.""" + return type1.qname() == type2.qname() + + +def _get_binop_flow( + left, left_type, binary_opnode, right, right_type, context, reverse_context +): + """Get the flow for binary operations. + + The rules are a bit messy: + + * if left and right have the same type, then only one + method will be called, left.__op__(right) + * if left and right are unrelated typewise, then first + left.__op__(right) is tried and if this does not exist + or returns NotImplemented, then right.__rop__(left) is tried. + * if left is a subtype of right, then only left.__op__(right) + is tried. + * if left is a supertype of right, then right.__rop__(left) + is first tried and then left.__op__(right) + """ + op = binary_opnode.op + if _same_type(left_type, right_type): + methods = [_bin_op(left, binary_opnode, op, right, context)] + elif helpers.is_subtype(left_type, right_type): + methods = [_bin_op(left, binary_opnode, op, right, context)] + elif helpers.is_supertype(left_type, right_type): + methods = [ + _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True), + _bin_op(left, binary_opnode, op, right, context), + ] + else: + methods = [ + _bin_op(left, binary_opnode, op, right, context), + _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True), + ] + return methods + + +def _get_aug_flow( + left, left_type, aug_opnode, right, right_type, context, reverse_context +): + """Get the flow for augmented binary operations. + + The rules are a bit messy: + + * if left and right have the same type, then left.__augop__(right) + is first tried and then left.__op__(right). + * if left and right are unrelated typewise, then + left.__augop__(right) is tried, then left.__op__(right) + is tried and then right.__rop__(left) is tried. + * if left is a subtype of right, then left.__augop__(right) + is tried and then left.__op__(right). + * if left is a supertype of right, then left.__augop__(right) + is tried, then right.__rop__(left) and then + left.__op__(right) + """ + bin_op = aug_opnode.op.strip("=") + aug_op = aug_opnode.op + if _same_type(left_type, right_type): + methods = [ + _aug_op(left, aug_opnode, aug_op, right, context), + _bin_op(left, aug_opnode, bin_op, right, context), + ] + elif helpers.is_subtype(left_type, right_type): + methods = [ + _aug_op(left, aug_opnode, aug_op, right, context), + _bin_op(left, aug_opnode, bin_op, right, context), + ] + elif helpers.is_supertype(left_type, right_type): + methods = [ + _aug_op(left, aug_opnode, aug_op, right, context), + _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True), + _bin_op(left, aug_opnode, bin_op, right, context), + ] + else: + methods = [ + _aug_op(left, aug_opnode, aug_op, right, context), + _bin_op(left, aug_opnode, bin_op, right, context), + _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True), + ] + return methods + + +def _infer_binary_operation(left, right, binary_opnode, context, flow_factory): + """Infer a binary operation between a left operand and a right operand + + This is used by both normal binary operations and augmented binary + operations, the only difference is the flow factory used. 
+ """ + + context, reverse_context = _get_binop_contexts(context, left, right) + left_type = helpers.object_type(left) + right_type = helpers.object_type(right) + methods = flow_factory( + left, left_type, binary_opnode, right, right_type, context, reverse_context + ) + for method in methods: + try: + results = list(method()) + except AttributeError: + continue + except AttributeInferenceError: + continue + except InferenceError: + yield util.Uninferable + return + else: + if any(result is util.Uninferable for result in results): + yield util.Uninferable + return + + if all(map(_is_not_implemented, results)): + continue + not_implemented = sum( + 1 for result in results if _is_not_implemented(result) + ) + if not_implemented and not_implemented != len(results): + # Can't infer yet what this is. + yield util.Uninferable + return + + yield from results + return + # The operation doesn't seem to be supported so let the caller know about it + yield util.BadBinaryOperationMessage(left_type, binary_opnode.op, right_type) + + +def _infer_binop(self, context): + """Binary operation inference logic.""" + left = self.left + right = self.right + + # we use two separate contexts for evaluating lhs and rhs because + # 1. evaluating lhs may leave some undesired entries in context.path + # which may not let us infer right value of rhs + context = context or InferenceContext() + lhs_context = copy_context(context) + rhs_context = copy_context(context) + lhs_iter = left.infer(context=lhs_context) + rhs_iter = right.infer(context=rhs_context) + for lhs, rhs in itertools.product(lhs_iter, rhs_iter): + if any(value is util.Uninferable for value in (rhs, lhs)): + # Don't know how to process this. + yield util.Uninferable + return + + try: + yield from _infer_binary_operation(lhs, rhs, self, context, _get_binop_flow) + except _NonDeducibleTypeHierarchy: + yield util.Uninferable + + +@decorators.yes_if_nothing_inferred +@decorators.path_wrapper +def infer_binop(self, context=None): + return _filter_operation_errors( + self, _infer_binop, context, util.BadBinaryOperationMessage + ) + + +nodes.BinOp._infer_binop = _infer_binop +nodes.BinOp._infer = infer_binop + +COMPARE_OPS: Dict[str, Callable[[Any, Any], bool]] = { + "==": operator.eq, + "!=": operator.ne, + "<": operator.lt, + "<=": operator.le, + ">": operator.gt, + ">=": operator.ge, + "in": lambda a, b: a in b, + "not in": lambda a, b: a not in b, +} +UNINFERABLE_OPS = { + "is", + "is not", +} + + +def _to_literal(node: nodes.NodeNG) -> Any: + # Can raise SyntaxError or ValueError from ast.literal_eval + # Can raise AttributeError from node.as_string() as not all nodes have a visitor + # Is this the stupidest idea or the simplest idea? 
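+    # Round-tripping through the node's source text lets literal values be compared
+    # without implementing rich comparisons on every node type.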
+ return ast.literal_eval(node.as_string()) + + +def _do_compare( + left_iter: Iterable[nodes.NodeNG], op: str, right_iter: Iterable[nodes.NodeNG] +) -> "bool | type[util.Uninferable]": + """ + If all possible combinations are either True or False, return that: + >>> _do_compare([1, 2], '<=', [3, 4]) + True + >>> _do_compare([1, 2], '==', [3, 4]) + False + + If any item is uninferable, or if some combinations are True and some + are False, return Uninferable: + >>> _do_compare([1, 3], '<=', [2, 4]) + util.Uninferable + """ + retval: Union[None, bool] = None + if op in UNINFERABLE_OPS: + return util.Uninferable + op_func = COMPARE_OPS[op] + + for left, right in itertools.product(left_iter, right_iter): + if left is util.Uninferable or right is util.Uninferable: + return util.Uninferable + + try: + left, right = _to_literal(left), _to_literal(right) + except (SyntaxError, ValueError, AttributeError): + return util.Uninferable + + try: + expr = op_func(left, right) + except TypeError as exc: + raise AstroidTypeError from exc + + if retval is None: + retval = expr + elif retval != expr: + return util.Uninferable + # (or both, but "True | False" is basically the same) + + assert retval is not None + return retval # it was all the same value + + +def _infer_compare( + self: nodes.Compare, context: Optional[InferenceContext] = None +) -> Iterator[Union[nodes.Const, Type[util.Uninferable]]]: + """Chained comparison inference logic.""" + retval: Union[bool, Type[util.Uninferable]] = True + + ops = self.ops + left_node = self.left + lhs = list(left_node.infer(context=context)) + # should we break early if first element is uninferable? + for op, right_node in ops: + # eagerly evaluate rhs so that values can be re-used as lhs + rhs = list(right_node.infer(context=context)) + try: + retval = _do_compare(lhs, op, rhs) + except AstroidTypeError: + retval = util.Uninferable + break + if retval is not True: + break # short-circuit + lhs = rhs # continue + if retval is util.Uninferable: + yield retval # type: ignore[misc] + else: + yield nodes.Const(retval) + + +nodes.Compare._infer = _infer_compare # type: ignore[assignment] + + +def _infer_augassign(self, context=None): + """Inference logic for augmented binary operations.""" + if context is None: + context = InferenceContext() + + rhs_context = context.clone() + + lhs_iter = self.target.infer_lhs(context=context) + rhs_iter = self.value.infer(context=rhs_context) + for lhs, rhs in itertools.product(lhs_iter, rhs_iter): + if any(value is util.Uninferable for value in (rhs, lhs)): + # Don't know how to process this. + yield util.Uninferable + return + + try: + yield from _infer_binary_operation( + left=lhs, + right=rhs, + binary_opnode=self, + context=context, + flow_factory=_get_aug_flow, + ) + except _NonDeducibleTypeHierarchy: + yield util.Uninferable + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_augassign(self, context=None): + return _filter_operation_errors( + self, _infer_augassign, context, util.BadBinaryOperationMessage + ) + + +nodes.AugAssign._infer_augassign = _infer_augassign +nodes.AugAssign._infer = infer_augassign + +# End of binary operation inference. 
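The handlers above are what ultimately power .infer() on arithmetic and comparison nodes. A minimal sketch of how this surfaces through astroid's public extract_node/infer API; the sample expressions are illustrative only, and the printed results assume the constant-folding paths above succeed:

import astroid

# Two expression nodes, marked with `#@` so extract_node returns them.
binop_node, compare_node = astroid.extract_node(
    """
    1 + 2          #@
    2 < 3 <= 10    #@
    """
)

# infer_binop folds the constant operands into a single Const node ...
print(next(binop_node.infer()).value)    # -> 3
# ... and _infer_compare folds the whole chained comparison into a boolean Const.
print(next(compare_node.infer()).value)  # -> True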
+ + +@decorators.raise_if_nothing_inferred +def infer_arguments(self, context=None): + name = context.lookupname + if name is None: + raise InferenceError(node=self, context=context) + return protocols._arguments_infer_argname(self, name, context) + + +nodes.Arguments._infer = infer_arguments # type: ignore[assignment] + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_assign(self, context=None): + """infer a AssignName/AssignAttr: need to inspect the RHS part of the + assign node + """ + if isinstance(self.parent, nodes.AugAssign): + return self.parent.infer(context) + + stmts = list(self.assigned_stmts(context=context)) + return bases._infer_stmts(stmts, context) + + +nodes.AssignName._infer = infer_assign +nodes.AssignAttr._infer = infer_assign + + +@decorators.raise_if_nothing_inferred +@decorators.path_wrapper +def infer_empty_node(self, context=None): + if not self.has_underlying_object(): + yield util.Uninferable + else: + try: + yield from AstroidManager().infer_ast_from_something( + self.object, context=context + ) + except AstroidError: + yield util.Uninferable + + +nodes.EmptyNode._infer = infer_empty_node # type: ignore[assignment] + + +@decorators.raise_if_nothing_inferred +def infer_index(self, context=None): + return self.value.infer(context) + + +nodes.Index._infer = infer_index # type: ignore[assignment] + + +def _populate_context_lookup(call, context): + # Allows context to be saved for later + # for inference inside a function + context_lookup = {} + if context is None: + return context_lookup + for arg in call.args: + if isinstance(arg, nodes.Starred): + context_lookup[arg.value] = context + else: + context_lookup[arg] = context + keywords = call.keywords if call.keywords is not None else [] + for keyword in keywords: + context_lookup[keyword.value] = context + return context_lookup + + +@decorators.raise_if_nothing_inferred +def infer_ifexp(self, context=None): + """Support IfExp inference + + If we can't infer the truthiness of the condition, we default + to inferring both branches. Otherwise, we infer either branch + depending on the condition. + """ + both_branches = False + # We use two separate contexts for evaluating lhs and rhs because + # evaluating lhs may leave some undesired entries in context.path + # which may not let us infer right value of rhs. 
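+    # (the same two-context trick used by _infer_binop above).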
+ + context = context or InferenceContext() + lhs_context = copy_context(context) + rhs_context = copy_context(context) + try: + test = next(self.test.infer(context=context.clone())) + except (InferenceError, StopIteration): + both_branches = True + else: + if test is not util.Uninferable: + if test.bool_value(): + yield from self.body.infer(context=lhs_context) + else: + yield from self.orelse.infer(context=rhs_context) + else: + both_branches = True + if both_branches: + yield from self.body.infer(context=lhs_context) + yield from self.orelse.infer(context=rhs_context) + + +nodes.IfExp._infer = infer_ifexp # type: ignore[assignment] + + +# pylint: disable=dangerous-default-value +@wrapt.decorator +def _cached_generator( + func, instance: _FunctionDefT, args, kwargs, _cache={} # noqa: B006 +): + node = instance + try: + return iter(_cache[func, id(node)]) + except KeyError: + result = func(*args, **kwargs) + # Need to keep an iterator around + original, copy = itertools.tee(result) + _cache[func, id(node)] = list(copy) + return original + + +# When inferring a property, we instantiate a new `objects.Property` object, +# which in turn, because it inherits from `FunctionDef`, sets itself in the locals +# of the wrapping frame. This means that every time we infer a property, the locals +# are mutated with a new instance of the property. This is why we cache the result +# of the function's inference. +@_cached_generator +def infer_functiondef( + self: _FunctionDefT, context: Optional[InferenceContext] = None +) -> Generator[Union["Property", _FunctionDefT], None, InferenceErrorInfo]: + if not self.decorators or not bases._is_property(self): + yield self + return InferenceErrorInfo(node=self, context=context) + + prop_func = objects.Property( + function=self, + name=self.name, + lineno=self.lineno, + parent=self.parent, + col_offset=self.col_offset, + ) + prop_func.postinit(body=[], args=self.args, doc_node=self.doc_node) + yield prop_func + return InferenceErrorInfo(node=self, context=context) + + +nodes.FunctionDef._infer = infer_functiondef # type: ignore[assignment] diff --git a/myenv/lib/python3.9/site-packages/astroid/inference_tip.py b/myenv/lib/python3.9/site-packages/astroid/inference_tip.py new file mode 100644 index 0000000..f74ff23 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/inference_tip.py @@ -0,0 +1,89 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Transform utilities (filters and decorator)""" + +import typing + +import wrapt + +from astroid import bases, util +from astroid.exceptions import InferenceOverwriteError, UseInferenceDefault +from astroid.nodes import NodeNG +from astroid.typing import InferFn + +InferOptions = typing.Union[ + NodeNG, bases.Instance, bases.UnboundMethod, typing.Type[util.Uninferable] +] + +_cache: typing.Dict[ + typing.Tuple[InferFn, NodeNG], typing.Optional[typing.List[InferOptions]] +] = {} + + +def clear_inference_tip_cache(): + """Clear the inference tips cache.""" + _cache.clear() + + +@wrapt.decorator +def _inference_tip_cached( + func: InferFn, instance: None, args: typing.Any, kwargs: typing.Any +) -> typing.Iterator[InferOptions]: + """Cache decorator used for inference tips""" + node = args[0] + try: + result = _cache[func, node] + # If through recursion we end up trying to infer the same + # func + node we raise here. 
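+        # A None placeholder means inference for this func/node pair is already in progress further up the stack.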
+ if result is None: + raise UseInferenceDefault() + except KeyError: + _cache[func, node] = None + result = _cache[func, node] = list(func(*args, **kwargs)) + assert result + return iter(result) + + +def inference_tip(infer_function: InferFn, raise_on_overwrite: bool = False) -> InferFn: + """Given an instance specific inference function, return a function to be + given to AstroidManager().register_transform to set this inference function. + + :param bool raise_on_overwrite: Raise an `InferenceOverwriteError` + if the inference tip will overwrite another. Used for debugging + + Typical usage + + .. sourcecode:: python + + AstroidManager().register_transform(Call, inference_tip(infer_named_tuple), + predicate) + + .. Note:: + + Using an inference tip will override + any previously set inference tip for the given + node. Use a predicate in the transform to prevent + excess overwrites. + """ + + def transform(node: NodeNG, infer_function: InferFn = infer_function) -> NodeNG: + if ( + raise_on_overwrite + and node._explicit_inference is not None + and node._explicit_inference is not infer_function + ): + raise InferenceOverwriteError( + "Inference already set to {existing_inference}. " + "Trying to overwrite with {new_inference} for {node}".format( + existing_inference=infer_function, + new_inference=node._explicit_inference, + node=node, + ) + ) + # pylint: disable=no-value-for-parameter + node._explicit_inference = _inference_tip_cached(infer_function) + return node + + return transform diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/__init__.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/__init__.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/spec.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/spec.py new file mode 100644 index 0000000..73192ef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/spec.py @@ -0,0 +1,378 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import abc +import collections +import enum +import importlib.machinery +import importlib.util +import os +import sys +import zipimport +from functools import lru_cache +from pathlib import Path + +from astroid.modutils import EXT_LIB_DIRS + +from . import util + +ModuleType = enum.Enum( + "ModuleType", + "C_BUILTIN C_EXTENSION PKG_DIRECTORY " + "PY_CODERESOURCE PY_COMPILED PY_FROZEN PY_RESOURCE " + "PY_SOURCE PY_ZIPMODULE PY_NAMESPACE", +) + + +_ModuleSpec = collections.namedtuple( + "_ModuleSpec", "name type location " "origin submodule_search_locations" +) + + +class ModuleSpec(_ModuleSpec): + """Defines a class similar to PEP 420's ModuleSpec + + A module spec defines a name of a module, its type, location + and where submodules can be found, if the module is a package. 
+ """ + + def __new__( + cls, + name, + module_type, + location=None, + origin=None, + submodule_search_locations=None, + ): + return _ModuleSpec.__new__( + cls, + name=name, + type=module_type, + location=location, + origin=origin, + submodule_search_locations=submodule_search_locations, + ) + + +class Finder: + """A finder is a class which knows how to find a particular module.""" + + def __init__(self, path=None): + self._path = path or sys.path + + @abc.abstractmethod + def find_module(self, modname, module_parts, processed, submodule_path): + """Find the given module + + Each finder is responsible for each protocol of finding, as long as + they all return a ModuleSpec. + + :param str modname: The module which needs to be searched. + :param list module_parts: It should be a list of strings, + where each part contributes to the module's + namespace. + :param list processed: What parts from the module parts were processed + so far. + :param list submodule_path: A list of paths where the module + can be looked into. + :returns: A ModuleSpec, describing how and where the module was found, + None, otherwise. + """ + + def contribute_to_path(self, spec, processed): + """Get a list of extra paths where this finder can search.""" + + +class ImportlibFinder(Finder): + """A finder based on the importlib module.""" + + _SUFFIXES = ( + [(s, ModuleType.C_EXTENSION) for s in importlib.machinery.EXTENSION_SUFFIXES] + + [(s, ModuleType.PY_SOURCE) for s in importlib.machinery.SOURCE_SUFFIXES] + + [(s, ModuleType.PY_COMPILED) for s in importlib.machinery.BYTECODE_SUFFIXES] + ) + + def find_module(self, modname, module_parts, processed, submodule_path): + if not isinstance(modname, str): + raise TypeError(f"'modname' must be a str, not {type(modname)}") + if submodule_path is not None: + submodule_path = list(submodule_path) + else: + try: + spec = importlib.util.find_spec(modname) + if spec: + if spec.loader is importlib.machinery.BuiltinImporter: + return ModuleSpec( + name=modname, + location=None, + module_type=ModuleType.C_BUILTIN, + ) + if spec.loader is importlib.machinery.FrozenImporter: + return ModuleSpec( + name=modname, + location=None, + module_type=ModuleType.PY_FROZEN, + ) + except ValueError: + pass + submodule_path = sys.path + + for entry in submodule_path: + package_directory = os.path.join(entry, modname) + for suffix in (".py", importlib.machinery.BYTECODE_SUFFIXES[0]): + package_file_name = "__init__" + suffix + file_path = os.path.join(package_directory, package_file_name) + if os.path.isfile(file_path): + return ModuleSpec( + name=modname, + location=package_directory, + module_type=ModuleType.PKG_DIRECTORY, + ) + for suffix, type_ in ImportlibFinder._SUFFIXES: + file_name = modname + suffix + file_path = os.path.join(entry, file_name) + if os.path.isfile(file_path): + return ModuleSpec( + name=modname, location=file_path, module_type=type_ + ) + return None + + def contribute_to_path(self, spec, processed): + if spec.location is None: + # Builtin. 
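+            # Built-in modules have no on-disk location, so there is nothing to contribute to the search path.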
+ return None + + if _is_setuptools_namespace(spec.location): + # extend_path is called, search sys.path for module/packages + # of this name see pkgutil.extend_path documentation + path = [ + os.path.join(p, *processed) + for p in sys.path + if os.path.isdir(os.path.join(p, *processed)) + ] + elif spec.name == "distutils" and not any( + spec.location.lower().startswith(ext_lib_dir.lower()) + for ext_lib_dir in EXT_LIB_DIRS + ): + # virtualenv below 20.0 patches distutils in an unexpected way + # so we just find the location of distutils that will be + # imported to avoid spurious import-error messages + # https://github.com/PyCQA/pylint/issues/5645 + # A regression test to create this scenario exists in release-tests.yml + # and can be triggered manually from GitHub Actions + distutils_spec = importlib.util.find_spec("distutils") + if distutils_spec and distutils_spec.origin: + origin_path = Path( + distutils_spec.origin + ) # e.g. .../distutils/__init__.py + path = [str(origin_path.parent)] # e.g. .../distutils + else: + path = [spec.location] + else: + path = [spec.location] + return path + + +class ExplicitNamespacePackageFinder(ImportlibFinder): + """A finder for the explicit namespace packages, generated through pkg_resources.""" + + def find_module(self, modname, module_parts, processed, submodule_path): + if processed: + modname = ".".join(processed + [modname]) + if util.is_namespace(modname) and modname in sys.modules: + submodule_path = sys.modules[modname].__path__ + return ModuleSpec( + name=modname, + location="", + origin="namespace", + module_type=ModuleType.PY_NAMESPACE, + submodule_search_locations=submodule_path, + ) + return None + + def contribute_to_path(self, spec, processed): + return spec.submodule_search_locations + + +class ZipFinder(Finder): + """Finder that knows how to find a module inside zip files.""" + + def __init__(self, path): + super().__init__(path) + self._zipimporters = _precache_zipimporters(path) + + def find_module(self, modname, module_parts, processed, submodule_path): + try: + file_type, filename, path = _search_zip(module_parts, self._zipimporters) + except ImportError: + return None + + return ModuleSpec( + name=modname, + location=filename, + origin="egg", + module_type=file_type, + submodule_search_locations=path, + ) + + +class PathSpecFinder(Finder): + """Finder based on importlib.machinery.PathFinder.""" + + def find_module(self, modname, module_parts, processed, submodule_path): + spec = importlib.machinery.PathFinder.find_spec(modname, path=submodule_path) + if spec: + # origin can be either a string on older Python versions + # or None in case it is a namespace package: + # https://github.com/python/cpython/pull/5481 + is_namespace_pkg = spec.origin in {"namespace", None} + location = spec.origin if not is_namespace_pkg else None + module_type = ModuleType.PY_NAMESPACE if is_namespace_pkg else None + spec = ModuleSpec( + name=spec.name, + location=location, + origin=spec.origin, + module_type=module_type, + submodule_search_locations=list(spec.submodule_search_locations or []), + ) + return spec + + def contribute_to_path(self, spec, processed): + if spec.type == ModuleType.PY_NAMESPACE: + return spec.submodule_search_locations + return None + + +_SPEC_FINDERS = ( + ImportlibFinder, + ZipFinder, + PathSpecFinder, + ExplicitNamespacePackageFinder, +) + + +def _is_setuptools_namespace(location): + try: + with open(os.path.join(location, "__init__.py"), "rb") as stream: + data = stream.read(4096) + except OSError: + return None + else: + 
extend_path = b"pkgutil" in data and b"extend_path" in data + declare_namespace = ( + b"pkg_resources" in data and b"declare_namespace(__name__)" in data + ) + return extend_path or declare_namespace + + +@lru_cache() +def _cached_set_diff(left, right): + result = set(left) + result.difference_update(right) + return result + + +def _precache_zipimporters(path=None): + """ + For each path that has not been already cached + in the sys.path_importer_cache, create a new zipimporter + instance and add it into the cache. + Return a dict associating all paths, stored in the cache, to corresponding + zipimporter instances. + + :param path: paths that has to be added into the cache + :return: association between paths stored in the cache and zipimporter instances + """ + pic = sys.path_importer_cache + + # When measured, despite having the same complexity (O(n)), + # converting to tuples and then caching the conversion to sets + # and the set difference is faster than converting to sets + # and then only caching the set difference. + + req_paths = tuple(path or sys.path) + cached_paths = tuple(pic) + new_paths = _cached_set_diff(req_paths, cached_paths) + # pylint: disable=no-member + for entry_path in new_paths: + try: + pic[entry_path] = zipimport.zipimporter(entry_path) + except zipimport.ZipImportError: + continue + return { + key: value + for key, value in pic.items() + if isinstance(value, zipimport.zipimporter) + } + + +def _search_zip(modpath, pic): + for filepath, importer in list(pic.items()): + if importer is not None: + found = importer.find_module(modpath[0]) + if found: + if not importer.find_module(os.path.sep.join(modpath)): + raise ImportError( + "No module named %s in %s/%s" + % (".".join(modpath[1:]), filepath, modpath) + ) + # import code; code.interact(local=locals()) + return ( + ModuleType.PY_ZIPMODULE, + os.path.abspath(filepath) + os.path.sep + os.path.sep.join(modpath), + filepath, + ) + raise ImportError(f"No module named {'.'.join(modpath)}") + + +def _find_spec_with_path(search_path, modname, module_parts, processed, submodule_path): + finders = [finder(search_path) for finder in _SPEC_FINDERS] + for finder in finders: + spec = finder.find_module(modname, module_parts, processed, submodule_path) + if spec is None: + continue + return finder, spec + + raise ImportError(f"No module named {'.'.join(module_parts)}") + + +def find_spec(modpath, path=None): + """Find a spec for the given module. + + :type modpath: list or tuple + :param modpath: + split module's name (i.e name of a module or package split + on '.'), with leading empty strings for explicit relative import + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :rtype: ModuleSpec + :return: A module spec, which describes how the module was + found and where. + """ + _path = path or sys.path + + # Need a copy for not mutating the argument. 
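+    # (callers may reuse the list they passed in).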
+ modpath = modpath[:] + + submodule_path = None + module_parts = modpath[:] + processed = [] + + while modpath: + modname = modpath.pop(0) + finder, spec = _find_spec_with_path( + _path, modname, module_parts, processed, submodule_path or path + ) + processed.append(modname) + if modpath: + submodule_path = finder.contribute_to_path(spec, processed) + + if spec.type == ModuleType.PKG_DIRECTORY: + spec = spec._replace(submodule_search_locations=submodule_path) + + return spec diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/util.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/util.py new file mode 100644 index 0000000..ce3da7e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/interpreter/_import/util.py @@ -0,0 +1,16 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +try: + import pkg_resources +except ImportError: + pkg_resources = None # type: ignore[assignment] + + +def is_namespace(modname): + return ( + pkg_resources is not None + and hasattr(pkg_resources, "_namespace_packages") + and modname in pkg_resources._namespace_packages + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/dunder_lookup.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/dunder_lookup.py new file mode 100644 index 0000000..b0c7ae5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/interpreter/dunder_lookup.py @@ -0,0 +1,66 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Contains logic for retrieving special methods. + +This implementation does not rely on the dot attribute access +logic, found in ``.getattr()``. The difference between these two +is that the dunder methods are looked with the type slots +(you can find more about these here +http://lucumr.pocoo.org/2014/8/16/the-python-i-would-like-to-see/) +As such, the lookup for the special methods is actually simpler than +the dot attribute access. +""" +import itertools + +import astroid +from astroid.exceptions import AttributeInferenceError + + +def _lookup_in_mro(node, name): + attrs = node.locals.get(name, []) + + nodes = itertools.chain.from_iterable( + ancestor.locals.get(name, []) for ancestor in node.ancestors(recurs=True) + ) + values = list(itertools.chain(attrs, nodes)) + if not values: + raise AttributeInferenceError(attribute=name, target=node) + + return values + + +def lookup(node, name): + """Lookup the given special method name in the given *node* + + If the special method was found, then a list of attributes + will be returned. Otherwise, `astroid.AttributeInferenceError` + is going to be raised. 
+ """ + if isinstance( + node, (astroid.List, astroid.Tuple, astroid.Const, astroid.Dict, astroid.Set) + ): + return _builtin_lookup(node, name) + if isinstance(node, astroid.Instance): + return _lookup_in_mro(node, name) + if isinstance(node, astroid.ClassDef): + return _class_lookup(node, name) + + raise AttributeInferenceError(attribute=name, target=node) + + +def _class_lookup(node, name): + metaclass = node.metaclass() + if metaclass is None: + raise AttributeInferenceError(attribute=name, target=node) + + return _lookup_in_mro(metaclass, name) + + +def _builtin_lookup(node, name): + values = node.locals.get(name, []) + if not values: + raise AttributeInferenceError(attribute=name, target=node) + + return values diff --git a/myenv/lib/python3.9/site-packages/astroid/interpreter/objectmodel.py b/myenv/lib/python3.9/site-packages/astroid/interpreter/objectmodel.py new file mode 100644 index 0000000..cf9227b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/interpreter/objectmodel.py @@ -0,0 +1,856 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Data object model, as per https://docs.python.org/3/reference/datamodel.html. + +This module describes, at least partially, a data object model for some +of astroid's nodes. The model contains special attributes that nodes such +as functions, classes, modules etc have, such as __doc__, __class__, +__module__ etc, being used when doing attribute lookups over nodes. + +For instance, inferring `obj.__class__` will first trigger an inference +of the `obj` variable. If it was successfully inferred, then an attribute +`__class__ will be looked for in the inferred object. This is the part +where the data model occurs. The model is attached to those nodes +and the lookup mechanism will try to see if attributes such as +`__class__` are defined by the model or not. If they are defined, +the model will be requested to return the corresponding value of that +attribute. Thus the model can be viewed as a special part of the lookup +mechanism. +""" + +import itertools +import os +import pprint +import types +from typing import TYPE_CHECKING, List, Optional + +import astroid +from astroid import util +from astroid.context import InferenceContext, copy_context +from astroid.exceptions import AttributeInferenceError, InferenceError, NoDefault +from astroid.manager import AstroidManager +from astroid.nodes import node_classes + +objects = util.lazy_import("objects") + +if TYPE_CHECKING: + from astroid.objects import Property + +IMPL_PREFIX = "attr_" +LEN_OF_IMPL_PREFIX = len(IMPL_PREFIX) + + +def _dunder_dict(instance, attributes): + obj = node_classes.Dict(parent=instance) + + # Convert the keys to node strings + keys = [ + node_classes.Const(value=value, parent=obj) for value in list(attributes.keys()) + ] + + # The original attribute has a list of elements for each key, + # but that is not useful for retrieving the special attribute's value. + # In this case, we're picking the last value from each list. 
+ values = [elem[-1] for elem in attributes.values()] + + obj.postinit(list(zip(keys, values))) + return obj + + +class ObjectModel: + def __init__(self): + self._instance = None + + def __repr__(self): + result = [] + cname = type(self).__name__ + string = "%(cname)s(%(fields)s)" + alignment = len(cname) + 1 + for field in sorted(self.attributes()): + width = 80 - len(field) - alignment + lines = pprint.pformat(field, indent=2, width=width).splitlines(True) + + inner = [lines[0]] + for line in lines[1:]: + inner.append(" " * alignment + line) + result.append(field) + + return string % { + "cname": cname, + "fields": (",\n" + " " * alignment).join(result), + } + + def __call__(self, instance): + self._instance = instance + return self + + def __get__(self, instance, cls=None): + # ObjectModel needs to be a descriptor so that just doing + # `special_attributes = SomeObjectModel` should be enough in the body of a node. + # But at the same time, node.special_attributes should return an object + # which can be used for manipulating the special attributes. That's the reason + # we pass the instance through which it got accessed to ObjectModel.__call__, + # returning itself afterwards, so we can still have access to the + # underlying data model and to the instance for which it got accessed. + return self(instance) + + def __contains__(self, name): + return name in self.attributes() + + def attributes(self) -> List[str]: + """Get the attributes which are exported by this object model.""" + return [o[LEN_OF_IMPL_PREFIX:] for o in dir(self) if o.startswith(IMPL_PREFIX)] + + def lookup(self, name): + """Look up the given *name* in the current model + + It should return an AST or an interpreter object, + but if the name is not found, then an AttributeInferenceError will be raised. 
+ """ + if name in self.attributes(): + return getattr(self, IMPL_PREFIX + name) + raise AttributeInferenceError(target=self._instance, attribute=name) + + +class ModuleModel(ObjectModel): + def _builtins(self): + builtins_ast_module = AstroidManager().builtins_module + return builtins_ast_module.special_attributes.lookup("__dict__") + + @property + def attr_builtins(self): + return self._builtins() + + @property + def attr___path__(self): + if not self._instance.package: + raise AttributeInferenceError(target=self._instance, attribute="__path__") + + path_objs = [ + node_classes.Const( + value=path + if not path.endswith("__init__.py") + else os.path.dirname(path), + parent=self._instance, + ) + for path in self._instance.path + ] + + container = node_classes.List(parent=self._instance) + container.postinit(path_objs) + + return container + + @property + def attr___name__(self): + return node_classes.Const(value=self._instance.name, parent=self._instance) + + @property + def attr___doc__(self): + return node_classes.Const( + value=getattr(self._instance.doc_node, "value", None), + parent=self._instance, + ) + + @property + def attr___file__(self): + return node_classes.Const(value=self._instance.file, parent=self._instance) + + @property + def attr___dict__(self): + return _dunder_dict(self._instance, self._instance.globals) + + @property + def attr___package__(self): + if not self._instance.package: + value = "" + else: + value = self._instance.name + + return node_classes.Const(value=value, parent=self._instance) + + # These are related to the Python 3 implementation of the + # import system, + # https://docs.python.org/3/reference/import.html#import-related-module-attributes + + @property + def attr___spec__(self): + # No handling for now. + return node_classes.Unknown() + + @property + def attr___loader__(self): + # No handling for now. + return node_classes.Unknown() + + @property + def attr___cached__(self): + # No handling for now. 
+ return node_classes.Unknown() + + +class FunctionModel(ObjectModel): + @property + def attr___name__(self): + return node_classes.Const(value=self._instance.name, parent=self._instance) + + @property + def attr___doc__(self): + return node_classes.Const( + value=getattr(self._instance.doc_node, "value", None), + parent=self._instance, + ) + + @property + def attr___qualname__(self): + return node_classes.Const(value=self._instance.qname(), parent=self._instance) + + @property + def attr___defaults__(self): + func = self._instance + if not func.args.defaults: + return node_classes.Const(value=None, parent=func) + + defaults_obj = node_classes.Tuple(parent=func) + defaults_obj.postinit(func.args.defaults) + return defaults_obj + + @property + def attr___annotations__(self): + obj = node_classes.Dict(parent=self._instance) + + if not self._instance.returns: + returns = None + else: + returns = self._instance.returns + + args = self._instance.args + pair_annotations = itertools.chain( + zip(args.args or [], args.annotations), + zip(args.kwonlyargs, args.kwonlyargs_annotations), + zip(args.posonlyargs or [], args.posonlyargs_annotations), + ) + + annotations = { + arg.name: annotation for (arg, annotation) in pair_annotations if annotation + } + if args.varargannotation: + annotations[args.vararg] = args.varargannotation + if args.kwargannotation: + annotations[args.kwarg] = args.kwargannotation + if returns: + annotations["return"] = returns + + items = [ + (node_classes.Const(key, parent=obj), value) + for (key, value) in annotations.items() + ] + + obj.postinit(items) + return obj + + @property + def attr___dict__(self): + return node_classes.Dict(parent=self._instance) + + attr___globals__ = attr___dict__ + + @property + def attr___kwdefaults__(self): + def _default_args(args, parent): + for arg in args.kwonlyargs: + try: + default = args.default_value(arg.name) + except NoDefault: + continue + + name = node_classes.Const(arg.name, parent=parent) + yield name, default + + args = self._instance.args + obj = node_classes.Dict(parent=self._instance) + defaults = dict(_default_args(args, obj)) + + obj.postinit(list(defaults.items())) + return obj + + @property + def attr___module__(self): + return node_classes.Const(self._instance.root().qname()) + + @property + def attr___get__(self): + # pylint: disable=import-outside-toplevel; circular import + from astroid import bases + + func = self._instance + + class DescriptorBoundMethod(bases.BoundMethod): + """Bound method which knows how to understand calling descriptor binding.""" + + def implicit_parameters(self): + # Different than BoundMethod since the signature + # is different. + return 0 + + def infer_call_result(self, caller, context=None): + if len(caller.args) > 2 or len(caller.args) < 1: + raise InferenceError( + "Invalid arguments for descriptor binding", + target=self, + context=context, + ) + + context = copy_context(context) + try: + cls = next(caller.args[0].infer(context=context)) + except StopIteration as e: + raise InferenceError(context=context, node=caller.args[0]) from e + + if cls is astroid.Uninferable: + raise InferenceError( + "Invalid class inferred", target=self, context=context + ) + + # For some reason func is a Node that the below + # code is not expecting + if isinstance(func, bases.BoundMethod): + yield func + return + + # Rebuild the original value, but with the parent set as the + # class where it will be bound. 
+ new_func = func.__class__( + name=func.name, + lineno=func.lineno, + col_offset=func.col_offset, + parent=func.parent, + ) + # pylint: disable=no-member + new_func.postinit( + func.args, + func.body, + func.decorators, + func.returns, + doc_node=func.doc_node, + ) + + # Build a proper bound method that points to our newly built function. + proxy = bases.UnboundMethod(new_func) + yield bases.BoundMethod(proxy=proxy, bound=cls) + + @property + def args(self): + """Overwrite the underlying args to match those of the underlying func + + Usually the underlying *func* is a function/method, as in: + + def test(self): + pass + + This has only the *self* parameter but when we access test.__get__ + we get a new object which has two parameters, *self* and *type*. + """ + nonlocal func + positional_or_keyword_params = func.args.args.copy() + positional_or_keyword_params.append(astroid.AssignName(name="type")) + + positional_only_params = func.args.posonlyargs.copy() + + arguments = astroid.Arguments(parent=func.args.parent) + arguments.postinit( + args=positional_or_keyword_params, + posonlyargs=positional_only_params, + defaults=[], + kwonlyargs=[], + kw_defaults=[], + annotations=[], + ) + return arguments + + return DescriptorBoundMethod(proxy=self._instance, bound=self._instance) + + # These are here just for completion. + @property + def attr___ne__(self): + return node_classes.Unknown() + + attr___subclasshook__ = attr___ne__ + attr___str__ = attr___ne__ + attr___sizeof__ = attr___ne__ + attr___setattr___ = attr___ne__ + attr___repr__ = attr___ne__ + attr___reduce__ = attr___ne__ + attr___reduce_ex__ = attr___ne__ + attr___new__ = attr___ne__ + attr___lt__ = attr___ne__ + attr___eq__ = attr___ne__ + attr___gt__ = attr___ne__ + attr___format__ = attr___ne__ + attr___delattr___ = attr___ne__ + attr___getattribute__ = attr___ne__ + attr___hash__ = attr___ne__ + attr___init__ = attr___ne__ + attr___dir__ = attr___ne__ + attr___call__ = attr___ne__ + attr___class__ = attr___ne__ + attr___closure__ = attr___ne__ + attr___code__ = attr___ne__ + + +class ClassModel(ObjectModel): + def __init__(self): + # Add a context so that inferences called from an instance don't recurse endlessly + self.context = InferenceContext() + + super().__init__() + + @property + def attr___module__(self): + return node_classes.Const(self._instance.root().qname()) + + @property + def attr___name__(self): + return node_classes.Const(self._instance.name) + + @property + def attr___qualname__(self): + return node_classes.Const(self._instance.qname()) + + @property + def attr___doc__(self): + return node_classes.Const(getattr(self._instance.doc_node, "value", None)) + + @property + def attr___mro__(self): + if not self._instance.newstyle: + raise AttributeInferenceError(target=self._instance, attribute="__mro__") + + mro = self._instance.mro() + obj = node_classes.Tuple(parent=self._instance) + obj.postinit(mro) + return obj + + @property + def attr_mro(self): + if not self._instance.newstyle: + raise AttributeInferenceError(target=self._instance, attribute="mro") + + # pylint: disable=import-outside-toplevel; circular import + from astroid import bases + + other_self = self + + # Cls.mro is a method and we need to return one in order to have a proper inference. + # The method we're returning is capable of inferring the underlying MRO though. 
+ class MroBoundMethod(bases.BoundMethod): + def infer_call_result(self, caller, context=None): + yield other_self.attr___mro__ + + implicit_metaclass = self._instance.implicit_metaclass() + mro_method = implicit_metaclass.locals["mro"][0] + return MroBoundMethod(proxy=mro_method, bound=implicit_metaclass) + + @property + def attr___bases__(self): + obj = node_classes.Tuple() + context = InferenceContext() + elts = list(self._instance._inferred_bases(context)) + obj.postinit(elts=elts) + return obj + + @property + def attr___class__(self): + # pylint: disable=import-outside-toplevel; circular import + from astroid import helpers + + return helpers.object_type(self._instance) + + @property + def attr___subclasses__(self): + """Get the subclasses of the underlying class + + This looks only in the current module for retrieving the subclasses, + thus it might miss a couple of them. + """ + # pylint: disable=import-outside-toplevel; circular import + from astroid import bases + from astroid.nodes import scoped_nodes + + if not self._instance.newstyle: + raise AttributeInferenceError( + target=self._instance, attribute="__subclasses__" + ) + + qname = self._instance.qname() + root = self._instance.root() + classes = [ + cls + for cls in root.nodes_of_class(scoped_nodes.ClassDef) + if cls != self._instance and cls.is_subtype_of(qname, context=self.context) + ] + + obj = node_classes.List(parent=self._instance) + obj.postinit(classes) + + class SubclassesBoundMethod(bases.BoundMethod): + def infer_call_result(self, caller, context=None): + yield obj + + implicit_metaclass = self._instance.implicit_metaclass() + subclasses_method = implicit_metaclass.locals["__subclasses__"][0] + return SubclassesBoundMethod(proxy=subclasses_method, bound=implicit_metaclass) + + @property + def attr___dict__(self): + return node_classes.Dict(parent=self._instance) + + +class SuperModel(ObjectModel): + @property + def attr___thisclass__(self): + return self._instance.mro_pointer + + @property + def attr___self_class__(self): + return self._instance._self_class + + @property + def attr___self__(self): + return self._instance.type + + @property + def attr___class__(self): + return self._instance._proxied + + +class UnboundMethodModel(ObjectModel): + @property + def attr___class__(self): + # pylint: disable=import-outside-toplevel; circular import + from astroid import helpers + + return helpers.object_type(self._instance) + + @property + def attr___func__(self): + return self._instance._proxied + + @property + def attr___self__(self): + return node_classes.Const(value=None, parent=self._instance) + + attr_im_func = attr___func__ + attr_im_class = attr___class__ + attr_im_self = attr___self__ + + +class BoundMethodModel(FunctionModel): + @property + def attr___func__(self): + return self._instance._proxied._proxied + + @property + def attr___self__(self): + return self._instance.bound + + +class GeneratorModel(FunctionModel): + def __new__(cls, *args, **kwargs): + # Append the values from the GeneratorType unto this object. 
+ ret = super().__new__(cls, *args, **kwargs) + generator = AstroidManager().builtins_module["generator"] + for name, values in generator.locals.items(): + method = values[0] + + def patched(cls, meth=method): + return meth + + setattr(type(ret), IMPL_PREFIX + name, property(patched)) + + return ret + + @property + def attr___name__(self): + return node_classes.Const( + value=self._instance.parent.name, parent=self._instance + ) + + @property + def attr___doc__(self): + return node_classes.Const( + value=getattr(self._instance.parent.doc_node, "value", None), + parent=self._instance, + ) + + +class AsyncGeneratorModel(GeneratorModel): + def __new__(cls, *args, **kwargs): + # Append the values from the AGeneratorType unto this object. + ret = super().__new__(cls, *args, **kwargs) + astroid_builtins = AstroidManager().builtins_module + generator = astroid_builtins.get("async_generator") + if generator is None: + # Make it backward compatible. + generator = astroid_builtins.get("generator") + + for name, values in generator.locals.items(): + method = values[0] + + def patched(cls, meth=method): + return meth + + setattr(type(ret), IMPL_PREFIX + name, property(patched)) + + return ret + + +class InstanceModel(ObjectModel): + @property + def attr___class__(self): + return self._instance._proxied + + @property + def attr___module__(self): + return node_classes.Const(self._instance.root().qname()) + + @property + def attr___doc__(self): + return node_classes.Const(getattr(self._instance.doc_node, "value", None)) + + @property + def attr___dict__(self): + return _dunder_dict(self._instance, self._instance.instance_attrs) + + +# Exception instances + + +class ExceptionInstanceModel(InstanceModel): + @property + def attr_args(self): + message = node_classes.Const("") + args = node_classes.Tuple(parent=self._instance) + args.postinit((message,)) + return args + + @property + def attr___traceback__(self): + builtins_ast_module = AstroidManager().builtins_module + traceback_type = builtins_ast_module[types.TracebackType.__name__] + return traceback_type.instantiate_class() + + +class SyntaxErrorInstanceModel(ExceptionInstanceModel): + @property + def attr_text(self): + return node_classes.Const("") + + +class OSErrorInstanceModel(ExceptionInstanceModel): + @property + def attr_filename(self): + return node_classes.Const("") + + @property + def attr_errno(self): + return node_classes.Const(0) + + @property + def attr_strerror(self): + return node_classes.Const("") + + attr_filename2 = attr_filename + + +class ImportErrorInstanceModel(ExceptionInstanceModel): + @property + def attr_name(self): + return node_classes.Const("") + + @property + def attr_path(self): + return node_classes.Const("") + + +class UnicodeDecodeErrorInstanceModel(ExceptionInstanceModel): + @property + def attr_object(self): + return node_classes.Const("") + + +BUILTIN_EXCEPTIONS = { + "builtins.SyntaxError": SyntaxErrorInstanceModel, + "builtins.ImportError": ImportErrorInstanceModel, + "builtins.UnicodeDecodeError": UnicodeDecodeErrorInstanceModel, + # These are all similar to OSError in terms of attributes + "builtins.OSError": OSErrorInstanceModel, + "builtins.BlockingIOError": OSErrorInstanceModel, + "builtins.BrokenPipeError": OSErrorInstanceModel, + "builtins.ChildProcessError": OSErrorInstanceModel, + "builtins.ConnectionAbortedError": OSErrorInstanceModel, + "builtins.ConnectionError": OSErrorInstanceModel, + "builtins.ConnectionRefusedError": OSErrorInstanceModel, + "builtins.ConnectionResetError": OSErrorInstanceModel, + 
"builtins.FileExistsError": OSErrorInstanceModel, + "builtins.FileNotFoundError": OSErrorInstanceModel, + "builtins.InterruptedError": OSErrorInstanceModel, + "builtins.IsADirectoryError": OSErrorInstanceModel, + "builtins.NotADirectoryError": OSErrorInstanceModel, + "builtins.PermissionError": OSErrorInstanceModel, + "builtins.ProcessLookupError": OSErrorInstanceModel, + "builtins.TimeoutError": OSErrorInstanceModel, +} + + +class DictModel(ObjectModel): + @property + def attr___class__(self): + return self._instance._proxied + + def _generic_dict_attribute(self, obj, name): + """Generate a bound method that can infer the given *obj*.""" + + class DictMethodBoundMethod(astroid.BoundMethod): + def infer_call_result(self, caller, context=None): + yield obj + + meth = next(self._instance._proxied.igetattr(name), None) + return DictMethodBoundMethod(proxy=meth, bound=self._instance) + + @property + def attr_items(self): + elems = [] + obj = node_classes.List(parent=self._instance) + for key, value in self._instance.items: + elem = node_classes.Tuple(parent=obj) + elem.postinit((key, value)) + elems.append(elem) + obj.postinit(elts=elems) + + obj = objects.DictItems(obj) + return self._generic_dict_attribute(obj, "items") + + @property + def attr_keys(self): + keys = [key for (key, _) in self._instance.items] + obj = node_classes.List(parent=self._instance) + obj.postinit(elts=keys) + + obj = objects.DictKeys(obj) + return self._generic_dict_attribute(obj, "keys") + + @property + def attr_values(self): + + values = [value for (_, value) in self._instance.items] + obj = node_classes.List(parent=self._instance) + obj.postinit(values) + + obj = objects.DictValues(obj) + return self._generic_dict_attribute(obj, "values") + + +class PropertyModel(ObjectModel): + """Model for a builtin property""" + + # pylint: disable=import-outside-toplevel + def _init_function(self, name): + from astroid.nodes.node_classes import Arguments + from astroid.nodes.scoped_nodes import FunctionDef + + args = Arguments() + args.postinit( + args=[], + defaults=[], + kwonlyargs=[], + kw_defaults=[], + annotations=[], + posonlyargs=[], + posonlyargs_annotations=[], + kwonlyargs_annotations=[], + ) + + function = FunctionDef(name=name, parent=self._instance) + + function.postinit(args=args, body=[]) + return function + + @property + def attr_fget(self): + from astroid.nodes.scoped_nodes import FunctionDef + + func = self._instance + + class PropertyFuncAccessor(FunctionDef): + def infer_call_result(self, caller=None, context=None): + nonlocal func + if caller and len(caller.args) != 1: + raise InferenceError( + "fget() needs a single argument", target=self, context=context + ) + + yield from func.function.infer_call_result( + caller=caller, context=context + ) + + property_accessor = PropertyFuncAccessor(name="fget", parent=self._instance) + property_accessor.postinit(args=func.args, body=func.body) + return property_accessor + + @property + def attr_fset(self): + from astroid.nodes.scoped_nodes import FunctionDef + + func = self._instance + + def find_setter(func: "Property") -> Optional[astroid.FunctionDef]: + """ + Given a property, find the corresponding setter function and returns it. 
+ + :param func: property for which the setter has to be found + :return: the setter function or None + """ + for target in [ + t for t in func.parent.get_children() if t.name == func.function.name + ]: + for dec_name in target.decoratornames(): + if dec_name.endswith(func.function.name + ".setter"): + return target + return None + + func_setter = find_setter(func) + if not func_setter: + raise InferenceError( + f"Unable to find the setter of property {func.function.name}" + ) + + class PropertyFuncAccessor(FunctionDef): + def infer_call_result(self, caller=None, context=None): + nonlocal func_setter + if caller and len(caller.args) != 2: + raise InferenceError( + "fset() needs two arguments", target=self, context=context + ) + yield from func_setter.infer_call_result(caller=caller, context=context) + + property_accessor = PropertyFuncAccessor(name="fset", parent=self._instance) + property_accessor.postinit(args=func_setter.args, body=func_setter.body) + return property_accessor + + @property + def attr_setter(self): + return self._init_function("setter") + + @property + def attr_deleter(self): + return self._init_function("deleter") + + @property + def attr_getter(self): + return self._init_function("getter") + + # pylint: enable=import-outside-toplevel diff --git a/myenv/lib/python3.9/site-packages/astroid/manager.py b/myenv/lib/python3.9/site-packages/astroid/manager.py new file mode 100644 index 0000000..950842e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/manager.py @@ -0,0 +1,353 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""astroid manager: avoid multiple astroid build of a same module when +possible by providing a class responsible to get astroid representation +from various source and using a cache of built modules) +""" + +import os +import types +import zipimport +from typing import TYPE_CHECKING, ClassVar, List, Optional + +from astroid.exceptions import AstroidBuildingError, AstroidImportError +from astroid.interpreter._import import spec +from astroid.modutils import ( + NoSourceFile, + file_info_from_modpath, + get_source_file, + is_module_name_part_of_extension_package_whitelist, + is_python_source, + is_standard_module, + load_module_from_name, + modpath_from_file, +) +from astroid.transforms import TransformVisitor + +if TYPE_CHECKING: + from astroid import nodes + +ZIP_IMPORT_EXTS = (".zip", ".egg", ".whl", ".pyz", ".pyzw") + + +def safe_repr(obj): + try: + return repr(obj) + except Exception: # pylint: disable=broad-except + return "???" + + +class AstroidManager: + """Responsible to build astroid from files or modules. + + Use the Borg (singleton) pattern. 
+ """ + + name = "astroid loader" + brain = {} + max_inferable_values: ClassVar[int] = 100 + + def __init__(self): + self.__dict__ = AstroidManager.brain + if not self.__dict__: + # NOTE: cache entries are added by the [re]builder + self.astroid_cache = {} + self._mod_file_cache = {} + self._failed_import_hooks = [] + self.always_load_extensions = False + self.optimize_ast = False + self.extension_package_whitelist = set() + self._transform = TransformVisitor() + + @property + def register_transform(self): + # This and unregister_transform below are exported for convenience + return self._transform.register_transform + + @property + def unregister_transform(self): + return self._transform.unregister_transform + + @property + def builtins_module(self): + return self.astroid_cache["builtins"] + + def visit_transforms(self, node): + """Visit the transforms and apply them to the given *node*.""" + return self._transform.visit(node) + + def ast_from_file(self, filepath, modname=None, fallback=True, source=False): + """given a module name, return the astroid object""" + try: + filepath = get_source_file(filepath, include_no_ext=True) + source = True + except NoSourceFile: + pass + if modname is None: + try: + modname = ".".join(modpath_from_file(filepath)) + except ImportError: + modname = filepath + if ( + modname in self.astroid_cache + and self.astroid_cache[modname].file == filepath + ): + return self.astroid_cache[modname] + if source: + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import AstroidBuilder + + return AstroidBuilder(self).file_build(filepath, modname) + if fallback and modname: + return self.ast_from_module_name(modname) + raise AstroidBuildingError("Unable to build an AST for {path}.", path=filepath) + + def ast_from_string(self, data, modname="", filepath=None): + """Given some source code as a string, return its corresponding astroid object""" + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import AstroidBuilder + + return AstroidBuilder(self).string_build(data, modname, filepath) + + def _build_stub_module(self, modname): + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import AstroidBuilder + + return AstroidBuilder(self).string_build("", modname) + + def _build_namespace_module(self, modname: str, path: List[str]) -> "nodes.Module": + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import build_namespace_package_module + + return build_namespace_package_module(modname, path) + + def _can_load_extension(self, modname: str) -> bool: + if self.always_load_extensions: + return True + if is_standard_module(modname): + return True + return is_module_name_part_of_extension_package_whitelist( + modname, self.extension_package_whitelist + ) + + def ast_from_module_name(self, modname, context_file=None): + """given a module name, return the astroid object""" + if modname in self.astroid_cache: + return self.astroid_cache[modname] + if modname == "__main__": + return self._build_stub_module(modname) + if context_file: + old_cwd = os.getcwd() + os.chdir(os.path.dirname(context_file)) + try: + found_spec = self.file_from_module_name(modname, context_file) + if found_spec.type == spec.ModuleType.PY_ZIPMODULE: + module = self.zip_import_data(found_spec.location) + if module is not None: + return module + + elif found_spec.type in ( + spec.ModuleType.C_BUILTIN, + spec.ModuleType.C_EXTENSION, + ): + if ( + found_spec.type == 
spec.ModuleType.C_EXTENSION + and not self._can_load_extension(modname) + ): + return self._build_stub_module(modname) + try: + module = load_module_from_name(modname) + except Exception as e: + raise AstroidImportError( + "Loading {modname} failed with:\n{error}", + modname=modname, + path=found_spec.location, + ) from e + return self.ast_from_module(module, modname) + + elif found_spec.type == spec.ModuleType.PY_COMPILED: + raise AstroidImportError( + "Unable to load compiled module {modname}.", + modname=modname, + path=found_spec.location, + ) + + elif found_spec.type == spec.ModuleType.PY_NAMESPACE: + return self._build_namespace_module( + modname, found_spec.submodule_search_locations + ) + elif found_spec.type == spec.ModuleType.PY_FROZEN: + return self._build_stub_module(modname) + + if found_spec.location is None: + raise AstroidImportError( + "Can't find a file for module {modname}.", modname=modname + ) + + return self.ast_from_file(found_spec.location, modname, fallback=False) + except AstroidBuildingError as e: + for hook in self._failed_import_hooks: + try: + return hook(modname) + except AstroidBuildingError: + pass + raise e + finally: + if context_file: + os.chdir(old_cwd) + + def zip_import_data(self, filepath): + if zipimport is None: + return None + + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import AstroidBuilder + + builder = AstroidBuilder(self) + for ext in ZIP_IMPORT_EXTS: + try: + eggpath, resource = filepath.rsplit(ext + os.path.sep, 1) + except ValueError: + continue + try: + # pylint: disable-next=no-member + importer = zipimport.zipimporter(eggpath + ext) + zmodname = resource.replace(os.path.sep, ".") + if importer.is_package(resource): + zmodname = zmodname + ".__init__" + module = builder.string_build( + importer.get_source(resource), zmodname, filepath + ) + return module + except Exception: # pylint: disable=broad-except + continue + return None + + def file_from_module_name(self, modname, contextfile): + try: + value = self._mod_file_cache[(modname, contextfile)] + except KeyError: + try: + value = file_info_from_modpath( + modname.split("."), context_file=contextfile + ) + except ImportError as e: + value = AstroidImportError( + "Failed to import module {modname} with error:\n{error}.", + modname=modname, + # we remove the traceback here to save on memory usage (since these exceptions are cached) + error=e.with_traceback(None), + ) + self._mod_file_cache[(modname, contextfile)] = value + if isinstance(value, AstroidBuildingError): + # we remove the traceback here to save on memory usage (since these exceptions are cached) + raise value.with_traceback(None) + return value + + def ast_from_module(self, module: types.ModuleType, modname: Optional[str] = None): + """given an imported module, return the astroid object""" + modname = modname or module.__name__ + if modname in self.astroid_cache: + return self.astroid_cache[modname] + try: + # some builtin modules don't have __file__ attribute + filepath = module.__file__ + if is_python_source(filepath): + return self.ast_from_file(filepath, modname) + except AttributeError: + pass + + # pylint: disable=import-outside-toplevel; circular import + from astroid.builder import AstroidBuilder + + return AstroidBuilder(self).module_build(module, modname) + + def ast_from_class(self, klass, modname=None): + """get astroid for the given class""" + if modname is None: + try: + modname = klass.__module__ + except AttributeError as exc: + raise AstroidBuildingError( + "Unable to 
get module for class {class_name}.", + cls=klass, + class_repr=safe_repr(klass), + modname=modname, + ) from exc + modastroid = self.ast_from_module_name(modname) + return modastroid.getattr(klass.__name__)[0] # XXX + + def infer_ast_from_something(self, obj, context=None): + """infer astroid for the given class""" + if hasattr(obj, "__class__") and not isinstance(obj, type): + klass = obj.__class__ + else: + klass = obj + try: + modname = klass.__module__ + except AttributeError as exc: + raise AstroidBuildingError( + "Unable to get module for {class_repr}.", + cls=klass, + class_repr=safe_repr(klass), + ) from exc + except Exception as exc: + raise AstroidImportError( + "Unexpected error while retrieving module for {class_repr}:\n" + "{error}", + cls=klass, + class_repr=safe_repr(klass), + ) from exc + try: + name = klass.__name__ + except AttributeError as exc: + raise AstroidBuildingError( + "Unable to get name for {class_repr}:\n", + cls=klass, + class_repr=safe_repr(klass), + ) from exc + except Exception as exc: + raise AstroidImportError( + "Unexpected error while retrieving name for {class_repr}:\n" "{error}", + cls=klass, + class_repr=safe_repr(klass), + ) from exc + # take care, on living object __module__ is regularly wrong :( + modastroid = self.ast_from_module_name(modname) + if klass is obj: + for inferred in modastroid.igetattr(name, context): + yield inferred + else: + for inferred in modastroid.igetattr(name, context): + yield inferred.instantiate_class() + + def register_failed_import_hook(self, hook): + """Registers a hook to resolve imports that cannot be found otherwise. + + `hook` must be a function that accepts a single argument `modname` which + contains the name of the module or package that could not be imported. + If `hook` can resolve the import, must return a node of type `astroid.Module`, + otherwise, it must raise `AstroidBuildingError`. + """ + self._failed_import_hooks.append(hook) + + def cache_module(self, module): + """Cache a module if no module with the same name is known yet.""" + self.astroid_cache.setdefault(module.name, module) + + def bootstrap(self): + """Bootstrap the required AST modules needed for the manager to work + + The bootstrap usually involves building the AST for the builtins + module, which is required by the rest of astroid to work correctly. + """ + from astroid import raw_building # pylint: disable=import-outside-toplevel + + raw_building._astroid_bootstrapping() + + def clear_cache(self): + """Clear the underlying cache. Also bootstraps the builtins module.""" + self.astroid_cache.clear() + self.bootstrap() diff --git a/myenv/lib/python3.9/site-packages/astroid/mixins.py b/myenv/lib/python3.9/site-packages/astroid/mixins.py new file mode 100644 index 0000000..ea68aff --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/mixins.py @@ -0,0 +1,163 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""This module contains some mixins for the different nodes. 
+""" +import itertools +import sys +from typing import TYPE_CHECKING, Optional + +from astroid import decorators +from astroid.exceptions import AttributeInferenceError + +if TYPE_CHECKING: + from astroid import nodes + +if sys.version_info >= (3, 8) or TYPE_CHECKING: + from functools import cached_property +else: + from astroid.decorators import cachedproperty as cached_property + + +class BlockRangeMixIn: + """override block range""" + + @cached_property + def blockstart_tolineno(self): + return self.lineno + + def _elsed_block_range(self, lineno, orelse, last=None): + """handle block line numbers range for try/finally, for, if and while + statements + """ + if lineno == self.fromlineno: + return lineno, lineno + if orelse: + if lineno >= orelse[0].fromlineno: + return lineno, orelse[-1].tolineno + return lineno, orelse[0].fromlineno - 1 + return lineno, last or self.tolineno + + +class FilterStmtsMixin: + """Mixin for statement filtering and assignment type""" + + def _get_filtered_stmts(self, _, node, _stmts, mystmt: Optional["nodes.Statement"]): + """method used in _filter_stmts to get statements and trigger break""" + if self.statement(future=True) is mystmt: + # original node's statement is the assignment, only keep + # current node (gen exp, list comp) + return [node], True + return _stmts, False + + def assign_type(self): + return self + + +class AssignTypeMixin: + def assign_type(self): + return self + + def _get_filtered_stmts( + self, lookup_node, node, _stmts, mystmt: Optional["nodes.Statement"] + ): + """method used in filter_stmts""" + if self is mystmt: + return _stmts, True + if self.statement(future=True) is mystmt: + # original node's statement is the assignment, only keep + # current node (gen exp, list comp) + return [node], True + return _stmts, False + + +class ParentAssignTypeMixin(AssignTypeMixin): + def assign_type(self): + return self.parent.assign_type() + + +class ImportFromMixin(FilterStmtsMixin): + """MixIn for From and Import Nodes""" + + def _infer_name(self, frame, name): + return name + + def do_import_module(self, modname=None): + """return the ast for a module whose name is imported by """ + # handle special case where we are on a package node importing a module + # using the same name as the package, which may end in an infinite loop + # on relative imports + # XXX: no more needed ? + mymodule = self.root() + level = getattr(self, "level", None) # Import as no level + if modname is None: + modname = self.modname + # XXX we should investigate deeper if we really want to check + # importing itself: modname and mymodule.name be relative or absolute + if mymodule.relative_to_absolute_name(modname, level) == mymodule.name: + # FIXME: we used to raise InferenceError here, but why ? + return mymodule + + return mymodule.import_module( + modname, level=level, relative_only=level and level >= 1 + ) + + def real_name(self, asname): + """get name from 'as' name""" + for name, _asname in self.names: + if name == "*": + return asname + if not _asname: + name = name.split(".", 1)[0] + _asname = name + if asname == _asname: + return name + raise AttributeInferenceError( + "Could not find original name for {attribute} in {target!r}", + target=self, + attribute=asname, + ) + + +class MultiLineBlockMixin: + """Mixin for nodes with multi-line blocks, e.g. For and FunctionDef. + Note that this does not apply to every node with a `body` field. 
+ For instance, an If node has a multi-line body, but the body of an + IfExpr is not multi-line, and hence cannot contain Return nodes, + Assign nodes, etc. + """ + + @cached_property + def _multi_line_blocks(self): + return tuple(getattr(self, field) for field in self._multi_line_block_fields) + + def _get_return_nodes_skip_functions(self): + for block in self._multi_line_blocks: + for child_node in block: + if child_node.is_function: + continue + yield from child_node._get_return_nodes_skip_functions() + + def _get_yield_nodes_skip_lambdas(self): + for block in self._multi_line_blocks: + for child_node in block: + if child_node.is_lambda: + continue + yield from child_node._get_yield_nodes_skip_lambdas() + + @decorators.cached + def _get_assign_nodes(self): + children_assign_nodes = ( + child_node._get_assign_nodes() + for block in self._multi_line_blocks + for child_node in block + ) + return list(itertools.chain.from_iterable(children_assign_nodes)) + + +class NoChildrenMixin: + """Mixin for nodes with no children, e.g. Pass.""" + + def get_children(self): + yield from () diff --git a/myenv/lib/python3.9/site-packages/astroid/modutils.py b/myenv/lib/python3.9/site-packages/astroid/modutils.py new file mode 100644 index 0000000..01135ef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/modutils.py @@ -0,0 +1,626 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Python modules manipulation utility functions. + +:type PY_SOURCE_EXTS: tuple(str) +:var PY_SOURCE_EXTS: list of possible python source file extension + +:type STD_LIB_DIRS: set of str +:var STD_LIB_DIRS: directories where standard modules are located + +:type BUILTIN_MODULES: dict +:var BUILTIN_MODULES: dictionary with builtin module names has key +""" + +import importlib +import importlib.machinery +import importlib.util +import itertools +import os +import sys +import sysconfig +import types +from pathlib import Path +from typing import Dict, Set + +from astroid.const import IS_JYTHON, IS_PYPY +from astroid.interpreter._import import spec, util + +if sys.platform.startswith("win"): + PY_SOURCE_EXTS = ("py", "pyw") + PY_COMPILED_EXTS = ("dll", "pyd") +else: + PY_SOURCE_EXTS = ("py",) + PY_COMPILED_EXTS = ("so",) + + +# TODO: Adding `platstdlib` is a fix for a workaround in virtualenv. At some point we should +# revisit whether this is still necessary. See https://github.com/PyCQA/astroid/pull/1323. +STD_LIB_DIRS = {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")} + +if os.name == "nt": + STD_LIB_DIRS.add(os.path.join(sys.prefix, "dlls")) + try: + # real_prefix is defined when running inside virtual environments, + # created with the **virtualenv** library. + # Deprecated in virtualenv==16.7.9 + # See: https://github.com/pypa/virtualenv/issues/1622 + STD_LIB_DIRS.add(os.path.join(sys.real_prefix, "dlls")) # type: ignore[attr-defined] + except AttributeError: + # sys.base_exec_prefix is always defined, but in a virtual environment + # created with the stdlib **venv** module, it points to the original + # installation, if the virtual env is activated. 
+ try: + STD_LIB_DIRS.add(os.path.join(sys.base_exec_prefix, "dlls")) + except AttributeError: + pass + +if IS_PYPY and sys.version_info < (3, 8): + # PyPy stores the stdlib in two places: sys.prefix/lib_pypy and sys.prefix/lib-python/3 + # sysconfig.get_path on PyPy returns the first, but without an underscore so we patch this manually. + # Beginning with 3.8 the stdlib is only stored in: sys.prefix/pypy{py_version_short} + STD_LIB_DIRS.add(str(Path(sysconfig.get_path("stdlib")).parent / "lib_pypy")) + STD_LIB_DIRS.add(str(Path(sysconfig.get_path("stdlib")).parent / "lib-python/3")) + + # TODO: This is a fix for a workaround in virtualenv. At some point we should revisit + # whether this is still necessary. See https://github.com/PyCQA/astroid/pull/1324. + STD_LIB_DIRS.add(str(Path(sysconfig.get_path("platstdlib")).parent / "lib_pypy")) + STD_LIB_DIRS.add( + str(Path(sysconfig.get_path("platstdlib")).parent / "lib-python/3") + ) + +if os.name == "posix": + # Need the real prefix if we're in a virtualenv, otherwise + # the usual one will do. + # Deprecated in virtualenv==16.7.9 + # See: https://github.com/pypa/virtualenv/issues/1622 + try: + prefix = sys.real_prefix # type: ignore[attr-defined] + except AttributeError: + prefix = sys.prefix + + def _posix_path(path): + base_python = "python%d.%d" % sys.version_info[:2] + return os.path.join(prefix, path, base_python) + + STD_LIB_DIRS.add(_posix_path("lib")) + if sys.maxsize > 2**32: + # This tries to fix a problem with /usr/lib64 builds, + # where systems are running both 32-bit and 64-bit code + # on the same machine, which reflects into the places where + # standard library could be found. More details can be found + # here http://bugs.python.org/issue1294959. + # An easy reproducing case would be + # https://github.com/PyCQA/pylint/issues/712#issuecomment-163178753 + STD_LIB_DIRS.add(_posix_path("lib64")) + +EXT_LIB_DIRS = {sysconfig.get_path("purelib"), sysconfig.get_path("platlib")} +BUILTIN_MODULES = dict.fromkeys(sys.builtin_module_names, True) + + +class NoSourceFile(Exception): + """exception raised when we are not able to get a python + source file for a precompiled file + """ + + +def _normalize_path(path: str) -> str: + """Resolve symlinks in path and convert to absolute path. + + Note that environment variables and ~ in the path need to be expanded in + advance. + + This can be cached by using _cache_normalize_path. + """ + return os.path.normcase(os.path.realpath(path)) + + +def _path_from_filename(filename, is_jython=IS_JYTHON): + if not is_jython: + return filename + head, has_pyclass, _ = filename.partition("$py.class") + if has_pyclass: + return head + ".py" + return filename + + +def _handle_blacklist(blacklist, dirnames, filenames): + """remove files/directories in the black list + + dirnames/filenames are usually from os.walk + """ + for norecurs in blacklist: + if norecurs in dirnames: + dirnames.remove(norecurs) + elif norecurs in filenames: + filenames.remove(norecurs) + + +_NORM_PATH_CACHE: Dict[str, str] = {} + + +def _cache_normalize_path(path: str) -> str: + """Normalize path with caching.""" + # _module_file calls abspath on every path in sys.path every time it's + # called; on a larger codebase this easily adds up to half a second just + # assembling path components. This cache alleviates that. 
+ try: + return _NORM_PATH_CACHE[path] + except KeyError: + if not path: # don't cache result for '' + return _normalize_path(path) + result = _NORM_PATH_CACHE[path] = _normalize_path(path) + return result + + +def load_module_from_name(dotted_name: str) -> types.ModuleType: + """Load a Python module from its name. + + :type dotted_name: str + :param dotted_name: python name of a module or package + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + try: + return sys.modules[dotted_name] + except KeyError: + pass + + return importlib.import_module(dotted_name) + + +def load_module_from_modpath(parts): + """Load a python module from its split name. + + :type parts: list(str) or tuple(str) + :param parts: + python name of a module or package split on '.' + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + return load_module_from_name(".".join(parts)) + + +def load_module_from_file(filepath: str): + """Load a Python module from it's path. + + :type filepath: str + :param filepath: path to the python module or package + + :raise ImportError: if the module or package is not found + + :rtype: module + :return: the loaded module + """ + modpath = modpath_from_file(filepath) + return load_module_from_modpath(modpath) + + +def check_modpath_has_init(path, mod_path): + """check there are some __init__.py all along the way""" + modpath = [] + for part in mod_path: + modpath.append(part) + path = os.path.join(path, part) + if not _has_init(path): + old_namespace = util.is_namespace(".".join(modpath)) + if not old_namespace: + return False + return True + + +def _get_relative_base_path(filename, path_to_check): + """Extracts the relative mod path of the file to import from + + Check if a file is within the passed in path and if so, returns the + relative mod path from the one passed in. 
+ + If the filename is no in path_to_check, returns None + + Note this function will look for both abs and realpath of the file, + this allows to find the relative base path even if the file is a + symlink of a file in the passed in path + + Examples: + _get_relative_base_path("/a/b/c/d.py", "/a/b") -> ["c","d"] + _get_relative_base_path("/a/b/c/d.py", "/dev") -> None + """ + importable_path = None + path_to_check = os.path.normcase(path_to_check) + abs_filename = os.path.abspath(filename) + if os.path.normcase(abs_filename).startswith(path_to_check): + importable_path = abs_filename + + real_filename = os.path.realpath(filename) + if os.path.normcase(real_filename).startswith(path_to_check): + importable_path = real_filename + + # if "var" in path_to_check: + # breakpoint() + + if importable_path: + base_path = os.path.splitext(importable_path)[0] + relative_base_path = base_path[len(path_to_check) :] + return [pkg for pkg in relative_base_path.split(os.sep) if pkg] + + return None + + +def modpath_from_file_with_callback(filename, path=None, is_package_cb=None): + filename = os.path.expanduser(_path_from_filename(filename)) + paths_to_check = sys.path.copy() + if path: + paths_to_check += path + for pathname in itertools.chain( + paths_to_check, map(_cache_normalize_path, paths_to_check) + ): + if not pathname: + continue + modpath = _get_relative_base_path(filename, pathname) + if not modpath: + continue + if is_package_cb(pathname, modpath[:-1]): + return modpath + + raise ImportError( + "Unable to find module for {} in {}".format(filename, ", \n".join(sys.path)) + ) + + +def modpath_from_file(filename, path=None): + """Get the corresponding split module's name from a filename + + This function will return the name of a module or package split on `.`. + + :type filename: str + :param filename: file's path for which we want the module's name + + :type Optional[List[str]] path: + Optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :raise ImportError: + if the corresponding module's name has not been found + + :rtype: list(str) + :return: the corresponding split module's name + """ + return modpath_from_file_with_callback(filename, path, check_modpath_has_init) + + +def file_from_modpath(modpath, path=None, context_file=None): + return file_info_from_modpath(modpath, path, context_file).location + + +def file_info_from_modpath(modpath, path=None, context_file=None): + """given a mod path (i.e. split module / package name), return the + corresponding file, giving priority to source file over precompiled + file if it exists + + :type modpath: list or tuple + :param modpath: + split module's name (i.e name of a module or package split + on '.') + (this means explicit relative imports that start with dots have + empty strings in this list!) + + :type path: list or None + :param path: + optional list of path where the module or package should be + searched (use sys.path if nothing or None is given) + + :type context_file: str or None + :param context_file: + context file to consider, necessary if the identifier has been + introduced using a relative import unresolvable in the actual + context (i.e. 
modutils) + + :raise ImportError: if there is no such module in the directory + + :rtype: (str or None, import type) + :return: + the path to the module's file or None if it's an integrated + builtin module such as 'sys' + """ + if context_file is not None: + context = os.path.dirname(context_file) + else: + context = context_file + if modpath[0] == "xml": + # handle _xmlplus + try: + return _spec_from_modpath(["_xmlplus"] + modpath[1:], path, context) + except ImportError: + return _spec_from_modpath(modpath, path, context) + elif modpath == ["os", "path"]: + # FIXME: currently ignoring search_path... + return spec.ModuleSpec( + name="os.path", + location=os.path.__file__, + module_type=spec.ModuleType.PY_SOURCE, + ) + return _spec_from_modpath(modpath, path, context) + + +def get_module_part(dotted_name, context_file=None): + """given a dotted name return the module part of the name : + + >>> get_module_part('astroid.as_string.dump') + 'astroid.as_string' + + :type dotted_name: str + :param dotted_name: full name of the identifier we are interested in + + :type context_file: str or None + :param context_file: + context file to consider, necessary if the identifier has been + introduced using a relative import unresolvable in the actual + context (i.e. modutils) + + + :raise ImportError: if there is no such module in the directory + + :rtype: str or None + :return: + the module part of the name or None if we have not been able at + all to import the given name + + XXX: deprecated, since it doesn't handle package precedence over module + (see #10066) + """ + # os.path trick + if dotted_name.startswith("os.path"): + return "os.path" + parts = dotted_name.split(".") + if context_file is not None: + # first check for builtin module which won't be considered latter + # in that case (path != None) + if parts[0] in BUILTIN_MODULES: + if len(parts) > 2: + raise ImportError(dotted_name) + return parts[0] + # don't use += or insert, we want a new list to be created ! + path = None + starti = 0 + if parts[0] == "": + assert ( + context_file is not None + ), "explicit relative import, but no context_file?" + path = [] # prevent resolving the import non-relatively + starti = 1 + while parts[starti] == "": # for all further dots: change context + starti += 1 + context_file = os.path.dirname(context_file) + for i in range(starti, len(parts)): + try: + file_from_modpath( + parts[starti : i + 1], path=path, context_file=context_file + ) + except ImportError: + if i < max(1, len(parts) - 2): + raise + return ".".join(parts[:i]) + return dotted_name + + +def get_module_files(src_directory, blacklist, list_all=False): + """given a package directory return a list of all available python + module's files in the package and its subpackages + + :type src_directory: str + :param src_directory: + path of the directory corresponding to the package + + :type blacklist: list or tuple + :param blacklist: iterable + list of files or directories to ignore. 
+ + :type list_all: bool + :param list_all: + get files from all paths, including ones without __init__.py + + :rtype: list + :return: + the list of all available python module's files in the package and + its subpackages + """ + files = [] + for directory, dirnames, filenames in os.walk(src_directory): + if directory in blacklist: + continue + _handle_blacklist(blacklist, dirnames, filenames) + # check for __init__.py + if not list_all and "__init__.py" not in filenames: + dirnames[:] = () + continue + for filename in filenames: + if _is_python_file(filename): + src = os.path.join(directory, filename) + files.append(src) + return files + + +def get_source_file(filename, include_no_ext=False): + """given a python module's file name return the matching source file + name (the filename will be returned identically if it's already an + absolute path to a python source file...) + + :type filename: str + :param filename: python module's file name + + + :raise NoSourceFile: if no source file exists on the file system + + :rtype: str + :return: the absolute path of the source file if it exists + """ + filename = os.path.abspath(_path_from_filename(filename)) + base, orig_ext = os.path.splitext(filename) + for ext in PY_SOURCE_EXTS: + source_path = f"{base}.{ext}" + if os.path.exists(source_path): + return source_path + if include_no_ext and not orig_ext and os.path.exists(base): + return base + raise NoSourceFile(filename) + + +def is_python_source(filename): + """ + rtype: bool + return: True if the filename is a python source file + """ + return os.path.splitext(filename)[1][1:] in PY_SOURCE_EXTS + + +def is_standard_module(modname, std_path=None): + """try to guess if a module is a standard python module (by default, + see `std_path` parameter's description) + + :type modname: str + :param modname: name of the module we are interested in + + :type std_path: list(str) or tuple(str) + :param std_path: list of path considered has standard + + + :rtype: bool + :return: + true if the module: + - is located on the path listed in one of the directory in `std_path` + - is a built-in module + """ + modname = modname.split(".")[0] + try: + filename = file_from_modpath([modname]) + except ImportError: + # import failed, i'm probably not so wrong by supposing it's + # not standard... 
+ return False + # modules which are not living in a file are considered standard + # (sys and __builtin__ for instance) + if filename is None: + # we assume there are no namespaces in stdlib + return not util.is_namespace(modname) + filename = _normalize_path(filename) + for path in EXT_LIB_DIRS: + if filename.startswith(_cache_normalize_path(path)): + return False + if std_path is None: + std_path = STD_LIB_DIRS + + return any(filename.startswith(_cache_normalize_path(path)) for path in std_path) + + +def is_relative(modname, from_file): + """return true if the given module name is relative to the given + file name + + :type modname: str + :param modname: name of the module we are interested in + + :type from_file: str + :param from_file: + path of the module from which modname has been imported + + :rtype: bool + :return: + true if the module has been imported relatively to `from_file` + """ + if not os.path.isdir(from_file): + from_file = os.path.dirname(from_file) + if from_file in sys.path: + return False + return bool( + importlib.machinery.PathFinder.find_spec( + modname.split(".", maxsplit=1)[0], [from_file] + ) + ) + + +# internal only functions ##################################################### + + +def _spec_from_modpath(modpath, path=None, context=None): + """given a mod path (i.e. split module / package name), return the + corresponding spec + + this function is used internally, see `file_from_modpath`'s + documentation for more information + """ + assert modpath + location = None + if context is not None: + try: + found_spec = spec.find_spec(modpath, [context]) + location = found_spec.location + except ImportError: + found_spec = spec.find_spec(modpath, path) + location = found_spec.location + else: + found_spec = spec.find_spec(modpath, path) + if found_spec.type == spec.ModuleType.PY_COMPILED: + try: + location = get_source_file(found_spec.location) + return found_spec._replace( + location=location, type=spec.ModuleType.PY_SOURCE + ) + except NoSourceFile: + return found_spec._replace(location=location) + elif found_spec.type == spec.ModuleType.C_BUILTIN: + # integrated builtin module + return found_spec._replace(location=None) + elif found_spec.type == spec.ModuleType.PKG_DIRECTORY: + location = _has_init(found_spec.location) + return found_spec._replace(location=location, type=spec.ModuleType.PY_SOURCE) + return found_spec + + +def _is_python_file(filename): + """return true if the given filename should be considered as a python file + + .pyc and .pyo are ignored + """ + return filename.endswith((".py", ".so", ".pyd", ".pyw")) + + +def _has_init(directory): + """if the given directory has a valid __init__ file, return its path, + else return None + """ + mod_or_pack = os.path.join(directory, "__init__") + for ext in PY_SOURCE_EXTS + ("pyc", "pyo"): + if os.path.exists(mod_or_pack + "." + ext): + return mod_or_pack + "." 
+ ext + return None + + +def is_namespace(specobj): + return specobj.type == spec.ModuleType.PY_NAMESPACE + + +def is_directory(specobj): + return specobj.type == spec.ModuleType.PKG_DIRECTORY + + +def is_module_name_part_of_extension_package_whitelist( + module_name: str, package_whitelist: Set[str] +) -> bool: + """ + Returns True if one part of the module name is in the package whitelist + + >>> is_module_name_part_of_extension_package_whitelist('numpy.core.umath', {'numpy'}) + True + """ + parts = module_name.split(".") + return any( + ".".join(parts[:x]) in package_whitelist for x in range(1, len(parts) + 1) + ) diff --git a/myenv/lib/python3.9/site-packages/astroid/node_classes.py b/myenv/lib/python3.9/site-packages/astroid/node_classes.py new file mode 100644 index 0000000..3711309 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/node_classes.py @@ -0,0 +1,97 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +# pylint: disable=unused-import + +import warnings + +from astroid.nodes.node_classes import ( # pylint: disable=redefined-builtin (Ellipsis) + CONST_CLS, + AnnAssign, + Arguments, + Assert, + Assign, + AssignAttr, + AssignName, + AsyncFor, + AsyncWith, + Attribute, + AugAssign, + Await, + BaseContainer, + BinOp, + BoolOp, + Break, + Call, + Compare, + Comprehension, + Const, + Continue, + Decorators, + DelAttr, + Delete, + DelName, + Dict, + DictUnpack, + Ellipsis, + EmptyNode, + EvaluatedObject, + ExceptHandler, + Expr, + ExtSlice, + For, + FormattedValue, + Global, + If, + IfExp, + Import, + ImportFrom, + Index, + JoinedStr, + Keyword, + List, + LookupMixIn, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchMapping, + MatchOr, + MatchSequence, + MatchSingleton, + MatchStar, + MatchValue, + Name, + NamedExpr, + NodeNG, + Nonlocal, + Pass, + Pattern, + Raise, + Return, + Set, + Slice, + Starred, + Subscript, + TryExcept, + TryFinally, + Tuple, + UnaryOp, + Unknown, + While, + With, + Yield, + YieldFrom, + are_exclusive, + const_factory, + unpack_infer, +) + +# We cannot create a __all__ here because it would create a circular import +# Please remove astroid/scoped_nodes.py|astroid/node_classes.py in autoflake +# exclude when removing this file. +warnings.warn( + "The 'astroid.node_classes' module is deprecated and will be replaced by 'astroid.nodes' in astroid 3.0.0", + DeprecationWarning, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/__init__.py b/myenv/lib/python3.9/site-packages/astroid/nodes/__init__.py new file mode 100644 index 0000000..0a98ed1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/__init__.py @@ -0,0 +1,299 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Every available node class. + +.. seealso:: + :doc:`ast documentation ` + +All nodes inherit from :class:`~astroid.nodes.node_classes.NodeNG`. +""" + +# Nodes not present in the builtin ast module: DictUnpack, Unknown, and EvaluatedObject. 
+ +from astroid.nodes.node_classes import ( # pylint: disable=redefined-builtin (Ellipsis) + CONST_CLS, + AnnAssign, + Arguments, + Assert, + Assign, + AssignAttr, + AssignName, + AsyncFor, + AsyncWith, + Attribute, + AugAssign, + Await, + BaseContainer, + BinOp, + BoolOp, + Break, + Call, + Compare, + Comprehension, + Const, + Continue, + Decorators, + DelAttr, + Delete, + DelName, + Dict, + DictUnpack, + Ellipsis, + EmptyNode, + EvaluatedObject, + ExceptHandler, + Expr, + ExtSlice, + For, + FormattedValue, + Global, + If, + IfExp, + Import, + ImportFrom, + Index, + JoinedStr, + Keyword, + List, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchMapping, + MatchOr, + MatchSequence, + MatchSingleton, + MatchStar, + MatchValue, + Name, + NamedExpr, + NodeNG, + Nonlocal, + Pass, + Pattern, + Raise, + Return, + Set, + Slice, + Starred, + Statement, + Subscript, + TryExcept, + TryFinally, + Tuple, + UnaryOp, + Unknown, + While, + With, + Yield, + YieldFrom, + are_exclusive, + const_factory, + unpack_infer, +) +from astroid.nodes.scoped_nodes import ( + AsyncFunctionDef, + ClassDef, + ComprehensionScope, + DictComp, + FunctionDef, + GeneratorExp, + Lambda, + ListComp, + LocalsDictNodeNG, + Module, + SetComp, + builtin_lookup, + function_to_method, + get_wrapping_class, +) +from astroid.nodes.utils import Position + +_BaseContainer = BaseContainer # TODO Remove for astroid 3.0 + +ALL_NODE_CLASSES = ( + _BaseContainer, + BaseContainer, + AnnAssign, + Arguments, + Assert, + Assign, + AssignAttr, + AssignName, + AsyncFor, + AsyncFunctionDef, + AsyncWith, + Attribute, + AugAssign, + Await, + BinOp, + BoolOp, + Break, + Call, + ClassDef, + Compare, + Comprehension, + ComprehensionScope, + Const, + const_factory, + Continue, + Decorators, + DelAttr, + Delete, + DelName, + Dict, + DictComp, + DictUnpack, + Ellipsis, + EmptyNode, + EvaluatedObject, + ExceptHandler, + Expr, + ExtSlice, + For, + FormattedValue, + FunctionDef, + GeneratorExp, + Global, + If, + IfExp, + Import, + ImportFrom, + Index, + JoinedStr, + Keyword, + Lambda, + List, + ListComp, + LocalsDictNodeNG, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchMapping, + MatchOr, + MatchSequence, + MatchSingleton, + MatchStar, + MatchValue, + Module, + Name, + NamedExpr, + NodeNG, + Nonlocal, + Pass, + Pattern, + Raise, + Return, + Set, + SetComp, + Slice, + Starred, + Subscript, + TryExcept, + TryFinally, + Tuple, + UnaryOp, + Unknown, + While, + With, + Yield, + YieldFrom, +) + +__all__ = ( + "AnnAssign", + "are_exclusive", + "Arguments", + "Assert", + "Assign", + "AssignAttr", + "AssignName", + "AsyncFor", + "AsyncFunctionDef", + "AsyncWith", + "Attribute", + "AugAssign", + "Await", + "BinOp", + "BoolOp", + "Break", + "builtin_lookup", + "Call", + "ClassDef", + "CONST_CLS", + "Compare", + "Comprehension", + "ComprehensionScope", + "Const", + "const_factory", + "Continue", + "Decorators", + "DelAttr", + "Delete", + "DelName", + "Dict", + "DictComp", + "DictUnpack", + "Ellipsis", + "EmptyNode", + "EvaluatedObject", + "ExceptHandler", + "Expr", + "ExtSlice", + "For", + "FormattedValue", + "FunctionDef", + "function_to_method", + "GeneratorExp", + "get_wrapping_class", + "Global", + "If", + "IfExp", + "Import", + "ImportFrom", + "Index", + "JoinedStr", + "Keyword", + "Lambda", + "List", + "ListComp", + "LocalsDictNodeNG", + "Match", + "MatchAs", + "MatchCase", + "MatchClass", + "MatchMapping", + "MatchOr", + "MatchSequence", + "MatchSingleton", + "MatchStar", + "MatchValue", + "Module", + "Name", + "NamedExpr", + "NodeNG", + "Nonlocal", + 
"Pass", + "Position", + "Raise", + "Return", + "Set", + "SetComp", + "Slice", + "Starred", + "Statement", + "Subscript", + "TryExcept", + "TryFinally", + "Tuple", + "UnaryOp", + "Unknown", + "unpack_infer", + "While", + "With", + "Yield", + "YieldFrom", +) diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/as_string.py b/myenv/lib/python3.9/site-packages/astroid/nodes/as_string.py new file mode 100644 index 0000000..2e2bdcf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/as_string.py @@ -0,0 +1,650 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""This module renders Astroid nodes as string""" +from typing import TYPE_CHECKING, List, Optional + +if TYPE_CHECKING: + from astroid.nodes import Const + from astroid.nodes.node_classes import ( + Match, + MatchAs, + MatchCase, + MatchClass, + MatchMapping, + MatchOr, + MatchSequence, + MatchSingleton, + MatchStar, + MatchValue, + Unknown, + ) + +# pylint: disable=unused-argument + +DOC_NEWLINE = "\0" + + +# Visitor pattern require argument all the time and is not better with staticmethod +# noinspection PyUnusedLocal,PyMethodMayBeStatic +class AsStringVisitor: + """Visitor to render an Astroid node as a valid python code string""" + + def __init__(self, indent=" "): + self.indent = indent + + def __call__(self, node): + """Makes this visitor behave as a simple function""" + return node.accept(self).replace(DOC_NEWLINE, "\n") + + def _docs_dedent(self, doc_node: Optional["Const"]) -> str: + """Stop newlines in docs being indented by self._stmt_list""" + if not doc_node: + return "" + + return '\n{}"""{}"""'.format( + self.indent, doc_node.value.replace("\n", DOC_NEWLINE) + ) + + def _stmt_list(self, stmts, indent=True): + """return a list of nodes to string""" + stmts = "\n".join(nstr for nstr in [n.accept(self) for n in stmts] if nstr) + if indent: + return self.indent + stmts.replace("\n", "\n" + self.indent) + + return stmts + + def _precedence_parens(self, node, child, is_left=True): + """Wrap child in parens only if required to keep same semantics""" + if self._should_wrap(node, child, is_left): + return f"({child.accept(self)})" + + return child.accept(self) + + def _should_wrap(self, node, child, is_left): + """Wrap child if: + - it has lower precedence + - same precedence with position opposite to associativity direction + """ + node_precedence = node.op_precedence() + child_precedence = child.op_precedence() + + if node_precedence > child_precedence: + # 3 * (4 + 5) + return True + + if ( + node_precedence == child_precedence + and is_left != node.op_left_associative() + ): + # 3 - (4 - 5) + # (2**3)**4 + return True + + return False + + # visit_ methods ########################################### + + def visit_await(self, node): + return f"await {node.value.accept(self)}" + + def visit_asyncwith(self, node): + return f"async {self.visit_with(node)}" + + def visit_asyncfor(self, node): + return f"async {self.visit_for(node)}" + + def visit_arguments(self, node): + """return an astroid.Function node as string""" + return node.format_args() + + def visit_assignattr(self, node): + """return an astroid.AssAttr node as string""" + return self.visit_attribute(node) + + def visit_assert(self, node): + """return an astroid.Assert node as string""" + if node.fail: + return f"assert {node.test.accept(self)}, 
{node.fail.accept(self)}" + return f"assert {node.test.accept(self)}" + + def visit_assignname(self, node): + """return an astroid.AssName node as string""" + return node.name + + def visit_assign(self, node): + """return an astroid.Assign node as string""" + lhs = " = ".join(n.accept(self) for n in node.targets) + return f"{lhs} = {node.value.accept(self)}" + + def visit_augassign(self, node): + """return an astroid.AugAssign node as string""" + return f"{node.target.accept(self)} {node.op} {node.value.accept(self)}" + + def visit_annassign(self, node): + """Return an astroid.AugAssign node as string""" + + target = node.target.accept(self) + annotation = node.annotation.accept(self) + if node.value is None: + return f"{target}: {annotation}" + return f"{target}: {annotation} = {node.value.accept(self)}" + + def visit_binop(self, node): + """return an astroid.BinOp node as string""" + left = self._precedence_parens(node, node.left) + right = self._precedence_parens(node, node.right, is_left=False) + if node.op == "**": + return f"{left}{node.op}{right}" + + return f"{left} {node.op} {right}" + + def visit_boolop(self, node): + """return an astroid.BoolOp node as string""" + values = [f"{self._precedence_parens(node, n)}" for n in node.values] + return (f" {node.op} ").join(values) + + def visit_break(self, node): + """return an astroid.Break node as string""" + return "break" + + def visit_call(self, node): + """return an astroid.Call node as string""" + expr_str = self._precedence_parens(node, node.func) + args = [arg.accept(self) for arg in node.args] + if node.keywords: + keywords = [kwarg.accept(self) for kwarg in node.keywords] + else: + keywords = [] + + args.extend(keywords) + return f"{expr_str}({', '.join(args)})" + + def visit_classdef(self, node): + """return an astroid.ClassDef node as string""" + decorate = node.decorators.accept(self) if node.decorators else "" + args = [n.accept(self) for n in node.bases] + if node._metaclass and not node.has_metaclass_hack(): + args.append("metaclass=" + node._metaclass.accept(self)) + args += [n.accept(self) for n in node.keywords] + args = f"({', '.join(args)})" if args else "" + docs = self._docs_dedent(node.doc_node) + return "\n\n{}class {}{}:{}\n{}\n".format( + decorate, node.name, args, docs, self._stmt_list(node.body) + ) + + def visit_compare(self, node): + """return an astroid.Compare node as string""" + rhs_str = " ".join( + f"{op} {self._precedence_parens(node, expr, is_left=False)}" + for op, expr in node.ops + ) + return f"{self._precedence_parens(node, node.left)} {rhs_str}" + + def visit_comprehension(self, node): + """return an astroid.Comprehension node as string""" + ifs = "".join(f" if {n.accept(self)}" for n in node.ifs) + generated = f"for {node.target.accept(self)} in {node.iter.accept(self)}{ifs}" + return f"{'async ' if node.is_async else ''}{generated}" + + def visit_const(self, node): + """return an astroid.Const node as string""" + if node.value is Ellipsis: + return "..." 
+ return repr(node.value) + + def visit_continue(self, node): + """return an astroid.Continue node as string""" + return "continue" + + def visit_delete(self, node): # XXX check if correct + """return an astroid.Delete node as string""" + return f"del {', '.join(child.accept(self) for child in node.targets)}" + + def visit_delattr(self, node): + """return an astroid.DelAttr node as string""" + return self.visit_attribute(node) + + def visit_delname(self, node): + """return an astroid.DelName node as string""" + return node.name + + def visit_decorators(self, node): + """return an astroid.Decorators node as string""" + return "@%s\n" % "\n@".join(item.accept(self) for item in node.nodes) + + def visit_dict(self, node): + """return an astroid.Dict node as string""" + return "{%s}" % ", ".join(self._visit_dict(node)) + + def _visit_dict(self, node): + for key, value in node.items: + key = key.accept(self) + value = value.accept(self) + if key == "**": + # It can only be a DictUnpack node. + yield key + value + else: + yield f"{key}: {value}" + + def visit_dictunpack(self, node): + return "**" + + def visit_dictcomp(self, node): + """return an astroid.DictComp node as string""" + return "{{{}: {} {}}}".format( + node.key.accept(self), + node.value.accept(self), + " ".join(n.accept(self) for n in node.generators), + ) + + def visit_expr(self, node): + """return an astroid.Discard node as string""" + return node.value.accept(self) + + def visit_emptynode(self, node): + """dummy method for visiting an Empty node""" + return "" + + def visit_excepthandler(self, node): + if node.type: + if node.name: + excs = f"except {node.type.accept(self)} as {node.name.accept(self)}" + else: + excs = f"except {node.type.accept(self)}" + else: + excs = "except" + return f"{excs}:\n{self._stmt_list(node.body)}" + + def visit_empty(self, node): + """return an Empty node as string""" + return "" + + def visit_for(self, node): + """return an astroid.For node as string""" + fors = "for {} in {}:\n{}".format( + node.target.accept(self), node.iter.accept(self), self._stmt_list(node.body) + ) + if node.orelse: + fors = f"{fors}\nelse:\n{self._stmt_list(node.orelse)}" + return fors + + def visit_importfrom(self, node): + """return an astroid.ImportFrom node as string""" + return "from {} import {}".format( + "." * (node.level or 0) + node.modname, _import_string(node.names) + ) + + def visit_joinedstr(self, node): + string = "".join( + # Use repr on the string literal parts + # to get proper escapes, e.g. \n, \\, \" + # But strip the quotes off the ends + # (they will always be one character: ' or ") + repr(value.value)[1:-1] + # Literal braces must be doubled to escape them + .replace("{", "{{").replace("}", "}}") + # Each value in values is either a string literal (Const) + # or a FormattedValue + if type(value).__name__ == "Const" else value.accept(self) + for value in node.values + ) + + # Try to find surrounding quotes that don't appear at all in the string. + # Because the formatted values inside {} can't contain backslash (\) + # using a triple quote is sometimes necessary + for quote in ("'", '"', '"""', "'''"): + if quote not in string: + break + + return "f" + quote + string + quote + + def visit_formattedvalue(self, node): + result = node.value.accept(self) + if node.conversion and node.conversion >= 0: + # e.g. if node.conversion == 114: result += "!r" + result += "!" + chr(node.conversion) + if node.format_spec: + # The format spec is itself a JoinedString, i.e. 
an f-string + # We strip the f and quotes of the ends + result += ":" + node.format_spec.accept(self)[2:-1] + return "{%s}" % result + + def handle_functiondef(self, node, keyword): + """return a (possibly async) function definition node as string""" + decorate = node.decorators.accept(self) if node.decorators else "" + docs = self._docs_dedent(node.doc_node) + trailer = ":" + if node.returns: + return_annotation = " -> " + node.returns.as_string() + trailer = return_annotation + ":" + def_format = "\n%s%s %s(%s)%s%s\n%s" + return def_format % ( + decorate, + keyword, + node.name, + node.args.accept(self), + trailer, + docs, + self._stmt_list(node.body), + ) + + def visit_functiondef(self, node): + """return an astroid.FunctionDef node as string""" + return self.handle_functiondef(node, "def") + + def visit_asyncfunctiondef(self, node): + """return an astroid.AsyncFunction node as string""" + return self.handle_functiondef(node, "async def") + + def visit_generatorexp(self, node): + """return an astroid.GeneratorExp node as string""" + return "({} {})".format( + node.elt.accept(self), " ".join(n.accept(self) for n in node.generators) + ) + + def visit_attribute(self, node): + """return an astroid.Getattr node as string""" + left = self._precedence_parens(node, node.expr) + if left.isdigit(): + left = f"({left})" + return f"{left}.{node.attrname}" + + def visit_global(self, node): + """return an astroid.Global node as string""" + return f"global {', '.join(node.names)}" + + def visit_if(self, node): + """return an astroid.If node as string""" + ifs = [f"if {node.test.accept(self)}:\n{self._stmt_list(node.body)}"] + if node.has_elif_block(): + ifs.append(f"el{self._stmt_list(node.orelse, indent=False)}") + elif node.orelse: + ifs.append(f"else:\n{self._stmt_list(node.orelse)}") + return "\n".join(ifs) + + def visit_ifexp(self, node): + """return an astroid.IfExp node as string""" + return "{} if {} else {}".format( + self._precedence_parens(node, node.body, is_left=True), + self._precedence_parens(node, node.test, is_left=True), + self._precedence_parens(node, node.orelse, is_left=False), + ) + + def visit_import(self, node): + """return an astroid.Import node as string""" + return f"import {_import_string(node.names)}" + + def visit_keyword(self, node): + """return an astroid.Keyword node as string""" + if node.arg is None: + return f"**{node.value.accept(self)}" + return f"{node.arg}={node.value.accept(self)}" + + def visit_lambda(self, node): + """return an astroid.Lambda node as string""" + args = node.args.accept(self) + body = node.body.accept(self) + if args: + return f"lambda {args}: {body}" + + return f"lambda: {body}" + + def visit_list(self, node): + """return an astroid.List node as string""" + return f"[{', '.join(child.accept(self) for child in node.elts)}]" + + def visit_listcomp(self, node): + """return an astroid.ListComp node as string""" + return "[{} {}]".format( + node.elt.accept(self), " ".join(n.accept(self) for n in node.generators) + ) + + def visit_module(self, node): + """return an astroid.Module node as string""" + docs = f'"""{node.doc_node.value}"""\n\n' if node.doc_node else "" + return docs + "\n".join(n.accept(self) for n in node.body) + "\n\n" + + def visit_name(self, node): + """return an astroid.Name node as string""" + return node.name + + def visit_namedexpr(self, node): + """Return an assignment expression node as string""" + target = node.target.accept(self) + value = node.value.accept(self) + return f"{target} := {value}" + + def visit_nonlocal(self, 
node): + """return an astroid.Nonlocal node as string""" + return f"nonlocal {', '.join(node.names)}" + + def visit_pass(self, node): + """return an astroid.Pass node as string""" + return "pass" + + def visit_raise(self, node): + """return an astroid.Raise node as string""" + if node.exc: + if node.cause: + return f"raise {node.exc.accept(self)} from {node.cause.accept(self)}" + return f"raise {node.exc.accept(self)}" + return "raise" + + def visit_return(self, node): + """return an astroid.Return node as string""" + if node.is_tuple_return() and len(node.value.elts) > 1: + elts = [child.accept(self) for child in node.value.elts] + return f"return {', '.join(elts)}" + + if node.value: + return f"return {node.value.accept(self)}" + + return "return" + + def visit_set(self, node): + """return an astroid.Set node as string""" + return "{%s}" % ", ".join(child.accept(self) for child in node.elts) + + def visit_setcomp(self, node): + """return an astroid.SetComp node as string""" + return "{{{} {}}}".format( + node.elt.accept(self), " ".join(n.accept(self) for n in node.generators) + ) + + def visit_slice(self, node): + """return an astroid.Slice node as string""" + lower = node.lower.accept(self) if node.lower else "" + upper = node.upper.accept(self) if node.upper else "" + step = node.step.accept(self) if node.step else "" + if step: + return f"{lower}:{upper}:{step}" + return f"{lower}:{upper}" + + def visit_subscript(self, node): + """return an astroid.Subscript node as string""" + idx = node.slice + if idx.__class__.__name__.lower() == "index": + idx = idx.value + idxstr = idx.accept(self) + if idx.__class__.__name__.lower() == "tuple" and idx.elts: + # Remove parenthesis in tuple and extended slice. + # a[(::1, 1:)] is not valid syntax. + idxstr = idxstr[1:-1] + return f"{self._precedence_parens(node, node.value)}[{idxstr}]" + + def visit_tryexcept(self, node): + """return an astroid.TryExcept node as string""" + trys = [f"try:\n{self._stmt_list(node.body)}"] + for handler in node.handlers: + trys.append(handler.accept(self)) + if node.orelse: + trys.append(f"else:\n{self._stmt_list(node.orelse)}") + return "\n".join(trys) + + def visit_tryfinally(self, node): + """return an astroid.TryFinally node as string""" + return "try:\n{}\nfinally:\n{}".format( + self._stmt_list(node.body), self._stmt_list(node.finalbody) + ) + + def visit_tuple(self, node): + """return an astroid.Tuple node as string""" + if len(node.elts) == 1: + return f"({node.elts[0].accept(self)}, )" + return f"({', '.join(child.accept(self) for child in node.elts)})" + + def visit_unaryop(self, node): + """return an astroid.UnaryOp node as string""" + if node.op == "not": + operator = "not " + else: + operator = node.op + return f"{operator}{self._precedence_parens(node, node.operand)}" + + def visit_while(self, node): + """return an astroid.While node as string""" + whiles = f"while {node.test.accept(self)}:\n{self._stmt_list(node.body)}" + if node.orelse: + whiles = f"{whiles}\nelse:\n{self._stmt_list(node.orelse)}" + return whiles + + def visit_with(self, node): # 'with' without 'as' is possible + """return an astroid.With node as string""" + items = ", ".join( + f"{expr.accept(self)}" + (v and f" as {v.accept(self)}" or "") + for expr, v in node.items + ) + return f"with {items}:\n{self._stmt_list(node.body)}" + + def visit_yield(self, node): + """yield an ast.Yield node as string""" + yi_val = (" " + node.value.accept(self)) if node.value else "" + expr = "yield" + yi_val + if node.parent.is_statement: + return expr 
+ + return f"({expr})" + + def visit_yieldfrom(self, node): + """Return an astroid.YieldFrom node as string.""" + yi_val = (" " + node.value.accept(self)) if node.value else "" + expr = "yield from" + yi_val + if node.parent.is_statement: + return expr + + return f"({expr})" + + def visit_starred(self, node): + """return Starred node as string""" + return "*" + node.value.accept(self) + + def visit_match(self, node: "Match") -> str: + """Return an astroid.Match node as string.""" + return f"match {node.subject.accept(self)}:\n{self._stmt_list(node.cases)}" + + def visit_matchcase(self, node: "MatchCase") -> str: + """Return an astroid.MatchCase node as string.""" + guard_str = f" if {node.guard.accept(self)}" if node.guard else "" + return ( + f"case {node.pattern.accept(self)}{guard_str}:\n" + f"{self._stmt_list(node.body)}" + ) + + def visit_matchvalue(self, node: "MatchValue") -> str: + """Return an astroid.MatchValue node as string.""" + return node.value.accept(self) + + @staticmethod + def visit_matchsingleton(node: "MatchSingleton") -> str: + """Return an astroid.MatchSingleton node as string.""" + return str(node.value) + + def visit_matchsequence(self, node: "MatchSequence") -> str: + """Return an astroid.MatchSequence node as string.""" + if node.patterns is None: + return "[]" + return f"[{', '.join(p.accept(self) for p in node.patterns)}]" + + def visit_matchmapping(self, node: "MatchMapping") -> str: + """Return an astroid.MatchMapping node as string.""" + mapping_strings: List[str] = [] + if node.keys and node.patterns: + mapping_strings.extend( + f"{key.accept(self)}: {p.accept(self)}" + for key, p in zip(node.keys, node.patterns) + ) + if node.rest: + mapping_strings.append(f"**{node.rest.accept(self)}") + return f"{'{'}{', '.join(mapping_strings)}{'}'}" + + def visit_matchclass(self, node: "MatchClass") -> str: + """Return an astroid.MatchClass node as string.""" + if node.cls is None: + raise Exception(f"{node} does not have a 'cls' node") + class_strings: List[str] = [] + if node.patterns: + class_strings.extend(p.accept(self) for p in node.patterns) + if node.kwd_attrs and node.kwd_patterns: + for attr, pattern in zip(node.kwd_attrs, node.kwd_patterns): + class_strings.append(f"{attr}={pattern.accept(self)}") + return f"{node.cls.accept(self)}({', '.join(class_strings)})" + + def visit_matchstar(self, node: "MatchStar") -> str: + """Return an astroid.MatchStar node as string.""" + return f"*{node.name.accept(self) if node.name else '_'}" + + def visit_matchas(self, node: "MatchAs") -> str: + """Return an astroid.MatchAs node as string.""" + # pylint: disable=import-outside-toplevel + # Prevent circular dependency + from astroid.nodes.node_classes import MatchClass, MatchMapping, MatchSequence + + if isinstance(node.parent, (MatchSequence, MatchMapping, MatchClass)): + return node.name.accept(self) if node.name else "_" + return ( + f"{node.pattern.accept(self) if node.pattern else '_'}" + f"{f' as {node.name.accept(self)}' if node.name else ''}" + ) + + def visit_matchor(self, node: "MatchOr") -> str: + """Return an astroid.MatchOr node as string.""" + if node.patterns is None: + raise Exception(f"{node} does not have pattern nodes") + return " | ".join(p.accept(self) for p in node.patterns) + + # These aren't for real AST nodes, but for inference objects. 
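# ----------------------------------------------------------------------------
# A minimal usage sketch (not part of the vendored astroid sources), assuming
# only the public astroid API: extract_node() parses a snippet and
# NodeNG.as_string() drives an AsStringVisitor like the one defined above.
# The parsed snippet is invented for illustration.
import astroid

node = astroid.extract_node("x = (1 + 2) * 3")
# The _precedence_parens()/_should_wrap() helpers above compare operator
# precedences, so the lower-precedence "+" operand stays parenthesised
# under "*" when the tree is rendered back to source text.
print(node.as_string())  # x = (1 + 2) * 3
# ----------------------------------------------------------------------------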
+ + def visit_frozenset(self, node): + return node.parent.accept(self) + + def visit_super(self, node): + return node.parent.accept(self) + + def visit_uninferable(self, node): + return str(node) + + def visit_property(self, node): + return node.function.accept(self) + + def visit_evaluatedobject(self, node): + return node.original.accept(self) + + def visit_unknown(self, node: "Unknown") -> str: + return str(node) + + +def _import_string(names): + """return a list of (name, asname) formatted as a string""" + _names = [] + for name, asname in names: + if asname is not None: + _names.append(f"{name} as {asname}") + else: + _names.append(name) + return ", ".join(_names) + + +# This sets the default indent to 4 spaces. +to_code = AsStringVisitor(" ") diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/const.py b/myenv/lib/python3.9/site-packages/astroid/nodes/const.py new file mode 100644 index 0000000..6782cc3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/const.py @@ -0,0 +1,27 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +OP_PRECEDENCE = { + op: precedence + for precedence, ops in enumerate( + [ + ["Lambda"], # lambda x: x + 1 + ["IfExp"], # 1 if True else 2 + ["or"], + ["and"], + ["not"], + ["Compare"], # in, not in, is, is not, <, <=, >, >=, !=, == + ["|"], + ["^"], + ["&"], + ["<<", ">>"], + ["+", "-"], + ["*", "@", "/", "//", "%"], + ["UnaryOp"], # +, -, ~ + ["**"], + ["Await"], + ] + ) + for op in ops +} diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/node_classes.py b/myenv/lib/python3.9/site-packages/astroid/nodes/node_classes.py new file mode 100644 index 0000000..c941690 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/node_classes.py @@ -0,0 +1,5443 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Module for some node classes. 
More nodes in scoped_nodes.py""" + +import abc +import itertools +import sys +import typing +import warnings +from functools import lru_cache +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Generator, + Optional, + Type, + TypeVar, + Union, +) + +from astroid import decorators, mixins, util +from astroid.bases import Instance, _infer_stmts +from astroid.const import Context +from astroid.context import InferenceContext +from astroid.exceptions import ( + AstroidIndexError, + AstroidTypeError, + InferenceError, + NoDefault, + ParentMissingError, +) +from astroid.manager import AstroidManager +from astroid.nodes.const import OP_PRECEDENCE +from astroid.nodes.node_ng import NodeNG + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +if TYPE_CHECKING: + from astroid import nodes + from astroid.nodes import LocalsDictNodeNG + +if sys.version_info >= (3, 8) or TYPE_CHECKING: + # pylint: disable-next=ungrouped-imports + from functools import cached_property +else: + from astroid.decorators import cachedproperty as cached_property + + +def _is_const(value): + return isinstance(value, tuple(CONST_CLS)) + + +T_Nodes = TypeVar("T_Nodes", bound=NodeNG) + +AssignedStmtsPossibleNode = Union["List", "Tuple", "AssignName", "AssignAttr", None] +AssignedStmtsCall = Callable[ + [ + T_Nodes, + AssignedStmtsPossibleNode, + Optional[InferenceContext], + Optional[typing.List[int]], + ], + Any, +] + + +@decorators.raise_if_nothing_inferred +def unpack_infer(stmt, context=None): + """recursively generate nodes inferred by the given statement. + If the inferred value is a list or a tuple, recurse on the elements + """ + if isinstance(stmt, (List, Tuple)): + for elt in stmt.elts: + if elt is util.Uninferable: + yield elt + continue + yield from unpack_infer(elt, context) + return dict(node=stmt, context=context) + # if inferred is a final node, return it and stop + inferred = next(stmt.infer(context), util.Uninferable) + if inferred is stmt: + yield inferred + return dict(node=stmt, context=context) + # else, infer recursively, except Uninferable object that should be returned as is + for inferred in stmt.infer(context): + if inferred is util.Uninferable: + yield inferred + else: + yield from unpack_infer(inferred, context) + + return dict(node=stmt, context=context) + + +def are_exclusive(stmt1, stmt2, exceptions: Optional[typing.List[str]] = None) -> bool: + """return true if the two given statements are mutually exclusive + + `exceptions` may be a list of exception names. If specified, discard If + branches and check one of the statement is in an exception handler catching + one of the given exceptions. 
+ + algorithm : + 1) index stmt1's parents + 2) climb among stmt2's parents until we find a common parent + 3) if the common parent is a If or TryExcept statement, look if nodes are + in exclusive branches + """ + # index stmt1's parents + stmt1_parents = {} + children = {} + previous = stmt1 + for node in stmt1.node_ancestors(): + stmt1_parents[node] = 1 + children[node] = previous + previous = node + # climb among stmt2's parents until we find a common parent + previous = stmt2 + for node in stmt2.node_ancestors(): + if node in stmt1_parents: + # if the common parent is a If or TryExcept statement, look if + # nodes are in exclusive branches + if isinstance(node, If) and exceptions is None: + if ( + node.locate_child(previous)[1] + is not node.locate_child(children[node])[1] + ): + return True + elif isinstance(node, TryExcept): + c2attr, c2node = node.locate_child(previous) + c1attr, c1node = node.locate_child(children[node]) + if c1node is not c2node: + first_in_body_caught_by_handlers = ( + c2attr == "handlers" + and c1attr == "body" + and previous.catch(exceptions) + ) + second_in_body_caught_by_handlers = ( + c2attr == "body" + and c1attr == "handlers" + and children[node].catch(exceptions) + ) + first_in_else_other_in_handlers = ( + c2attr == "handlers" and c1attr == "orelse" + ) + second_in_else_other_in_handlers = ( + c2attr == "orelse" and c1attr == "handlers" + ) + if any( + ( + first_in_body_caught_by_handlers, + second_in_body_caught_by_handlers, + first_in_else_other_in_handlers, + second_in_else_other_in_handlers, + ) + ): + return True + elif c2attr == "handlers" and c1attr == "handlers": + return previous is not children[node] + return False + previous = node + return False + + +# getitem() helpers. + +_SLICE_SENTINEL = object() + + +def _slice_value(index, context=None): + """Get the value of the given slice index.""" + + if isinstance(index, Const): + if isinstance(index.value, (int, type(None))): + return index.value + elif index is None: + return None + else: + # Try to infer what the index actually is. + # Since we can't return all the possible values, + # we'll stop at the first possible value. + try: + inferred = next(index.infer(context=context)) + except (InferenceError, StopIteration): + pass + else: + if isinstance(inferred, Const): + if isinstance(inferred.value, (int, type(None))): + return inferred.value + + # Use a sentinel, because None can be a valid + # value that this function can return, + # as it is the case for unspecified bounds. 
+ return _SLICE_SENTINEL + + +def _infer_slice(node, context=None): + lower = _slice_value(node.lower, context) + upper = _slice_value(node.upper, context) + step = _slice_value(node.step, context) + if all(elem is not _SLICE_SENTINEL for elem in (lower, upper, step)): + return slice(lower, upper, step) + + raise AstroidTypeError( + message="Could not infer slice used in subscript", + node=node, + index=node.parent, + context=context, + ) + + +def _container_getitem(instance, elts, index, context=None): + """Get a slice or an item, using the given *index*, for the given sequence.""" + try: + if isinstance(index, Slice): + index_slice = _infer_slice(index, context=context) + new_cls = instance.__class__() + new_cls.elts = elts[index_slice] + new_cls.parent = instance.parent + return new_cls + if isinstance(index, Const): + return elts[index.value] + except IndexError as exc: + raise AstroidIndexError( + message="Index {index!s} out of range", + node=instance, + index=index, + context=context, + ) from exc + except TypeError as exc: + raise AstroidTypeError( + message="Type error {error!r}", node=instance, index=index, context=context + ) from exc + + raise AstroidTypeError(f"Could not use {index} as subscript index") + + +class Statement(NodeNG): + """Statement node adding a few attributes""" + + is_statement = True + """Whether this node indicates a statement.""" + + def next_sibling(self): + """The next sibling statement node. + + :returns: The next sibling statement node. + :rtype: NodeNG or None + """ + stmts = self.parent.child_sequence(self) + index = stmts.index(self) + try: + return stmts[index + 1] + except IndexError: + return None + + def previous_sibling(self): + """The previous sibling statement. + + :returns: The previous sibling statement node. + :rtype: NodeNG or None + """ + stmts = self.parent.child_sequence(self) + index = stmts.index(self) + if index >= 1: + return stmts[index - 1] + return None + + +class BaseContainer( + mixins.ParentAssignTypeMixin, NodeNG, Instance, metaclass=abc.ABCMeta +): + """Base class for Set, FrozenSet, Tuple and List.""" + + _astroid_fields = ("elts",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.elts: typing.List[NodeNG] = [] + """The elements in the node.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, elts: typing.List[NodeNG]) -> None: + """Do some setup after initialisation. + + :param elts: The list of elements the that node contains. + """ + self.elts = elts + + @classmethod + def from_elements(cls, elts=None): + """Create a node of this type from the given list of elements. + + :param elts: The list of elements that the node should contain. + :type elts: list(NodeNG) + + :returns: A new node containing the given elements. 
+ :rtype: NodeNG + """ + node = cls() + if elts is None: + node.elts = [] + else: + node.elts = [const_factory(e) if _is_const(e) else e for e in elts] + return node + + def itered(self): + """An iterator over the elements this node contains. + + :returns: The contents of this node. + :rtype: iterable(NodeNG) + """ + return self.elts + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + :rtype: bool or Uninferable + """ + return bool(self.elts) + + @abc.abstractmethod + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + + def get_children(self): + yield from self.elts + + +class LookupMixIn: + """Mixin to look up a name in the right scope.""" + + @lru_cache(maxsize=None) # pylint: disable=cache-max-size-none # noqa + def lookup(self, name: str) -> typing.Tuple[str, typing.List[NodeNG]]: + """Lookup where the given variable is assigned. + + The lookup starts from self's scope. If self is not a frame itself + and the name is found in the inner frame locals, statements will be + filtered to remove ignorable statements according to self's location. + + :param name: The name of the variable to find assignments for. + + :returns: The scope node and the list of assignments associated to the + given name according to the scope where it has been found (locals, + globals or builtin). + """ + return self.scope().scope_lookup(self, name) + + def ilookup(self, name): + """Lookup the inferred values of the given variable. + + :param name: The variable name to find values for. + :type name: str + + :returns: The inferred values of the statements returned from + :meth:`lookup`. + :rtype: iterable + """ + frame, stmts = self.lookup(name) + context = InferenceContext() + return _infer_stmts(stmts, context, frame) + + +# Name classes + + +class AssignName( + mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG +): + """Variation of :class:`ast.Assign` representing assignment to a name. + + An :class:`AssignName` is the name of something that is assigned to. + This includes variables defined in a function signature or in a loop. + + >>> import astroid + >>> node = astroid.extract_node('variable = range(10)') + >>> node + + >>> list(node.get_children()) + [, ] + >>> list(node.get_children())[0].as_string() + 'variable' + """ + + _other_fields = ("name",) + + @decorators.deprecate_default_argument_values(name="str") + def __init__( + self, + name: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param name: The name that is assigned to. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.name: Optional[str] = name + """The name that is assigned to.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + assigned_stmts: ClassVar[AssignedStmtsCall["AssignName"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + +class DelName( + mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG +): + """Variation of :class:`ast.Delete` representing deletion of a name. + + A :class:`DelName` is the name of something that is deleted. + + >>> import astroid + >>> node = astroid.extract_node("del variable #@") + >>> list(node.get_children()) + [] + >>> list(node.get_children())[0].as_string() + 'variable' + """ + + _other_fields = ("name",) + + @decorators.deprecate_default_argument_values(name="str") + def __init__( + self, + name: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param name: The name that is being deleted. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.name: Optional[str] = name + """The name that is being deleted.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + +class Name(mixins.NoChildrenMixin, LookupMixIn, NodeNG): + """Class representing an :class:`ast.Name` node. + + A :class:`Name` node is something that is named, but not covered by + :class:`AssignName` or :class:`DelName`. + + >>> import astroid + >>> node = astroid.extract_node('range(10)') + >>> node + + >>> list(node.get_children()) + [, ] + >>> list(node.get_children())[0].as_string() + 'range' + """ + + _other_fields = ("name",) + + @decorators.deprecate_default_argument_values(name="str") + def __init__( + self, + name: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param name: The name that this node refers to. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.name: Optional[str] = name + """The name that this node refers to.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def _get_name_nodes(self): + yield self + + for child_node in self.get_children(): + yield from child_node._get_name_nodes() + + +class Arguments(mixins.AssignTypeMixin, NodeNG): + """Class representing an :class:`ast.arguments` node. + + An :class:`Arguments` node represents that arguments in a + function definition. + + >>> import astroid + >>> node = astroid.extract_node('def foo(bar): pass') + >>> node + + >>> node.args + + """ + + # Python 3.4+ uses a different approach regarding annotations, + # each argument is a new class, _ast.arg, which exposes an + # 'annotation' attribute. In astroid though, arguments are exposed + # as is in the Arguments node and the only way to expose annotations + # is by using something similar with Python 3.3: + # - we expose 'varargannotation' and 'kwargannotation' of annotations + # of varargs and kwargs. + # - we expose 'annotation', a list with annotations for + # for each normal argument. If an argument doesn't have an + # annotation, its value will be None. + _astroid_fields = ( + "args", + "defaults", + "kwonlyargs", + "posonlyargs", + "posonlyargs_annotations", + "kw_defaults", + "annotations", + "varargannotation", + "kwargannotation", + "kwonlyargs_annotations", + "type_comment_args", + "type_comment_kwonlyargs", + "type_comment_posonlyargs", + ) + + _other_fields = ("vararg", "kwarg") + + lineno: None + col_offset: None + end_lineno: None + end_col_offset: None + + def __init__( + self, + vararg: Optional[str] = None, + kwarg: Optional[str] = None, + parent: Optional[NodeNG] = None, + ) -> None: + """ + :param vararg: The name of the variable length arguments. + + :param kwarg: The name of the variable length keyword arguments. + + :param parent: The parent node in the syntax tree. + """ + super().__init__(parent=parent) + + self.vararg: Optional[str] = vararg # can be None + """The name of the variable length arguments.""" + + self.kwarg: Optional[str] = kwarg # can be None + """The name of the variable length keyword arguments.""" + + self.args: Optional[typing.List[AssignName]] + """The names of the required arguments. + + Can be None if the associated function does not have a retrievable + signature and the arguments are therefore unknown. + This happens with builtin functions implemented in C. + """ + + self.defaults: typing.List[NodeNG] + """The default values for arguments that can be passed positionally.""" + + self.kwonlyargs: typing.List[AssignName] + """The keyword arguments that cannot be passed positionally.""" + + self.posonlyargs: typing.List[AssignName] = [] + """The arguments that can only be passed positionally.""" + + self.kw_defaults: typing.List[Optional[NodeNG]] + """The default values for keyword arguments that cannot be passed positionally.""" + + self.annotations: typing.List[Optional[NodeNG]] + """The type annotations of arguments that can be passed positionally.""" + + self.posonlyargs_annotations: typing.List[Optional[NodeNG]] = [] + """The type annotations of arguments that can only be passed positionally.""" + + self.kwonlyargs_annotations: typing.List[Optional[NodeNG]] = [] + """The type annotations of arguments that cannot be passed positionally.""" + + self.type_comment_args: typing.List[Optional[NodeNG]] = [] + """The type annotation, passed by a type comment, of each argument. 
+ + If an argument does not have a type comment, + the value for that argument will be None. + """ + + self.type_comment_kwonlyargs: typing.List[Optional[NodeNG]] = [] + """The type annotation, passed by a type comment, of each keyword only argument. + + If an argument does not have a type comment, + the value for that argument will be None. + """ + + self.type_comment_posonlyargs: typing.List[Optional[NodeNG]] = [] + """The type annotation, passed by a type comment, of each positional argument. + + If an argument does not have a type comment, + the value for that argument will be None. + """ + + self.varargannotation: Optional[NodeNG] = None # can be None + """The type annotation for the variable length arguments.""" + + self.kwargannotation: Optional[NodeNG] = None # can be None + """The type annotation for the variable length keyword arguments.""" + + # pylint: disable=too-many-arguments + def postinit( + self, + args: typing.List[AssignName], + defaults: typing.List[NodeNG], + kwonlyargs: typing.List[AssignName], + kw_defaults: typing.List[Optional[NodeNG]], + annotations: typing.List[Optional[NodeNG]], + posonlyargs: Optional[typing.List[AssignName]] = None, + kwonlyargs_annotations: Optional[typing.List[Optional[NodeNG]]] = None, + posonlyargs_annotations: Optional[typing.List[Optional[NodeNG]]] = None, + varargannotation: Optional[NodeNG] = None, + kwargannotation: Optional[NodeNG] = None, + type_comment_args: Optional[typing.List[Optional[NodeNG]]] = None, + type_comment_kwonlyargs: Optional[typing.List[Optional[NodeNG]]] = None, + type_comment_posonlyargs: Optional[typing.List[Optional[NodeNG]]] = None, + ) -> None: + """Do some setup after initialisation. + + :param args: The names of the required arguments. + + :param defaults: The default values for arguments that can be passed + positionally. + + :param kwonlyargs: The keyword arguments that cannot be passed + positionally. + + :param posonlyargs: The arguments that can only be passed + positionally. + + :param kw_defaults: The default values for keyword arguments that + cannot be passed positionally. + + :param annotations: The type annotations of arguments that can be + passed positionally. + + :param kwonlyargs_annotations: The type annotations of arguments that + cannot be passed positionally. This should always be passed in + Python 3. + + :param posonlyargs_annotations: The type annotations of arguments that + can only be passed positionally. This should always be passed in + Python 3. + + :param varargannotation: The type annotation for the variable length + arguments. + + :param kwargannotation: The type annotation for the variable length + keyword arguments. + + :param type_comment_args: The type annotation, + passed by a type comment, of each argument. + + :param type_comment_args: The type annotation, + passed by a type comment, of each keyword only argument. + + :param type_comment_args: The type annotation, + passed by a type comment, of each positional argument. 
+ """ + self.args = args + self.defaults = defaults + self.kwonlyargs = kwonlyargs + if posonlyargs is not None: + self.posonlyargs = posonlyargs + self.kw_defaults = kw_defaults + self.annotations = annotations + if kwonlyargs_annotations is not None: + self.kwonlyargs_annotations = kwonlyargs_annotations + if posonlyargs_annotations is not None: + self.posonlyargs_annotations = posonlyargs_annotations + self.varargannotation = varargannotation + self.kwargannotation = kwargannotation + if type_comment_args is not None: + self.type_comment_args = type_comment_args + if type_comment_kwonlyargs is not None: + self.type_comment_kwonlyargs = type_comment_kwonlyargs + if type_comment_posonlyargs is not None: + self.type_comment_posonlyargs = type_comment_posonlyargs + + assigned_stmts: ClassVar[AssignedStmtsCall["Arguments"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def _infer_name(self, frame, name): + if self.parent is frame: + return name + return None + + @cached_property + def fromlineno(self): + """The first line that this node appears on in the source code. + + :type: int or None + """ + lineno = super().fromlineno + return max(lineno, self.parent.fromlineno or 0) + + @cached_property + def arguments(self): + """Get all the arguments for this node, including positional only and positional and keyword""" + return list(itertools.chain((self.posonlyargs or ()), self.args or ())) + + def format_args(self): + """Get the arguments formatted as string. + + :returns: The formatted arguments. + :rtype: str + """ + result = [] + positional_only_defaults = [] + positional_or_keyword_defaults = self.defaults + if self.defaults: + args = self.args or [] + positional_or_keyword_defaults = self.defaults[-len(args) :] + positional_only_defaults = self.defaults[: len(self.defaults) - len(args)] + + if self.posonlyargs: + result.append( + _format_args( + self.posonlyargs, + positional_only_defaults, + self.posonlyargs_annotations, + ) + ) + result.append("/") + if self.args: + result.append( + _format_args( + self.args, + positional_or_keyword_defaults, + getattr(self, "annotations", None), + ) + ) + if self.vararg: + result.append(f"*{self.vararg}") + if self.kwonlyargs: + if not self.vararg: + result.append("*") + result.append( + _format_args( + self.kwonlyargs, self.kw_defaults, self.kwonlyargs_annotations + ) + ) + if self.kwarg: + result.append(f"**{self.kwarg}") + return ", ".join(result) + + def default_value(self, argname): + """Get the default value for an argument. + + :param argname: The name of the argument to get the default value for. + :type argname: str + + :raises NoDefault: If there is no default value defined for the + given argument. + """ + args = self.arguments + index = _find_arg(argname, args)[0] + if index is not None: + idx = index - (len(args) - len(self.defaults)) + if idx >= 0: + return self.defaults[idx] + index = _find_arg(argname, self.kwonlyargs)[0] + if index is not None and self.kw_defaults[index] is not None: + return self.kw_defaults[index] + raise NoDefault(func=self.parent, name=argname) + + def is_argument(self, name): + """Check if the given name is defined in the arguments. + + :param name: The name to check for. + :type name: str + + :returns: True if the given name is defined in the arguments, + False otherwise. 
+ :rtype: bool + """ + if name == self.vararg: + return True + if name == self.kwarg: + return True + return ( + self.find_argname(name, rec=True)[1] is not None + or self.kwonlyargs + and _find_arg(name, self.kwonlyargs, rec=True)[1] is not None + ) + + def find_argname(self, argname, rec=False): + """Get the index and :class:`AssignName` node for given name. + + :param argname: The name of the argument to search for. + :type argname: str + + :param rec: Whether or not to include arguments in unpacked tuples + in the search. + :type rec: bool + + :returns: The index and node for the argument. + :rtype: tuple(str or None, AssignName or None) + """ + if self.arguments: + return _find_arg(argname, self.arguments, rec) + return None, None + + def get_children(self): + yield from self.posonlyargs or () + + for elt in self.posonlyargs_annotations: + if elt is not None: + yield elt + + yield from self.args or () + + yield from self.defaults + yield from self.kwonlyargs + + for elt in self.kw_defaults: + if elt is not None: + yield elt + + for elt in self.annotations: + if elt is not None: + yield elt + + if self.varargannotation is not None: + yield self.varargannotation + + if self.kwargannotation is not None: + yield self.kwargannotation + + for elt in self.kwonlyargs_annotations: + if elt is not None: + yield elt + + +def _find_arg(argname, args, rec=False): + for i, arg in enumerate(args): + if isinstance(arg, Tuple): + if rec: + found = _find_arg(argname, arg.elts) + if found[0] is not None: + return found + elif arg.name == argname: + return i, arg + return None, None + + +def _format_args(args, defaults=None, annotations=None): + values = [] + if args is None: + return "" + if annotations is None: + annotations = [] + if defaults is not None: + default_offset = len(args) - len(defaults) + packed = itertools.zip_longest(args, annotations) + for i, (arg, annotation) in enumerate(packed): + if isinstance(arg, Tuple): + values.append(f"({_format_args(arg.elts)})") + else: + argname = arg.name + default_sep = "=" + if annotation is not None: + argname += ": " + annotation.as_string() + default_sep = " = " + values.append(argname) + + if defaults is not None and i >= default_offset: + if defaults[i - default_offset] is not None: + values[-1] += default_sep + defaults[i - default_offset].as_string() + return ", ".join(values) + + +class AssignAttr(mixins.ParentAssignTypeMixin, NodeNG): + """Variation of :class:`ast.Assign` representing assignment to an attribute. + + >>> import astroid + >>> node = astroid.extract_node('self.attribute = range(10)') + >>> node + + >>> list(node.get_children()) + [, ] + >>> list(node.get_children())[0].as_string() + 'self.attribute' + """ + + _astroid_fields = ("expr",) + _other_fields = ("attrname",) + + @decorators.deprecate_default_argument_values(attrname="str") + def __init__( + self, + attrname: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param attrname: The name of the attribute being assigned to. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. 
Note: This is after the last symbol. + """ + self.expr: Optional[NodeNG] = None + """What has the attribute that is being assigned to.""" + + self.attrname: Optional[str] = attrname + """The name of the attribute being assigned to.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, expr: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param expr: What has the attribute that is being assigned to. + """ + self.expr = expr + + assigned_stmts: ClassVar[AssignedStmtsCall["AssignAttr"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def get_children(self): + yield self.expr + + +class Assert(Statement): + """Class representing an :class:`ast.Assert` node. + + An :class:`Assert` node represents an assert statement. + + >>> import astroid + >>> node = astroid.extract_node('assert len(things) == 10, "Not enough things"') + >>> node + + """ + + _astroid_fields = ("test", "fail") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.test: Optional[NodeNG] = None + """The test that passes or fails the assertion.""" + + self.fail: Optional[NodeNG] = None # can be None + """The message shown when the assertion fails.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, test: Optional[NodeNG] = None, fail: Optional[NodeNG] = None + ) -> None: + """Do some setup after initialisation. + + :param test: The test that passes or fails the assertion. + + :param fail: The message shown when the assertion fails. + """ + self.fail = fail + self.test = test + + def get_children(self): + yield self.test + + if self.fail is not None: + yield self.fail + + +class Assign(mixins.AssignTypeMixin, Statement): + """Class representing an :class:`ast.Assign` node. + + An :class:`Assign` is a statement where something is explicitly + asssigned to. + + >>> import astroid + >>> node = astroid.extract_node('variable = range(10)') + >>> node + + """ + + _astroid_fields = ("targets", "value") + _other_other_fields = ("type_annotation",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.targets: typing.List[NodeNG] = [] + """What is being assigned to.""" + + self.value: Optional[NodeNG] = None + """The value being assigned to the variables.""" + + self.type_annotation: Optional[NodeNG] = None # can be None + """If present, this will contain the type annotation passed by a type comment""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + targets: Optional[typing.List[NodeNG]] = None, + value: Optional[NodeNG] = None, + type_annotation: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param targets: What is being assigned to. + :param value: The value being assigned to the variables. + :param type_annotation: + """ + if targets is not None: + self.targets = targets + self.value = value + self.type_annotation = type_annotation + + assigned_stmts: ClassVar[AssignedStmtsCall["Assign"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def get_children(self): + yield from self.targets + + yield self.value + + @decorators.cached + def _get_assign_nodes(self): + return [self] + list(self.value._get_assign_nodes()) + + def _get_yield_nodes_skip_lambdas(self): + yield from self.value._get_yield_nodes_skip_lambdas() + + +class AnnAssign(mixins.AssignTypeMixin, Statement): + """Class representing an :class:`ast.AnnAssign` node. + + An :class:`AnnAssign` is an assignment with a type annotation. + + >>> import astroid + >>> node = astroid.extract_node('variable: List[int] = range(10)') + >>> node + + """ + + _astroid_fields = ("target", "annotation", "value") + _other_fields = ("simple",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.target: Optional[NodeNG] = None + """What is being assigned to.""" + + self.annotation: Optional[NodeNG] = None + """The type annotation of what is being assigned to.""" + + self.value: Optional[NodeNG] = None # can be None + """The value being assigned to the variables.""" + + self.simple: Optional[int] = None + """Whether :attr:`target` is a pure name or a complex statement.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + target: NodeNG, + annotation: NodeNG, + simple: int, + value: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param target: What is being assigned to. + + :param annotation: The type annotation of what is being assigned to. + + :param simple: Whether :attr:`target` is a pure name + or a complex statement. + + :param value: The value being assigned to the variables. 
+ """ + self.target = target + self.annotation = annotation + self.value = value + self.simple = simple + + assigned_stmts: ClassVar[AssignedStmtsCall["AnnAssign"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def get_children(self): + yield self.target + yield self.annotation + + if self.value is not None: + yield self.value + + +class AugAssign(mixins.AssignTypeMixin, Statement): + """Class representing an :class:`ast.AugAssign` node. + + An :class:`AugAssign` is an assignment paired with an operator. + + >>> import astroid + >>> node = astroid.extract_node('variable += 1') + >>> node + + """ + + _astroid_fields = ("target", "value") + _other_fields = ("op",) + + @decorators.deprecate_default_argument_values(op="str") + def __init__( + self, + op: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param op: The operator that is being combined with the assignment. + This includes the equals sign. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.target: Optional[NodeNG] = None + """What is being assigned to.""" + + self.op: Optional[str] = op + """The operator that is being combined with the assignment. + + This includes the equals sign. + """ + + self.value: Optional[NodeNG] = None + """The value being assigned to the variable.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, target: Optional[NodeNG] = None, value: Optional[NodeNG] = None + ) -> None: + """Do some setup after initialisation. + + :param target: What is being assigned to. + + :param value: The value being assigned to the variable. + """ + self.target = target + self.value = value + + assigned_stmts: ClassVar[AssignedStmtsCall["AugAssign"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + # This is set by inference.py + def _infer_augassign(self, context=None): + raise NotImplementedError + + def type_errors(self, context=None): + """Get a list of type errors which can occur during inference. + + Each TypeError is represented by a :class:`BadBinaryOperationMessage` , + which holds the original exception. + + :returns: The list of possible type errors. + :rtype: list(BadBinaryOperationMessage) + """ + try: + results = self._infer_augassign(context=context) + return [ + result + for result in results + if isinstance(result, util.BadBinaryOperationMessage) + ] + except InferenceError: + return [] + + def get_children(self): + yield self.target + yield self.value + + def _get_yield_nodes_skip_lambdas(self): + """An AugAssign node can contain a Yield node in the value""" + yield from self.value._get_yield_nodes_skip_lambdas() + yield from super()._get_yield_nodes_skip_lambdas() + + +class BinOp(NodeNG): + """Class representing an :class:`ast.BinOp` node. 
+ + A :class:`BinOp` node is an application of a binary operator. + + >>> import astroid + >>> node = astroid.extract_node('a + b') + >>> node + + """ + + _astroid_fields = ("left", "right") + _other_fields = ("op",) + + @decorators.deprecate_default_argument_values(op="str") + def __init__( + self, + op: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param op: The operator. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.left: Optional[NodeNG] = None + """What is being applied to the operator on the left side.""" + + self.op: Optional[str] = op + """The operator.""" + + self.right: Optional[NodeNG] = None + """What is being applied to the operator on the right side.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, left: Optional[NodeNG] = None, right: Optional[NodeNG] = None + ) -> None: + """Do some setup after initialisation. + + :param left: What is being applied to the operator on the left side. + + :param right: What is being applied to the operator on the right side. + """ + self.left = left + self.right = right + + # This is set by inference.py + def _infer_binop(self, context=None): + raise NotImplementedError + + def type_errors(self, context=None): + """Get a list of type errors which can occur during inference. + + Each TypeError is represented by a :class:`BadBinaryOperationMessage`, + which holds the original exception. + + :returns: The list of possible type errors. + :rtype: list(BadBinaryOperationMessage) + """ + try: + results = self._infer_binop(context=context) + return [ + result + for result in results + if isinstance(result, util.BadBinaryOperationMessage) + ] + except InferenceError: + return [] + + def get_children(self): + yield self.left + yield self.right + + def op_precedence(self): + return OP_PRECEDENCE[self.op] + + def op_left_associative(self): + # 2**3**4 == 2**(3**4) + return self.op != "**" + + +class BoolOp(NodeNG): + """Class representing an :class:`ast.BoolOp` node. + + A :class:`BoolOp` is an application of a boolean operator. + + >>> import astroid + >>> node = astroid.extract_node('a and b') + >>> node + + """ + + _astroid_fields = ("values",) + _other_fields = ("op",) + + @decorators.deprecate_default_argument_values(op="str") + def __init__( + self, + op: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param op: The operator. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. 
Note: This is after the last symbol. + """ + self.op: Optional[str] = op + """The operator.""" + + self.values: typing.List[NodeNG] = [] + """The values being applied to the operator.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, values: Optional[typing.List[NodeNG]] = None) -> None: + """Do some setup after initialisation. + + :param values: The values being applied to the operator. + """ + if values is not None: + self.values = values + + def get_children(self): + yield from self.values + + def op_precedence(self): + return OP_PRECEDENCE[self.op] + + +class Break(mixins.NoChildrenMixin, Statement): + """Class representing an :class:`ast.Break` node. + + >>> import astroid + >>> node = astroid.extract_node('break') + >>> node + + """ + + +class Call(NodeNG): + """Class representing an :class:`ast.Call` node. + + A :class:`Call` node is a call to a function, method, etc. + + >>> import astroid + >>> node = astroid.extract_node('function()') + >>> node + + """ + + _astroid_fields = ("func", "args", "keywords") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.func: Optional[NodeNG] = None + """What is being called.""" + + self.args: typing.List[NodeNG] = [] + """The positional arguments being given to the call.""" + + self.keywords: typing.List["Keyword"] = [] + """The keyword arguments being given to the call.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + func: Optional[NodeNG] = None, + args: Optional[typing.List[NodeNG]] = None, + keywords: Optional[typing.List["Keyword"]] = None, + ) -> None: + """Do some setup after initialisation. + + :param func: What is being called. + + :param args: The positional arguments being given to the call. + + :param keywords: The keyword arguments being given to the call. + """ + self.func = func + if args is not None: + self.args = args + if keywords is not None: + self.keywords = keywords + + @property + def starargs(self) -> typing.List["Starred"]: + """The positional arguments that unpack something.""" + return [arg for arg in self.args if isinstance(arg, Starred)] + + @property + def kwargs(self) -> typing.List["Keyword"]: + """The keyword arguments that unpack something.""" + return [keyword for keyword in self.keywords if keyword.arg is None] + + def get_children(self): + yield self.func + + yield from self.args + + yield from self.keywords + + +class Compare(NodeNG): + """Class representing an :class:`ast.Compare` node. + + A :class:`Compare` node indicates a comparison. 
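For orientation, a minimal sketch of how a Compare node's left/ops fields can be read through astroid's public extract_node helper; the expression and variable names are illustrative assumptions, not taken from this patch:

import astroid

node = astroid.extract_node("a <= b <= c")                    # yields the Compare node
print(node.left.as_string())                                  # 'a' -- the left-most operand
# node.ops pairs each remaining operator with its right-hand operand
print([(op, right.as_string()) for op, right in node.ops])    # [('<=', 'b'), ('<=', 'c')]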
+ + >>> import astroid + >>> node = astroid.extract_node('a <= b <= c') + >>> node + + >>> node.ops + [('<=', ), ('<=', )] + """ + + _astroid_fields = ("left", "ops") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.left: Optional[NodeNG] = None + """The value at the left being applied to a comparison operator.""" + + self.ops: typing.List[typing.Tuple[str, NodeNG]] = [] + """The remainder of the operators and their relevant right hand value.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + left: Optional[NodeNG] = None, + ops: Optional[typing.List[typing.Tuple[str, NodeNG]]] = None, + ) -> None: + """Do some setup after initialisation. + + :param left: The value at the left being applied to a comparison + operator. + + :param ops: The remainder of the operators + and their relevant right hand value. + """ + self.left = left + if ops is not None: + self.ops = ops + + def get_children(self): + """Get the child nodes below this node. + + Overridden to handle the tuple fields and skip returning the operator + strings. + + :returns: The children. + :rtype: iterable(NodeNG) + """ + yield self.left + for _, comparator in self.ops: + yield comparator # we don't want the 'op' + + def last_child(self): + """An optimized version of list(get_children())[-1] + + :returns: The last child. + :rtype: NodeNG + """ + # XXX maybe if self.ops: + return self.ops[-1][1] + # return self.left + + +class Comprehension(NodeNG): + """Class representing an :class:`ast.comprehension` node. + + A :class:`Comprehension` indicates the loop inside any type of + comprehension including generator expressions. + + >>> import astroid + >>> node = astroid.extract_node('[x for x in some_values]') + >>> list(node.get_children()) + [, ] + >>> list(node.get_children())[1].as_string() + 'for x in some_values' + """ + + _astroid_fields = ("target", "iter", "ifs") + _other_fields = ("is_async",) + + optional_assign = True + """Whether this node optionally assigns a variable.""" + + lineno: None + col_offset: None + end_lineno: None + end_col_offset: None + + def __init__(self, parent: Optional[NodeNG] = None) -> None: + """ + :param parent: The parent node in the syntax tree. + """ + self.target: Optional[NodeNG] = None + """What is assigned to by the comprehension.""" + + self.iter: Optional[NodeNG] = None + """What is iterated over by the comprehension.""" + + self.ifs: typing.List[NodeNG] = [] + """The contents of any if statements that filter the comprehension.""" + + self.is_async: Optional[bool] = None + """Whether this is an asynchronous comprehension or not.""" + + super().__init__(parent=parent) + + # pylint: disable=redefined-builtin; same name as builtin ast module. 
+ def postinit( + self, + target: Optional[NodeNG] = None, + iter: Optional[NodeNG] = None, + ifs: Optional[typing.List[NodeNG]] = None, + is_async: Optional[bool] = None, + ) -> None: + """Do some setup after initialisation. + + :param target: What is assigned to by the comprehension. + + :param iter: What is iterated over by the comprehension. + + :param ifs: The contents of any if statements that filter + the comprehension. + + :param is_async: Whether this is an asynchronous comprehension or not. + """ + self.target = target + self.iter = iter + if ifs is not None: + self.ifs = ifs + self.is_async = is_async + + assigned_stmts: ClassVar[AssignedStmtsCall["Comprehension"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def assign_type(self): + """The type of assignment that this node performs. + + :returns: The assignment type. + :rtype: NodeNG + """ + return self + + def _get_filtered_stmts( + self, lookup_node, node, stmts, mystmt: Optional[Statement] + ): + """method used in filter_stmts""" + if self is mystmt: + if isinstance(lookup_node, (Const, Name)): + return [lookup_node], True + + elif self.statement(future=True) is mystmt: + # original node's statement is the assignment, only keeps + # current node (gen exp, list comp) + + return [node], True + + return stmts, False + + def get_children(self): + yield self.target + yield self.iter + + yield from self.ifs + + +class Const(mixins.NoChildrenMixin, NodeNG, Instance): + """Class representing any constant including num, str, bool, None, bytes. + + >>> import astroid + >>> node = astroid.extract_node('(5, "This is a string.", True, None, b"bytes")') + >>> node + + >>> list(node.get_children()) + [, + , + , + , + ] + """ + + _other_fields = ("value", "kind") + + def __init__( + self, + value: Any, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + kind: Optional[str] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param value: The value that the constant represents. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param kind: The string prefix. "u" for u-prefixed strings and ``None`` otherwise. Python 3.8+ only. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Any = value + """The value that the constant represents.""" + + self.kind: Optional[str] = kind # can be None + """"The string prefix. "u" for u-prefixed strings and ``None`` otherwise. Python 3.8+ only.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def __getattr__(self, name): + # This is needed because of Proxy's __getattr__ method. 
+ # Calling object.__new__ on this class without calling + # __init__ would result in an infinite loop otherwise + # since __getattr__ is called when an attribute doesn't + # exist and self._proxied indirectly calls self.value + # and Proxy __getattr__ calls self.value + if name == "value": + raise AttributeError + return super().__getattr__(name) + + def getitem(self, index, context=None): + """Get an item from this node if subscriptable. + + :param index: The node to use as a subscript index. + :type index: Const or Slice + + :raises AstroidTypeError: When the given index cannot be used as a + subscript index, or if this node is not subscriptable. + """ + if isinstance(index, Const): + index_value = index.value + elif isinstance(index, Slice): + index_value = _infer_slice(index, context=context) + + else: + raise AstroidTypeError( + f"Could not use type {type(index)} as subscript index" + ) + + try: + if isinstance(self.value, (str, bytes)): + return Const(self.value[index_value]) + except IndexError as exc: + raise AstroidIndexError( + message="Index {index!r} out of range", + node=self, + index=index, + context=context, + ) from exc + except TypeError as exc: + raise AstroidTypeError( + message="Type error {error!r}", node=self, index=index, context=context + ) from exc + + raise AstroidTypeError(f"{self!r} (value={self.value})") + + def has_dynamic_getattr(self): + """Check if the node has a custom __getattr__ or __getattribute__. + + :returns: True if the class has a custom + __getattr__ or __getattribute__, False otherwise. + For a :class:`Const` this is always ``False``. + :rtype: bool + """ + return False + + def itered(self): + """An iterator over the elements this node contains. + + :returns: The contents of this node. + :rtype: iterable(Const) + + :raises TypeError: If this node does not represent something that is iterable. + """ + if isinstance(self.value, str): + return [const_factory(elem) for elem in self.value] + raise TypeError(f"Cannot iterate over type {type(self.value)!r}") + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return self._proxied.qname() + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + :rtype: bool + """ + return bool(self.value) + + +class Continue(mixins.NoChildrenMixin, Statement): + """Class representing an :class:`ast.Continue` node. + + >>> import astroid + >>> node = astroid.extract_node('continue') + >>> node + + """ + + +class Decorators(NodeNG): + """A node representing a list of decorators. + + A :class:`Decorators` is the decorators that are applied to + a method or function. + + >>> import astroid + >>> node = astroid.extract_node(''' + @property + def my_property(self): + return 3 + ''') + >>> node + + >>> list(node.get_children())[0] + + """ + + _astroid_fields = ("nodes",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. 
Note: This is after the last symbol. + """ + self.nodes: typing.List[NodeNG] + """The decorators that this node contains. + + :type: list(Name or Call) or None + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, nodes: typing.List[NodeNG]) -> None: + """Do some setup after initialisation. + + :param nodes: The decorators that this node contains. + :type nodes: list(Name or Call) + """ + self.nodes = nodes + + def scope(self) -> "LocalsDictNodeNG": + """The first parent node defining a new scope. + These can be Module, FunctionDef, ClassDef, Lambda, or GeneratorExp nodes. + + :returns: The first parent scope node. + """ + # skip the function node to go directly to the upper level scope + if not self.parent: + raise ParentMissingError(target=self) + if not self.parent.parent: + raise ParentMissingError(target=self.parent) + return self.parent.parent.scope() + + def get_children(self): + yield from self.nodes + + +class DelAttr(mixins.ParentAssignTypeMixin, NodeNG): + """Variation of :class:`ast.Delete` representing deletion of an attribute. + + >>> import astroid + >>> node = astroid.extract_node('del self.attr') + >>> node + + >>> list(node.get_children())[0] + + """ + + _astroid_fields = ("expr",) + _other_fields = ("attrname",) + + @decorators.deprecate_default_argument_values(attrname="str") + def __init__( + self, + attrname: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param attrname: The name of the attribute that is being deleted. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.expr: Optional[NodeNG] = None + """The name that this node represents. + + :type: Name or None + """ + + self.attrname: Optional[str] = attrname + """The name of the attribute that is being deleted.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, expr: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param expr: The name that this node represents. + :type expr: Name or None + """ + self.expr = expr + + def get_children(self): + yield self.expr + + +class Delete(mixins.AssignTypeMixin, Statement): + """Class representing an :class:`ast.Delete` node. + + A :class:`Delete` is a ``del`` statement this is deleting something. + + >>> import astroid + >>> node = astroid.extract_node('del self.attr') + >>> node + + """ + + _astroid_fields = ("targets",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. 
+ + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.targets: typing.List[NodeNG] = [] + """What is being deleted.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, targets: Optional[typing.List[NodeNG]] = None) -> None: + """Do some setup after initialisation. + + :param targets: What is being deleted. + """ + if targets is not None: + self.targets = targets + + def get_children(self): + yield from self.targets + + +class Dict(NodeNG, Instance): + """Class representing an :class:`ast.Dict` node. + + A :class:`Dict` is a dictionary that is created with ``{}`` syntax. + + >>> import astroid + >>> node = astroid.extract_node('{1: "1"}') + >>> node + + """ + + _astroid_fields = ("items",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.items: typing.List[typing.Tuple[NodeNG, NodeNG]] = [] + """The key-value pairs contained in the dictionary.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, items: typing.List[typing.Tuple[NodeNG, NodeNG]]) -> None: + """Do some setup after initialisation. + + :param items: The key-value pairs contained in the dictionary. + """ + self.items = items + + @classmethod + def from_elements(cls, items=None): + """Create a :class:`Dict` of constants from a live dictionary. + + :param items: The items to store in the node. + :type items: dict + + :returns: The created dictionary node. + :rtype: Dict + """ + node = cls() + if items is None: + node.items = [] + else: + node.items = [ + (const_factory(k), const_factory(v) if _is_const(v) else v) + for k, v in items.items() + # The keys need to be constants + if _is_const(k) + ] + return node + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.dict" + + def get_children(self): + """Get the key and value nodes below this node. + + Children are returned in the order that they are defined in the source + code, key first then the value. + + :returns: The children. + :rtype: iterable(NodeNG) + """ + for key, value in self.items: + yield key + yield value + + def last_child(self): + """An optimized version of list(get_children())[-1] + + :returns: The last child, or None if no children exist. + :rtype: NodeNG or None + """ + if self.items: + return self.items[-1][1] + return None + + def itered(self): + """An iterator over the keys this node contains. + + :returns: The keys of this node. + :rtype: iterable(NodeNG) + """ + return [key for (key, _) in self.items] + + def getitem(self, index, context=None): + """Get an item from this node. 
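A rough usage sketch for this Dict node's helpers (from_elements, itered, getitem); the dictionary values here are purely illustrative:

import astroid
from astroid import nodes

d = astroid.extract_node('{1: "one", 2: "two"}')    # a Dict node
print(d.pytype())                                   # 'builtins.dict'
print([k.value for k in d.itered()])                # [1, 2] -- keys only
print(d.getitem(nodes.Const(1)).value)              # 'one'
built = nodes.Dict.from_elements({1: "one"})        # build a Dict node from a live dict
print(built.items[0][0].value)                      # 1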
+ + :param index: The node to use as a subscript index. + :type index: Const or Slice + + :raises AstroidTypeError: When the given index cannot be used as a + subscript index, or if this node is not subscriptable. + :raises AstroidIndexError: If the given index does not exist in the + dictionary. + """ + for key, value in self.items: + # TODO(cpopa): no support for overriding yet, {1:2, **{1: 3}}. + if isinstance(key, DictUnpack): + try: + return value.getitem(index, context) + except (AstroidTypeError, AstroidIndexError): + continue + for inferredkey in key.infer(context): + if inferredkey is util.Uninferable: + continue + if isinstance(inferredkey, Const) and isinstance(index, Const): + if inferredkey.value == index.value: + return value + + raise AstroidIndexError(index) + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + :rtype: bool + """ + return bool(self.items) + + +class Expr(Statement): + """Class representing an :class:`ast.Expr` node. + + An :class:`Expr` is any expression that does not have its value used or + stored. + + >>> import astroid + >>> node = astroid.extract_node('method()') + >>> node + + >>> node.parent + + """ + + _astroid_fields = ("value",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None + """What the expression does.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param value: What the expression does. + """ + self.value = value + + def get_children(self): + yield self.value + + def _get_yield_nodes_skip_lambdas(self): + if not self.value.is_lambda: + yield from self.value._get_yield_nodes_skip_lambdas() + + +class Ellipsis(mixins.NoChildrenMixin, NodeNG): # pylint: disable=redefined-builtin + """Class representing an :class:`ast.Ellipsis` node. + + An :class:`Ellipsis` is the ``...`` syntax. + + Deprecated since v2.6.0 - Use :class:`Const` instead. + Will be removed with the release v2.7.0 + """ + + +class EmptyNode(mixins.NoChildrenMixin, NodeNG): + """Holds an arbitrary object in the :attr:`LocalsDictNodeNG.locals`.""" + + object = None + + +class ExceptHandler(mixins.MultiLineBlockMixin, mixins.AssignTypeMixin, Statement): + """Class representing an :class:`ast.ExceptHandler`. node. + + An :class:`ExceptHandler` is an ``except`` block on a try-except. 
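A brief, assumed usage sketch showing how an ExceptHandler can be pulled out of a parsed try/except and queried with its catch() helper; the handled exception names are illustrative:

import astroid

tryexcept = astroid.extract_node('''
try:
    do_something()
except (ValueError, KeyError) as error:
    pass
''')
handler = tryexcept.handlers[0]           # the ExceptHandler node
print(handler.name.name)                  # 'error' -- the AssignName for the caught exception
print(handler.catch(["ValueError"]))      # True -- one of the handled types matches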
+ + >>> import astroid + >>> node = astroid.extract_node(''' + try: + do_something() + except Exception as error: + print("Error!") + ''') + >>> node + + >>> node.handlers + [] + """ + + _astroid_fields = ("type", "name", "body") + _multi_line_block_fields = ("body",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.type: Optional[NodeNG] = None # can be None + """The types that the block handles. + + :type: Tuple or NodeNG or None + """ + + self.name: Optional[AssignName] = None # can be None + """The name that the caught exception is assigned to.""" + + self.body: typing.List[NodeNG] = [] + """The contents of the block.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + assigned_stmts: ClassVar[AssignedStmtsCall["ExceptHandler"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def get_children(self): + if self.type is not None: + yield self.type + + if self.name is not None: + yield self.name + + yield from self.body + + # pylint: disable=redefined-builtin; had to use the same name as builtin ast module. + def postinit( + self, + type: Optional[NodeNG] = None, + name: Optional[AssignName] = None, + body: Optional[typing.List[NodeNG]] = None, + ) -> None: + """Do some setup after initialisation. + + :param type: The types that the block handles. + :type type: Tuple or NodeNG or None + + :param name: The name that the caught exception is assigned to. + + :param body:The contents of the block. + """ + self.type = type + self.name = name + if body is not None: + self.body = body + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + if self.name: + return self.name.tolineno + if self.type: + return self.type.tolineno + return self.lineno + + def catch(self, exceptions: Optional[typing.List[str]]) -> bool: + """Check if this node handles any of the given + + :param exceptions: The names of the exceptions to check for. + """ + if self.type is None or exceptions is None: + return True + return any(node.name in exceptions for node in self.type._get_name_nodes()) + + +class ExtSlice(NodeNG): + """Class representing an :class:`ast.ExtSlice` node. + + An :class:`ExtSlice` is a complex slice expression. + + Deprecated since v2.6.0 - Now part of the :class:`Subscript` node. + Will be removed with the release of v2.7.0 + """ + + +class For( + mixins.MultiLineBlockMixin, + mixins.BlockRangeMixIn, + mixins.AssignTypeMixin, + Statement, +): + """Class representing an :class:`ast.For` node. 
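A small illustrative sketch (names assumed) of inspecting a parsed For node's main fields via extract_node:

import astroid

loop = astroid.extract_node("for thing in things: print(thing)")
print(loop.target.name)       # 'thing' -- what the loop assigns to
print(loop.iter.name)         # 'things' -- what the loop iterates over
print(loop.optional_assign)   # True -- the loop variable may never actually be bound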
+ + >>> import astroid + >>> node = astroid.extract_node('for thing in things: print(thing)') + >>> node + + """ + + _astroid_fields = ("target", "iter", "body", "orelse") + _other_other_fields = ("type_annotation",) + _multi_line_block_fields = ("body", "orelse") + + optional_assign = True + """Whether this node optionally assigns a variable. + + This is always ``True`` for :class:`For` nodes. + """ + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.target: Optional[NodeNG] = None + """What the loop assigns to.""" + + self.iter: Optional[NodeNG] = None + """What the loop iterates over.""" + + self.body: typing.List[NodeNG] = [] + """The contents of the body of the loop.""" + + self.orelse: typing.List[NodeNG] = [] + """The contents of the ``else`` block of the loop.""" + + self.type_annotation: Optional[NodeNG] = None # can be None + """If present, this will contain the type annotation passed by a type comment""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + # pylint: disable=redefined-builtin; had to use the same name as builtin ast module. + def postinit( + self, + target: Optional[NodeNG] = None, + iter: Optional[NodeNG] = None, + body: Optional[typing.List[NodeNG]] = None, + orelse: Optional[typing.List[NodeNG]] = None, + type_annotation: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param target: What the loop assigns to. + + :param iter: What the loop iterates over. + + :param body: The contents of the body of the loop. + + :param orelse: The contents of the ``else`` block of the loop. + """ + self.target = target + self.iter = iter + if body is not None: + self.body = body + if orelse is not None: + self.orelse = orelse + self.type_annotation = type_annotation + + assigned_stmts: ClassVar[AssignedStmtsCall["For"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + return self.iter.tolineno + + def get_children(self): + yield self.target + yield self.iter + + yield from self.body + yield from self.orelse + + +class AsyncFor(For): + """Class representing an :class:`ast.AsyncFor` node. + + An :class:`AsyncFor` is an asynchronous :class:`For` built with + the ``async`` keyword. + + >>> import astroid + >>> node = astroid.extract_node(''' + async def func(things): + async for thing in things: + print(thing) + ''') + >>> node + + >>> node.body[0] + + """ + + +class Await(NodeNG): + """Class representing an :class:`ast.Await` node. + + An :class:`Await` is the ``await`` keyword. 
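A minimal assumed sketch of how an Await node shows up inside an async function parsed with extract_node; the function and call names are invented for illustration:

import astroid

func = astroid.extract_node('''
async def fetch():
    await get_data()
''')
await_node = func.body[0].value        # the Expr statement wraps the Await node
print(await_node.value.func.name)      # 'get_data' -- the awaited Call's target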
+ + >>> import astroid + >>> node = astroid.extract_node(''' + async def func(things): + await other_func() + ''') + >>> node + + >>> node.body[0] + + >>> list(node.body[0].get_children())[0] + + """ + + _astroid_fields = ("value",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None + """What to wait for.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param value: What to wait for. + """ + self.value = value + + def get_children(self): + yield self.value + + +class ImportFrom(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement): + """Class representing an :class:`ast.ImportFrom` node. + + >>> import astroid + >>> node = astroid.extract_node('from my_package import my_module') + >>> node + + """ + + _other_fields = ("modname", "names", "level") + + def __init__( + self, + fromname: Optional[str], + names: typing.List[typing.Tuple[str, Optional[str]]], + level: Optional[int] = 0, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param fromname: The module that is being imported from. + + :param names: What is being imported from the module. + + :param level: The level of relative import. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.modname: Optional[str] = fromname # can be None + """The module that is being imported from. + + This is ``None`` for relative imports. + """ + + self.names: typing.List[typing.Tuple[str, Optional[str]]] = names + """What is being imported from the module. + + Each entry is a :class:`tuple` of the name being imported, + and the alias that the name is assigned to (if any). + """ + + # TODO When is 'level' None? + self.level: Optional[int] = level # can be None + """The level of relative import. + + Essentially this is the number of dots in the import. + This is always 0 for absolute imports. 
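An illustrative check (module names assumed) of how modname, names and level end up populated on an ImportFrom node:

import astroid

imp = astroid.extract_node("from ..package import thing as alias")
print(imp.modname)   # 'package'
print(imp.names)     # [('thing', 'alias')]
print(imp.level)     # 2 -- two leading dots, i.e. a relative import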
+ """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + +class Attribute(NodeNG): + """Class representing an :class:`ast.Attribute` node.""" + + _astroid_fields = ("expr",) + _other_fields = ("attrname",) + + @decorators.deprecate_default_argument_values(attrname="str") + def __init__( + self, + attrname: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param attrname: The name of the attribute. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.expr: Optional[NodeNG] = None + """The name that this node represents. + + :type: Name or None + """ + + self.attrname: Optional[str] = attrname + """The name of the attribute.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, expr: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param expr: The name that this node represents. + :type expr: Name or None + """ + self.expr = expr + + def get_children(self): + yield self.expr + + +class Global(mixins.NoChildrenMixin, Statement): + """Class representing an :class:`ast.Global` node. + + >>> import astroid + >>> node = astroid.extract_node('global a_global') + >>> node + + """ + + _other_fields = ("names",) + + def __init__( + self, + names: typing.List[str], + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param names: The names being declared as global. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.names: typing.List[str] = names + """The names being declared as global.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def _infer_name(self, frame, name): + return name + + +class If(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): + """Class representing an :class:`ast.If` node. + + >>> import astroid + >>> node = astroid.extract_node('if condition: print(True)') + >>> node + + """ + + _astroid_fields = ("test", "body", "orelse") + _multi_line_block_fields = ("body", "orelse") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. 
+ + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.test: Optional[NodeNG] = None + """The condition that the statement tests.""" + + self.body: typing.List[NodeNG] = [] + """The contents of the block.""" + + self.orelse: typing.List[NodeNG] = [] + """The contents of the ``else`` block.""" + + self.is_orelse: bool = False + """Whether the if-statement is the orelse-block of another if statement.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + test: Optional[NodeNG] = None, + body: Optional[typing.List[NodeNG]] = None, + orelse: Optional[typing.List[NodeNG]] = None, + ) -> None: + """Do some setup after initialisation. + + :param test: The condition that the statement tests. + + :param body: The contents of the block. + + :param orelse: The contents of the ``else`` block. + """ + self.test = test + if body is not None: + self.body = body + if orelse is not None: + self.orelse = orelse + if isinstance(self.parent, If) and self in self.parent.orelse: + self.is_orelse = True + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + return self.test.tolineno + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: The line number to start the range at. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + starting at the given line number. + :rtype: tuple(int, int) + """ + if lineno == self.body[0].fromlineno: + return lineno, lineno + if lineno <= self.body[-1].tolineno: + return lineno, self.body[-1].tolineno + return self._elsed_block_range(lineno, self.orelse, self.body[0].fromlineno - 1) + + def get_children(self): + yield self.test + + yield from self.body + yield from self.orelse + + def has_elif_block(self): + return len(self.orelse) == 1 and isinstance(self.orelse[0], If) + + def _get_yield_nodes_skip_lambdas(self): + """An If node can contain a Yield node in the test""" + yield from self.test._get_yield_nodes_skip_lambdas() + yield from super()._get_yield_nodes_skip_lambdas() + + def is_sys_guard(self) -> bool: + """Return True if IF stmt is a sys.version_info guard. + + >>> import astroid + >>> node = astroid.extract_node(''' + import sys + if sys.version_info > (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + ''') + >>> node.is_sys_guard() + True + """ + warnings.warn( + "The 'is_sys_guard' function is deprecated and will be removed in astroid 3.0.0 " + "It has been moved to pylint and can be imported from 'pylint.checkers.utils' " + "starting with pylint 2.12", + DeprecationWarning, + ) + if isinstance(self.test, Compare): + value = self.test.left + if isinstance(value, Subscript): + value = value.value + if isinstance(value, Attribute) and value.as_string() == "sys.version_info": + return True + + return False + + def is_typing_guard(self) -> bool: + """Return True if IF stmt is a typing guard. 
+ + >>> import astroid + >>> node = astroid.extract_node(''' + from typing import TYPE_CHECKING + if TYPE_CHECKING: + from xyz import a + ''') + >>> node.is_typing_guard() + True + """ + warnings.warn( + "The 'is_typing_guard' function is deprecated and will be removed in astroid 3.0.0 " + "It has been moved to pylint and can be imported from 'pylint.checkers.utils' " + "starting with pylint 2.12", + DeprecationWarning, + ) + return isinstance( + self.test, (Name, Attribute) + ) and self.test.as_string().endswith("TYPE_CHECKING") + + +class IfExp(NodeNG): + """Class representing an :class:`ast.IfExp` node. + >>> import astroid + >>> node = astroid.extract_node('value if condition else other') + >>> node + + """ + + _astroid_fields = ("test", "body", "orelse") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.test: Optional[NodeNG] = None + """The condition that the statement tests.""" + + self.body: Optional[NodeNG] = None + """The contents of the block.""" + + self.orelse: Optional[NodeNG] = None + """The contents of the ``else`` block.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + test: Optional[NodeNG] = None, + body: Optional[NodeNG] = None, + orelse: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param test: The condition that the statement tests. + + :param body: The contents of the block. + + :param orelse: The contents of the ``else`` block. + """ + self.test = test + self.body = body + self.orelse = orelse + + def get_children(self): + yield self.test + yield self.body + yield self.orelse + + def op_left_associative(self): + # `1 if True else 2 if False else 3` is parsed as + # `1 if True else (2 if False else 3)` + return False + + +class Import(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement): + """Class representing an :class:`ast.Import` node. + >>> import astroid + >>> node = astroid.extract_node('import astroid') + >>> node + + """ + + _other_fields = ("names",) + + @decorators.deprecate_default_argument_values(names="list[tuple[str, str | None]]") + def __init__( + self, + names: Optional[typing.List[typing.Tuple[str, Optional[str]]]] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param names: The names being imported. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.names: typing.List[typing.Tuple[str, Optional[str]]] = names or [] + """The names being imported. + + Each entry is a :class:`tuple` of the name being imported, + and the alias that the name is assigned to (if any). + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + +class Index(NodeNG): + """Class representing an :class:`ast.Index` node. + + An :class:`Index` is a simple subscript. + + Deprecated since v2.6.0 - Now part of the :class:`Subscript` node. + Will be removed with the release of v2.7.0 + """ + + +class Keyword(NodeNG): + """Class representing an :class:`ast.keyword` node. + + >>> import astroid + >>> node = astroid.extract_node('function(a_kwarg=True)') + >>> node + + >>> node.keywords + [] + """ + + _astroid_fields = ("value",) + _other_fields = ("arg",) + + def __init__( + self, + arg: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param arg: The argument being assigned to. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.arg: Optional[str] = arg # can be None + """The argument being assigned to.""" + + self.value: Optional[NodeNG] = None + """The value being assigned to the keyword argument.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param value: The value being assigned to the keyword argument. + """ + self.value = value + + def get_children(self): + yield self.value + + +class List(BaseContainer): + """Class representing an :class:`ast.List` node. + + >>> import astroid + >>> node = astroid.extract_node('[1, 2, 3]') + >>> node + + """ + + _other_fields = ("ctx",) + + def __init__( + self, + ctx: Optional[Context] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param ctx: Whether the list is assigned to or loaded from. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.ctx: Optional[Context] = ctx + """Whether the list is assigned to or loaded from.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + assigned_stmts: ClassVar[AssignedStmtsCall["List"]] + """Returns the assigned statement (non inferred) according to the assignment type. 
+ See astroid/protocols.py for actual implementation. + """ + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.list" + + def getitem(self, index, context=None): + """Get an item from this node. + + :param index: The node to use as a subscript index. + :type index: Const or Slice + """ + return _container_getitem(self, self.elts, index, context=context) + + +class Nonlocal(mixins.NoChildrenMixin, Statement): + """Class representing an :class:`ast.Nonlocal` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + def function(): + nonlocal var + ''') + >>> node + + >>> node.body[0] + + """ + + _other_fields = ("names",) + + def __init__( + self, + names: typing.List[str], + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param names: The names being declared as not local. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.names: typing.List[str] = names + """The names being declared as not local.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def _infer_name(self, frame, name): + return name + + +class Pass(mixins.NoChildrenMixin, Statement): + """Class representing an :class:`ast.Pass` node. + + >>> import astroid + >>> node = astroid.extract_node('pass') + >>> node + + """ + + +class Raise(Statement): + """Class representing an :class:`ast.Raise` node. + + >>> import astroid + >>> node = astroid.extract_node('raise RuntimeError("Something bad happened!")') + >>> node + + """ + + _astroid_fields = ("exc", "cause") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.exc: Optional[NodeNG] = None # can be None + """What is being raised.""" + + self.cause: Optional[NodeNG] = None # can be None + """The exception being used to raise this one.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + exc: Optional[NodeNG] = None, + cause: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param exc: What is being raised. + + :param cause: The exception being used to raise this one. + """ + self.exc = exc + self.cause = cause + + def raises_not_implemented(self): + """Check if this node raises a :class:`NotImplementedError`. 
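A short sketch, with invented messages, of raises_not_implemented() distinguishing the two cases:

import astroid

print(astroid.extract_node('raise NotImplementedError("todo")').raises_not_implemented())  # True
print(astroid.extract_node('raise ValueError("boom")').raises_not_implemented())           # False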
+ + :returns: True if this node raises a :class:`NotImplementedError`, + False otherwise. + :rtype: bool + """ + if not self.exc: + return False + return any( + name.name == "NotImplementedError" for name in self.exc._get_name_nodes() + ) + + def get_children(self): + if self.exc is not None: + yield self.exc + + if self.cause is not None: + yield self.cause + + +class Return(Statement): + """Class representing an :class:`ast.Return` node. + + >>> import astroid + >>> node = astroid.extract_node('return True') + >>> node + + """ + + _astroid_fields = ("value",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None # can be None + """The value being returned.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param value: The value being returned. + """ + self.value = value + + def get_children(self): + if self.value is not None: + yield self.value + + def is_tuple_return(self): + return isinstance(self.value, Tuple) + + def _get_return_nodes_skip_functions(self): + yield self + + +class Set(BaseContainer): + """Class representing an :class:`ast.Set` node. + + >>> import astroid + >>> node = astroid.extract_node('{1, 2, 3}') + >>> node + + """ + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.set" + + +class Slice(NodeNG): + """Class representing an :class:`ast.Slice` node. + + >>> import astroid + >>> node = astroid.extract_node('things[1:3]') + >>> node + + >>> node.slice + + """ + + _astroid_fields = ("lower", "upper", "step") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.lower: Optional[NodeNG] = None # can be None + """The lower index in the slice.""" + + self.upper: Optional[NodeNG] = None # can be None + """The upper index in the slice.""" + + self.step: Optional[NodeNG] = None # can be None + """The step to take between indexes.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + lower: Optional[NodeNG] = None, + upper: Optional[NodeNG] = None, + step: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param lower: The lower index in the slice. + + :param upper: The upper index in the slice. + + :param step: The step to take between index. + """ + self.lower = lower + self.upper = upper + self.step = step + + def _wrap_attribute(self, attr): + """Wrap the empty attributes of the Slice in a Const node.""" + if not attr: + const = const_factory(attr) + const.parent = self + return const + return attr + + @cached_property + def _proxied(self): + builtins = AstroidManager().builtins_module + return builtins.getattr("slice")[0] + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.slice" + + def igetattr(self, attrname, context=None): + """Infer the possible values of the given attribute on the slice. + + :param attrname: The name of the attribute to infer. + :type attrname: str + + :returns: The inferred possible values. + :rtype: iterable(NodeNG) + """ + if attrname == "start": + yield self._wrap_attribute(self.lower) + elif attrname == "stop": + yield self._wrap_attribute(self.upper) + elif attrname == "step": + yield self._wrap_attribute(self.step) + else: + yield from self.getattr(attrname, context=context) + + def getattr(self, attrname, context=None): + return self._proxied.getattr(attrname, context) + + def get_children(self): + if self.lower is not None: + yield self.lower + + if self.upper is not None: + yield self.upper + + if self.step is not None: + yield self.step + + +class Starred(mixins.ParentAssignTypeMixin, NodeNG): + """Class representing an :class:`ast.Starred` node. + + >>> import astroid + >>> node = astroid.extract_node('*args') + >>> node + + """ + + _astroid_fields = ("value",) + _other_fields = ("ctx",) + + def __init__( + self, + ctx: Optional[Context] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param ctx: Whether the list is assigned to or loaded from. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None + """What is being unpacked.""" + + self.ctx: Optional[Context] = ctx + """Whether the starred item is assigned to or loaded from.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. 
+ + :param value: What is being unpacked. + """ + self.value = value + + assigned_stmts: ClassVar[AssignedStmtsCall["Starred"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def get_children(self): + yield self.value + + +class Subscript(NodeNG): + """Class representing an :class:`ast.Subscript` node. + + >>> import astroid + >>> node = astroid.extract_node('things[1:3]') + >>> node + + """ + + _astroid_fields = ("value", "slice") + _other_fields = ("ctx",) + + def __init__( + self, + ctx: Optional[Context] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param ctx: Whether the subscripted item is assigned to or loaded from. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None + """What is being indexed.""" + + self.slice: Optional[NodeNG] = None + """The slice being used to lookup.""" + + self.ctx: Optional[Context] = ctx + """Whether the subscripted item is assigned to or loaded from.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + # pylint: disable=redefined-builtin; had to use the same name as builtin ast module. + def postinit( + self, value: Optional[NodeNG] = None, slice: Optional[NodeNG] = None + ) -> None: + """Do some setup after initialisation. + + :param value: What is being indexed. + + :param slice: The slice being used to lookup. + """ + self.value = value + self.slice = slice + + def get_children(self): + yield self.value + yield self.slice + + +class TryExcept(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): + """Class representing an :class:`ast.TryExcept` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + try: + do_something() + except Exception as error: + print("Error!") + ''') + >>> node + + """ + + _astroid_fields = ("body", "handlers", "orelse") + _multi_line_block_fields = ("body", "handlers", "orelse") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. 
+ """ + self.body: typing.List[NodeNG] = [] + """The contents of the block to catch exceptions from.""" + + self.handlers: typing.List[ExceptHandler] = [] + """The exception handlers.""" + + self.orelse: typing.List[NodeNG] = [] + """The contents of the ``else`` block.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + body: Optional[typing.List[NodeNG]] = None, + handlers: Optional[typing.List[ExceptHandler]] = None, + orelse: Optional[typing.List[NodeNG]] = None, + ) -> None: + """Do some setup after initialisation. + + :param body: The contents of the block to catch exceptions from. + + :param handlers: The exception handlers. + + :param orelse: The contents of the ``else`` block. + """ + if body is not None: + self.body = body + if handlers is not None: + self.handlers = handlers + if orelse is not None: + self.orelse = orelse + + def _infer_name(self, frame, name): + return name + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: The line number to start the range at. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + starting at the given line number. + :rtype: tuple(int, int) + """ + last = None + for exhandler in self.handlers: + if exhandler.type and lineno == exhandler.type.fromlineno: + return lineno, lineno + if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno: + return lineno, exhandler.body[-1].tolineno + if last is None: + last = exhandler.body[0].fromlineno - 1 + return self._elsed_block_range(lineno, self.orelse, last) + + def get_children(self): + yield from self.body + + yield from self.handlers or () + yield from self.orelse or () + + +class TryFinally(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): + """Class representing an :class:`ast.TryFinally` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + try: + do_something() + except Exception as error: + print("Error!") + finally: + print("Cleanup!") + ''') + >>> node + + """ + + _astroid_fields = ("body", "finalbody") + _multi_line_block_fields = ("body", "finalbody") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.body: typing.List[Union[NodeNG, TryExcept]] = [] + """The try-except that the finally is attached to.""" + + self.finalbody: typing.List[NodeNG] = [] + """The contents of the ``finally`` block.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + body: Optional[typing.List[Union[NodeNG, TryExcept]]] = None, + finalbody: Optional[typing.List[NodeNG]] = None, + ) -> None: + """Do some setup after initialisation. + + :param body: The try-except that the finally is attached to. 
+ + :param finalbody: The contents of the ``finally`` block. + """ + if body is not None: + self.body = body + if finalbody is not None: + self.finalbody = finalbody + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: The line number to start the range at. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + starting at the given line number. + :rtype: tuple(int, int) + """ + child = self.body[0] + # py2.5 try: except: finally: + if ( + isinstance(child, TryExcept) + and child.fromlineno == self.fromlineno + and child.tolineno >= lineno > self.fromlineno + ): + return child.block_range(lineno) + return self._elsed_block_range(lineno, self.finalbody) + + def get_children(self): + yield from self.body + yield from self.finalbody + + +class Tuple(BaseContainer): + """Class representing an :class:`ast.Tuple` node. + + >>> import astroid + >>> node = astroid.extract_node('(1, 2, 3)') + >>> node + + """ + + _other_fields = ("ctx",) + + def __init__( + self, + ctx: Optional[Context] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param ctx: Whether the tuple is assigned to or loaded from. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.ctx: Optional[Context] = ctx + """Whether the tuple is assigned to or loaded from.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + assigned_stmts: ClassVar[AssignedStmtsCall["Tuple"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.tuple" + + def getitem(self, index, context=None): + """Get an item from this node. + + :param index: The node to use as a subscript index. + :type index: Const or Slice + """ + return _container_getitem(self, self.elts, index, context=context) + + +class UnaryOp(NodeNG): + """Class representing an :class:`ast.UnaryOp` node. + + >>> import astroid + >>> node = astroid.extract_node('-5') + >>> node + + """ + + _astroid_fields = ("operand",) + _other_fields = ("op",) + + @decorators.deprecate_default_argument_values(op="str") + def __init__( + self, + op: Optional[str] = None, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param op: The operator. + + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. 
+ + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.op: Optional[str] = op + """The operator.""" + + self.operand: Optional[NodeNG] = None + """What the unary operator is applied to.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, operand: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param operand: What the unary operator is applied to. + """ + self.operand = operand + + # This is set by inference.py + def _infer_unaryop(self, context=None): + raise NotImplementedError + + def type_errors(self, context=None): + """Get a list of type errors which can occur during inference. + + Each TypeError is represented by a :class:`BadBinaryOperationMessage`, + which holds the original exception. + + :returns: The list of possible type errors. + :rtype: list(BadBinaryOperationMessage) + """ + try: + results = self._infer_unaryop(context=context) + return [ + result + for result in results + if isinstance(result, util.BadUnaryOperationMessage) + ] + except InferenceError: + return [] + + def get_children(self): + yield self.operand + + def op_precedence(self): + if self.op == "not": + return OP_PRECEDENCE[self.op] + + return super().op_precedence() + + +class While(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): + """Class representing an :class:`ast.While` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + while condition(): + print("True") + ''') + >>> node + + """ + + _astroid_fields = ("test", "body", "orelse") + _multi_line_block_fields = ("body", "orelse") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.test: Optional[NodeNG] = None + """The condition that the loop tests.""" + + self.body: typing.List[NodeNG] = [] + """The contents of the loop.""" + + self.orelse: typing.List[NodeNG] = [] + """The contents of the ``else`` block.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + test: Optional[NodeNG] = None, + body: Optional[typing.List[NodeNG]] = None, + orelse: Optional[typing.List[NodeNG]] = None, + ) -> None: + """Do some setup after initialisation. + + :param test: The condition that the loop tests. + + :param body: The contents of the loop. + + :param orelse: The contents of the ``else`` block. + """ + self.test = test + if body is not None: + self.body = body + if orelse is not None: + self.orelse = orelse + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + return self.test.tolineno + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. 
+ + :param lineno: The line number to start the range at. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + starting at the given line number. + :rtype: tuple(int, int) + """ + return self._elsed_block_range(lineno, self.orelse) + + def get_children(self): + yield self.test + + yield from self.body + yield from self.orelse + + def _get_yield_nodes_skip_lambdas(self): + """A While node can contain a Yield node in the test""" + yield from self.test._get_yield_nodes_skip_lambdas() + yield from super()._get_yield_nodes_skip_lambdas() + + +class With( + mixins.MultiLineBlockMixin, + mixins.BlockRangeMixIn, + mixins.AssignTypeMixin, + Statement, +): + """Class representing an :class:`ast.With` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + with open(file_path) as file_: + print(file_.read()) + ''') + >>> node + + """ + + _astroid_fields = ("items", "body") + _other_other_fields = ("type_annotation",) + _multi_line_block_fields = ("body",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.items: typing.List[typing.Tuple[NodeNG, Optional[NodeNG]]] = [] + """The pairs of context managers and the names they are assigned to.""" + + self.body: typing.List[NodeNG] = [] + """The contents of the ``with`` block.""" + + self.type_annotation: Optional[NodeNG] = None # can be None + """If present, this will contain the type annotation passed by a type comment""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + items: Optional[typing.List[typing.Tuple[NodeNG, Optional[NodeNG]]]] = None, + body: Optional[typing.List[NodeNG]] = None, + type_annotation: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param items: The pairs of context managers and the names + they are assigned to. + + :param body: The contents of the ``with`` block. + """ + if items is not None: + self.items = items + if body is not None: + self.body = body + self.type_annotation = type_annotation + + assigned_stmts: ClassVar[AssignedStmtsCall["With"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + return self.items[-1][0].tolineno + + def get_children(self): + """Get the child nodes below this node. + + :returns: The children. + :rtype: iterable(NodeNG) + """ + for expr, var in self.items: + yield expr + if var: + yield var + yield from self.body + + +class AsyncWith(With): + """Asynchronous ``with`` built with the ``async`` keyword.""" + + +class Yield(NodeNG): + """Class representing an :class:`ast.Yield` node. 
+ + >>> import astroid + >>> node = astroid.extract_node('yield True') + >>> node + + """ + + _astroid_fields = ("value",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: Optional[NodeNG] = None # can be None + """The value to yield.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, value: Optional[NodeNG] = None) -> None: + """Do some setup after initialisation. + + :param value: The value to yield. + """ + self.value = value + + def get_children(self): + if self.value is not None: + yield self.value + + def _get_yield_nodes_skip_lambdas(self): + yield self + + +class YieldFrom(Yield): # TODO value is required, not optional + """Class representing an :class:`ast.YieldFrom` node.""" + + +class DictUnpack(mixins.NoChildrenMixin, NodeNG): + """Represents the unpacking of dicts into dicts using :pep:`448`.""" + + +class FormattedValue(NodeNG): + """Class representing an :class:`ast.FormattedValue` node. + + Represents a :pep:`498` format string. + + >>> import astroid + >>> node = astroid.extract_node('f"Format {type_}"') + >>> node + + >>> node.values + [, ] + """ + + _astroid_fields = ("value", "format_spec") + _other_fields = ("conversion",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.value: NodeNG + """The value to be formatted into the string.""" + + self.conversion: Optional[int] = None # can be None + """The type of formatting to be applied to the value. + + .. seealso:: + :class:`ast.FormattedValue` + """ + + self.format_spec: Optional[NodeNG] = None # can be None + """The formatting to be applied to the value. + + .. seealso:: + :class:`ast.FormattedValue` + + :type: JoinedStr or None + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + value: NodeNG, + conversion: Optional[int] = None, + format_spec: Optional[NodeNG] = None, + ) -> None: + """Do some setup after initialisation. + + :param value: The value to be formatted into the string. + + :param conversion: The type of formatting to be applied to the value. + + :param format_spec: The formatting to be applied to the value. 
+ :type format_spec: JoinedStr or None + """ + self.value = value + self.conversion = conversion + self.format_spec = format_spec + + def get_children(self): + yield self.value + + if self.format_spec is not None: + yield self.format_spec + + +class JoinedStr(NodeNG): + """Represents a list of string expressions to be joined. + + >>> import astroid + >>> node = astroid.extract_node('f"Format {type_}"') + >>> node + + """ + + _astroid_fields = ("values",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.values: typing.List[NodeNG] = [] + """The string expressions to be joined. + + :type: list(FormattedValue or Const) + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, values: Optional[typing.List[NodeNG]] = None) -> None: + """Do some setup after initialisation. + + :param value: The string expressions to be joined. + + :type: list(FormattedValue or Const) + """ + if values is not None: + self.values = values + + def get_children(self): + yield from self.values + + +class NamedExpr(mixins.AssignTypeMixin, NodeNG): + """Represents the assignment from the assignment expression + + >>> import astroid + >>> module = astroid.parse('if a := 1: pass') + >>> module.body[0].test + + """ + + _astroid_fields = ("target", "value") + + optional_assign = True + """Whether this node optionally assigns a variable. + + Since NamedExpr are not always called they do not always assign.""" + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.target: NodeNG + """The assignment target + + :type: Name + """ + + self.value: NodeNG + """The value that gets assigned in the expression""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, target: NodeNG, value: NodeNG) -> None: + self.target = target + self.value = value + + assigned_stmts: ClassVar[AssignedStmtsCall["NamedExpr"]] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + def frame( + self, *, future: Literal[None, True] = None + ) -> Union["nodes.FunctionDef", "nodes.Module", "nodes.ClassDef", "nodes.Lambda"]: + """The first parent frame node. 
+ + A frame node is a :class:`Module`, :class:`FunctionDef`, + or :class:`ClassDef`. + + :returns: The first parent frame node. + """ + if not self.parent: + raise ParentMissingError(target=self) + + # For certain parents NamedExpr evaluate to the scope of the parent + if isinstance(self.parent, (Arguments, Keyword, Comprehension)): + if not self.parent.parent: + raise ParentMissingError(target=self.parent) + if not self.parent.parent.parent: + raise ParentMissingError(target=self.parent.parent) + return self.parent.parent.parent.frame(future=True) + + return self.parent.frame(future=True) + + def scope(self) -> "LocalsDictNodeNG": + """The first parent node defining a new scope. + These can be Module, FunctionDef, ClassDef, Lambda, or GeneratorExp nodes. + + :returns: The first parent scope node. + """ + if not self.parent: + raise ParentMissingError(target=self) + + # For certain parents NamedExpr evaluate to the scope of the parent + if isinstance(self.parent, (Arguments, Keyword, Comprehension)): + if not self.parent.parent: + raise ParentMissingError(target=self.parent) + if not self.parent.parent.parent: + raise ParentMissingError(target=self.parent.parent) + return self.parent.parent.parent.scope() + + return self.parent.scope() + + def set_local(self, name: str, stmt: AssignName) -> None: + """Define that the given name is declared in the given statement node. + NamedExpr's in Arguments, Keyword or Comprehension are evaluated in their + parent's parent scope. So we add to their frame's locals. + + .. seealso:: :meth:`scope` + + :param name: The name that is being defined. + + :param stmt: The statement that defines the given name. + """ + self.frame(future=True).set_local(name, stmt) + + +class Unknown(mixins.AssignTypeMixin, NodeNG): + """This node represents a node in a constructed AST where + introspection is not possible. At the moment, it's only used in + the args attribute of FunctionDef nodes where function signature + introspection failed. + """ + + name = "Unknown" + + def qname(self): + return "Unknown" + + def _infer(self, context=None, **kwargs): + """Inference on an Unknown node immediately terminates.""" + yield util.Uninferable + + +class EvaluatedObject(NodeNG): + """Contains an object that has already been inferred + + This class is useful to pre-evaluate a particular node, + with the resulting class acting as the non-evaluated node. + """ + + name = "EvaluatedObject" + _astroid_fields = ("original",) + _other_fields = ("value",) + + def __init__( + self, original: NodeNG, value: Union[NodeNG, Type[util.Uninferable]] + ) -> None: + self.original: NodeNG = original + """The original node that has already been evaluated""" + + self.value: Union[NodeNG, Type[util.Uninferable]] = value + """The inferred value""" + + super().__init__( + lineno=self.original.lineno, + col_offset=self.original.col_offset, + parent=self.original.parent, + ) + + def infer(self, context=None, **kwargs): + yield self.value + + +# Pattern matching ####################################################### + + +class Match(Statement): + """Class representing a :class:`ast.Match` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case 200: + ... + case _: + ... 
+ ''') + >>> node + + """ + + _astroid_fields = ("subject", "cases") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.subject: NodeNG + self.cases: typing.List["MatchCase"] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + *, + subject: NodeNG, + cases: typing.List["MatchCase"], + ) -> None: + self.subject = subject + self.cases = cases + + +class Pattern(NodeNG): + """Base class for all Pattern nodes.""" + + +class MatchCase(mixins.MultiLineBlockMixin, NodeNG): + """Class representing a :class:`ast.match_case` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case 200: + ... + ''') + >>> node.cases[0] + + """ + + _astroid_fields = ("pattern", "guard", "body") + _multi_line_block_fields = ("body",) + + lineno: None + col_offset: None + end_lineno: None + end_col_offset: None + + def __init__(self, *, parent: Optional[NodeNG] = None) -> None: + self.pattern: Pattern + self.guard: Optional[NodeNG] + self.body: typing.List[NodeNG] + super().__init__(parent=parent) + + def postinit( + self, + *, + pattern: Pattern, + guard: Optional[NodeNG], + body: typing.List[NodeNG], + ) -> None: + self.pattern = pattern + self.guard = guard + self.body = body + + +class MatchValue(Pattern): + """Class representing a :class:`ast.MatchValue` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case 200: + ... + ''') + >>> node.cases[0].pattern + + """ + + _astroid_fields = ("value",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.value: NodeNG + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, *, value: NodeNG) -> None: + self.value = value + + +class MatchSingleton(Pattern): + """Class representing a :class:`ast.MatchSingleton` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case True: + ... + case False: + ... + case None: + ... + ''') + >>> node.cases[0].pattern + + >>> node.cases[1].pattern + + >>> node.cases[2].pattern + + """ + + _other_fields = ("value",) + + def __init__( + self, + *, + value: Literal[True, False, None], + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + ) -> None: + self.value = value + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + +class MatchSequence(Pattern): + """Class representing a :class:`ast.MatchSequence` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case [1, 2]: + ... + case (1, 2, *_): + ... 
+ ''') + >>> node.cases[0].pattern + + >>> node.cases[1].pattern + + """ + + _astroid_fields = ("patterns",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.patterns: typing.List[Pattern] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, *, patterns: typing.List[Pattern]) -> None: + self.patterns = patterns + + +class MatchMapping(mixins.AssignTypeMixin, Pattern): + """Class representing a :class:`ast.MatchMapping` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case {1: "Hello", 2: "World", 3: _, **rest}: + ... + ''') + >>> node.cases[0].pattern + + """ + + _astroid_fields = ("keys", "patterns", "rest") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.keys: typing.List[NodeNG] + self.patterns: typing.List[Pattern] + self.rest: Optional[AssignName] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + *, + keys: typing.List[NodeNG], + patterns: typing.List[Pattern], + rest: Optional[AssignName], + ) -> None: + self.keys = keys + self.patterns = patterns + self.rest = rest + + assigned_stmts: ClassVar[ + Callable[ + [ + "MatchMapping", + AssignName, + Optional[InferenceContext], + None, + ], + Generator[NodeNG, None, None], + ] + ] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + +class MatchClass(Pattern): + """Class representing a :class:`ast.MatchClass` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case Point2D(0, 0): + ... + case Point3D(x=0, y=0, z=0): + ... + ''') + >>> node.cases[0].pattern + + >>> node.cases[1].pattern + + """ + + _astroid_fields = ("cls", "patterns", "kwd_patterns") + _other_fields = ("kwd_attrs",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.cls: NodeNG + self.patterns: typing.List[Pattern] + self.kwd_attrs: typing.List[str] + self.kwd_patterns: typing.List[Pattern] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + *, + cls: NodeNG, + patterns: typing.List[Pattern], + kwd_attrs: typing.List[str], + kwd_patterns: typing.List[Pattern], + ) -> None: + self.cls = cls + self.patterns = patterns + self.kwd_attrs = kwd_attrs + self.kwd_patterns = kwd_patterns + + +class MatchStar(mixins.AssignTypeMixin, Pattern): + """Class representing a :class:`ast.MatchStar` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case [1, *_]: + ... 
+ ''') + >>> node.cases[0].pattern.patterns[1] + + """ + + _astroid_fields = ("name",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.name: Optional[AssignName] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, *, name: Optional[AssignName]) -> None: + self.name = name + + assigned_stmts: ClassVar[ + Callable[ + [ + "MatchStar", + AssignName, + Optional[InferenceContext], + None, + ], + Generator[NodeNG, None, None], + ] + ] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + +class MatchAs(mixins.AssignTypeMixin, Pattern): + """Class representing a :class:`ast.MatchAs` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case [1, a]: + ... + case {'key': b}: + ... + case Point2D(0, 0) as c: + ... + case d: + ... + ''') + >>> node.cases[0].pattern.patterns[1] + + >>> node.cases[1].pattern.patterns[0] + + >>> node.cases[2].pattern + + >>> node.cases[3].pattern + + """ + + _astroid_fields = ("pattern", "name") + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.pattern: Optional[Pattern] + self.name: Optional[AssignName] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit( + self, + *, + pattern: Optional[Pattern], + name: Optional[AssignName], + ) -> None: + self.pattern = pattern + self.name = name + + assigned_stmts: ClassVar[ + Callable[ + [ + "MatchAs", + AssignName, + Optional[InferenceContext], + None, + ], + Generator[NodeNG, None, None], + ] + ] + """Returns the assigned statement (non inferred) according to the assignment type. + See astroid/protocols.py for actual implementation. + """ + + +class MatchOr(Pattern): + """Class representing a :class:`ast.MatchOr` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + match x: + case 400 | 401 | 402: + ... 
+ ''') + >>> node.cases[0].pattern + + """ + + _astroid_fields = ("patterns",) + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional[NodeNG] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + self.patterns: typing.List[Pattern] + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, *, patterns: typing.List[Pattern]) -> None: + self.patterns = patterns + + +# constants ############################################################## + +CONST_CLS = { + list: List, + tuple: Tuple, + dict: Dict, + set: Set, + type(None): Const, + type(NotImplemented): Const, + type(...): Const, +} + + +def _update_const_classes(): + """update constant classes, so the keys of CONST_CLS can be reused""" + klasses = (bool, int, float, complex, str, bytes) + for kls in klasses: + CONST_CLS[kls] = Const + + +_update_const_classes() + + +def _two_step_initialization(cls, value): + instance = cls() + instance.postinit(value) + return instance + + +def _dict_initialization(cls, value): + if isinstance(value, dict): + value = tuple(value.items()) + return _two_step_initialization(cls, value) + + +_CONST_CLS_CONSTRUCTORS = { + List: _two_step_initialization, + Tuple: _two_step_initialization, + Dict: _dict_initialization, + Set: _two_step_initialization, + Const: lambda cls, value: cls(value), +} + + +def const_factory(value): + """return an astroid node for a python value""" + # XXX we should probably be stricter here and only consider stuff in + # CONST_CLS or do better treatment: in case where value is not in CONST_CLS, + # we should rather recall the builder on this value than returning an empty + # node (another option being that const_factory shouldn't be called with something + # not in CONST_CLS) + assert not isinstance(value, NodeNG) + + # Hack for ignoring elements of a sequence + # or a mapping, in order to avoid transforming + # each element to an AST. This is fixed in 2.0 + # and this approach is a temporary hack. 
+ if isinstance(value, (list, set, tuple, dict)): + elts = [] + else: + elts = value + + try: + initializer_cls = CONST_CLS[value.__class__] + initializer = _CONST_CLS_CONSTRUCTORS[initializer_cls] + return initializer(initializer_cls, elts) + except (KeyError, AttributeError): + node = EmptyNode() + node.object = value + return node diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/node_ng.py b/myenv/lib/python3.9/site-packages/astroid/nodes/node_ng.py new file mode 100644 index 0000000..db249af --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/node_ng.py @@ -0,0 +1,814 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import pprint +import sys +import typing +import warnings +from functools import singledispatch as _singledispatch +from typing import ( + TYPE_CHECKING, + ClassVar, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +from astroid import decorators, util +from astroid.exceptions import ( + AstroidError, + InferenceError, + ParentMissingError, + StatementMissing, + UseInferenceDefault, +) +from astroid.manager import AstroidManager +from astroid.nodes.as_string import AsStringVisitor +from astroid.nodes.const import OP_PRECEDENCE +from astroid.nodes.utils import Position +from astroid.typing import InferFn + +if TYPE_CHECKING: + from astroid import nodes + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +if sys.version_info >= (3, 8) or TYPE_CHECKING: + # pylint: disable-next=ungrouped-imports + from functools import cached_property +else: + # pylint: disable-next=ungrouped-imports + from astroid.decorators import cachedproperty as cached_property + +# Types for 'NodeNG.nodes_of_class()' +T_Nodes = TypeVar("T_Nodes", bound="NodeNG") +T_Nodes2 = TypeVar("T_Nodes2", bound="NodeNG") +T_Nodes3 = TypeVar("T_Nodes3", bound="NodeNG") +SkipKlassT = Union[None, Type["NodeNG"], Tuple[Type["NodeNG"], ...]] + + +class NodeNG: + """A node of the new Abstract Syntax Tree (AST). + + This is the base class for all Astroid node classes. + """ + + is_statement: ClassVar[bool] = False + """Whether this node indicates a statement.""" + optional_assign: ClassVar[ + bool + ] = False # True for For (and for Comprehension if py <3.0) + """Whether this node optionally assigns a variable. + + This is for loop assignments because loop won't necessarily perform an + assignment if the loop has no iterations. + This is also the case from comprehensions in Python 2. + """ + is_function: ClassVar[bool] = False # True for FunctionDef nodes + """Whether this node indicates a function.""" + is_lambda: ClassVar[bool] = False + + # Attributes below are set by the builder module or by raw factories + _astroid_fields: ClassVar[typing.Tuple[str, ...]] = () + """Node attributes that contain child nodes. + + This is redefined in most concrete classes. 
+ """ + _other_fields: ClassVar[typing.Tuple[str, ...]] = () + """Node attributes that do not contain child nodes.""" + _other_other_fields: ClassVar[typing.Tuple[str, ...]] = () + """Attributes that contain AST-dependent fields.""" + # instance specific inference function infer(node, context) + _explicit_inference: Optional[InferFn] = None + + def __init__( + self, + lineno: Optional[int] = None, + col_offset: Optional[int] = None, + parent: Optional["NodeNG"] = None, + *, + end_lineno: Optional[int] = None, + end_col_offset: Optional[int] = None, + ) -> None: + """ + :param lineno: The line that this node appears on in the source code. + + :param col_offset: The column that this node appears on in the + source code. + + :param parent: The parent node in the syntax tree. + + :param end_lineno: The last line this node appears on in the source code. + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + """ + self.lineno: Optional[int] = lineno + """The line that this node appears on in the source code.""" + + self.col_offset: Optional[int] = col_offset + """The column that this node appears on in the source code.""" + + self.parent: Optional["NodeNG"] = parent + """The parent node in the syntax tree.""" + + self.end_lineno: Optional[int] = end_lineno + """The last line this node appears on in the source code.""" + + self.end_col_offset: Optional[int] = end_col_offset + """The end column this node appears on in the source code. + Note: This is after the last symbol. + """ + + self.position: Optional[Position] = None + """Position of keyword(s) and name. Used as fallback for block nodes + which might not provide good enough positional information. + E.g. ClassDef, FunctionDef. + """ + + def infer(self, context=None, **kwargs): + """Get a generator of the inferred values. + + This is the main entry point to the inference system. + + .. seealso:: :ref:`inference` + + If the instance has some explicit inference function set, it will be + called instead of the default interface. + + :returns: The inferred values. + :rtype: iterable + """ + if context is not None: + context = context.extra_context.get(self, context) + if self._explicit_inference is not None: + # explicit_inference is not bound, give it self explicitly + try: + # pylint: disable=not-callable + results = list(self._explicit_inference(self, context, **kwargs)) + if context is not None: + context.nodes_inferred += len(results) + yield from results + return + except UseInferenceDefault: + pass + + if not context: + # nodes_inferred? + yield from self._infer(context=context, **kwargs) + return + + key = (self, context.lookupname, context.callcontext, context.boundnode) + if key in context.inferred: + yield from context.inferred[key] + return + + generator = self._infer(context=context, **kwargs) + results = [] + + # Limit inference amount to help with performance issues with + # exponentially exploding possible results. + limit = AstroidManager().max_inferable_values + for i, result in enumerate(generator): + if i >= limit or (context.nodes_inferred > context.max_inferred): + uninferable = util.Uninferable + results.append(uninferable) + yield uninferable + break + results.append(result) + yield result + context.nodes_inferred += 1 + + # Cache generated results for subsequent inferences of the + # same node using the same context + context.inferred[key] = tuple(results) + return + + def _repr_name(self) -> str: + """Get a name for nice representation. 
+ + This is either :attr:`name`, :attr:`attrname`, or the empty string. + + :returns: The nice name. + :rtype: str + """ + if all(name not in self._astroid_fields for name in ("name", "attrname")): + return getattr(self, "name", "") or getattr(self, "attrname", "") + return "" + + def __str__(self) -> str: + rname = self._repr_name() + cname = type(self).__name__ + if rname: + string = "%(cname)s.%(rname)s(%(fields)s)" + alignment = len(cname) + len(rname) + 2 + else: + string = "%(cname)s(%(fields)s)" + alignment = len(cname) + 1 + result = [] + for field in self._other_fields + self._astroid_fields: + value = getattr(self, field) + width = 80 - len(field) - alignment + lines = pprint.pformat(value, indent=2, width=width).splitlines(True) + + inner = [lines[0]] + for line in lines[1:]: + inner.append(" " * alignment + line) + result.append(f"{field}={''.join(inner)}") + + return string % { + "cname": cname, + "rname": rname, + "fields": (",\n" + " " * alignment).join(result), + } + + def __repr__(self) -> str: + rname = self._repr_name() + if rname: + string = "<%(cname)s.%(rname)s l.%(lineno)s at 0x%(id)x>" + else: + string = "<%(cname)s l.%(lineno)s at 0x%(id)x>" + return string % { + "cname": type(self).__name__, + "rname": rname, + "lineno": self.fromlineno, + "id": id(self), + } + + def accept(self, visitor): + """Visit this node using the given visitor.""" + func = getattr(visitor, "visit_" + self.__class__.__name__.lower()) + return func(self) + + def get_children(self) -> Iterator["NodeNG"]: + """Get the child nodes below this node.""" + for field in self._astroid_fields: + attr = getattr(self, field) + if attr is None: + continue + if isinstance(attr, (list, tuple)): + yield from attr + else: + yield attr + yield from () + + def last_child(self) -> Optional["NodeNG"]: + """An optimized version of list(get_children())[-1]""" + for field in self._astroid_fields[::-1]: + attr = getattr(self, field) + if not attr: # None or empty list / tuple + continue + if isinstance(attr, (list, tuple)): + return attr[-1] + return attr + return None + + def node_ancestors(self) -> Iterator["NodeNG"]: + """Yield parent, grandparent, etc until there are no more.""" + parent = self.parent + while parent is not None: + yield parent + parent = parent.parent + + def parent_of(self, node): + """Check if this node is the parent of the given node. + + :param node: The node to check if it is the child. + :type node: NodeNG + + :returns: True if this node is the parent of the given node, + False otherwise. + :rtype: bool + """ + return any(self is parent for parent in node.node_ancestors()) + + @overload + def statement( + self, *, future: None = ... + ) -> Union["nodes.Statement", "nodes.Module"]: + ... + + @overload + def statement(self, *, future: Literal[True]) -> "nodes.Statement": + ... + + def statement( + self, *, future: Literal[None, True] = None + ) -> Union["nodes.Statement", "nodes.Module"]: + """The first parent node, including self, marked as statement node. + + TODO: Deprecate the future parameter and only raise StatementMissing and return + nodes.Statement + + :raises AttributeError: If self has no parent attribute + :raises StatementMissing: If self has no parent attribute and future is True + """ + if self.is_statement: + return cast("nodes.Statement", self) + if not self.parent: + if future: + raise StatementMissing(target=self) + warnings.warn( + "In astroid 3.0.0 NodeNG.statement() will return either a nodes.Statement " + "or raise a StatementMissing exception. 
AttributeError will no longer be raised. " + "This behaviour can already be triggered " + "by passing 'future=True' to a statement() call.", + DeprecationWarning, + ) + raise AttributeError(f"{self} object has no attribute 'parent'") + return self.parent.statement(future=future) + + def frame( + self, *, future: Literal[None, True] = None + ) -> Union["nodes.FunctionDef", "nodes.Module", "nodes.ClassDef", "nodes.Lambda"]: + """The first parent frame node. + + A frame node is a :class:`Module`, :class:`FunctionDef`, + :class:`ClassDef` or :class:`Lambda`. + + :returns: The first parent frame node. + """ + if self.parent is None: + if future: + raise ParentMissingError(target=self) + warnings.warn( + "In astroid 3.0.0 NodeNG.frame() will return either a Frame node, " + "or raise ParentMissingError. AttributeError will no longer be raised. " + "This behaviour can already be triggered " + "by passing 'future=True' to a frame() call.", + DeprecationWarning, + ) + raise AttributeError(f"{self} object has no attribute 'parent'") + + return self.parent.frame(future=future) + + def scope(self) -> "nodes.LocalsDictNodeNG": + """The first parent node defining a new scope. + These can be Module, FunctionDef, ClassDef, Lambda, or GeneratorExp nodes. + + :returns: The first parent scope node. + """ + if not self.parent: + raise ParentMissingError(target=self) + return self.parent.scope() + + def root(self): + """Return the root node of the syntax tree. + + :returns: The root node. + :rtype: Module + """ + if self.parent: + return self.parent.root() + return self + + def child_sequence(self, child): + """Search for the sequence that contains this child. + + :param child: The child node to search sequences for. + :type child: NodeNG + + :returns: The sequence containing the given child node. + :rtype: iterable(NodeNG) + + :raises AstroidError: If no sequence could be found that contains + the given child. + """ + for field in self._astroid_fields: + node_or_sequence = getattr(self, field) + if node_or_sequence is child: + return [node_or_sequence] + # /!\ compiler.ast Nodes have an __iter__ walking over child nodes + if ( + isinstance(node_or_sequence, (tuple, list)) + and child in node_or_sequence + ): + return node_or_sequence + + msg = "Could not find %s in %s's children" + raise AstroidError(msg % (repr(child), repr(self))) + + def locate_child(self, child): + """Find the field of this node that contains the given child. + + :param child: The child node to search fields for. + :type child: NodeNG + + :returns: A tuple of the name of the field that contains the child, + and the sequence or node that contains the child node. + :rtype: tuple(str, iterable(NodeNG) or NodeNG) + + :raises AstroidError: If no field could be found that contains + the given child. + """ + for field in self._astroid_fields: + node_or_sequence = getattr(self, field) + # /!\ compiler.ast Nodes have an __iter__ walking over child nodes + if child is node_or_sequence: + return field, child + if ( + isinstance(node_or_sequence, (tuple, list)) + and child in node_or_sequence + ): + return field, node_or_sequence + msg = "Could not find %s in %s's children" + raise AstroidError(msg % (repr(child), repr(self))) + + # FIXME : should we merge child_sequence and locate_child ? locate_child + # is only used in are_exclusive, child_sequence one time in pylint. + + def next_sibling(self): + """The next sibling statement node. + + :returns: The next sibling statement node. 
+ :rtype: NodeNG or None + """ + return self.parent.next_sibling() + + def previous_sibling(self): + """The previous sibling statement. + + :returns: The previous sibling statement node. + :rtype: NodeNG or None + """ + return self.parent.previous_sibling() + + # these are lazy because they're relatively expensive to compute for every + # single node, and they rarely get looked at + + @cached_property + def fromlineno(self) -> Optional[int]: + """The first line that this node appears on in the source code.""" + if self.lineno is None: + return self._fixed_source_line() + return self.lineno + + @cached_property + def tolineno(self) -> Optional[int]: + """The last line that this node appears on in the source code.""" + if self.end_lineno is not None: + return self.end_lineno + if not self._astroid_fields: + # can't have children + last_child = None + else: + last_child = self.last_child() + if last_child is None: + return self.fromlineno + return last_child.tolineno + + def _fixed_source_line(self) -> Optional[int]: + """Attempt to find the line that this node appears on. + + We need this method since not all nodes have :attr:`lineno` set. + """ + line = self.lineno + _node: Optional[NodeNG] = self + try: + while line is None: + _node = next(_node.get_children()) + line = _node.lineno + except StopIteration: + _node = self.parent + while _node and line is None: + line = _node.lineno + _node = _node.parent + return line + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: The line number to start the range at. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + starting at the given line number. + :rtype: tuple(int, int or None) + """ + return lineno, self.tolineno + + def set_local(self, name, stmt): + """Define that the given name is declared in the given statement node. + + This definition is stored on the parent scope node. + + .. seealso:: :meth:`scope` + + :param name: The name that is being defined. + :type name: str + + :param stmt: The statement that defines the given name. + :type stmt: NodeNG + """ + self.parent.set_local(name, stmt) + + @overload + def nodes_of_class( + self, + klass: Type[T_Nodes], + skip_klass: SkipKlassT = None, + ) -> Iterator[T_Nodes]: + ... + + @overload + def nodes_of_class( + self, + klass: Tuple[Type[T_Nodes], Type[T_Nodes2]], + skip_klass: SkipKlassT = None, + ) -> Union[Iterator[T_Nodes], Iterator[T_Nodes2]]: + ... + + @overload + def nodes_of_class( + self, + klass: Tuple[Type[T_Nodes], Type[T_Nodes2], Type[T_Nodes3]], + skip_klass: SkipKlassT = None, + ) -> Union[Iterator[T_Nodes], Iterator[T_Nodes2], Iterator[T_Nodes3]]: + ... + + @overload + def nodes_of_class( + self, + klass: Tuple[Type[T_Nodes], ...], + skip_klass: SkipKlassT = None, + ) -> Iterator[T_Nodes]: + ... + + def nodes_of_class( # type: ignore[misc] # mypy doesn't correctly recognize the overloads + self, + klass: Union[ + Type[T_Nodes], + Tuple[Type[T_Nodes], Type[T_Nodes2]], + Tuple[Type[T_Nodes], Type[T_Nodes2], Type[T_Nodes3]], + Tuple[Type[T_Nodes], ...], + ], + skip_klass: SkipKlassT = None, + ) -> Union[Iterator[T_Nodes], Iterator[T_Nodes2], Iterator[T_Nodes3]]: + """Get the nodes (including this one or below) of the given types. + + :param klass: The types of node to search for. + + :param skip_klass: The types of node to ignore. This is useful to ignore + subclasses of :attr:`klass`. + + :returns: The node of the given types. 
+ """ + if isinstance(self, klass): + yield self + + if skip_klass is None: + for child_node in self.get_children(): + yield from child_node.nodes_of_class(klass, skip_klass) + + return + + for child_node in self.get_children(): + if isinstance(child_node, skip_klass): + continue + yield from child_node.nodes_of_class(klass, skip_klass) + + @decorators.cached + def _get_assign_nodes(self): + return [] + + def _get_name_nodes(self): + for child_node in self.get_children(): + yield from child_node._get_name_nodes() + + def _get_return_nodes_skip_functions(self): + yield from () + + def _get_yield_nodes_skip_lambdas(self): + yield from () + + def _infer_name(self, frame, name): + # overridden for ImportFrom, Import, Global, TryExcept and Arguments + pass + + def _infer(self, context=None): + """we don't know how to resolve a statement by default""" + # this method is overridden by most concrete classes + raise InferenceError( + "No inference function for {node!r}.", node=self, context=context + ) + + def inferred(self): + """Get a list of the inferred values. + + .. seealso:: :ref:`inference` + + :returns: The inferred values. + :rtype: list + """ + return list(self.infer()) + + def instantiate_class(self): + """Instantiate an instance of the defined class. + + .. note:: + + On anything other than a :class:`ClassDef` this will return self. + + :returns: An instance of the defined class. + :rtype: object + """ + return self + + def has_base(self, node): + """Check if this node inherits from the given type. + + :param node: The node defining the base to look for. + Usually this is a :class:`Name` node. + :type node: NodeNG + """ + return False + + def callable(self): + """Whether this node defines something that is callable. + + :returns: True if this defines something that is callable, + False otherwise. + :rtype: bool + """ + return False + + def eq(self, value): + return False + + def as_string(self) -> str: + """Get the source code that this node represents.""" + return AsStringVisitor()(self) + + def repr_tree( + self, + ids=False, + include_linenos=False, + ast_state=False, + indent=" ", + max_depth=0, + max_width=80, + ) -> str: + """Get a string representation of the AST from this node. + + :param ids: If true, includes the ids with the node type names. + :type ids: bool + + :param include_linenos: If true, includes the line numbers and + column offsets. + :type include_linenos: bool + + :param ast_state: If true, includes information derived from + the whole AST like local and global variables. + :type ast_state: bool + + :param indent: A string to use to indent the output string. + :type indent: str + + :param max_depth: If set to a positive integer, won't return + nodes deeper than max_depth in the string. + :type max_depth: int + + :param max_width: Attempt to format the output string to stay + within this number of characters, but can exceed it under some + circumstances. Only positive integer values are valid, the default is 80. + :type max_width: int + + :returns: The string representation of the AST. + :rtype: str + """ + + @_singledispatch + def _repr_tree(node, result, done, cur_indent="", depth=1): + """Outputs a representation of a non-tuple/list, non-node that's + contained within an AST, including strings. 
+ """ + lines = pprint.pformat( + node, width=max(max_width - len(cur_indent), 1) + ).splitlines(True) + result.append(lines[0]) + result.extend([cur_indent + line for line in lines[1:]]) + return len(lines) != 1 + + # pylint: disable=unused-variable,useless-suppression; doesn't understand singledispatch + @_repr_tree.register(tuple) + @_repr_tree.register(list) + def _repr_seq(node, result, done, cur_indent="", depth=1): + """Outputs a representation of a sequence that's contained within an AST.""" + cur_indent += indent + result.append("[") + if not node: + broken = False + elif len(node) == 1: + broken = _repr_tree(node[0], result, done, cur_indent, depth) + elif len(node) == 2: + broken = _repr_tree(node[0], result, done, cur_indent, depth) + if not broken: + result.append(", ") + else: + result.append(",\n") + result.append(cur_indent) + broken = _repr_tree(node[1], result, done, cur_indent, depth) or broken + else: + result.append("\n") + result.append(cur_indent) + for child in node[:-1]: + _repr_tree(child, result, done, cur_indent, depth) + result.append(",\n") + result.append(cur_indent) + _repr_tree(node[-1], result, done, cur_indent, depth) + broken = True + result.append("]") + return broken + + # pylint: disable=unused-variable,useless-suppression; doesn't understand singledispatch + @_repr_tree.register(NodeNG) + def _repr_node(node, result, done, cur_indent="", depth=1): + """Outputs a strings representation of an astroid node.""" + if node in done: + result.append( + indent + f" max_depth: + result.append("...") + return False + depth += 1 + cur_indent += indent + if ids: + result.append(f"{type(node).__name__}<0x{id(node):x}>(\n") + else: + result.append(f"{type(node).__name__}(") + fields = [] + if include_linenos: + fields.extend(("lineno", "col_offset")) + fields.extend(node._other_fields) + fields.extend(node._astroid_fields) + if ast_state: + fields.extend(node._other_other_fields) + if not fields: + broken = False + elif len(fields) == 1: + result.append(f"{fields[0]}=") + broken = _repr_tree( + getattr(node, fields[0]), result, done, cur_indent, depth + ) + else: + result.append("\n") + result.append(cur_indent) + for field in fields[:-1]: + # TODO: Remove this after removal of the 'doc' attribute + if field == "doc": + continue + result.append(f"{field}=") + _repr_tree(getattr(node, field), result, done, cur_indent, depth) + result.append(",\n") + result.append(cur_indent) + result.append(f"{fields[-1]}=") + _repr_tree(getattr(node, fields[-1]), result, done, cur_indent, depth) + broken = True + result.append(")") + return broken + + result: List[str] = [] + _repr_tree(self, result, set()) + return "".join(result) + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + The boolean value of a node can have three + possible values: + + * False: For instance, empty data structures, + False, empty strings, instances which return + explicitly False from the __nonzero__ / __bool__ + method. + * True: Most of constructs are True by default: + classes, functions, modules etc + * Uninferable: The inference engine is uncertain of the + node's value. + + :returns: The boolean value of this node. 
diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/__init__.py b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/__init__.py
new file mode 100644
index 0000000..816bd83
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/__init__.py
@@ -0,0 +1,43 @@
+# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
+# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
+# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
+
+"""This module contains all classes that are considered a "scoped" node and anything related.
+A scope node is a node that opens a new local scope in the language definition:
+Module, ClassDef, FunctionDef (and Lambda, GeneratorExp, DictComp and SetComp to some extent).
+"""
+
+from astroid.nodes.scoped_nodes.mixin import ComprehensionScope, LocalsDictNodeNG
+from astroid.nodes.scoped_nodes.scoped_nodes import (
+    AsyncFunctionDef,
+    ClassDef,
+    DictComp,
+    FunctionDef,
+    GeneratorExp,
+    Lambda,
+    ListComp,
+    Module,
+    SetComp,
+    _is_metaclass,
+    function_to_method,
+    get_wrapping_class,
+)
+from astroid.nodes.scoped_nodes.utils import builtin_lookup
+
+__all__ = (
+    "AsyncFunctionDef",
+    "ClassDef",
+    "ComprehensionScope",
+    "DictComp",
+    "FunctionDef",
+    "GeneratorExp",
+    "Lambda",
+    "ListComp",
+    "LocalsDictNodeNG",
+    "Module",
+    "SetComp",
+    "builtin_lookup",
+    "function_to_method",
+    "get_wrapping_class",
+    "_is_metaclass",
+)
diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/mixin.py b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/mixin.py
new file mode 100644
index 0000000..bb1e76f
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/mixin.py
@@ -0,0 +1,171 @@
+# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
+# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
+# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
+
+"""This module contains mixin classes for scoped nodes."""
+
+from typing import TYPE_CHECKING, Dict, List, TypeVar
+
+from astroid.filter_statements import _filter_stmts
+from astroid.nodes import node_classes, scoped_nodes
+from astroid.nodes.scoped_nodes.utils import builtin_lookup
+
+if TYPE_CHECKING:
+    from astroid import nodes
+
+_T = TypeVar("_T")
+
+
+class LocalsDictNodeNG(node_classes.LookupMixIn, node_classes.NodeNG):
+    """this class provides locals handling common to Module, FunctionDef
+    and ClassDef nodes, including a dict like interface for direct access
+    to locals information
+    """
+
+    # attributes below are set by the builder module or by raw factories
+
+    locals: Dict[str, List["nodes.NodeNG"]] = {}
+    """A map of the name of a local variable to the node defining the local."""
+
+    def qname(self):
+        """Get the 'qualified' name of the node.
+
+        For example: module.name, module.class.name ...
+
+        :returns: The qualified name.
+ :rtype: str + """ + # pylint: disable=no-member; github.com/pycqa/astroid/issues/278 + if self.parent is None: + return self.name + return f"{self.parent.frame(future=True).qname()}.{self.name}" + + def scope(self: _T) -> _T: + """The first parent node defining a new scope. + + :returns: The first parent scope node. + :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr + """ + return self + + def _scope_lookup(self, node, name, offset=0): + """XXX method for interfacing the scope lookup""" + try: + stmts = _filter_stmts(node, self.locals[name], self, offset) + except KeyError: + stmts = () + if stmts: + return self, stmts + + # Handle nested scopes: since class names do not extend to nested + # scopes (e.g., methods), we find the next enclosing non-class scope + pscope = self.parent and self.parent.scope() + while pscope is not None: + if not isinstance(pscope, scoped_nodes.ClassDef): + return pscope.scope_lookup(node, name) + pscope = pscope.parent and pscope.parent.scope() + + # self is at the top level of a module, or is enclosed only by ClassDefs + return builtin_lookup(name) + + def set_local(self, name, stmt): + """Define that the given name is declared in the given statement node. + + .. seealso:: :meth:`scope` + + :param name: The name that is being defined. + :type name: str + + :param stmt: The statement that defines the given name. + :type stmt: NodeNG + """ + # assert not stmt in self.locals.get(name, ()), (self, stmt) + self.locals.setdefault(name, []).append(stmt) + + __setitem__ = set_local + + def _append_node(self, child): + """append a child, linking it in the tree""" + # pylint: disable=no-member; depending by the class + # which uses the current class as a mixin or base class. + # It's rewritten in 2.0, so it makes no sense for now + # to spend development time on it. + self.body.append(child) + child.parent = self + + def add_local_node(self, child_node, name=None): + """Append a child that should alter the locals of this scope node. + + :param child_node: The child node that will alter locals. + :type child_node: NodeNG + + :param name: The name of the local that will be altered by + the given child node. + :type name: str or None + """ + if name != "__class__": + # add __class__ node as a child will cause infinite recursion later! + self._append_node(child_node) + self.set_local(name or child_node.name, child_node) + + def __getitem__(self, item: str) -> "nodes.NodeNG": + """The first node the defines the given local. + + :param item: The name of the locally defined object. + + :raises KeyError: If the name is not defined. + """ + return self.locals[item][0] + + def __iter__(self): + """Iterate over the names of locals defined in this scoped node. + + :returns: The names of the defined locals. + :rtype: iterable(str) + """ + return iter(self.keys()) + + def keys(self): + """The names of locals defined in this scoped node. + + :returns: The names of the defined locals. + :rtype: list(str) + """ + return list(self.locals.keys()) + + def values(self): + """The nodes that define the locals in this scoped node. + + :returns: The nodes that define locals. + :rtype: list(NodeNG) + """ + # pylint: disable=consider-using-dict-items + # It look like this class override items/keys/values, + # probably not worth the headache + return [self[key] for key in self.keys()] + + def items(self): + """Get the names of the locals and the node that defines the local. + + :returns: The names of locals and their associated node. 
+ :rtype: list(tuple(str, NodeNG)) + """ + return list(zip(self.keys(), self.values())) + + def __contains__(self, name): + """Check if a local is defined in this scope. + + :param name: The name of the local to check for. + :type name: str + + :returns: True if this node has a local of the given name, + False otherwise. + :rtype: bool + """ + return name in self.locals + + +class ComprehensionScope(LocalsDictNodeNG): + """Scoping for different types of comprehensions.""" + + scope_lookup = LocalsDictNodeNG._scope_lookup diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py new file mode 100644 index 0000000..b3358d9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py @@ -0,0 +1,3122 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +This module contains the classes for "scoped" node, i.e. which are opening a +new local scope in the language definition : Module, ClassDef, FunctionDef (and +Lambda, GeneratorExp, DictComp and SetComp to some extent). +""" +import io +import itertools +import os +import sys +import typing +import warnings +from typing import TYPE_CHECKING, Dict, List, Optional, Set, TypeVar, Union, overload + +from astroid import bases +from astroid import decorators as decorators_mod +from astroid import mixins, util +from astroid.const import IS_PYPY, PY38, PY38_PLUS, PY39_PLUS +from astroid.context import ( + CallContext, + InferenceContext, + bind_context_to_node, + copy_context, +) +from astroid.exceptions import ( + AstroidBuildingError, + AstroidTypeError, + AttributeInferenceError, + DuplicateBasesError, + InconsistentMroError, + InferenceError, + MroError, + StatementMissing, + TooManyLevelsError, +) +from astroid.interpreter.dunder_lookup import lookup +from astroid.interpreter.objectmodel import ClassModel, FunctionModel, ModuleModel +from astroid.manager import AstroidManager +from astroid.nodes import Arguments, Const, NodeNG, node_classes +from astroid.nodes.scoped_nodes.mixin import ComprehensionScope, LocalsDictNodeNG +from astroid.nodes.scoped_nodes.utils import builtin_lookup +from astroid.nodes.utils import Position + +if sys.version_info >= (3, 6, 2): + from typing import NoReturn +else: + from typing_extensions import NoReturn + + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +if sys.version_info >= (3, 8) or TYPE_CHECKING: + from functools import cached_property +else: + # pylint: disable-next=ungrouped-imports + from astroid.decorators import cachedproperty as cached_property + + +ITER_METHODS = ("__iter__", "__getitem__") +EXCEPTION_BASE_CLASSES = frozenset({"Exception", "BaseException"}) +objects = util.lazy_import("objects") +BUILTIN_DESCRIPTORS = frozenset( + {"classmethod", "staticmethod", "builtins.classmethod", "builtins.staticmethod"} +) + +T = TypeVar("T") + + +def _c3_merge(sequences, cls, context): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. 
+ + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if not candidate: + # Show all the remaining bases, which were considered as + # candidates for the next mro sequence. + raise InconsistentMroError( + message="Cannot create a consistent method resolution order " + "for MROs {mros} of class {cls!r}.", + mros=sequences, + cls=cls, + context=context, + ) + + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + return None + + +def clean_typing_generic_mro(sequences: List[List["ClassDef"]]) -> None: + """A class can inherit from typing.Generic directly, as base, + and as base of bases. The merged MRO must however only contain the last entry. + To prepare for _c3_merge, remove some typing.Generic entries from + sequences if multiple are present. + + This method will check if Generic is in inferred_bases and also + part of bases_mro. If true, remove it from inferred_bases + as well as its entry the bases_mro. + + Format sequences: [[self]] + bases_mro + [inferred_bases] + """ + bases_mro = sequences[1:-1] + inferred_bases = sequences[-1] + # Check if Generic is part of inferred_bases + for i, base in enumerate(inferred_bases): + if base.qname() == "typing.Generic": + position_in_inferred_bases = i + break + else: + return + # Check if also part of bases_mro + # Ignore entry for typing.Generic + for i, seq in enumerate(bases_mro): + if i == position_in_inferred_bases: + continue + if any(base.qname() == "typing.Generic" for base in seq): + break + else: + return + # Found multiple Generics in mro, remove entry from inferred_bases + # and the corresponding one from bases_mro + inferred_bases.pop(position_in_inferred_bases) + bases_mro.pop(position_in_inferred_bases) + + +def clean_duplicates_mro(sequences, cls, context): + for sequence in sequences: + names = [ + (node.lineno, node.qname()) if node.name else None for node in sequence + ] + last_index = dict(map(reversed, enumerate(names))) + if names and names[0] is not None and last_index[names[0]] != 0: + raise DuplicateBasesError( + message="Duplicates found in MROs {mros} for {cls!r}.", + mros=sequences, + cls=cls, + context=context, + ) + yield [ + node + for i, (node, name) in enumerate(zip(sequence, names)) + if name is None or last_index[name] == i + ] + + +def function_to_method(n, klass): + if isinstance(n, FunctionDef): + if n.type == "classmethod": + return bases.BoundMethod(n, klass) + if n.type == "property": + return n + if n.type != "staticmethod": + return bases.UnboundMethod(n) + return n + + +class Module(LocalsDictNodeNG): + """Class representing an :class:`ast.Module` node. 
+ + >>> import astroid + >>> node = astroid.extract_node('import astroid') + >>> node + + >>> node.parent + + """ + + _astroid_fields = ("doc_node", "body") + + fromlineno: Literal[0] = 0 + """The first line that this node appears on in the source code.""" + + lineno: Literal[0] = 0 + """The line that this node appears on in the source code.""" + + # attributes below are set by the builder module or by raw factories + + file_bytes: Union[str, bytes, None] = None + """The string/bytes that this ast was built from.""" + + file_encoding: Optional[str] = None + """The encoding of the source file. + + This is used to get unicode out of a source file. + Python 2 only. + """ + + special_attributes = ModuleModel() + """The names of special attributes that this module has.""" + + # names of module attributes available through the global scope + scope_attrs = {"__name__", "__doc__", "__file__", "__path__", "__package__"} + """The names of module attributes available through the global scope.""" + + _other_fields = ( + "name", + "doc", + "file", + "path", + "package", + "pure_python", + "future_imports", + ) + _other_other_fields = ("locals", "globals") + + col_offset: None + end_lineno: None + end_col_offset: None + parent: None + + @decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead") + def __init__( + self, + name: str, + doc: Optional[str] = None, + file: Optional[str] = None, + path: Optional[List[str]] = None, + package: Optional[bool] = None, + parent: None = None, + pure_python: Optional[bool] = True, + ) -> None: + """ + :param name: The name of the module. + + :param doc: The module docstring. + + :param file: The path to the file that this ast has been extracted from. + + :param path: + + :param package: Whether the node represents a package or a module. + + :param parent: The parent node in the syntax tree. + + :param pure_python: Whether the ast was built from source. + """ + self.name = name + """The name of the module.""" + + self._doc = doc + """The module docstring.""" + + self.file = file + """The path to the file that this ast has been extracted from. + + This will be ``None`` when the representation has been built from a + built-in module. + """ + + self.path = path + + self.package = package + """Whether the node represents a package or a module.""" + + self.pure_python = pure_python + """Whether the ast was built from source.""" + + self.globals: Dict[str, List[node_classes.NodeNG]] + """A map of the name of a global variable to the node defining the global.""" + + self.locals = self.globals = {} + """A map of the name of a local variable to the node defining the local.""" + + self.body: Optional[List[node_classes.NodeNG]] = [] + """The contents of the module.""" + + self.doc_node: Optional[Const] = None + """The doc node associated with this node.""" + + self.future_imports: Set[str] = set() + """The imports from ``__future__``.""" + + super().__init__(lineno=0, parent=parent) + + # pylint: enable=redefined-builtin + + def postinit(self, body=None, *, doc_node: Optional[Const] = None): + """Do some setup after initialisation. + + :param body: The contents of the module. + :type body: list(NodeNG) or None + :param doc_node: The doc node associated with this node. 
+ """ + self.body = body + self.doc_node = doc_node + if doc_node: + self._doc = doc_node.value + + @property + def doc(self) -> Optional[str]: + """The module docstring.""" + warnings.warn( + "The 'Module.doc' attribute is deprecated, " + "use 'Module.doc_node' instead.", + DeprecationWarning, + ) + return self._doc + + @doc.setter + def doc(self, value: Optional[str]) -> None: + warnings.warn( + "Setting the 'Module.doc' attribute is deprecated, " + "use 'Module.doc_node' instead.", + DeprecationWarning, + ) + self._doc = value + + def _get_stream(self): + if self.file_bytes is not None: + return io.BytesIO(self.file_bytes) + if self.file is not None: + # pylint: disable=consider-using-with + stream = open(self.file, "rb") + return stream + return None + + def stream(self): + """Get a stream to the underlying file or bytes. + + :type: file or io.BytesIO or None + """ + return self._get_stream() + + def block_range(self, lineno): + """Get a range from where this node starts to where this node ends. + + :param lineno: Unused. + :type lineno: int + + :returns: The range of line numbers that this node belongs to. + :rtype: tuple(int, int) + """ + return self.fromlineno, self.tolineno + + def scope_lookup(self, node, name, offset=0): + """Lookup where the given variable is assigned. + + :param node: The node to look for assignments up to. + Any assignments after the given node are ignored. + :type node: NodeNG + + :param name: The name of the variable to find assignments for. + :type name: str + + :param offset: The line offset to filter statements up to. + :type offset: int + + :returns: This scope node and the list of assignments associated to the + given name according to the scope where it has been found (locals, + globals or builtin). + :rtype: tuple(str, list(NodeNG)) + """ + if name in self.scope_attrs and name not in self.locals: + try: + return self, self.getattr(name) + except AttributeInferenceError: + return self, () + return self._scope_lookup(node, name, offset) + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + return "builtins.module" + + def display_type(self): + """A human readable type of this node. + + :returns: The type of this node. + :rtype: str + """ + return "Module" + + def getattr(self, name, context=None, ignore_locals=False): + if not name: + raise AttributeInferenceError(target=self, attribute=name, context=context) + + result = [] + name_in_locals = name in self.locals + + if name in self.special_attributes and not ignore_locals and not name_in_locals: + result = [self.special_attributes.lookup(name)] + elif not ignore_locals and name_in_locals: + result = self.locals[name] + elif self.package: + try: + result = [self.import_module(name, relative_only=True)] + except (AstroidBuildingError, SyntaxError) as exc: + raise AttributeInferenceError( + target=self, attribute=name, context=context + ) from exc + result = [n for n in result if not isinstance(n, node_classes.DelName)] + if result: + return result + raise AttributeInferenceError(target=self, attribute=name, context=context) + + def igetattr(self, name, context=None): + """Infer the possible values of the given variable. + + :param name: The name of the variable to infer. + :type name: str + + :returns: The inferred possible values. 
+ :rtype: iterable(NodeNG) or None + """ + # set lookup name since this is necessary to infer on import nodes for + # instance + context = copy_context(context) + context.lookupname = name + try: + return bases._infer_stmts(self.getattr(name, context), context, frame=self) + except AttributeInferenceError as error: + raise InferenceError( + str(error), target=self, attribute=name, context=context + ) from error + + def fully_defined(self): + """Check if this module has been build from a .py file. + + If so, the module contains a complete representation, + including the code. + + :returns: True if the module has been built from a .py file. + :rtype: bool + """ + return self.file is not None and self.file.endswith(".py") + + @overload + def statement(self, *, future: None = ...) -> "Module": + ... + + @overload + def statement(self, *, future: Literal[True]) -> NoReturn: + ... + + def statement( + self, *, future: Literal[None, True] = None + ) -> Union["NoReturn", "Module"]: + """The first parent node, including self, marked as statement node. + + When called on a :class:`Module` with the future parameter this raises an error. + + TODO: Deprecate the future parameter and only raise StatementMissing + + :raises StatementMissing: If no self has no parent attribute and future is True + """ + if future: + raise StatementMissing(target=self) + warnings.warn( + "In astroid 3.0.0 NodeNG.statement() will return either a nodes.Statement " + "or raise a StatementMissing exception. nodes.Module will no longer be " + "considered a statement. This behaviour can already be triggered " + "by passing 'future=True' to a statement() call.", + DeprecationWarning, + ) + return self + + def previous_sibling(self): + """The previous sibling statement. + + :returns: The previous sibling statement node. + :rtype: NodeNG or None + """ + + def next_sibling(self): + """The next sibling statement node. + + :returns: The next sibling statement node. + :rtype: NodeNG or None + """ + + _absolute_import_activated = True + + def absolute_import_activated(self): + """Whether :pep:`328` absolute import behaviour has been enabled. + + :returns: True if :pep:`328` has been enabled, False otherwise. + :rtype: bool + """ + return self._absolute_import_activated + + def import_module(self, modname, relative_only=False, level=None): + """Get the ast for a given module as if imported from this module. + + :param modname: The name of the module to "import". + :type modname: str + + :param relative_only: Whether to only consider relative imports. + :type relative_only: bool + + :param level: The level of relative import. + :type level: int or None + + :returns: The imported module ast. + :rtype: NodeNG + """ + if relative_only and level is None: + level = 0 + absmodname = self.relative_to_absolute_name(modname, level) + + try: + return AstroidManager().ast_from_module_name(absmodname) + except AstroidBuildingError: + # we only want to import a sub module or package of this module, + # skip here + if relative_only: + raise + return AstroidManager().ast_from_module_name(modname) + + def relative_to_absolute_name(self, modname: str, level: int) -> str: + """Get the absolute module name for a relative import. + + The relative import can be implicit or explicit. + + :param modname: The module name to convert. + + :param level: The level of relative import. + + :returns: The absolute module name. + + :raises TooManyLevelsError: When the relative import refers to a + module too far above this one. 
+ """ + # XXX this returns non sens when called on an absolute import + # like 'pylint.checkers.astroid.utils' + # XXX doesn't return absolute name if self.name isn't absolute name + if self.absolute_import_activated() and level is None: + return modname + if level: + if self.package: + level = level - 1 + package_name = self.name.rsplit(".", level)[0] + elif ( + self.path + and not os.path.exists(os.path.dirname(self.path[0]) + "/__init__.py") + and os.path.exists( + os.path.dirname(self.path[0]) + "/" + modname.split(".")[0] + ) + ): + level = level - 1 + package_name = "" + else: + package_name = self.name.rsplit(".", level)[0] + if level and self.name.count(".") < level: + raise TooManyLevelsError(level=level, name=self.name) + + elif self.package: + package_name = self.name + else: + package_name = self.name.rsplit(".", 1)[0] + + if package_name: + if not modname: + return package_name + return f"{package_name}.{modname}" + return modname + + def wildcard_import_names(self): + """The list of imported names when this module is 'wildcard imported'. + + It doesn't include the '__builtins__' name which is added by the + current CPython implementation of wildcard imports. + + :returns: The list of imported names. + :rtype: list(str) + """ + # We separate the different steps of lookup in try/excepts + # to avoid catching too many Exceptions + default = [name for name in self.keys() if not name.startswith("_")] + try: + all_values = self["__all__"] + except KeyError: + return default + + try: + explicit = next(all_values.assigned_stmts()) + except (InferenceError, StopIteration): + return default + except AttributeError: + # not an assignment node + # XXX infer? + return default + + # Try our best to detect the exported name. + inferred = [] + try: + explicit = next(explicit.infer()) + except (InferenceError, StopIteration): + return default + if not isinstance(explicit, (node_classes.Tuple, node_classes.List)): + return default + + def str_const(node): + return isinstance(node, node_classes.Const) and isinstance(node.value, str) + + for node in explicit.elts: + if str_const(node): + inferred.append(node.value) + else: + try: + inferred_node = next(node.infer()) + except (InferenceError, StopIteration): + continue + if str_const(inferred_node): + inferred.append(inferred_node.value) + return inferred + + def public_names(self): + """The list of the names that are publicly available in this module. + + :returns: The list of public names. + :rtype: list(str) + """ + return [name for name in self.keys() if not name.startswith("_")] + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`Module` this is always ``True``. + :rtype: bool + """ + return True + + def get_children(self): + yield from self.body + + def frame(self: T, *, future: Literal[None, True] = None) -> T: + """The node's frame node. + + A frame node is a :class:`Module`, :class:`FunctionDef`, + :class:`ClassDef` or :class:`Lambda`. + + :returns: The node itself. + """ + return self + + +class GeneratorExp(ComprehensionScope): + """Class representing an :class:`ast.GeneratorExp` node. + + >>> import astroid + >>> node = astroid.extract_node('(thing for thing in things if thing)') + >>> node + + """ + + _astroid_fields = ("elt", "generators") + _other_other_fields = ("locals",) + elt = None + """The element that forms the output of the expression. 
+ + :type: NodeNG or None + """ + generators = None + """The generators that are looped through. + + :type: list(Comprehension) or None + """ + + def __init__( + self, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. + :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. + :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.locals = {} + """A map of the name of a local variable to the node defining the local. + + :type: dict(str, NodeNG) + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, elt=None, generators=None): + """Do some setup after initialisation. + + :param elt: The element that forms the output of the expression. + :type elt: NodeNG or None + + :param generators: The generators that are looped through. + :type generators: list(Comprehension) or None + """ + self.elt = elt + if generators is None: + self.generators = [] + else: + self.generators = generators + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`GeneratorExp` this is always ``True``. + :rtype: bool + """ + return True + + def get_children(self): + yield self.elt + + yield from self.generators + + +class DictComp(ComprehensionScope): + """Class representing an :class:`ast.DictComp` node. + + >>> import astroid + >>> node = astroid.extract_node('{k:v for k, v in things if k > v}') + >>> node + + """ + + _astroid_fields = ("key", "value", "generators") + _other_other_fields = ("locals",) + key = None + """What produces the keys. + + :type: NodeNG or None + """ + value = None + """What produces the values. + + :type: NodeNG or None + """ + generators = None + """The generators that are looped through. + + :type: list(Comprehension) or None + """ + + def __init__( + self, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. + :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. + :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.locals = {} + """A map of the name of a local variable to the node defining the local. + + :type: dict(str, NodeNG) + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, key=None, value=None, generators=None): + """Do some setup after initialisation. + + :param key: What produces the keys. 
+ :type key: NodeNG or None + + :param value: What produces the values. + :type value: NodeNG or None + + :param generators: The generators that are looped through. + :type generators: list(Comprehension) or None + """ + self.key = key + self.value = value + if generators is None: + self.generators = [] + else: + self.generators = generators + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`DictComp` this is always :class:`Uninferable`. + :rtype: Uninferable + """ + return util.Uninferable + + def get_children(self): + yield self.key + yield self.value + + yield from self.generators + + +class SetComp(ComprehensionScope): + """Class representing an :class:`ast.SetComp` node. + + >>> import astroid + >>> node = astroid.extract_node('{thing for thing in things if thing}') + >>> node + + """ + + _astroid_fields = ("elt", "generators") + _other_other_fields = ("locals",) + elt = None + """The element that forms the output of the expression. + + :type: NodeNG or None + """ + generators = None + """The generators that are looped through. + + :type: list(Comprehension) or None + """ + + def __init__( + self, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. + :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. + :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.locals = {} + """A map of the name of a local variable to the node defining the local. + + :type: dict(str, NodeNG) + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, elt=None, generators=None): + """Do some setup after initialisation. + + :param elt: The element that forms the output of the expression. + :type elt: NodeNG or None + + :param generators: The generators that are looped through. + :type generators: list(Comprehension) or None + """ + self.elt = elt + if generators is None: + self.generators = [] + else: + self.generators = generators + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`SetComp` this is always :class:`Uninferable`. + :rtype: Uninferable + """ + return util.Uninferable + + def get_children(self): + yield self.elt + + yield from self.generators + + +class ListComp(ComprehensionScope): + """Class representing an :class:`ast.ListComp` node. + + >>> import astroid + >>> node = astroid.extract_node('[thing for thing in things if thing]') + >>> node + + """ + + _astroid_fields = ("elt", "generators") + _other_other_fields = ("locals",) + + elt = None + """The element that forms the output of the expression. + + :type: NodeNG or None + """ + + generators = None + """The generators that are looped through. 
+ + :type: list(Comprehension) or None + """ + + def __init__( + self, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + self.locals = {} + """A map of the name of a local variable to the node defining it. + + :type: dict(str, NodeNG) + """ + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, elt=None, generators=None): + """Do some setup after initialisation. + + :param elt: The element that forms the output of the expression. + :type elt: NodeNG or None + + :param generators: The generators that are looped through. + :type generators: list(Comprehension) or None + """ + self.elt = elt + self.generators = generators + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`ListComp` this is always :class:`Uninferable`. + :rtype: Uninferable + """ + return util.Uninferable + + def get_children(self): + yield self.elt + + yield from self.generators + + +def _infer_decorator_callchain(node): + """Detect decorator call chaining and see if the end result is a + static or a classmethod. + """ + if not isinstance(node, FunctionDef): + return None + if not node.parent: + return None + try: + result = next(node.infer_call_result(node.parent), None) + except InferenceError: + return None + if isinstance(result, bases.Instance): + result = result._proxied + if isinstance(result, ClassDef): + if result.is_subtype_of("builtins.classmethod"): + return "classmethod" + if result.is_subtype_of("builtins.staticmethod"): + return "staticmethod" + if isinstance(result, FunctionDef): + if not result.decorators: + return None + # Determine if this function is decorated with one of the builtin descriptors we want. + for decorator in result.decorators.nodes: + if isinstance(decorator, node_classes.Name): + if decorator.name in BUILTIN_DESCRIPTORS: + return decorator.name + if ( + isinstance(decorator, node_classes.Attribute) + and isinstance(decorator.expr, node_classes.Name) + and decorator.expr.name == "builtins" + and decorator.attrname in BUILTIN_DESCRIPTORS + ): + return decorator.attrname + return None + + +class Lambda(mixins.FilterStmtsMixin, LocalsDictNodeNG): + """Class representing an :class:`ast.Lambda` node. + + >>> import astroid + >>> node = astroid.extract_node('lambda arg: arg + 1') + >>> node + l.1 at 0x7f23b2e41518> + """ + + _astroid_fields = ("args", "body") + _other_other_fields = ("locals",) + name = "" + is_lambda = True + special_attributes = FunctionModel() + """The names of special attributes that this function has.""" + + def implicit_parameters(self): + return 0 + + # function's type, 'function' | 'method' | 'staticmethod' | 'classmethod' + @property + def type(self): + """Whether this is a method or function. + + :returns: 'method' if this is a method, 'function' otherwise. + :rtype: str + """ + if self.args.arguments and self.args.arguments[0].name == "self": + if isinstance(self.parent.scope(), ClassDef): + return "method" + return "function" + + def __init__( + self, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. 
+ :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. + :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.locals = {} + """A map of the name of a local variable to the node defining it. + + :type: dict(str, NodeNG) + """ + + self.args: Arguments + """The arguments that the function takes.""" + + self.body = [] + """The contents of the function body. + + :type: list(NodeNG) + """ + + self.instance_attrs: Dict[str, List[NodeNG]] = {} + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + + def postinit(self, args: Arguments, body): + """Do some setup after initialisation. + + :param args: The arguments that the function takes. + + :param body: The contents of the function body. + :type body: list(NodeNG) + """ + self.args = args + self.body = body + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + if "method" in self.type: + return "builtins.instancemethod" + return "builtins.function" + + def display_type(self): + """A human readable type of this node. + + :returns: The type of this node. + :rtype: str + """ + if "method" in self.type: + return "Method" + return "Function" + + def callable(self): + """Whether this node defines something that is callable. + + :returns: True if this defines something that is callable, + False otherwise. + For a :class:`Lambda` this is always ``True``. + :rtype: bool + """ + return True + + def argnames(self) -> List[str]: + """Get the names of each of the arguments, including that + of the collections of variable-length arguments ("args", "kwargs", + etc.), as well as positional-only and keyword-only arguments. + + :returns: The names of the arguments. + :rtype: list(str) + """ + if self.args.arguments: # maybe None with builtin functions + names = _rec_get_names(self.args.arguments) + else: + names = [] + if self.args.vararg: + names.append(self.args.vararg) + names += [elt.name for elt in self.args.kwonlyargs] + if self.args.kwarg: + names.append(self.args.kwarg) + return names + + def infer_call_result(self, caller, context=None): + """Infer what the function returns when called. + + :param caller: Unused + :type caller: object + """ + # pylint: disable=no-member; github.com/pycqa/astroid/issues/291 + # args is in fact redefined later on by postinit. Can't be changed + # to None due to a strong interaction between Lambda and FunctionDef. + return self.body.infer(context) + + def scope_lookup(self, node, name, offset=0): + """Lookup where the given names is assigned. + + :param node: The node to look for assignments up to. + Any assignments after the given node are ignored. + :type node: NodeNG + + :param name: The name to find assignments for. + :type name: str + + :param offset: The line offset to filter statements up to. + :type offset: int + + :returns: This scope node and the list of assignments associated to the + given name according to the scope where it has been found (locals, + globals or builtin). 
+ :rtype: tuple(str, list(NodeNG)) + """ + if node in self.args.defaults or node in self.args.kw_defaults: + frame = self.parent.frame(future=True) + # line offset to avoid that def func(f=func) resolve the default + # value to the defined function + offset = -1 + else: + # check this is not used in function decorators + frame = self + return frame._scope_lookup(node, name, offset) + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`Lambda` this is always ``True``. + :rtype: bool + """ + return True + + def get_children(self): + yield self.args + yield self.body + + def frame(self: T, *, future: Literal[None, True] = None) -> T: + """The node's frame node. + + A frame node is a :class:`Module`, :class:`FunctionDef`, + :class:`ClassDef` or :class:`Lambda`. + + :returns: The node itself. + """ + return self + + def getattr( + self, name: str, context: Optional[InferenceContext] = None + ) -> List[NodeNG]: + if not name: + raise AttributeInferenceError(target=self, attribute=name, context=context) + + found_attrs = [] + if name in self.instance_attrs: + found_attrs = self.instance_attrs[name] + if name in self.special_attributes: + found_attrs.append(self.special_attributes.lookup(name)) + if found_attrs: + return found_attrs + raise AttributeInferenceError(target=self, attribute=name) + + +class FunctionDef(mixins.MultiLineBlockMixin, node_classes.Statement, Lambda): + """Class representing an :class:`ast.FunctionDef`. + + >>> import astroid + >>> node = astroid.extract_node(''' + ... def my_func(arg): + ... return arg + 1 + ... ''') + >>> node + + """ + + _astroid_fields = ("decorators", "args", "returns", "doc_node", "body") + _multi_line_block_fields = ("body",) + returns = None + decorators: Optional[node_classes.Decorators] = None + """The decorators that are applied to this method or function.""" + + is_function = True + """Whether this node indicates a function. + + For a :class:`FunctionDef` this is always ``True``. + + :type: bool + """ + type_annotation = None + """If present, this will contain the type annotation passed by a type comment + + :type: NodeNG or None + """ + type_comment_args = None + """ + If present, this will contain the type annotation for arguments + passed by a type comment + """ + type_comment_returns = None + """If present, this will contain the return type annotation, passed by a type comment""" + # attributes below are set by the builder module or by raw factories + _other_fields = ("name", "doc", "position") + _other_other_fields = ( + "locals", + "_type", + "type_comment_returns", + "type_comment_args", + ) + _type = None + + @decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead") + def __init__( + self, + name=None, + doc: Optional[str] = None, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param name: The name of the function. + :type name: str or None + + :param doc: The function docstring. + + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. + :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. 
+ :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.name = name + """The name of the function. + + :type name: str or None + """ + + self._doc = doc + """The function docstring.""" + + self.doc_node: Optional[Const] = None + """The doc node associated with this node.""" + + self.instance_attrs = {} + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + if parent: + frame = parent.frame(future=True) + frame.set_local(name, self) + + # pylint: disable=arguments-differ; different than Lambdas + def postinit( + self, + args: Arguments, + body, + decorators: Optional[node_classes.Decorators] = None, + returns=None, + type_comment_returns=None, + type_comment_args=None, + *, + position: Optional[Position] = None, + doc_node: Optional[Const] = None, + ): + """Do some setup after initialisation. + + :param args: The arguments that the function takes. + + :param body: The contents of the function body. + :type body: list(NodeNG) + + :param decorators: The decorators that are applied to this + method or function. + :type decorators: Decorators or None + :params type_comment_returns: + The return type annotation passed via a type comment. + :params type_comment_args: + The args type annotation passed via a type comment. + :params position: + Position of function keyword(s) and name. + :param doc_node: + The doc node associated with this node. + """ + self.args = args + self.body = body + self.decorators = decorators + self.returns = returns + self.type_comment_returns = type_comment_returns + self.type_comment_args = type_comment_args + self.position = position + self.doc_node = doc_node + if doc_node: + self._doc = doc_node.value + + @property + def doc(self) -> Optional[str]: + """The function docstring.""" + warnings.warn( + "The 'FunctionDef.doc' attribute is deprecated, " + "use 'FunctionDef.doc_node' instead.", + DeprecationWarning, + ) + return self._doc + + @doc.setter + def doc(self, value: Optional[str]) -> None: + warnings.warn( + "Setting the 'FunctionDef.doc' attribute is deprecated, " + "use 'FunctionDef.doc_node' instead.", + DeprecationWarning, + ) + self._doc = value + + @cached_property + def extra_decorators(self) -> List[node_classes.Call]: + """The extra decorators that this function can have. + + Additional decorators are considered when they are used as + assignments, as in ``method = staticmethod(method)``. + The property will return all the callables that are used for + decoration. + """ + frame = self.parent.frame(future=True) + if not isinstance(frame, ClassDef): + return [] + + decorators: List[node_classes.Call] = [] + for assign in frame._get_assign_nodes(): + if isinstance(assign.value, node_classes.Call) and isinstance( + assign.value.func, node_classes.Name + ): + for assign_node in assign.targets: + if not isinstance(assign_node, node_classes.AssignName): + # Support only `name = callable(name)` + continue + + if assign_node.name != self.name: + # Interested only in the assignment nodes that + # decorates the current method. + continue + try: + meth = frame[self.name] + except KeyError: + continue + else: + # Must be a function and in the same frame as the + # original method. 
+ if ( + isinstance(meth, FunctionDef) + and assign_node.frame(future=True) == frame + ): + decorators.append(assign.value) + return decorators + + @cached_property + def type( + self, + ): # pylint: disable=invalid-overridden-method,too-many-return-statements + """The function type for this node. + + Possible values are: method, function, staticmethod, classmethod. + + :type: str + """ + for decorator in self.extra_decorators: + if decorator.func.name in BUILTIN_DESCRIPTORS: + return decorator.func.name + + frame = self.parent.frame(future=True) + type_name = "function" + if isinstance(frame, ClassDef): + if self.name == "__new__": + return "classmethod" + if self.name == "__init_subclass__": + return "classmethod" + if self.name == "__class_getitem__": + return "classmethod" + + type_name = "method" + + if not self.decorators: + return type_name + + for node in self.decorators.nodes: + if isinstance(node, node_classes.Name): + if node.name in BUILTIN_DESCRIPTORS: + return node.name + if ( + isinstance(node, node_classes.Attribute) + and isinstance(node.expr, node_classes.Name) + and node.expr.name == "builtins" + and node.attrname in BUILTIN_DESCRIPTORS + ): + return node.attrname + + if isinstance(node, node_classes.Call): + # Handle the following case: + # @some_decorator(arg1, arg2) + # def func(...) + # + try: + current = next(node.func.infer()) + except (InferenceError, StopIteration): + continue + _type = _infer_decorator_callchain(current) + if _type is not None: + return _type + + try: + for inferred in node.infer(): + # Check to see if this returns a static or a class method. + _type = _infer_decorator_callchain(inferred) + if _type is not None: + return _type + + if not isinstance(inferred, ClassDef): + continue + for ancestor in inferred.ancestors(): + if not isinstance(ancestor, ClassDef): + continue + if ancestor.is_subtype_of("builtins.classmethod"): + return "classmethod" + if ancestor.is_subtype_of("builtins.staticmethod"): + return "staticmethod" + except InferenceError: + pass + return type_name + + @cached_property + def fromlineno(self) -> Optional[int]: + """The first line that this node appears on in the source code.""" + # lineno is the line number of the first decorator, we want the def + # statement lineno. Similar to 'ClassDef.fromlineno' + lineno = self.lineno + if self.decorators is not None: + lineno += sum( + node.tolineno - node.lineno + 1 for node in self.decorators.nodes + ) + + return lineno + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + return self.args.tolineno + + def implicit_parameters(self) -> Literal[0, 1]: + return 1 if self.is_bound() else 0 + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: Unused. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + :rtype: tuple(int, int) + """ + return self.fromlineno, self.tolineno + + def igetattr(self, name, context=None): + """Inferred getattr, which returns an iterator of inferred statements.""" + try: + return bases._infer_stmts(self.getattr(name, context), context, frame=self) + except AttributeInferenceError as error: + raise InferenceError( + str(error), target=self, attribute=name, context=context + ) from error + + def is_method(self): + """Check if this function node represents a method. + + :returns: True if this is a method, False otherwise. 
+ :rtype: bool + """ + # check we are defined in a ClassDef, because this is usually expected + # (e.g. pylint...) when is_method() return True + return self.type != "function" and isinstance( + self.parent.frame(future=True), ClassDef + ) + + @decorators_mod.cached + def decoratornames(self, context=None): + """Get the qualified names of each of the decorators on this function. + + :param context: + An inference context that can be passed to inference functions + :returns: The names of the decorators. + :rtype: set(str) + """ + result = set() + decoratornodes = [] + if self.decorators is not None: + decoratornodes += self.decorators.nodes + decoratornodes += self.extra_decorators + for decnode in decoratornodes: + try: + for infnode in decnode.infer(context=context): + result.add(infnode.qname()) + except InferenceError: + continue + return result + + def is_bound(self): + """Check if the function is bound to an instance or class. + + :returns: True if the function is bound to an instance or class, + False otherwise. + :rtype: bool + """ + return self.type in {"method", "classmethod"} + + def is_abstract(self, pass_is_abstract=True, any_raise_is_abstract=False): + """Check if the method is abstract. + + A method is considered abstract if any of the following is true: + * The only statement is 'raise NotImplementedError' + * The only statement is 'raise ' and any_raise_is_abstract is True + * The only statement is 'pass' and pass_is_abstract is True + * The method is annotated with abc.astractproperty/abc.abstractmethod + + :returns: True if the method is abstract, False otherwise. + :rtype: bool + """ + if self.decorators: + for node in self.decorators.nodes: + try: + inferred = next(node.infer()) + except (InferenceError, StopIteration): + continue + if inferred and inferred.qname() in { + "abc.abstractproperty", + "abc.abstractmethod", + }: + return True + + for child_node in self.body: + if isinstance(child_node, node_classes.Raise): + if any_raise_is_abstract: + return True + if child_node.raises_not_implemented(): + return True + return pass_is_abstract and isinstance(child_node, node_classes.Pass) + # empty function is the same as function with a single "pass" statement + if pass_is_abstract: + return True + + def is_generator(self): + """Check if this is a generator function. + + :returns: True is this is a generator function, False otherwise. + :rtype: bool + """ + return bool(next(self._get_yield_nodes_skip_lambdas(), False)) + + def infer_yield_result(self, context=None): + """Infer what the function yields when called + + :returns: What the function yields + :rtype: iterable(NodeNG or Uninferable) or None + """ + # pylint: disable=not-an-iterable + # https://github.com/PyCQA/astroid/issues/1015 + for yield_ in self.nodes_of_class(node_classes.Yield): + if yield_.value is None: + const = node_classes.Const(None) + const.parent = yield_ + const.lineno = yield_.lineno + yield const + elif yield_.scope() == self: + yield from yield_.value.infer(context=context) + + def infer_call_result(self, caller=None, context=None): + """Infer what the function returns when called. + + :returns: What the function returns. 
+ :rtype: iterable(NodeNG or Uninferable) or None + """ + if self.is_generator(): + if isinstance(self, AsyncFunctionDef): + generator_cls = bases.AsyncGenerator + else: + generator_cls = bases.Generator + result = generator_cls(self, generator_initial_context=context) + yield result + return + # This is really a gigantic hack to work around metaclass generators + # that return transient class-generating functions. Pylint's AST structure + # cannot handle a base class object that is only used for calling __new__, + # but does not contribute to the inheritance structure itself. We inject + # a fake class into the hierarchy here for several well-known metaclass + # generators, and filter it out later. + if ( + self.name == "with_metaclass" + and len(self.args.args) == 1 + and self.args.vararg is not None + ): + metaclass = next(caller.args[0].infer(context), None) + if isinstance(metaclass, ClassDef): + try: + class_bases = [next(arg.infer(context)) for arg in caller.args[1:]] + except StopIteration as e: + raise InferenceError(node=caller.args[1:], context=context) from e + new_class = ClassDef(name="temporary_class") + new_class.hide = True + new_class.parent = self + new_class.postinit( + bases=[base for base in class_bases if base != util.Uninferable], + body=[], + decorators=[], + metaclass=metaclass, + ) + yield new_class + return + returns = self._get_return_nodes_skip_functions() + + first_return = next(returns, None) + if not first_return: + if self.body: + if self.is_abstract(pass_is_abstract=True, any_raise_is_abstract=True): + yield util.Uninferable + else: + yield node_classes.Const(None) + return + + raise InferenceError("The function does not have any return statements") + + for returnnode in itertools.chain((first_return,), returns): + if returnnode.value is None: + yield node_classes.Const(None) + else: + try: + yield from returnnode.value.infer(context) + except InferenceError: + yield util.Uninferable + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`FunctionDef` this is always ``True``. + :rtype: bool + """ + return True + + def get_children(self): + if self.decorators is not None: + yield self.decorators + + yield self.args + + if self.returns is not None: + yield self.returns + + yield from self.body + + def scope_lookup(self, node, name, offset=0): + """Lookup where the given name is assigned.""" + if name == "__class__": + # __class__ is an implicit closure reference created by the compiler + # if any methods in a class body refer to either __class__ or super. + # In our case, we want to be able to look it up in the current scope + # when `__class__` is being used. + frame = self.parent.frame(future=True) + if isinstance(frame, ClassDef): + return self, [frame] + return super().scope_lookup(node, name, offset) + + def frame(self: T, *, future: Literal[None, True] = None) -> T: + """The node's frame node. + + A frame node is a :class:`Module`, :class:`FunctionDef`, + :class:`ClassDef` or :class:`Lambda`. + + :returns: The node itself. + """ + return self + + +class AsyncFunctionDef(FunctionDef): + """Class representing an :class:`ast.FunctionDef` node. + + A :class:`AsyncFunctionDef` is an asynchronous function + created with the `async` keyword. 
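+
+ As defined here it adds no behaviour of its own; everything visible is
+ inherited from :class:`FunctionDef`: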
+ + >>> import astroid + >>> node = astroid.extract_node(''' + async def func(things): + async for thing in things: + print(thing) + ''') + >>> node + + >>> node.body[0] + + """ + + +def _rec_get_names(args, names: Optional[List[str]] = None) -> List[str]: + """return a list of all argument names""" + if names is None: + names = [] + for arg in args: + if isinstance(arg, node_classes.Tuple): + _rec_get_names(arg.elts, names) + else: + names.append(arg.name) + return names + + +def _is_metaclass(klass, seen=None): + """Return if the given class can be + used as a metaclass. + """ + if klass.name == "type": + return True + if seen is None: + seen = set() + for base in klass.bases: + try: + for baseobj in base.infer(): + baseobj_name = baseobj.qname() + if baseobj_name in seen: + continue + + seen.add(baseobj_name) + if isinstance(baseobj, bases.Instance): + # not abstract + return False + if baseobj is util.Uninferable: + continue + if baseobj is klass: + continue + if not isinstance(baseobj, ClassDef): + continue + if baseobj._type == "metaclass": + return True + if _is_metaclass(baseobj, seen): + return True + except InferenceError: + continue + return False + + +def _class_type(klass, ancestors=None): + """return a ClassDef node type to differ metaclass and exception + from 'regular' classes + """ + # XXX we have to store ancestors in case we have an ancestor loop + if klass._type is not None: + return klass._type + if _is_metaclass(klass): + klass._type = "metaclass" + elif klass.name.endswith("Exception"): + klass._type = "exception" + else: + if ancestors is None: + ancestors = set() + klass_name = klass.qname() + if klass_name in ancestors: + # XXX we are in loop ancestors, and have found no type + klass._type = "class" + return "class" + ancestors.add(klass_name) + for base in klass.ancestors(recurs=False): + name = _class_type(base, ancestors) + if name != "class": + if name == "metaclass" and not _is_metaclass(klass): + # don't propagate it if the current class + # can't be a metaclass + continue + klass._type = base.type + break + if klass._type is None: + klass._type = "class" + return klass._type + + +def get_wrapping_class(node): + """Get the class that wraps the given node. + + We consider that a class wraps a node if the class + is a parent for the said node. + + :returns: The class that wraps the given node + :rtype: ClassDef or None + """ + + klass = node.frame(future=True) + while klass is not None and not isinstance(klass, ClassDef): + if klass.parent is None: + klass = None + else: + klass = klass.parent.frame(future=True) + return klass + + +# pylint: disable=too-many-instance-attributes +class ClassDef(mixins.FilterStmtsMixin, LocalsDictNodeNG, node_classes.Statement): + """Class representing an :class:`ast.ClassDef` node. + + >>> import astroid + >>> node = astroid.extract_node(''' + class Thing: + def my_meth(self, arg): + return arg + self.offset + ''') + >>> node + + """ + + # some of the attributes below are set by the builder module or + # by a raw factories + + # a dictionary of class instances attributes + _astroid_fields = ("decorators", "bases", "keywords", "doc_node", "body") # name + + decorators = None + """The decorators that are applied to this class. + + :type: Decorators or None + """ + special_attributes = ClassModel() + """The names of special attributes that this class has. 
+ + :type: objectmodel.ClassModel + """ + + _type = None + _metaclass_hack = False + hide = False + type = property( + _class_type, + doc=( + "The class type for this node.\n\n" + "Possible values are: class, metaclass, exception.\n\n" + ":type: str" + ), + ) + _other_fields = ("name", "doc", "is_dataclass", "position") + _other_other_fields = ("locals", "_newstyle") + _newstyle = None + + @decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead") + def __init__( + self, + name=None, + doc: Optional[str] = None, + lineno=None, + col_offset=None, + parent=None, + *, + end_lineno=None, + end_col_offset=None, + ): + """ + :param name: The name of the class. + :type name: str or None + + :param doc: The class docstring. + + :param lineno: The line that this node appears on in the source code. + :type lineno: int or None + + :param col_offset: The column that this node appears on in the + source code. + :type col_offset: int or None + + :param parent: The parent node in the syntax tree. + :type parent: NodeNG or None + + :param end_lineno: The last line this node appears on in the source code. + :type end_lineno: Optional[int] + + :param end_col_offset: The end column this node appears on in the + source code. Note: This is after the last symbol. + :type end_col_offset: Optional[int] + """ + self.instance_attrs = {} + self.locals = {} + """A map of the name of a local variable to the node defining it. + + :type: dict(str, NodeNG) + """ + + self.keywords = [] + """The keywords given to the class definition. + + This is usually for :pep:`3115` style metaclass declaration. + + :type: list(Keyword) or None + """ + + self.bases = [] + """What the class inherits from. + + :type: list(NodeNG) + """ + + self.body = [] + """The contents of the class body. + + :type: list(NodeNG) + """ + + self.name = name + """The name of the class. + + :type name: str or None + """ + + self._doc = doc + """The class docstring.""" + + self.doc_node: Optional[Const] = None + """The doc node associated with this node.""" + + self.is_dataclass: bool = False + """Whether this class is a dataclass.""" + + super().__init__( + lineno=lineno, + col_offset=col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + if parent is not None: + parent.frame(future=True).set_local(name, self) + + for local_name, node in self.implicit_locals(): + self.add_local_node(node, local_name) + + @property + def doc(self) -> Optional[str]: + """The class docstring.""" + warnings.warn( + "The 'ClassDef.doc' attribute is deprecated, " + "use 'ClassDef.doc_node' instead.", + DeprecationWarning, + ) + return self._doc + + @doc.setter + def doc(self, value: Optional[str]) -> None: + warnings.warn( + "Setting the 'ClassDef.doc' attribute is deprecated, " + "use 'ClassDef.doc_node.value' instead.", + DeprecationWarning, + ) + self._doc = value + + def implicit_parameters(self): + return 1 + + def implicit_locals(self): + """Get implicitly defined class definition locals. + + :returns: the the name and Const pair for each local + :rtype: tuple(tuple(str, node_classes.Const), ...) 
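+
+ For example, every class body implicitly provides ``__module__`` and
+ ``__qualname__`` even though neither name appears in the source.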
+ """ + locals_ = (("__module__", self.special_attributes.attr___module__),) + # __qualname__ is defined in PEP3155 + locals_ += (("__qualname__", self.special_attributes.attr___qualname__),) + return locals_ + + # pylint: disable=redefined-outer-name + def postinit( + self, + bases, + body, + decorators, + newstyle=None, + metaclass=None, + keywords=None, + *, + position: Optional[Position] = None, + doc_node: Optional[Const] = None, + ): + """Do some setup after initialisation. + + :param bases: What the class inherits from. + :type bases: list(NodeNG) + + :param body: The contents of the class body. + :type body: list(NodeNG) + + :param decorators: The decorators that are applied to this class. + :type decorators: Decorators or None + + :param newstyle: Whether this is a new style class or not. + :type newstyle: bool or None + + :param metaclass: The metaclass of this class. + :type metaclass: NodeNG or None + + :param keywords: The keywords given to the class definition. + :type keywords: list(Keyword) or None + + :param position: Position of class keyword and name. + + :param doc_node: The doc node associated with this node. + """ + if keywords is not None: + self.keywords = keywords + self.bases = bases + self.body = body + self.decorators = decorators + if newstyle is not None: + self._newstyle = newstyle + if metaclass is not None: + self._metaclass = metaclass + self.position = position + self.doc_node = doc_node + if doc_node: + self._doc = doc_node.value + + def _newstyle_impl(self, context=None): + if context is None: + context = InferenceContext() + if self._newstyle is not None: + return self._newstyle + for base in self.ancestors(recurs=False, context=context): + if base._newstyle_impl(context): + self._newstyle = True + break + klass = self.declared_metaclass() + # could be any callable, we'd need to infer the result of klass(name, + # bases, dict). punt if it's not a class node. + if klass is not None and isinstance(klass, ClassDef): + self._newstyle = klass._newstyle_impl(context) + if self._newstyle is None: + self._newstyle = False + return self._newstyle + + _newstyle = None + newstyle = property( + _newstyle_impl, + doc=("Whether this is a new style class or not\n\n" ":type: bool or None"), + ) + + @cached_property + def fromlineno(self) -> Optional[int]: + """The first line that this node appears on in the source code.""" + if not PY38_PLUS or PY38 and IS_PYPY: + # For Python < 3.8 the lineno is the line number of the first decorator. + # We want the class statement lineno. Similar to 'FunctionDef.fromlineno' + lineno = self.lineno + if self.decorators is not None: + lineno += sum( + node.tolineno - node.lineno + 1 for node in self.decorators.nodes + ) + + return lineno + return super().fromlineno + + @cached_property + def blockstart_tolineno(self): + """The line on which the beginning of this block ends. + + :type: int + """ + if self.bases: + return self.bases[-1].tolineno + + return self.fromlineno + + def block_range(self, lineno): + """Get a range from the given line number to where this node ends. + + :param lineno: Unused. + :type lineno: int + + :returns: The range of line numbers that this node belongs to, + :rtype: tuple(int, int) + """ + return self.fromlineno, self.tolineno + + def pytype(self): + """Get the name of the type that this node represents. + + :returns: The name of the type. + :rtype: str + """ + if self.newstyle: + return "builtins.type" + return "builtins.classobj" + + def display_type(self): + """A human readable type of this node. 
+
+ :returns: The type of this node.
+ :rtype: str
+ """
+ return "Class"
+
+ def callable(self):
+ """Whether this node defines something that is callable.
+
+ :returns: True if this defines something that is callable,
+ False otherwise.
+ For a :class:`ClassDef` this is always ``True``.
+ :rtype: bool
+ """
+ return True
+
+ def is_subtype_of(self, type_name, context=None):
+ """Whether this class is a subtype of the given type.
+
+ :param type_name: The name of the type to check against.
+ :type type_name: str
+
+ :returns: True if this class is a subtype of the given type,
+ False otherwise.
+ :rtype: bool
+ """
+ if self.qname() == type_name:
+ return True
+
+ return any(anc.qname() == type_name for anc in self.ancestors(context=context))
+
+ def _infer_type_call(self, caller, context):
+ try:
+ name_node = next(caller.args[0].infer(context))
+ except StopIteration as e:
+ raise InferenceError(node=caller.args[0], context=context) from e
+ if isinstance(name_node, node_classes.Const) and isinstance(
+ name_node.value, str
+ ):
+ name = name_node.value
+ else:
+ return util.Uninferable
+
+ result = ClassDef(name)
+
+ # Get the bases of the class.
+ try:
+ class_bases = next(caller.args[1].infer(context))
+ except StopIteration as e:
+ raise InferenceError(node=caller.args[1], context=context) from e
+ if isinstance(class_bases, (node_classes.Tuple, node_classes.List)):
+ bases = []
+ for base in class_bases.itered():
+ inferred = next(base.infer(context=context), None)
+ if inferred:
+ bases.append(
+ node_classes.EvaluatedObject(original=base, value=inferred)
+ )
+ result.bases = bases
+ else:
+ # There is currently no AST node that can represent an 'unknown'
+ # node (Uninferable is not an AST node), therefore we simply return Uninferable here
+ # although we know at least the name of the class.
+ return util.Uninferable
+
+ # Get the members of the class
+ try:
+ members = next(caller.args[2].infer(context))
+ except (InferenceError, StopIteration):
+ members = None
+
+ if members and isinstance(members, node_classes.Dict):
+ for attr, value in members.items:
+ if isinstance(attr, node_classes.Const) and isinstance(attr.value, str):
+ result.locals[attr.value] = [value]
+
+ result.parent = caller.parent
+ return result
+
+ def infer_call_result(self, caller, context=None):
+ """infer what a class is returning when called"""
+ if self.is_subtype_of("builtins.type", context) and len(caller.args) == 3:
+ result = self._infer_type_call(caller, context)
+ yield result
+ return
+
+ dunder_call = None
+ try:
+ metaclass = self.metaclass(context=context)
+ if metaclass is not None:
+ dunder_call = next(metaclass.igetattr("__call__", context))
+ except (AttributeInferenceError, StopIteration):
+ pass
+
+ if dunder_call and dunder_call.qname() != "builtins.type.__call__":
+ # Only use the metaclass's __call__ when it is not the default
+ # type.__call__ (since type is the default metaclass)
+ context = bind_context_to_node(context, self)
+ context.callcontext.callee = dunder_call
+ yield from dunder_call.infer_call_result(caller, context)
+ else:
+ yield self.instantiate_class()
+
+ def scope_lookup(self, node, name, offset=0):
+ """Lookup where the given name is assigned.
+
+ :param node: The node to look for assignments up to.
+ Any assignments after the given node are ignored.
+ :type node: NodeNG
+
+ :param name: The name to find assignments for.
+ :type name: str
+
+ :param offset: The line offset to filter statements up to.
+ :type offset: int + + :returns: This scope node and the list of assignments associated to the + given name according to the scope where it has been found (locals, + globals or builtin). + :rtype: tuple(str, list(NodeNG)) + """ + # If the name looks like a builtin name, just try to look + # into the upper scope of this class. We might have a + # decorator that it's poorly named after a builtin object + # inside this class. + lookup_upper_frame = ( + isinstance(node.parent, node_classes.Decorators) + and name in AstroidManager().builtins_module + ) + if ( + any(node == base or base.parent_of(node) for base in self.bases) + or lookup_upper_frame + ): + # Handle the case where we have either a name + # in the bases of a class, which exists before + # the actual definition or the case where we have + # a Getattr node, with that name. + # + # name = ... + # class A(name): + # def name(self): ... + # + # import name + # class A(name.Name): + # def name(self): ... + + frame = self.parent.frame(future=True) + # line offset to avoid that class A(A) resolve the ancestor to + # the defined class + offset = -1 + else: + frame = self + return frame._scope_lookup(node, name, offset) + + @property + def basenames(self): + """The names of the parent classes + + Names are given in the order they appear in the class definition. + + :type: list(str) + """ + return [bnode.as_string() for bnode in self.bases] + + def ancestors(self, recurs=True, context=None): + """Iterate over the base classes in prefixed depth first order. + + :param recurs: Whether to recurse or return direct ancestors only. + :type recurs: bool + + :returns: The base classes + :rtype: iterable(NodeNG) + """ + # FIXME: should be possible to choose the resolution order + # FIXME: inference make infinite loops possible here + yielded = {self} + if context is None: + context = InferenceContext() + if not self.bases and self.qname() != "builtins.object": + yield builtin_lookup("object")[1][0] + return + + for stmt in self.bases: + with context.restore_path(): + try: + for baseobj in stmt.infer(context): + if not isinstance(baseobj, ClassDef): + if isinstance(baseobj, bases.Instance): + baseobj = baseobj._proxied + else: + continue + if not baseobj.hide: + if baseobj in yielded: + continue + yielded.add(baseobj) + yield baseobj + if not recurs: + continue + for grandpa in baseobj.ancestors(recurs=True, context=context): + if grandpa is self: + # This class is the ancestor of itself. + break + if grandpa in yielded: + continue + yielded.add(grandpa) + yield grandpa + except InferenceError: + continue + + def local_attr_ancestors(self, name, context=None): + """Iterate over the parents that define the given name. + + :param name: The name to find definitions for. + :type name: str + + :returns: The parents that define the given name. + :rtype: iterable(NodeNG) + """ + # Look up in the mro if we can. This will result in the + # attribute being looked up just as Python does it. + try: + ancestors = self.mro(context)[1:] + except MroError: + # Fallback to use ancestors, we can't determine + # a sane MRO. + ancestors = self.ancestors(context=context) + for astroid in ancestors: + if name in astroid: + yield astroid + + def instance_attr_ancestors(self, name, context=None): + """Iterate over the parents that define the given name as an attribute. + + :param name: The name to find definitions for. + :type name: str + + :returns: The parents that define the given name as + an instance attribute. 
+ :rtype: iterable(NodeNG) + """ + for astroid in self.ancestors(context=context): + if name in astroid.instance_attrs: + yield astroid + + def has_base(self, node): + """Whether this class directly inherits from the given node. + + :param node: The node to check for. + :type node: NodeNG + + :returns: True if this class directly inherits from the given node. + :rtype: bool + """ + return node in self.bases + + def local_attr(self, name, context=None): + """Get the list of assign nodes associated to the given name. + + Assignments are looked for in both this class and in parents. + + :returns: The list of assignments to the given name. + :rtype: list(NodeNG) + + :raises AttributeInferenceError: If no attribute with this name + can be found in this class or parent classes. + """ + result = [] + if name in self.locals: + result = self.locals[name] + else: + class_node = next(self.local_attr_ancestors(name, context), None) + if class_node: + result = class_node.locals[name] + result = [n for n in result if not isinstance(n, node_classes.DelAttr)] + if result: + return result + raise AttributeInferenceError(target=self, attribute=name, context=context) + + def instance_attr(self, name, context=None): + """Get the list of nodes associated to the given attribute name. + + Assignments are looked for in both this class and in parents. + + :returns: The list of assignments to the given name. + :rtype: list(NodeNG) + + :raises AttributeInferenceError: If no attribute with this name + can be found in this class or parent classes. + """ + # Return a copy, so we don't modify self.instance_attrs, + # which could lead to infinite loop. + values = list(self.instance_attrs.get(name, [])) + # get all values from parents + for class_node in self.instance_attr_ancestors(name, context): + values += class_node.instance_attrs[name] + values = [n for n in values if not isinstance(n, node_classes.DelAttr)] + if values: + return values + raise AttributeInferenceError(target=self, attribute=name, context=context) + + def instantiate_class(self): + """Get an :class:`Instance` of the :class:`ClassDef` node. + + :returns: An :class:`Instance` of the :class:`ClassDef` node, + or self if this is not possible. + :rtype: Instance or ClassDef + """ + try: + if any(cls.name in EXCEPTION_BASE_CLASSES for cls in self.mro()): + # Subclasses of exceptions can be exception instances + return objects.ExceptionInstance(self) + except MroError: + pass + return bases.Instance(self) + + def getattr(self, name, context=None, class_context=True): + """Get an attribute from this class, using Python's attribute semantic. + + This method doesn't look in the :attr:`instance_attrs` dictionary + since it is done by an :class:`Instance` proxy at inference time. + It may return an :class:`Uninferable` object if + the attribute has not been + found, but a ``__getattr__`` or ``__getattribute__`` method is defined. + If ``class_context`` is given, then it is considered that the + attribute is accessed from a class context, + e.g. ClassDef.attribute, otherwise it might have been accessed + from an instance as well. If ``class_context`` is used in that + case, then a lookup in the implicit metaclass and the explicit + metaclass will be done. + + :param name: The attribute to look for. + :type name: str + + :param class_context: Whether the attribute can be accessed statically. + :type class_context: bool + + :returns: The attribute. + :rtype: list(NodeNG) + + :raises AttributeInferenceError: If the attribute cannot be inferred. 
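+
+ A minimal usage sketch (the class and attribute names below are
+ illustrative only)::
+
+ import astroid
+
+ cls = astroid.extract_node('class Config:\n    retries = 3')
+ assign_nodes = cls.getattr("retries")  # list of assignment nodes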
+ """ + if not name: + raise AttributeInferenceError(target=self, attribute=name, context=context) + + values = self.locals.get(name, []) + if name in self.special_attributes and class_context and not values: + result = [self.special_attributes.lookup(name)] + if name == "__bases__": + # Need special treatment, since they are mutable + # and we need to return all the values. + result += values + return result + + # don't modify the list in self.locals! + values = list(values) + for classnode in self.ancestors(recurs=True, context=context): + values += classnode.locals.get(name, []) + + if class_context: + values += self._metaclass_lookup_attribute(name, context) + + if not values: + raise AttributeInferenceError(target=self, attribute=name, context=context) + + # Look for AnnAssigns, which are not attributes in the purest sense. + for value in values: + if isinstance(value, node_classes.AssignName): + stmt = value.statement(future=True) + if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None: + raise AttributeInferenceError( + target=self, attribute=name, context=context + ) + return values + + def _metaclass_lookup_attribute(self, name, context): + """Search the given name in the implicit and the explicit metaclass.""" + attrs = set() + implicit_meta = self.implicit_metaclass() + context = copy_context(context) + metaclass = self.metaclass(context=context) + for cls in (implicit_meta, metaclass): + if cls and cls != self and isinstance(cls, ClassDef): + cls_attributes = self._get_attribute_from_metaclass(cls, name, context) + attrs.update(set(cls_attributes)) + return attrs + + def _get_attribute_from_metaclass(self, cls, name, context): + try: + attrs = cls.getattr(name, context=context, class_context=True) + except AttributeInferenceError: + return + + for attr in bases._infer_stmts(attrs, context, frame=cls): + if not isinstance(attr, FunctionDef): + yield attr + continue + + if isinstance(attr, objects.Property): + yield attr + continue + if attr.type == "classmethod": + # If the method is a classmethod, then it will + # be bound to the metaclass, not to the class + # from where the attribute is retrieved. + # get_wrapping_class could return None, so just + # default to the current class. + frame = get_wrapping_class(attr) or self + yield bases.BoundMethod(attr, frame) + elif attr.type == "staticmethod": + yield attr + else: + yield bases.BoundMethod(attr, self) + + def igetattr(self, name, context=None, class_context=True): + """Infer the possible values of the given variable. + + :param name: The name of the variable to infer. + :type name: str + + :returns: The inferred possible values. + :rtype: iterable(NodeNG or Uninferable) + """ + # set lookup name since this is necessary to infer on import nodes for + # instance + context = copy_context(context) + context.lookupname = name + + metaclass = self.metaclass(context=context) + try: + attributes = self.getattr(name, context, class_context=class_context) + # If we have more than one attribute, make sure that those starting from + # the second one are from the same scope. This is to account for modifications + # to the attribute happening *after* the attribute's definition (e.g. 
AugAssigns on lists) + if len(attributes) > 1: + first_attr, attributes = attributes[0], attributes[1:] + first_scope = first_attr.scope() + attributes = [first_attr] + [ + attr + for attr in attributes + if attr.parent and attr.parent.scope() == first_scope + ] + + for inferred in bases._infer_stmts(attributes, context, frame=self): + # yield Uninferable object instead of descriptors when necessary + if not isinstance(inferred, node_classes.Const) and isinstance( + inferred, bases.Instance + ): + try: + inferred._proxied.getattr("__get__", context) + except AttributeInferenceError: + yield inferred + else: + yield util.Uninferable + elif isinstance(inferred, objects.Property): + function = inferred.function + if not class_context: + # Through an instance so we can solve the property + yield from function.infer_call_result( + caller=self, context=context + ) + # If we're in a class context, we need to determine if the property + # was defined in the metaclass (a derived class must be a subclass of + # the metaclass of all its bases), in which case we can resolve the + # property. If not, i.e. the property is defined in some base class + # instead, then we return the property object + elif metaclass and function.parent.scope() is metaclass: + # Resolve a property as long as it is not accessed through + # the class itself. + yield from function.infer_call_result( + caller=self, context=context + ) + else: + yield inferred + else: + yield function_to_method(inferred, self) + except AttributeInferenceError as error: + if not name.startswith("__") and self.has_dynamic_getattr(context): + # class handle some dynamic attributes, return a Uninferable object + yield util.Uninferable + else: + raise InferenceError( + str(error), target=self, attribute=name, context=context + ) from error + + def has_dynamic_getattr(self, context=None): + """Check if the class has a custom __getattr__ or __getattribute__. + + If any such method is found and it is not from + builtins, nor from an extension module, then the function + will return True. + + :returns: True if the class has a custom + __getattr__ or __getattribute__, False otherwise. + :rtype: bool + """ + + def _valid_getattr(node): + root = node.root() + return root.name != "builtins" and getattr(root, "pure_python", None) + + try: + return _valid_getattr(self.getattr("__getattr__", context)[0]) + except AttributeInferenceError: + # if self.newstyle: XXX cause an infinite recursion error + try: + getattribute = self.getattr("__getattribute__", context)[0] + return _valid_getattr(getattribute) + except AttributeInferenceError: + pass + return False + + def getitem(self, index, context=None): + """Return the inference of a subscript. + + This is basically looking up the method in the metaclass and calling it. + + :returns: The inferred value of a subscript to this class. + :rtype: NodeNG + + :raises AstroidTypeError: If this class does not define a + ``__getitem__`` method. + """ + try: + methods = lookup(self, "__getitem__") + except AttributeInferenceError as exc: + if isinstance(self, ClassDef): + # subscripting a class definition may be + # achieved thanks to __class_getitem__ method + # which is a classmethod defined in the class + # that supports subscript and not in the metaclass + try: + methods = self.getattr("__class_getitem__") + # Here it is assumed that the __class_getitem__ node is + # a FunctionDef. One possible improvement would be to deal + # with more generic inference. 
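+ # For example, a class defining
+ #     def __class_getitem__(cls, item): ...
+ # is subscriptable ('A[int]') without any metaclass support.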
+ except AttributeInferenceError:
+ raise AstroidTypeError(node=self, context=context) from exc
+ else:
+ raise AstroidTypeError(node=self, context=context) from exc
+
+ method = methods[0]
+
+ # Create a new callcontext for providing index as an argument.
+ new_context = bind_context_to_node(context, self)
+ new_context.callcontext = CallContext(args=[index], callee=method)
+
+ try:
+ return next(method.infer_call_result(self, new_context), util.Uninferable)
+ except AttributeError:
+ # Starting with python3.9, builtin types list, dict etc...
+ # are subscriptable thanks to the __class_getitem__ classmethod.
+ # However in such a case the method is bound to an EmptyNode, and
+ # EmptyNode doesn't have an infer_call_result method, leading to
+ # this AttributeError.
+ if (
+ isinstance(method, node_classes.EmptyNode)
+ and self.name in {"list", "dict", "set", "tuple", "frozenset"}
+ and PY39_PLUS
+ ):
+ return self
+ raise
+ except InferenceError:
+ return util.Uninferable
+
+ def methods(self):
+ """Iterate over all of the methods defined in this class and its parents.
+
+ :returns: The methods defined on the class.
+ :rtype: iterable(FunctionDef)
+ """
+ done = {}
+ for astroid in itertools.chain(iter((self,)), self.ancestors()):
+ for meth in astroid.mymethods():
+ if meth.name in done:
+ continue
+ done[meth.name] = None
+ yield meth
+
+ def mymethods(self):
+ """Iterate over all of the methods defined in this class only.
+
+ :returns: The methods defined on the class.
+ :rtype: iterable(FunctionDef)
+ """
+ for member in self.values():
+ if isinstance(member, FunctionDef):
+ yield member
+
+ def implicit_metaclass(self):
+ """Get the implicit metaclass of the current class.
+
+ For newstyle classes, this will return an instance of builtins.type.
+ For oldstyle classes, it will simply return None, since there's
+ no implicit metaclass there.
+
+ :returns: The metaclass.
+ :rtype: builtins.type or None
+ """
+ if self.newstyle:
+ return builtin_lookup("type")[1][0]
+ return None
+
+ _metaclass = None
+
+ def declared_metaclass(self, context=None):
+ """Return the explicitly declared metaclass for the current class.
+
+ An explicitly declared metaclass is defined
+ either by passing the ``metaclass`` keyword argument
+ in the class definition line (Python 3) or (Python 2) by
+ having a ``__metaclass__`` class attribute, or if there are
+ no explicit bases but there is a global ``__metaclass__`` variable.
+
+ :returns: The metaclass of this class,
+ or None if one could not be found.
+ :rtype: NodeNG or None
+ """
+ for base in self.bases:
+ try:
+ for baseobj in base.infer(context=context):
+ if isinstance(baseobj, ClassDef) and baseobj.hide:
+ self._metaclass = baseobj._metaclass
+ self._metaclass_hack = True
+ break
+ except InferenceError:
+ pass
+
+ if self._metaclass:
+ # Expects this from Py3k TreeRebuilder
+ try:
+ return next(
+ node
+ for node in self._metaclass.infer(context=context)
+ if node is not util.Uninferable
+ )
+ except (InferenceError, StopIteration):
+ return None
+
+ return None
+
+ def _find_metaclass(self, seen=None, context=None):
+ if seen is None:
+ seen = set()
+ seen.add(self)
+
+ klass = self.declared_metaclass(context=context)
+ if klass is None:
+ for parent in self.ancestors(context=context):
+ if parent not in seen:
+ klass = parent._find_metaclass(seen, context)
+ if klass is not None:
+ break
+ return klass
+
+ def metaclass(self, context=None):
+ """Get the metaclass of this class.
+ + If this class does not define explicitly a metaclass, + then the first defined metaclass in ancestors will be used + instead. + + :returns: The metaclass of this class. + :rtype: NodeNG or None + """ + return self._find_metaclass(context=context) + + def has_metaclass_hack(self): + return self._metaclass_hack + + def _islots(self): + """Return an iterator with the inferred slots.""" + if "__slots__" not in self.locals: + return None + for slots in self.igetattr("__slots__"): + # check if __slots__ is a valid type + for meth in ITER_METHODS: + try: + slots.getattr(meth) + break + except AttributeInferenceError: + continue + else: + continue + + if isinstance(slots, node_classes.Const): + # a string. Ignore the following checks, + # but yield the node, only if it has a value + if slots.value: + yield slots + continue + if not hasattr(slots, "itered"): + # we can't obtain the values, maybe a .deque? + continue + + if isinstance(slots, node_classes.Dict): + values = [item[0] for item in slots.items] + else: + values = slots.itered() + if values is util.Uninferable: + continue + if not values: + # Stop the iteration, because the class + # has an empty list of slots. + return values + + for elt in values: + try: + for inferred in elt.infer(): + if inferred is util.Uninferable: + continue + if not isinstance( + inferred, node_classes.Const + ) or not isinstance(inferred.value, str): + continue + if not inferred.value: + continue + yield inferred + except InferenceError: + continue + + return None + + def _slots(self): + if not self.newstyle: + raise NotImplementedError( + "The concept of slots is undefined for old-style classes." + ) + + slots = self._islots() + try: + first = next(slots) + except StopIteration as exc: + # The class doesn't have a __slots__ definition or empty slots. + if exc.args and exc.args[0] not in ("", None): + return exc.args[0] + return None + return [first] + list(slots) + + # Cached, because inferring them all the time is expensive + @decorators_mod.cached + def slots(self): + """Get all the slots for this node. + + :returns: The names of slots for this class. + If the class doesn't define any slot, through the ``__slots__`` + variable, then this function will return a None. + Also, it will return None in the case the slots were not inferred. + :rtype: list(str) or None + """ + + def grouped_slots( + mro: List["ClassDef"], + ) -> typing.Iterator[Optional[node_classes.NodeNG]]: + # Not interested in object, since it can't have slots. + for cls in mro[:-1]: + try: + cls_slots = cls._slots() + except NotImplementedError: + continue + if cls_slots is not None: + yield from cls_slots + else: + yield None + + if not self.newstyle: + raise NotImplementedError( + "The concept of slots is undefined for old-style classes." + ) + + try: + mro = self.mro() + except MroError as e: + raise NotImplementedError( + "Cannot get slots while parsing mro fails." + ) from e + + slots = list(grouped_slots(mro)) + if not all(slot is not None for slot in slots): + return None + + return sorted(set(slots), key=lambda item: item.value) + + def _inferred_bases(self, context=None): + # Similar with .ancestors, but the difference is when one base is inferred, + # only the first object is wanted. 
That's because + # we aren't interested in superclasses, as in the following + # example: + # + # class SomeSuperClass(object): pass + # class SomeClass(SomeSuperClass): pass + # class Test(SomeClass): pass + # + # Inferring SomeClass from the Test's bases will give + # us both SomeClass and SomeSuperClass, but we are interested + # only in SomeClass. + + if context is None: + context = InferenceContext() + if not self.bases and self.qname() != "builtins.object": + yield builtin_lookup("object")[1][0] + return + + for stmt in self.bases: + try: + # Find the first non-None inferred base value + baseobj = next( + b + for b in stmt.infer(context=context.clone()) + if not (isinstance(b, Const) and b.value is None) + ) + except (InferenceError, StopIteration): + continue + if isinstance(baseobj, bases.Instance): + baseobj = baseobj._proxied + if not isinstance(baseobj, ClassDef): + continue + if not baseobj.hide: + yield baseobj + else: + yield from baseobj.bases + + def _compute_mro(self, context=None): + inferred_bases = list(self._inferred_bases(context=context)) + bases_mro = [] + for base in inferred_bases: + if base is self: + continue + + try: + mro = base._compute_mro(context=context) + bases_mro.append(mro) + except NotImplementedError: + # Some classes have in their ancestors both newstyle and + # old style classes. For these we can't retrieve the .mro, + # although in Python it's possible, since the class we are + # currently working is in fact new style. + # So, we fallback to ancestors here. + ancestors = list(base.ancestors(context=context)) + bases_mro.append(ancestors) + + unmerged_mro = [[self]] + bases_mro + [inferred_bases] + unmerged_mro = list(clean_duplicates_mro(unmerged_mro, self, context)) + clean_typing_generic_mro(unmerged_mro) + return _c3_merge(unmerged_mro, self, context) + + def mro(self, context=None) -> List["ClassDef"]: + """Get the method resolution order, using C3 linearization. + + :returns: The list of ancestors, sorted by the mro. + :rtype: list(NodeNG) + :raises DuplicateBasesError: Duplicate bases in the same class base + :raises InconsistentMroError: A class' MRO is inconsistent + """ + return self._compute_mro(context=context) + + def bool_value(self, context=None): + """Determine the boolean value of this node. + + :returns: The boolean value of this node. + For a :class:`ClassDef` this is always ``True``. + :rtype: bool + """ + return True + + def get_children(self): + if self.decorators is not None: + yield self.decorators + + yield from self.bases + if self.keywords is not None: + yield from self.keywords + yield from self.body + + @decorators_mod.cached + def _get_assign_nodes(self): + children_assign_nodes = ( + child_node._get_assign_nodes() for child_node in self.body + ) + return list(itertools.chain.from_iterable(children_assign_nodes)) + + def frame(self: T, *, future: Literal[None, True] = None) -> T: + """The node's frame node. + + A frame node is a :class:`Module`, :class:`FunctionDef`, + :class:`ClassDef` or :class:`Lambda`. + + :returns: The node itself. 
+ """ + return self diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/utils.py b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/utils.py new file mode 100644 index 0000000..272bdad --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/scoped_nodes/utils.py @@ -0,0 +1,36 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +This module contains utility functions for scoped nodes. +""" + +import builtins +from typing import TYPE_CHECKING, Sequence, Tuple + +from astroid.manager import AstroidManager + +if TYPE_CHECKING: + from astroid import nodes + + +_builtin_astroid: "nodes.Module | None" = None + + +def builtin_lookup(name: str) -> Tuple["nodes.Module", Sequence["nodes.NodeNG"]]: + """Lookup a name in the builtin module. + + Return the list of matching statements and the ast for the builtin module + """ + # pylint: disable-next=global-statement + global _builtin_astroid + if _builtin_astroid is None: + _builtin_astroid = AstroidManager().ast_from_module(builtins) + if name == "__dict__": + return _builtin_astroid, () + try: + stmts: Sequence["nodes.NodeNG"] = _builtin_astroid.locals[name] + except KeyError: + stmts = () + return _builtin_astroid, stmts diff --git a/myenv/lib/python3.9/site-packages/astroid/nodes/utils.py b/myenv/lib/python3.9/site-packages/astroid/nodes/utils.py new file mode 100644 index 0000000..5afa718 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/nodes/utils.py @@ -0,0 +1,14 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +from typing import NamedTuple + + +class Position(NamedTuple): + """Position with line and column information.""" + + lineno: int + col_offset: int + end_lineno: int + end_col_offset: int diff --git a/myenv/lib/python3.9/site-packages/astroid/objects.py b/myenv/lib/python3.9/site-packages/astroid/objects.py new file mode 100644 index 0000000..416602e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/objects.py @@ -0,0 +1,329 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +""" +Inference objects are a way to represent composite AST nodes, +which are used only as inference results, so they can't be found in the +original AST tree. 
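+ Each class below either proxies or subclasses a real node type, so the
+ inference machinery can treat these synthetic results like ordinary nodes.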
For instance, inferring the following frozenset use, +leads to an inferred FrozenSet: + + Call(func=Name('frozenset'), args=Tuple(...)) +""" + +import sys +from typing import TYPE_CHECKING + +from astroid import bases, decorators, util +from astroid.exceptions import ( + AttributeInferenceError, + InferenceError, + MroError, + SuperError, +) +from astroid.manager import AstroidManager +from astroid.nodes import node_classes, scoped_nodes + +objectmodel = util.lazy_import("interpreter.objectmodel") + +if sys.version_info >= (3, 8) or TYPE_CHECKING: + from functools import cached_property +else: + from astroid.decorators import cachedproperty as cached_property + + +class FrozenSet(node_classes.BaseContainer): + """class representing a FrozenSet composite node""" + + def pytype(self): + return "builtins.frozenset" + + def _infer(self, context=None): + yield self + + @cached_property + def _proxied(self): # pylint: disable=method-hidden + ast_builtins = AstroidManager().builtins_module + return ast_builtins.getattr("frozenset")[0] + + +class Super(node_classes.NodeNG): + """Proxy class over a super call. + + This class offers almost the same behaviour as Python's super, + which is MRO lookups for retrieving attributes from the parents. + + The *mro_pointer* is the place in the MRO from where we should + start looking, not counting it. *mro_type* is the object which + provides the MRO, it can be both a type or an instance. + *self_class* is the class where the super call is, while + *scope* is the function where the super call is. + """ + + # pylint: disable=unnecessary-lambda + special_attributes = util.lazy_descriptor(lambda: objectmodel.SuperModel()) + + def __init__(self, mro_pointer, mro_type, self_class, scope): + self.type = mro_type + self.mro_pointer = mro_pointer + self._class_based = False + self._self_class = self_class + self._scope = scope + super().__init__() + + def _infer(self, context=None): + yield self + + def super_mro(self): + """Get the MRO which will be used to lookup attributes in this super.""" + if not isinstance(self.mro_pointer, scoped_nodes.ClassDef): + raise SuperError( + "The first argument to super must be a subtype of " + "type, not {mro_pointer}.", + super_=self, + ) + + if isinstance(self.type, scoped_nodes.ClassDef): + # `super(type, type)`, most likely in a class method. 
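+ # e.g. 'super(Cls, cls)' inside a classmethod: the second argument is
+ # itself a class, so attribute lookups bind to classes, not instances.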
+ self._class_based = True + mro_type = self.type + else: + mro_type = getattr(self.type, "_proxied", None) + if not isinstance(mro_type, (bases.Instance, scoped_nodes.ClassDef)): + raise SuperError( + "The second argument to super must be an " + "instance or subtype of type, not {type}.", + super_=self, + ) + + if not mro_type.newstyle: + raise SuperError("Unable to call super on old-style classes.", super_=self) + + mro = mro_type.mro() + if self.mro_pointer not in mro: + raise SuperError( + "The second argument to super must be an " + "instance or subtype of type, not {type}.", + super_=self, + ) + + index = mro.index(self.mro_pointer) + return mro[index + 1 :] + + @cached_property + def _proxied(self): + ast_builtins = AstroidManager().builtins_module + return ast_builtins.getattr("super")[0] + + def pytype(self): + return "builtins.super" + + def display_type(self): + return "Super of" + + @property + def name(self): + """Get the name of the MRO pointer.""" + return self.mro_pointer.name + + def qname(self): + return "super" + + def igetattr(self, name, context=None): + """Retrieve the inferred values of the given attribute name.""" + + if name in self.special_attributes: + yield self.special_attributes.lookup(name) + return + + try: + mro = self.super_mro() + # Don't let invalid MROs or invalid super calls + # leak out as is from this function. + except SuperError as exc: + raise AttributeInferenceError( + ( + "Lookup for {name} on {target!r} because super call {super!r} " + "is invalid." + ), + target=self, + attribute=name, + context=context, + super_=exc.super_, + ) from exc + except MroError as exc: + raise AttributeInferenceError( + ( + "Lookup for {name} on {target!r} failed because {cls!r} has an " + "invalid MRO." + ), + target=self, + attribute=name, + context=context, + mros=exc.mros, + cls=exc.cls, + ) from exc + found = False + for cls in mro: + if name not in cls.locals: + continue + + found = True + for inferred in bases._infer_stmts([cls[name]], context, frame=self): + if not isinstance(inferred, scoped_nodes.FunctionDef): + yield inferred + continue + + # We can obtain different descriptors from a super depending + # on what we are accessing and where the super call is. + if inferred.type == "classmethod": + yield bases.BoundMethod(inferred, cls) + elif self._scope.type == "classmethod" and inferred.type == "method": + yield inferred + elif self._class_based or inferred.type == "staticmethod": + yield inferred + elif isinstance(inferred, Property): + function = inferred.function + try: + yield from function.infer_call_result( + caller=self, context=context + ) + except InferenceError: + yield util.Uninferable + elif bases._is_property(inferred): + # TODO: support other descriptors as well. + try: + yield from inferred.infer_call_result(self, context) + except InferenceError: + yield util.Uninferable + else: + yield bases.BoundMethod(inferred, cls) + + if not found: + raise AttributeInferenceError(target=self, attribute=name, context=context) + + def getattr(self, name, context=None): + return list(self.igetattr(name, context=context)) + + +class ExceptionInstance(bases.Instance): + """Class for instances of exceptions + + It has special treatment for some of the exceptions's attributes, + which are transformed at runtime into certain concrete objects, such as + the case of .args. 
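+
+ For example, accessing ``.args`` on an inferred exception instance yields
+ a concrete tuple node instead of a plain attribute lookup.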
+ """ + + @cached_property + def special_attributes(self): + qname = self.qname() + instance = objectmodel.BUILTIN_EXCEPTIONS.get( + qname, objectmodel.ExceptionInstanceModel + ) + return instance()(self) + + +class DictInstance(bases.Instance): + """Special kind of instances for dictionaries + + This instance knows the underlying object model of the dictionaries, which means + that methods such as .values or .items can be properly inferred. + """ + + # pylint: disable=unnecessary-lambda + special_attributes = util.lazy_descriptor(lambda: objectmodel.DictModel()) + + +# Custom objects tailored for dictionaries, which are used to +# disambiguate between the types of Python 2 dict's method returns +# and Python 3 (where they return set like objects). +class DictItems(bases.Proxy): + __str__ = node_classes.NodeNG.__str__ + __repr__ = node_classes.NodeNG.__repr__ + + +class DictKeys(bases.Proxy): + __str__ = node_classes.NodeNG.__str__ + __repr__ = node_classes.NodeNG.__repr__ + + +class DictValues(bases.Proxy): + __str__ = node_classes.NodeNG.__str__ + __repr__ = node_classes.NodeNG.__repr__ + + +class PartialFunction(scoped_nodes.FunctionDef): + """A class representing partial function obtained via functools.partial""" + + @decorators.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead") + def __init__( + self, call, name=None, doc=None, lineno=None, col_offset=None, parent=None + ): + # TODO: Pass end_lineno and end_col_offset as well + super().__init__(name, lineno=lineno, col_offset=col_offset, parent=None) + # Assigned directly to prevent triggering the DeprecationWarning. + self._doc = doc + # A typical FunctionDef automatically adds its name to the parent scope, + # but a partial should not, so defer setting parent until after init + self.parent = parent + self.filled_args = call.positional_arguments[1:] + self.filled_keywords = call.keyword_arguments + + wrapped_function = call.positional_arguments[0] + inferred_wrapped_function = next(wrapped_function.infer()) + if isinstance(inferred_wrapped_function, PartialFunction): + self.filled_args = inferred_wrapped_function.filled_args + self.filled_args + self.filled_keywords = { + **inferred_wrapped_function.filled_keywords, + **self.filled_keywords, + } + + self.filled_positionals = len(self.filled_args) + + def infer_call_result(self, caller=None, context=None): + if context: + current_passed_keywords = { + keyword for (keyword, _) in context.callcontext.keywords + } + for keyword, value in self.filled_keywords.items(): + if keyword not in current_passed_keywords: + context.callcontext.keywords.append((keyword, value)) + + call_context_args = context.callcontext.args or [] + context.callcontext.args = self.filled_args + call_context_args + + return super().infer_call_result(caller=caller, context=context) + + def qname(self): + return self.__class__.__name__ + + +# TODO: Hack to solve the circular import problem between node_classes and objects +# This is not needed in 2.0, which has a cleaner design overall +node_classes.Dict.__bases__ = (node_classes.NodeNG, DictInstance) + + +class Property(scoped_nodes.FunctionDef): + """Class representing a Python property""" + + @decorators.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead") + def __init__( + self, function, name=None, doc=None, lineno=None, col_offset=None, parent=None + ): + self.function = function + super().__init__(name, lineno=lineno, col_offset=col_offset, parent=parent) + # Assigned directly to prevent triggering the DeprecationWarning. 
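+ # (the 'doc' argument itself is deprecated in favour of 'doc_node';
+ # see the deprecate_arguments decorator above)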
+ self._doc = doc + + # pylint: disable=unnecessary-lambda + special_attributes = util.lazy_descriptor(lambda: objectmodel.PropertyModel()) + type = "property" + + def pytype(self): + return "builtins.property" + + def infer_call_result(self, caller=None, context=None): + raise InferenceError("Properties are not callable") + + def infer(self, context=None, **kwargs): + return iter((self,)) diff --git a/myenv/lib/python3.9/site-packages/astroid/protocols.py b/myenv/lib/python3.9/site-packages/astroid/protocols.py new file mode 100644 index 0000000..f1fcec0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/protocols.py @@ -0,0 +1,894 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""this module contains a set of functions to handle python protocols for nodes +where it makes sense. +""" + +import collections +import itertools +import operator as operator_mod +from typing import Any, Generator, List, Optional, Union + +from astroid import arguments, bases, decorators, helpers, nodes, util +from astroid.const import Context +from astroid.context import InferenceContext, copy_context +from astroid.exceptions import ( + AstroidIndexError, + AstroidTypeError, + AttributeInferenceError, + InferenceError, + NoDefault, +) +from astroid.nodes import node_classes + +raw_building = util.lazy_import("raw_building") +objects = util.lazy_import("objects") + + +def _reflected_name(name): + return "__r" + name[2:] + + +def _augmented_name(name): + return "__i" + name[2:] + + +_CONTEXTLIB_MGR = "contextlib.contextmanager" +BIN_OP_METHOD = { + "+": "__add__", + "-": "__sub__", + "/": "__truediv__", + "//": "__floordiv__", + "*": "__mul__", + "**": "__pow__", + "%": "__mod__", + "&": "__and__", + "|": "__or__", + "^": "__xor__", + "<<": "__lshift__", + ">>": "__rshift__", + "@": "__matmul__", +} + +REFLECTED_BIN_OP_METHOD = { + key: _reflected_name(value) for (key, value) in BIN_OP_METHOD.items() +} +AUGMENTED_OP_METHOD = { + key + "=": _augmented_name(value) for (key, value) in BIN_OP_METHOD.items() +} + +UNARY_OP_METHOD = { + "+": "__pos__", + "-": "__neg__", + "~": "__invert__", + "not": None, # XXX not '__nonzero__' +} +_UNARY_OPERATORS = { + "+": operator_mod.pos, + "-": operator_mod.neg, + "~": operator_mod.invert, + "not": operator_mod.not_, +} + + +def _infer_unary_op(obj, op): + func = _UNARY_OPERATORS[op] + value = func(obj) + return nodes.const_factory(value) + + +nodes.Tuple.infer_unary_op = lambda self, op: _infer_unary_op(tuple(self.elts), op) +nodes.List.infer_unary_op = lambda self, op: _infer_unary_op(self.elts, op) +nodes.Set.infer_unary_op = lambda self, op: _infer_unary_op(set(self.elts), op) +nodes.Const.infer_unary_op = lambda self, op: _infer_unary_op(self.value, op) +nodes.Dict.infer_unary_op = lambda self, op: _infer_unary_op(dict(self.items), op) + +# Binary operations + +BIN_OP_IMPL = { + "+": lambda a, b: a + b, + "-": lambda a, b: a - b, + "/": lambda a, b: a / b, + "//": lambda a, b: a // b, + "*": lambda a, b: a * b, + "**": lambda a, b: a**b, + "%": lambda a, b: a % b, + "&": lambda a, b: a & b, + "|": lambda a, b: a | b, + "^": lambda a, b: a ^ b, + "<<": lambda a, b: a << b, + ">>": lambda a, b: a >> b, + "@": operator_mod.matmul, +} +for _KEY, _IMPL in list(BIN_OP_IMPL.items()): + BIN_OP_IMPL[_KEY + "="] = _IMPL + + +@decorators.yes_if_nothing_inferred +def 
const_infer_binary_op(self, opnode, operator, other, context, _): + not_implemented = nodes.Const(NotImplemented) + if isinstance(other, nodes.Const): + try: + impl = BIN_OP_IMPL[operator] + try: + yield nodes.const_factory(impl(self.value, other.value)) + except TypeError: + # ArithmeticError is not enough: float >> float is a TypeError + yield not_implemented + except Exception: # pylint: disable=broad-except + yield util.Uninferable + except TypeError: + yield not_implemented + elif isinstance(self.value, str) and operator == "%": + # TODO(cpopa): implement string interpolation later on. + yield util.Uninferable + else: + yield not_implemented + + +nodes.Const.infer_binary_op = const_infer_binary_op + + +def _multiply_seq_by_int(self, opnode, other, context): + node = self.__class__(parent=opnode) + filtered_elts = ( + helpers.safe_infer(elt, context) or util.Uninferable + for elt in self.elts + if elt is not util.Uninferable + ) + node.elts = list(filtered_elts) * other.value + return node + + +def _filter_uninferable_nodes(elts, context): + for elt in elts: + if elt is util.Uninferable: + yield nodes.Unknown() + else: + for inferred in elt.infer(context): + if inferred is not util.Uninferable: + yield inferred + else: + yield nodes.Unknown() + + +@decorators.yes_if_nothing_inferred +def tl_infer_binary_op( + self, + opnode: nodes.BinOp, + operator: str, + other: nodes.NodeNG, + context: InferenceContext, + method: nodes.FunctionDef, +) -> Generator[nodes.NodeNG, None, None]: + """Infer a binary operation on a tuple or list. + + The instance on which the binary operation is performed is a tuple + or list. This refers to the left-hand side of the operation, so: + 'tuple() + 1' or '[] + A()' + """ + # For tuples and list the boundnode is no longer the tuple or list instance + context.boundnode = None + not_implemented = nodes.Const(NotImplemented) + if isinstance(other, self.__class__) and operator == "+": + node = self.__class__(parent=opnode) + node.elts = list( + itertools.chain( + _filter_uninferable_nodes(self.elts, context), + _filter_uninferable_nodes(other.elts, context), + ) + ) + yield node + elif isinstance(other, nodes.Const) and operator == "*": + if not isinstance(other.value, int): + yield not_implemented + return + yield _multiply_seq_by_int(self, opnode, other, context) + elif isinstance(other, bases.Instance) and operator == "*": + # Verify if the instance supports __index__. + as_index = helpers.class_instance_as_index(other) + if not as_index: + yield util.Uninferable + else: + yield _multiply_seq_by_int(self, opnode, as_index, context) + else: + yield not_implemented + + +nodes.Tuple.infer_binary_op = tl_infer_binary_op +nodes.List.infer_binary_op = tl_infer_binary_op + + +@decorators.yes_if_nothing_inferred +def instance_class_infer_binary_op(self, opnode, operator, other, context, method): + return method.infer_call_result(self, context) + + +bases.Instance.infer_binary_op = instance_class_infer_binary_op +nodes.ClassDef.infer_binary_op = instance_class_infer_binary_op + + +# assignment ################################################################## + +"""the assigned_stmts method is responsible to return the assigned statement +(e.g. not inferred) according to the assignment type. + +The `assign_path` argument is used to record the lhs path of the original node. +For instance if we want assigned statements for 'c' in 'a, (b,c)', assign_path +will be [1, 1] once arrived to the Assign node. 
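+
+ For example, given
+
+ a, (b, c) = 1, (2, 3)
+
+ the path [1, 1] selects the second element of the outer tuple and then the
+ second element of the nested tuple, i.e. the statement assigned to 'c'.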
+ +The `context` argument is the current inference context which should be given +to any intermediary inference necessary. +""" + + +def _resolve_looppart(parts, assign_path, context): + """recursive function to resolve multiple assignments on loops""" + assign_path = assign_path[:] + index = assign_path.pop(0) + for part in parts: + if part is util.Uninferable: + continue + if not hasattr(part, "itered"): + continue + try: + itered = part.itered() + except TypeError: + continue + for stmt in itered: + index_node = nodes.Const(index) + try: + assigned = stmt.getitem(index_node, context) + except (AttributeError, AstroidTypeError, AstroidIndexError): + continue + if not assign_path: + # we achieved to resolved the assignment path, + # don't infer the last part + yield assigned + elif assigned is util.Uninferable: + break + else: + # we are not yet on the last part of the path + # search on each possibly inferred value + try: + yield from _resolve_looppart( + assigned.infer(context), assign_path, context + ) + except InferenceError: + break + + +@decorators.raise_if_nothing_inferred +def for_assigned_stmts( + self: Union[nodes.For, nodes.Comprehension], + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + if isinstance(self, nodes.AsyncFor) or getattr(self, "is_async", False): + # Skip inferring of async code for now + return dict(node=self, unknown=node, assign_path=assign_path, context=context) + if assign_path is None: + for lst in self.iter.infer(context): + if isinstance(lst, (nodes.Tuple, nodes.List)): + yield from lst.elts + else: + yield from _resolve_looppart(self.iter.infer(context), assign_path, context) + return dict(node=self, unknown=node, assign_path=assign_path, context=context) + + +nodes.For.assigned_stmts = for_assigned_stmts +nodes.Comprehension.assigned_stmts = for_assigned_stmts + + +def sequence_assigned_stmts( + self: Union[nodes.Tuple, nodes.List], + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + if assign_path is None: + assign_path = [] + try: + index = self.elts.index(node) + except ValueError as exc: + raise InferenceError( + "Tried to retrieve a node {node!r} which does not exist", + node=self, + assign_path=assign_path, + context=context, + ) from exc + + assign_path.insert(0, index) + return self.parent.assigned_stmts( + node=self, context=context, assign_path=assign_path + ) + + +nodes.Tuple.assigned_stmts = sequence_assigned_stmts +nodes.List.assigned_stmts = sequence_assigned_stmts + + +def assend_assigned_stmts( + self: Union[nodes.AssignName, nodes.AssignAttr], + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + return self.parent.assigned_stmts(node=self, context=context) + + +nodes.AssignName.assigned_stmts = assend_assigned_stmts +nodes.AssignAttr.assigned_stmts = assend_assigned_stmts + + +def _arguments_infer_argname(self, name, context): + # arguments information may be missing, in which case we can't do anything + # more + if not (self.arguments or self.vararg or self.kwarg): + yield util.Uninferable + return + + functype = self.parent.type + # first argument of instance/class method + if ( + self.arguments + and getattr(self.arguments[0], "name", None) == name + and functype != "staticmethod" + ): + cls = self.parent.parent.scope() + 
is_metaclass = isinstance(cls, nodes.ClassDef) and cls.type == "metaclass" + # If this is a metaclass, then the first argument will always + # be the class, not an instance. + if context.boundnode and isinstance(context.boundnode, bases.Instance): + cls = context.boundnode._proxied + if is_metaclass or functype == "classmethod": + yield cls + return + if functype == "method": + yield cls.instantiate_class() + return + + if context and context.callcontext: + callee = context.callcontext.callee + while hasattr(callee, "_proxied"): + callee = callee._proxied + if getattr(callee, "name", None) == self.parent.name: + call_site = arguments.CallSite(context.callcontext, context.extra_context) + yield from call_site.infer_argument(self.parent, name, context) + return + + if name == self.vararg: + vararg = nodes.const_factory(()) + vararg.parent = self + if not self.arguments and self.parent.name == "__init__": + cls = self.parent.parent.scope() + vararg.elts = [cls.instantiate_class()] + yield vararg + return + if name == self.kwarg: + kwarg = nodes.const_factory({}) + kwarg.parent = self + yield kwarg + return + # if there is a default value, yield it. And then yield Uninferable to reflect + # we can't guess given argument value + try: + context = copy_context(context) + yield from self.default_value(name).infer(context) + yield util.Uninferable + except NoDefault: + yield util.Uninferable + + +def arguments_assigned_stmts( + self: nodes.Arguments, + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + if context.callcontext: + callee = context.callcontext.callee + while hasattr(callee, "_proxied"): + callee = callee._proxied + else: + callee = None + if ( + context.callcontext + and node + and getattr(callee, "name", None) == node.frame(future=True).name + ): + # reset call context/name + callcontext = context.callcontext + context = copy_context(context) + context.callcontext = None + args = arguments.CallSite(callcontext, context=context) + return args.infer_argument(self.parent, node.name, context) + return _arguments_infer_argname(self, node.name, context) + + +nodes.Arguments.assigned_stmts = arguments_assigned_stmts + + +@decorators.raise_if_nothing_inferred +def assign_assigned_stmts( + self: Union[nodes.AugAssign, nodes.Assign, nodes.AnnAssign], + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + if not assign_path: + yield self.value + return None + yield from _resolve_assignment_parts( + self.value.infer(context), assign_path, context + ) + + return dict(node=self, unknown=node, assign_path=assign_path, context=context) + + +def assign_annassigned_stmts( + self: nodes.AnnAssign, + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + for inferred in assign_assigned_stmts(self, node, context, assign_path): + if inferred is None: + yield util.Uninferable + else: + yield inferred + + +nodes.Assign.assigned_stmts = assign_assigned_stmts +nodes.AnnAssign.assigned_stmts = assign_annassigned_stmts +nodes.AugAssign.assigned_stmts = assign_assigned_stmts + + +def _resolve_assignment_parts(parts, assign_path, context): + """recursive function to resolve multiple assignments""" + assign_path = assign_path[:] + index = assign_path.pop(0) + for part in parts: + assigned = None + if 
isinstance(part, nodes.Dict): + # A dictionary in an iterating context + try: + assigned, _ = part.items[index] + except IndexError: + return + + elif hasattr(part, "getitem"): + index_node = nodes.Const(index) + try: + assigned = part.getitem(index_node, context) + except (AstroidTypeError, AstroidIndexError): + return + + if not assigned: + return + + if not assign_path: + # we achieved to resolved the assignment path, don't infer the + # last part + yield assigned + elif assigned is util.Uninferable: + return + else: + # we are not yet on the last part of the path search on each + # possibly inferred value + try: + yield from _resolve_assignment_parts( + assigned.infer(context), assign_path, context + ) + except InferenceError: + return + + +@decorators.raise_if_nothing_inferred +def excepthandler_assigned_stmts( + self: nodes.ExceptHandler, + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + for assigned in node_classes.unpack_infer(self.type): + if isinstance(assigned, nodes.ClassDef): + assigned = objects.ExceptionInstance(assigned) + + yield assigned + return dict(node=self, unknown=node, assign_path=assign_path, context=context) + + +nodes.ExceptHandler.assigned_stmts = excepthandler_assigned_stmts + + +def _infer_context_manager(self, mgr, context): + try: + inferred = next(mgr.infer(context=context)) + except StopIteration as e: + raise InferenceError(node=mgr) from e + if isinstance(inferred, bases.Generator): + # Check if it is decorated with contextlib.contextmanager. + func = inferred.parent + if not func.decorators: + raise InferenceError( + "No decorators found on inferred generator %s", node=func + ) + + for decorator_node in func.decorators.nodes: + decorator = next(decorator_node.infer(context=context), None) + if isinstance(decorator, nodes.FunctionDef): + if decorator.qname() == _CONTEXTLIB_MGR: + break + else: + # It doesn't interest us. + raise InferenceError(node=func) + try: + yield next(inferred.infer_yield_types()) + except StopIteration as e: + raise InferenceError(node=func) from e + + elif isinstance(inferred, bases.Instance): + try: + enter = next(inferred.igetattr("__enter__", context=context)) + except (InferenceError, AttributeInferenceError, StopIteration) as exc: + raise InferenceError(node=inferred) from exc + if not isinstance(enter, bases.BoundMethod): + raise InferenceError(node=enter) + yield from enter.infer_call_result(self, context) + else: + raise InferenceError(node=mgr) + + +@decorators.raise_if_nothing_inferred +def with_assigned_stmts( + self: nodes.With, + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + """Infer names and other nodes from a *with* statement. + + This enables only inference for name binding in a *with* statement. + For instance, in the following code, inferring `func` will return + the `ContextManager` class, not whatever ``__enter__`` returns. + We are doing this intentionally, because we consider that the context + manager result is whatever __enter__ returns and what it is binded + using the ``as`` keyword. + + class ContextManager(object): + def __enter__(self): + return 42 + with ContextManager() as f: + pass + + # ContextManager().infer() will return ContextManager + # f.infer() will return 42. + + Arguments: + self: nodes.With + node: The target of the assignment, `as (a, b)` in `with foo as (a, b)`. 
+ context: Inference context used for caching already inferred objects + assign_path: + A list of indices, where each index specifies what item to fetch from + the inference results. + """ + try: + mgr = next(mgr for (mgr, vars) in self.items if vars == node) + except StopIteration: + return None + if assign_path is None: + yield from _infer_context_manager(self, mgr, context) + else: + for result in _infer_context_manager(self, mgr, context): + # Walk the assign_path and get the item at the final index. + obj = result + for index in assign_path: + if not hasattr(obj, "elts"): + raise InferenceError( + "Wrong type ({targets!r}) for {node!r} assignment", + node=self, + targets=node, + assign_path=assign_path, + context=context, + ) + try: + obj = obj.elts[index] + except IndexError as exc: + raise InferenceError( + "Tried to infer a nonexistent target with index {index} " + "in {node!r}.", + node=self, + targets=node, + assign_path=assign_path, + context=context, + ) from exc + except TypeError as exc: + raise InferenceError( + "Tried to unpack a non-iterable value " "in {node!r}.", + node=self, + targets=node, + assign_path=assign_path, + context=context, + ) from exc + yield obj + return dict(node=self, unknown=node, assign_path=assign_path, context=context) + + +nodes.With.assigned_stmts = with_assigned_stmts + + +@decorators.raise_if_nothing_inferred +def named_expr_assigned_stmts( + self: nodes.NamedExpr, + node: node_classes.AssignedStmtsPossibleNode, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + """Infer names and other nodes from an assignment expression""" + if self.target == node: + yield from self.value.infer(context=context) + else: + raise InferenceError( + "Cannot infer NamedExpr node {node!r}", + node=self, + assign_path=assign_path, + context=context, + ) + + +nodes.NamedExpr.assigned_stmts = named_expr_assigned_stmts + + +@decorators.yes_if_nothing_inferred +def starred_assigned_stmts( + self: nodes.Starred, + node: node_classes.AssignedStmtsPossibleNode = None, + context: Optional[InferenceContext] = None, + assign_path: Optional[List[int]] = None, +) -> Any: + """ + Arguments: + self: nodes.Starred + node: a node related to the current underlying Node. + context: Inference context used for caching already inferred objects + assign_path: + A list of indices, where each index specifies what item to fetch from + the inference results. 
+ """ + # pylint: disable=too-many-locals,too-many-statements + def _determine_starred_iteration_lookups(starred, target, lookups): + # Determine the lookups for the rhs of the iteration + itered = target.itered() + for index, element in enumerate(itered): + if ( + isinstance(element, nodes.Starred) + and element.value.name == starred.value.name + ): + lookups.append((index, len(itered))) + break + if isinstance(element, nodes.Tuple): + lookups.append((index, len(element.itered()))) + _determine_starred_iteration_lookups(starred, element, lookups) + + stmt = self.statement(future=True) + if not isinstance(stmt, (nodes.Assign, nodes.For)): + raise InferenceError( + "Statement {stmt!r} enclosing {node!r} " "must be an Assign or For node.", + node=self, + stmt=stmt, + unknown=node, + context=context, + ) + + if context is None: + context = InferenceContext() + + if isinstance(stmt, nodes.Assign): + value = stmt.value + lhs = stmt.targets[0] + if not isinstance(lhs, nodes.BaseContainer): + yield util.Uninferable + return + + if sum(1 for _ in lhs.nodes_of_class(nodes.Starred)) > 1: + raise InferenceError( + "Too many starred arguments in the " " assignment targets {lhs!r}.", + node=self, + targets=lhs, + unknown=node, + context=context, + ) + + try: + rhs = next(value.infer(context)) + except (InferenceError, StopIteration): + yield util.Uninferable + return + if rhs is util.Uninferable or not hasattr(rhs, "itered"): + yield util.Uninferable + return + + try: + elts = collections.deque(rhs.itered()) + except TypeError: + yield util.Uninferable + return + + # Unpack iteratively the values from the rhs of the assignment, + # until the find the starred node. What will remain will + # be the list of values which the Starred node will represent + # This is done in two steps, from left to right to remove + # anything before the starred node and from right to left + # to remove anything after the starred node. + + for index, left_node in enumerate(lhs.elts): + if not isinstance(left_node, nodes.Starred): + if not elts: + break + elts.popleft() + continue + lhs_elts = collections.deque(reversed(lhs.elts[index:])) + for right_node in lhs_elts: + if not isinstance(right_node, nodes.Starred): + if not elts: + break + elts.pop() + continue + + # We're done unpacking. 
+ packed = nodes.List( + ctx=Context.Store, + parent=self, + lineno=lhs.lineno, + col_offset=lhs.col_offset, + ) + packed.postinit(elts=list(elts)) + yield packed + break + + if isinstance(stmt, nodes.For): + try: + inferred_iterable = next(stmt.iter.infer(context=context)) + except (InferenceError, StopIteration): + yield util.Uninferable + return + if inferred_iterable is util.Uninferable or not hasattr( + inferred_iterable, "itered" + ): + yield util.Uninferable + return + try: + itered = inferred_iterable.itered() + except TypeError: + yield util.Uninferable + return + + target = stmt.target + + if not isinstance(target, nodes.Tuple): + raise InferenceError( + "Could not make sense of this, the target must be a tuple", + context=context, + ) + + lookups = [] + _determine_starred_iteration_lookups(self, target, lookups) + if not lookups: + raise InferenceError( + "Could not make sense of this, needs at least a lookup", context=context + ) + + # Make the last lookup a slice, since that what we want for a Starred node + last_element_index, last_element_length = lookups[-1] + is_starred_last = last_element_index == (last_element_length - 1) + + lookup_slice = slice( + last_element_index, + None if is_starred_last else (last_element_length - last_element_index), + ) + lookups[-1] = lookup_slice + + for element in itered: + + # We probably want to infer the potential values *for each* element in an + # iterable, but we can't infer a list of all values, when only a list of + # step values are expected: + # + # for a, *b in [...]: + # b + # + # *b* should now point to just the elements at that particular iteration step, + # which astroid can't know about. + + found_element = None + for lookup in lookups: + if not hasattr(element, "itered"): + break + if not isinstance(lookup, slice): + # Grab just the index, not the whole length + lookup = lookup[0] + try: + itered_inner_element = element.itered() + element = itered_inner_element[lookup] + except IndexError: + break + except TypeError: + # Most likely the itered() call failed, cannot make sense of this + yield util.Uninferable + return + else: + found_element = element + + unpacked = nodes.List( + ctx=Context.Store, + parent=self, + lineno=self.lineno, + col_offset=self.col_offset, + ) + unpacked.postinit(elts=found_element or []) + yield unpacked + return + + yield util.Uninferable + + +nodes.Starred.assigned_stmts = starred_assigned_stmts + + +@decorators.yes_if_nothing_inferred +def match_mapping_assigned_stmts( + self: nodes.MatchMapping, + node: nodes.AssignName, + context: Optional[InferenceContext] = None, + assign_path: None = None, +) -> Generator[nodes.NodeNG, None, None]: + """Return empty generator (return -> raises StopIteration) so inferred value + is Uninferable. + """ + return + yield + + +nodes.MatchMapping.assigned_stmts = match_mapping_assigned_stmts + + +@decorators.yes_if_nothing_inferred +def match_star_assigned_stmts( + self: nodes.MatchStar, + node: nodes.AssignName, + context: Optional[InferenceContext] = None, + assign_path: None = None, +) -> Generator[nodes.NodeNG, None, None]: + """Return empty generator (return -> raises StopIteration) so inferred value + is Uninferable. 
+ """ + return + yield + + +nodes.MatchStar.assigned_stmts = match_star_assigned_stmts + + +@decorators.yes_if_nothing_inferred +def match_as_assigned_stmts( + self: nodes.MatchAs, + node: nodes.AssignName, + context: Optional[InferenceContext] = None, + assign_path: None = None, +) -> Generator[nodes.NodeNG, None, None]: + """Infer MatchAs as the Match subject if it's the only MatchCase pattern + else raise StopIteration to yield Uninferable. + """ + if ( + isinstance(self.parent, nodes.MatchCase) + and isinstance(self.parent.parent, nodes.Match) + and self.pattern is None + ): + yield self.parent.parent.subject + + +nodes.MatchAs.assigned_stmts = match_as_assigned_stmts diff --git a/myenv/lib/python3.9/site-packages/astroid/raw_building.py b/myenv/lib/python3.9/site-packages/astroid/raw_building.py new file mode 100644 index 0000000..bcc414b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/raw_building.py @@ -0,0 +1,508 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""this module contains a set of functions to create astroid trees from scratch +(build_* functions) or from living object (object_build_* functions) +""" + +import builtins +import inspect +import os +import sys +import types +import warnings +from typing import Iterable, List, Optional + +from astroid import bases, nodes +from astroid.manager import AstroidManager +from astroid.nodes import node_classes + +# the keys of CONST_CLS eg python builtin types +_CONSTANTS = tuple(node_classes.CONST_CLS) +_BUILTINS = vars(builtins) +TYPE_NONE = type(None) +TYPE_NOTIMPLEMENTED = type(NotImplemented) +TYPE_ELLIPSIS = type(...) 
+ + +def _attach_local_node(parent, node, name): + node.name = name # needed by add_local_node + parent.add_local_node(node) + + +def _add_dunder_class(func, member): + """Add a __class__ member to the given func node, if we can determine it.""" + python_cls = member.__class__ + cls_name = getattr(python_cls, "__name__", None) + if not cls_name: + return + cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__] + ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__) + func.instance_attrs["__class__"] = [ast_klass] + + +_marker = object() + + +def attach_dummy_node(node, name, runtime_object=_marker): + """create a dummy node and register it in the locals of the given + node with the specified name + """ + enode = nodes.EmptyNode() + enode.object = runtime_object + _attach_local_node(node, enode, name) + + +def _has_underlying_object(self): + return self.object is not None and self.object is not _marker + + +nodes.EmptyNode.has_underlying_object = _has_underlying_object + + +def attach_const_node(node, name, value): + """create a Const node and register it in the locals of the given + node with the specified name + """ + if name not in node.special_attributes: + _attach_local_node(node, nodes.const_factory(value), name) + + +def attach_import_node(node, modname, membername): + """create a ImportFrom node and register it in the locals of the given + node with the specified name + """ + from_node = nodes.ImportFrom(modname, [(membername, None)]) + _attach_local_node(node, from_node, membername) + + +def build_module(name: str, doc: Optional[str] = None) -> nodes.Module: + """create and initialize an astroid Module node""" + node = nodes.Module(name, pure_python=False, package=False) + node.postinit( + body=[], + doc_node=nodes.Const(value=doc) if doc else None, + ) + return node + + +def build_class( + name: str, basenames: Iterable[str] = (), doc: Optional[str] = None +) -> nodes.ClassDef: + """Create and initialize an astroid ClassDef node.""" + node = nodes.ClassDef(name) + node.postinit( + bases=[nodes.Name(name=base, parent=node) for base in basenames], + body=[], + decorators=None, + doc_node=nodes.Const(value=doc) if doc else None, + ) + return node + + +def build_function( + name, + args: Optional[List[str]] = None, + posonlyargs: Optional[List[str]] = None, + defaults=None, + doc: Optional[str] = None, + kwonlyargs: Optional[List[str]] = None, +) -> nodes.FunctionDef: + """create and initialize an astroid FunctionDef node""" + # first argument is now a list of decorators + func = nodes.FunctionDef(name) + argsnode = nodes.Arguments(parent=func) + argsnode.postinit( + args=[nodes.AssignName(name=arg, parent=argsnode) for arg in args or ()], + defaults=[], + kwonlyargs=[ + nodes.AssignName(name=arg, parent=argsnode) for arg in kwonlyargs or () + ], + kw_defaults=[], + annotations=[], + posonlyargs=[ + nodes.AssignName(name=arg, parent=argsnode) for arg in posonlyargs or () + ], + ) + func.postinit( + args=argsnode, + body=[], + doc_node=nodes.Const(value=doc) if doc else None, + ) + for default in defaults or (): + argsnode.defaults.append(nodes.const_factory(default)) + argsnode.defaults[-1].parent = argsnode + if args: + register_arguments(func) + return func + + +def build_from_import(fromname, names): + """create and initialize an astroid ImportFrom import statement""" + return nodes.ImportFrom(fromname, [(name, None) for name in names]) + + +def register_arguments(func, args=None): + """add given arguments to local + + args is a list that may contains 
nested lists + (i.e. def func(a, (b, c, d)): ...) + """ + if args is None: + args = func.args.args + if func.args.vararg: + func.set_local(func.args.vararg, func.args) + if func.args.kwarg: + func.set_local(func.args.kwarg, func.args) + for arg in args: + if isinstance(arg, nodes.AssignName): + func.set_local(arg.name, arg) + else: + register_arguments(func, arg.elts) + + +def object_build_class(node, member, localname): + """create astroid for a living class object""" + basenames = [base.__name__ for base in member.__bases__] + return _base_class_object_build(node, member, basenames, localname=localname) + + +def object_build_function(node, member, localname): + """create astroid for a living function object""" + signature = inspect.signature(member) + args = [] + defaults = [] + posonlyargs = [] + kwonlyargs = [] + for param_name, param in signature.parameters.items(): + if param.kind == inspect.Parameter.POSITIONAL_ONLY: + posonlyargs.append(param_name) + elif param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: + args.append(param_name) + elif param.kind == inspect.Parameter.VAR_POSITIONAL: + args.append(param_name) + elif param.kind == inspect.Parameter.VAR_KEYWORD: + args.append(param_name) + elif param.kind == inspect.Parameter.KEYWORD_ONLY: + kwonlyargs.append(param_name) + if param.default is not inspect._empty: + defaults.append(param.default) + func = build_function( + getattr(member, "__name__", None) or localname, + args, + posonlyargs, + defaults, + member.__doc__, + ) + node.add_local_node(func, localname) + + +def object_build_datadescriptor(node, member, name): + """create astroid for a living data descriptor object""" + return _base_class_object_build(node, member, [], name) + + +def object_build_methoddescriptor(node, member, localname): + """create astroid for a living method descriptor object""" + # FIXME get arguments ? + func = build_function( + getattr(member, "__name__", None) or localname, doc=member.__doc__ + ) + # set node's arguments to None to notice that we have no information, not + # and empty argument list + func.args.args = None + node.add_local_node(func, localname) + _add_dunder_class(func, member) + + +def _base_class_object_build(node, member, basenames, name=None, localname=None): + """create astroid for a living class object, with a given set of base names + (e.g. ancestors) + """ + klass = build_class( + name or getattr(member, "__name__", None) or localname, + basenames, + member.__doc__, + ) + klass._newstyle = isinstance(member, type) + node.add_local_node(klass, localname) + try: + # limit the instantiation trick since it's too dangerous + # (such as infinite test execution...) + # this at least resolves common case such as Exception.args, + # OSError.errno + if issubclass(member, Exception): + instdict = member().__dict__ + else: + raise TypeError + except TypeError: + pass + else: + for item_name, obj in instdict.items(): + valnode = nodes.EmptyNode() + valnode.object = obj + valnode.parent = klass + valnode.lineno = 1 + klass.instance_attrs[item_name] = [valnode] + return klass + + +def _build_from_function(node, name, member, module): + # verify this is not an imported function + try: + code = member.__code__ + except AttributeError: + # Some implementations don't provide the code object, + # such as Jython. 
+ code = None + filename = getattr(code, "co_filename", None) + if filename is None: + assert isinstance(member, object) + object_build_methoddescriptor(node, member, name) + elif filename != getattr(module, "__file__", None): + attach_dummy_node(node, name, member) + else: + object_build_function(node, member, name) + + +def _safe_has_attribute(obj, member): + try: + return hasattr(obj, member) + except Exception: # pylint: disable=broad-except + return False + + +class InspectBuilder: + """class for building nodes from living object + + this is actually a really minimal representation, including only Module, + FunctionDef and ClassDef nodes and some others as guessed. + """ + + def __init__(self, manager_instance=None): + self._manager = manager_instance or AstroidManager() + self._done = {} + self._module = None + + def inspect_build( + self, + module: types.ModuleType, + modname: Optional[str] = None, + path: Optional[str] = None, + ) -> nodes.Module: + """build astroid from a living module (i.e. using inspect) + this is used when there is no python source code available (either + because it's a built-in module or because the .py is not available) + """ + self._module = module + if modname is None: + modname = module.__name__ + try: + node = build_module(modname, module.__doc__) + except AttributeError: + # in jython, java modules have no __doc__ (see #109562) + node = build_module(modname) + node.file = node.path = os.path.abspath(path) if path else path + node.name = modname + self._manager.cache_module(node) + node.package = hasattr(module, "__path__") + self._done = {} + self.object_build(node, module) + return node + + def object_build(self, node, obj): + """recursive method which create a partial ast from real objects + (only function, class, and method are handled) + """ + if obj in self._done: + return self._done[obj] + self._done[obj] = node + for name in dir(obj): + try: + with warnings.catch_warnings(): + warnings.simplefilter("error") + member = getattr(obj, name) + except (AttributeError, DeprecationWarning): + # damned ExtensionClass.Base, I know you're there ! + attach_dummy_node(node, name) + continue + if inspect.ismethod(member): + member = member.__func__ + if inspect.isfunction(member): + _build_from_function(node, name, member, self._module) + elif inspect.isbuiltin(member): + if self.imported_member(node, member, name): + continue + object_build_methoddescriptor(node, member, name) + elif inspect.isclass(member): + if self.imported_member(node, member, name): + continue + if member in self._done: + class_node = self._done[member] + if class_node not in node.locals.get(name, ()): + node.add_local_node(class_node, name) + else: + class_node = object_build_class(node, member, name) + # recursion + self.object_build(class_node, member) + if name == "__class__" and class_node.parent is None: + class_node.parent = self._done[self._module] + elif inspect.ismethoddescriptor(member): + assert isinstance(member, object) + object_build_methoddescriptor(node, member, name) + elif inspect.isdatadescriptor(member): + assert isinstance(member, object) + object_build_datadescriptor(node, member, name) + elif isinstance(member, _CONSTANTS): + attach_const_node(node, name, member) + elif inspect.isroutine(member): + # This should be called for Jython, where some builtin + # methods aren't caught by isbuiltin branch. 
+ _build_from_function(node, name, member, self._module) + elif _safe_has_attribute(member, "__all__"): + module = build_module(name) + _attach_local_node(node, module, name) + # recursion + self.object_build(module, member) + else: + # create an empty node so that the name is actually defined + attach_dummy_node(node, name, member) + return None + + def imported_member(self, node, member, name: str) -> bool: + """verify this is not an imported class or handle it""" + # /!\ some classes like ExtensionClass doesn't have a __module__ + # attribute ! Also, this may trigger an exception on badly built module + # (see http://www.logilab.org/ticket/57299 for instance) + try: + modname = getattr(member, "__module__", None) + except TypeError: + modname = None + if modname is None: + if name in {"__new__", "__subclasshook__"}: + # Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14) + # >>> print object.__new__.__module__ + # None + modname = builtins.__name__ + else: + attach_dummy_node(node, name, member) + return True + + # On PyPy during bootstrapping we infer _io while _module is + # builtins. In CPython _io names itself io, see http://bugs.python.org/issue18602 + # Therefore, this basically checks whether we are not in PyPy. + if modname == "_io" and not self._module.__name__ == "builtins": + return False + + real_name = {"gtk": "gtk_gtk"}.get(modname, modname) + + if real_name != self._module.__name__: + # check if it sounds valid and then add an import node, else use a + # dummy node + try: + getattr(sys.modules[modname], name) + except (KeyError, AttributeError): + attach_dummy_node(node, name, member) + else: + attach_import_node(node, modname, name) + return True + return False + + +# astroid bootstrapping ###################################################### + +_CONST_PROXY = {} + + +def _set_proxied(const): + # TODO : find a nicer way to handle this situation; + return _CONST_PROXY[const.value.__class__] + + +def _astroid_bootstrapping(): + """astroid bootstrapping the builtins module""" + # this boot strapping is necessary since we need the Const nodes to + # inspect_build builtins, and then we can proxy Const + builder = InspectBuilder() + astroid_builtin = builder.inspect_build(builtins) + + for cls, node_cls in node_classes.CONST_CLS.items(): + if cls is TYPE_NONE: + proxy = build_class("NoneType") + proxy.parent = astroid_builtin + elif cls is TYPE_NOTIMPLEMENTED: + proxy = build_class("NotImplementedType") + proxy.parent = astroid_builtin + elif cls is TYPE_ELLIPSIS: + proxy = build_class("Ellipsis") + proxy.parent = astroid_builtin + else: + proxy = astroid_builtin.getattr(cls.__name__)[0] + if cls in (dict, list, set, tuple): + node_cls._proxied = proxy + else: + _CONST_PROXY[cls] = proxy + + # Set the builtin module as parent for some builtins. 
+ nodes.Const._proxied = property(_set_proxied) + + _GeneratorType = nodes.ClassDef(types.GeneratorType.__name__) + _GeneratorType.parent = astroid_builtin + generator_doc_node = ( + nodes.Const(value=types.GeneratorType.__doc__) + if types.GeneratorType.__doc__ + else None + ) + _GeneratorType.postinit( + bases=[], + body=[], + decorators=None, + doc_node=generator_doc_node, + ) + bases.Generator._proxied = _GeneratorType + builder.object_build(bases.Generator._proxied, types.GeneratorType) + + if hasattr(types, "AsyncGeneratorType"): + _AsyncGeneratorType = nodes.ClassDef(types.AsyncGeneratorType.__name__) + _AsyncGeneratorType.parent = astroid_builtin + async_generator_doc_node = ( + nodes.Const(value=types.AsyncGeneratorType.__doc__) + if types.AsyncGeneratorType.__doc__ + else None + ) + _AsyncGeneratorType.postinit( + bases=[], + body=[], + decorators=None, + doc_node=async_generator_doc_node, + ) + bases.AsyncGenerator._proxied = _AsyncGeneratorType + builder.object_build(bases.AsyncGenerator._proxied, types.AsyncGeneratorType) + builtin_types = ( + types.GetSetDescriptorType, + types.GeneratorType, + types.MemberDescriptorType, + TYPE_NONE, + TYPE_NOTIMPLEMENTED, + types.FunctionType, + types.MethodType, + types.BuiltinFunctionType, + types.ModuleType, + types.TracebackType, + ) + for _type in builtin_types: + if _type.__name__ not in astroid_builtin: + klass = nodes.ClassDef(_type.__name__) + klass.parent = astroid_builtin + klass.postinit( + bases=[], + body=[], + decorators=None, + doc_node=nodes.Const(value=_type.__doc__) if _type.__doc__ else None, + ) + builder.object_build(klass, _type) + astroid_builtin[_type.__name__] = klass + + +_astroid_bootstrapping() diff --git a/myenv/lib/python3.9/site-packages/astroid/rebuilder.py b/myenv/lib/python3.9/site-packages/astroid/rebuilder.py new file mode 100644 index 0000000..a04d973 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/rebuilder.py @@ -0,0 +1,2119 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""this module contains utilities for rebuilding an _ast tree in +order to get a single Astroid representation +""" + +import ast +import sys +import token +import tokenize +from io import StringIO +from tokenize import TokenInfo, generate_tokens +from typing import ( + Callable, + Dict, + Generator, + List, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +from astroid import nodes +from astroid._ast import ParserModule, get_parser_module, parse_function_type_comment +from astroid.const import IS_PYPY, PY36, PY38, PY38_PLUS, PY39_PLUS, Context +from astroid.manager import AstroidManager +from astroid.nodes import NodeNG +from astroid.nodes.utils import Position + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + + +REDIRECT: Final[Dict[str, str]] = { + "arguments": "Arguments", + "comprehension": "Comprehension", + "ListCompFor": "Comprehension", + "GenExprFor": "Comprehension", + "excepthandler": "ExceptHandler", + "keyword": "Keyword", + "match_case": "MatchCase", +} + + +T_Doc = TypeVar( + "T_Doc", + "ast.Module", + "ast.ClassDef", + Union["ast.FunctionDef", "ast.AsyncFunctionDef"], +) +T_Function = TypeVar("T_Function", nodes.FunctionDef, nodes.AsyncFunctionDef) +T_For = TypeVar("T_For", nodes.For, nodes.AsyncFor) +T_With = TypeVar("T_With", 
nodes.With, nodes.AsyncWith) +NodesWithDocsType = Union[nodes.Module, nodes.ClassDef, nodes.FunctionDef] + + +# noinspection PyMethodMayBeStatic +class TreeRebuilder: + """Rebuilds the _ast tree to become an Astroid tree""" + + def __init__( + self, + manager: AstroidManager, + parser_module: Optional[ParserModule] = None, + data: Optional[str] = None, + ) -> None: + self._manager = manager + self._data = data.split("\n") if data else None + self._global_names: List[Dict[str, List[nodes.Global]]] = [] + self._import_from_nodes: List[nodes.ImportFrom] = [] + self._delayed_assattr: List[nodes.AssignAttr] = [] + self._visit_meths: Dict[ + Type["ast.AST"], Callable[["ast.AST", NodeNG], NodeNG] + ] = {} + + if parser_module is None: + self._parser_module = get_parser_module() + else: + self._parser_module = parser_module + self._module = self._parser_module.module + + def _get_doc(self, node: T_Doc) -> Tuple[T_Doc, Optional["ast.Constant | ast.Str"]]: + """Return the doc ast node.""" + try: + if node.body and isinstance(node.body[0], self._module.Expr): + first_value = node.body[0].value + if isinstance(first_value, self._module.Str) or ( + PY38_PLUS + and isinstance(first_value, self._module.Constant) + and isinstance(first_value.value, str) + ): + doc_ast_node = first_value + node.body = node.body[1:] + # The ast parser of python < 3.8 sets col_offset of multi-line strings to -1 + # as it is unable to determine the value correctly. We reset this to None. + if doc_ast_node.col_offset == -1: + doc_ast_node.col_offset = None + return node, doc_ast_node + except IndexError: + pass # ast built from scratch + return node, None + + def _get_context( + self, + node: Union[ + "ast.Attribute", + "ast.List", + "ast.Name", + "ast.Subscript", + "ast.Starred", + "ast.Tuple", + ], + ) -> Context: + return self._parser_module.context_classes.get(type(node.ctx), Context.Load) + + def _get_position_info( + self, + node: Union["ast.ClassDef", "ast.FunctionDef", "ast.AsyncFunctionDef"], + parent: Union[nodes.ClassDef, nodes.FunctionDef, nodes.AsyncFunctionDef], + ) -> Optional[Position]: + """Return position information for ClassDef and FunctionDef nodes. + + In contrast to AST positions, these only include the actual keyword(s) + and the class / function name. + + >>> @decorator + >>> async def some_func(var: int) -> None: + >>> ^^^^^^^^^^^^^^^^^^^ + """ + if not self._data: + return None + end_lineno: Optional[int] = getattr(node, "end_lineno", None) + if node.body: + end_lineno = node.body[0].lineno + # pylint: disable-next=unsubscriptable-object + data = "\n".join(self._data[node.lineno - 1 : end_lineno]) + + start_token: Optional[TokenInfo] = None + keyword_tokens: Tuple[int, ...] = (token.NAME,) + if isinstance(parent, nodes.AsyncFunctionDef): + search_token = "async" + if PY36: + # In Python 3.6, the token type for 'async' was 'ASYNC' + # In Python 3.7, the type was changed to 'NAME' and 'ASYNC' removed + # Python 3.8 added it back. However, if we use it unconditionally + # we would break 3.7. 
+ keyword_tokens = (token.NAME, token.ASYNC) + elif isinstance(parent, nodes.FunctionDef): + search_token = "def" + else: + search_token = "class" + + for t in generate_tokens(StringIO(data).readline): + if ( + start_token is not None + and t.type == token.NAME + and t.string == node.name + ): + break + if t.type in keyword_tokens: + if t.string == search_token: + start_token = t + continue + if t.string in {"def"}: + continue + start_token = None + else: + return None + + # pylint: disable=undefined-loop-variable + return Position( + lineno=node.lineno + start_token.start[0] - 1, + col_offset=start_token.start[1], + end_lineno=node.lineno + t.end[0] - 1, + end_col_offset=t.end[1], + ) + + def _fix_doc_node_position(self, node: NodesWithDocsType) -> None: + """Fix start and end position of doc nodes for Python < 3.8.""" + if not self._data or not node.doc_node or node.lineno is None: + return + if PY38_PLUS: + return + + lineno = node.lineno or 1 # lineno of modules is 0 + end_range: Optional[int] = node.doc_node.lineno + if IS_PYPY and not PY39_PLUS: + end_range = None + # pylint: disable-next=unsubscriptable-object + data = "\n".join(self._data[lineno - 1 : end_range]) + + found_start, found_end = False, False + open_brackets = 0 + skip_token: Set[int] = {token.NEWLINE, token.INDENT} + if PY36: + skip_token.update((tokenize.NL, tokenize.COMMENT)) + else: + # token.NL and token.COMMENT were added in 3.7 + skip_token.update((token.NL, token.COMMENT)) + + if isinstance(node, nodes.Module): + found_end = True + + for t in generate_tokens(StringIO(data).readline): + if found_end is False: + if ( + found_start is False + and t.type == token.NAME + and t.string in {"def", "class"} + ): + found_start = True + elif found_start is True and t.type == token.OP: + if t.exact_type == token.COLON and open_brackets == 0: + found_end = True + elif t.exact_type == token.LPAR: + open_brackets += 1 + elif t.exact_type == token.RPAR: + open_brackets -= 1 + continue + if t.type in skip_token: + continue + if t.type == token.STRING: + break + return + else: + return + + # pylint: disable=undefined-loop-variable + node.doc_node.lineno = lineno + t.start[0] - 1 + node.doc_node.col_offset = t.start[1] + node.doc_node.end_lineno = lineno + t.end[0] - 1 + node.doc_node.end_col_offset = t.end[1] + + def _reset_end_lineno(self, newnode: nodes.NodeNG) -> None: + """Reset end_lineno and end_col_offset attributes for PyPy 3.8. + + For some nodes, these are either set to -1 or only partially assigned. + To keep consistency across astroid and pylint, reset all. + + This has been fixed in PyPy 3.9. 
+ For reference, an (incomplete) list of nodes with issues: + - ClassDef - For + - FunctionDef - While + - Call - If + - Decorators - TryExcept + - With - TryFinally + - Assign + """ + newnode.end_lineno = None + newnode.end_col_offset = None + for child_node in newnode.get_children(): + self._reset_end_lineno(child_node) + + def visit_module( + self, node: "ast.Module", modname: str, modpath: str, package: bool + ) -> nodes.Module: + """visit a Module node by returning a fresh instance of it + + Note: Method not called by 'visit' + """ + node, doc_ast_node = self._get_doc(node) + newnode = nodes.Module( + name=modname, + file=modpath, + path=[modpath], + package=package, + parent=None, + ) + newnode.postinit( + [self.visit(child, newnode) for child in node.body], + doc_node=self.visit(doc_ast_node, newnode), + ) + self._fix_doc_node_position(newnode) + if IS_PYPY and PY38: + self._reset_end_lineno(newnode) + return newnode + + @overload + def visit(self, node: "ast.arg", parent: NodeNG) -> nodes.AssignName: + ... + + @overload + def visit(self, node: "ast.arguments", parent: NodeNG) -> nodes.Arguments: + ... + + @overload + def visit(self, node: "ast.Assert", parent: NodeNG) -> nodes.Assert: + ... + + @overload + def visit( + self, node: "ast.AsyncFunctionDef", parent: NodeNG + ) -> nodes.AsyncFunctionDef: + ... + + @overload + def visit(self, node: "ast.AsyncFor", parent: NodeNG) -> nodes.AsyncFor: + ... + + @overload + def visit(self, node: "ast.Await", parent: NodeNG) -> nodes.Await: + ... + + @overload + def visit(self, node: "ast.AsyncWith", parent: NodeNG) -> nodes.AsyncWith: + ... + + @overload + def visit(self, node: "ast.Assign", parent: NodeNG) -> nodes.Assign: + ... + + @overload + def visit(self, node: "ast.AnnAssign", parent: NodeNG) -> nodes.AnnAssign: + ... + + @overload + def visit(self, node: "ast.AugAssign", parent: NodeNG) -> nodes.AugAssign: + ... + + @overload + def visit(self, node: "ast.BinOp", parent: NodeNG) -> nodes.BinOp: + ... + + @overload + def visit(self, node: "ast.BoolOp", parent: NodeNG) -> nodes.BoolOp: + ... + + @overload + def visit(self, node: "ast.Break", parent: NodeNG) -> nodes.Break: + ... + + @overload + def visit(self, node: "ast.Call", parent: NodeNG) -> nodes.Call: + ... + + @overload + def visit(self, node: "ast.ClassDef", parent: NodeNG) -> nodes.ClassDef: + ... + + @overload + def visit(self, node: "ast.Continue", parent: NodeNG) -> nodes.Continue: + ... + + @overload + def visit(self, node: "ast.Compare", parent: NodeNG) -> nodes.Compare: + ... + + @overload + def visit(self, node: "ast.comprehension", parent: NodeNG) -> nodes.Comprehension: + ... + + @overload + def visit(self, node: "ast.Delete", parent: NodeNG) -> nodes.Delete: + ... + + @overload + def visit(self, node: "ast.Dict", parent: NodeNG) -> nodes.Dict: + ... + + @overload + def visit(self, node: "ast.DictComp", parent: NodeNG) -> nodes.DictComp: + ... + + @overload + def visit(self, node: "ast.Expr", parent: NodeNG) -> nodes.Expr: + ... + + @overload + def visit(self, node: "ast.ExceptHandler", parent: NodeNG) -> nodes.ExceptHandler: + ... + + @overload + def visit(self, node: "ast.For", parent: NodeNG) -> nodes.For: + ... + + @overload + def visit(self, node: "ast.ImportFrom", parent: NodeNG) -> nodes.ImportFrom: + ... + + @overload + def visit(self, node: "ast.FunctionDef", parent: NodeNG) -> nodes.FunctionDef: + ... + + @overload + def visit(self, node: "ast.GeneratorExp", parent: NodeNG) -> nodes.GeneratorExp: + ... 
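
A minimal sketch of the behaviour implemented by _get_doc and visit_module above (assuming a standard astroid installation): the leading string literal is stripped from the rebuilt body and exposed as doc_node instead.

# Sketch: the module docstring is lifted out of Module.body into doc_node.
import astroid

mod = astroid.parse('"""Top-level docstring."""\nX = 1\n')
print(mod.doc_node.value)                      # Top-level docstring.
print([type(n).__name__ for n in mod.body])    # ['Assign']
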
+ + @overload + def visit(self, node: "ast.Attribute", parent: NodeNG) -> nodes.Attribute: + ... + + @overload + def visit(self, node: "ast.Global", parent: NodeNG) -> nodes.Global: + ... + + @overload + def visit(self, node: "ast.If", parent: NodeNG) -> nodes.If: + ... + + @overload + def visit(self, node: "ast.IfExp", parent: NodeNG) -> nodes.IfExp: + ... + + @overload + def visit(self, node: "ast.Import", parent: NodeNG) -> nodes.Import: + ... + + @overload + def visit(self, node: "ast.JoinedStr", parent: NodeNG) -> nodes.JoinedStr: + ... + + @overload + def visit(self, node: "ast.FormattedValue", parent: NodeNG) -> nodes.FormattedValue: + ... + + if sys.version_info >= (3, 8): + + @overload + def visit(self, node: "ast.NamedExpr", parent: NodeNG) -> nodes.NamedExpr: + ... + + if sys.version_info < (3, 9): + # Not used in Python 3.9+ + @overload + def visit(self, node: "ast.ExtSlice", parent: nodes.Subscript) -> nodes.Tuple: + ... + + @overload + def visit(self, node: "ast.Index", parent: nodes.Subscript) -> NodeNG: + ... + + @overload + def visit(self, node: "ast.keyword", parent: NodeNG) -> nodes.Keyword: + ... + + @overload + def visit(self, node: "ast.Lambda", parent: NodeNG) -> nodes.Lambda: + ... + + @overload + def visit(self, node: "ast.List", parent: NodeNG) -> nodes.List: + ... + + @overload + def visit(self, node: "ast.ListComp", parent: NodeNG) -> nodes.ListComp: + ... + + @overload + def visit( + self, node: "ast.Name", parent: NodeNG + ) -> Union[nodes.Name, nodes.Const, nodes.AssignName, nodes.DelName]: + ... + + @overload + def visit(self, node: "ast.Nonlocal", parent: NodeNG) -> nodes.Nonlocal: + ... + + if sys.version_info < (3, 8): + # Not used in Python 3.8+ + @overload + def visit(self, node: "ast.Ellipsis", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.NameConstant", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.Str", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.Bytes", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.Num", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.Constant", parent: NodeNG) -> nodes.Const: + ... + + @overload + def visit(self, node: "ast.Pass", parent: NodeNG) -> nodes.Pass: + ... + + @overload + def visit(self, node: "ast.Raise", parent: NodeNG) -> nodes.Raise: + ... + + @overload + def visit(self, node: "ast.Return", parent: NodeNG) -> nodes.Return: + ... + + @overload + def visit(self, node: "ast.Set", parent: NodeNG) -> nodes.Set: + ... + + @overload + def visit(self, node: "ast.SetComp", parent: NodeNG) -> nodes.SetComp: + ... + + @overload + def visit(self, node: "ast.Slice", parent: nodes.Subscript) -> nodes.Slice: + ... + + @overload + def visit(self, node: "ast.Subscript", parent: NodeNG) -> nodes.Subscript: + ... + + @overload + def visit(self, node: "ast.Starred", parent: NodeNG) -> nodes.Starred: + ... + + @overload + def visit( + self, node: "ast.Try", parent: NodeNG + ) -> Union[nodes.TryExcept, nodes.TryFinally]: + ... + + @overload + def visit(self, node: "ast.Tuple", parent: NodeNG) -> nodes.Tuple: + ... + + @overload + def visit(self, node: "ast.UnaryOp", parent: NodeNG) -> nodes.UnaryOp: + ... + + @overload + def visit(self, node: "ast.While", parent: NodeNG) -> nodes.While: + ... + + @overload + def visit(self, node: "ast.With", parent: NodeNG) -> nodes.With: + ... + + @overload + def visit(self, node: "ast.Yield", parent: NodeNG) -> nodes.Yield: + ... 
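
These overloads only narrow the return type of visit; the actual dispatch, defined further below, looks up a visit_* method from the ast class name via the REDIRECT table near the top of this module. A minimal sketch of the observable effect, assuming a standard astroid installation:

# Sketch: each ast node kind is rebuilt into the matching astroid node class.
import astroid

print(type(astroid.extract_node("a if b else c")).__name__)    # IfExp
print(type(astroid.extract_node("[x for x in y]")).__name__)   # ListComp
print(type(astroid.extract_node("x += 1")).__name__)           # AugAssign
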
+ + @overload + def visit(self, node: "ast.YieldFrom", parent: NodeNG) -> nodes.YieldFrom: + ... + + if sys.version_info >= (3, 10): + + @overload + def visit(self, node: "ast.Match", parent: NodeNG) -> nodes.Match: + ... + + @overload + def visit(self, node: "ast.match_case", parent: NodeNG) -> nodes.MatchCase: + ... + + @overload + def visit(self, node: "ast.MatchValue", parent: NodeNG) -> nodes.MatchValue: + ... + + @overload + def visit( + self, node: "ast.MatchSingleton", parent: NodeNG + ) -> nodes.MatchSingleton: + ... + + @overload + def visit( + self, node: "ast.MatchSequence", parent: NodeNG + ) -> nodes.MatchSequence: + ... + + @overload + def visit(self, node: "ast.MatchMapping", parent: NodeNG) -> nodes.MatchMapping: + ... + + @overload + def visit(self, node: "ast.MatchClass", parent: NodeNG) -> nodes.MatchClass: + ... + + @overload + def visit(self, node: "ast.MatchStar", parent: NodeNG) -> nodes.MatchStar: + ... + + @overload + def visit(self, node: "ast.MatchAs", parent: NodeNG) -> nodes.MatchAs: + ... + + @overload + def visit(self, node: "ast.MatchOr", parent: NodeNG) -> nodes.MatchOr: + ... + + @overload + def visit(self, node: "ast.pattern", parent: NodeNG) -> nodes.Pattern: + ... + + @overload + def visit(self, node: "ast.AST", parent: NodeNG) -> NodeNG: + ... + + @overload + def visit(self, node: None, parent: NodeNG) -> None: + ... + + def visit(self, node: Optional["ast.AST"], parent: NodeNG) -> Optional[NodeNG]: + if node is None: + return None + cls = node.__class__ + if cls in self._visit_meths: + visit_method = self._visit_meths[cls] + else: + cls_name = cls.__name__ + visit_name = "visit_" + REDIRECT.get(cls_name, cls_name).lower() + visit_method = getattr(self, visit_name) + self._visit_meths[cls] = visit_method + return visit_method(node, parent) + + def _save_assignment(self, node: Union[nodes.AssignName, nodes.DelName]) -> None: + """save assignment situation since node.parent is not available yet""" + if self._global_names and node.name in self._global_names[-1]: + node.root().set_local(node.name, node) + else: + assert node.parent + node.parent.set_local(node.name, node) + + def visit_arg(self, node: "ast.arg", parent: NodeNG) -> nodes.AssignName: + """visit an arg node by returning a fresh AssName instance""" + return self.visit_assignname(node, parent, node.arg) + + def visit_arguments(self, node: "ast.arguments", parent: NodeNG) -> nodes.Arguments: + """visit an Arguments node by returning a fresh instance of it""" + vararg: Optional[str] = None + kwarg: Optional[str] = None + newnode = nodes.Arguments( + node.vararg.arg if node.vararg else None, + node.kwarg.arg if node.kwarg else None, + parent, + ) + args = [self.visit(child, newnode) for child in node.args] + defaults = [self.visit(child, newnode) for child in node.defaults] + varargannotation: Optional[NodeNG] = None + kwargannotation: Optional[NodeNG] = None + posonlyargs: List[nodes.AssignName] = [] + if node.vararg: + vararg = node.vararg.arg + varargannotation = self.visit(node.vararg.annotation, newnode) + if node.kwarg: + kwarg = node.kwarg.arg + kwargannotation = self.visit(node.kwarg.annotation, newnode) + + if PY38: + # In Python 3.8 'end_lineno' and 'end_col_offset' + # for 'kwonlyargs' don't include the annotation. 
+ for arg in node.kwonlyargs: + if arg.annotation is not None: + arg.end_lineno = arg.annotation.end_lineno + arg.end_col_offset = arg.annotation.end_col_offset + + kwonlyargs = [self.visit(child, newnode) for child in node.kwonlyargs] + kw_defaults = [self.visit(child, newnode) for child in node.kw_defaults] + annotations = [self.visit(arg.annotation, newnode) for arg in node.args] + kwonlyargs_annotations = [ + self.visit(arg.annotation, newnode) for arg in node.kwonlyargs + ] + + posonlyargs_annotations: List[Optional[NodeNG]] = [] + if PY38_PLUS: + posonlyargs = [self.visit(child, newnode) for child in node.posonlyargs] + posonlyargs_annotations = [ + self.visit(arg.annotation, newnode) for arg in node.posonlyargs + ] + type_comment_args = [ + self.check_type_comment(child, parent=newnode) for child in node.args + ] + type_comment_kwonlyargs = [ + self.check_type_comment(child, parent=newnode) for child in node.kwonlyargs + ] + type_comment_posonlyargs: List[Optional[NodeNG]] = [] + if PY38_PLUS: + type_comment_posonlyargs = [ + self.check_type_comment(child, parent=newnode) + for child in node.posonlyargs + ] + + newnode.postinit( + args=args, + defaults=defaults, + kwonlyargs=kwonlyargs, + posonlyargs=posonlyargs, + kw_defaults=kw_defaults, + annotations=annotations, + kwonlyargs_annotations=kwonlyargs_annotations, + posonlyargs_annotations=posonlyargs_annotations, + varargannotation=varargannotation, + kwargannotation=kwargannotation, + type_comment_args=type_comment_args, + type_comment_kwonlyargs=type_comment_kwonlyargs, + type_comment_posonlyargs=type_comment_posonlyargs, + ) + # save argument names in locals: + assert newnode.parent + if vararg: + newnode.parent.set_local(vararg, newnode) + if kwarg: + newnode.parent.set_local(kwarg, newnode) + return newnode + + def visit_assert(self, node: "ast.Assert", parent: NodeNG) -> nodes.Assert: + """visit a Assert node by returning a fresh instance of it""" + newnode = nodes.Assert( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + msg: Optional[NodeNG] = None + if node.msg: + msg = self.visit(node.msg, newnode) + newnode.postinit(self.visit(node.test, newnode), msg) + return newnode + + def check_type_comment( + self, + node: Union[ + "ast.Assign", + "ast.arg", + "ast.For", + "ast.AsyncFor", + "ast.With", + "ast.AsyncWith", + ], + parent: Union[ + nodes.Assign, + nodes.Arguments, + nodes.For, + nodes.AsyncFor, + nodes.With, + nodes.AsyncWith, + ], + ) -> Optional[NodeNG]: + type_comment = getattr(node, "type_comment", None) # Added in Python 3.8 + if not type_comment: + return None + + try: + type_comment_ast = self._parser_module.parse(type_comment) + except SyntaxError: + # Invalid type comment, just skip it. + return None + + type_object = self.visit(type_comment_ast.body[0], parent=parent) + if not isinstance(type_object, nodes.Expr): + return None + + return type_object.value + + def check_function_type_comment( + self, node: Union["ast.FunctionDef", "ast.AsyncFunctionDef"], parent: NodeNG + ) -> Optional[Tuple[Optional[NodeNG], List[NodeNG]]]: + type_comment = getattr(node, "type_comment", None) # Added in Python 3.8 + if not type_comment: + return None + + try: + type_comment_ast = parse_function_type_comment(type_comment) + except SyntaxError: + # Invalid type comment, just skip it. 
+ return None + + if not type_comment_ast: + return None + + returns: Optional[NodeNG] = None + argtypes: List[NodeNG] = [ + self.visit(elem, parent) for elem in (type_comment_ast.argtypes or []) + ] + if type_comment_ast.returns: + returns = self.visit(type_comment_ast.returns, parent) + + return returns, argtypes + + def visit_asyncfunctiondef( + self, node: "ast.AsyncFunctionDef", parent: NodeNG + ) -> nodes.AsyncFunctionDef: + return self._visit_functiondef(nodes.AsyncFunctionDef, node, parent) + + def visit_asyncfor(self, node: "ast.AsyncFor", parent: NodeNG) -> nodes.AsyncFor: + return self._visit_for(nodes.AsyncFor, node, parent) + + def visit_await(self, node: "ast.Await", parent: NodeNG) -> nodes.Await: + newnode = nodes.Await( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(value=self.visit(node.value, newnode)) + return newnode + + def visit_asyncwith(self, node: "ast.AsyncWith", parent: NodeNG) -> nodes.AsyncWith: + return self._visit_with(nodes.AsyncWith, node, parent) + + def visit_assign(self, node: "ast.Assign", parent: NodeNG) -> nodes.Assign: + """visit a Assign node by returning a fresh instance of it""" + newnode = nodes.Assign( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + type_annotation = self.check_type_comment(node, parent=newnode) + newnode.postinit( + targets=[self.visit(child, newnode) for child in node.targets], + value=self.visit(node.value, newnode), + type_annotation=type_annotation, + ) + return newnode + + def visit_annassign(self, node: "ast.AnnAssign", parent: NodeNG) -> nodes.AnnAssign: + """visit an AnnAssign node by returning a fresh instance of it""" + newnode = nodes.AnnAssign( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + target=self.visit(node.target, newnode), + annotation=self.visit(node.annotation, newnode), + simple=node.simple, + value=self.visit(node.value, newnode), + ) + return newnode + + @overload + def visit_assignname( + self, node: "ast.AST", parent: NodeNG, node_name: str + ) -> nodes.AssignName: + ... + + @overload + def visit_assignname( + self, node: "ast.AST", parent: NodeNG, node_name: None + ) -> None: + ... 
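
check_type_comment above is what feeds the type_annotation field set in visit_assign; a minimal sketch of how that surfaces through the public API, assuming a standard astroid installation on Python 3.8+ (where the stdlib parser exposes type comments):

# Sketch: a "# type:" comment on an assignment becomes Assign.type_annotation.
import astroid

assign = astroid.extract_node("x = []  # type: List[int]")
print(type(assign).__name__)                  # Assign
print(assign.type_annotation.as_string())     # List[int]
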
+ + def visit_assignname( + self, node: "ast.AST", parent: NodeNG, node_name: Optional[str] + ) -> Optional[nodes.AssignName]: + """visit a node and return a AssignName node + + Note: Method not called by 'visit' + """ + if node_name is None: + return None + newnode = nodes.AssignName( + name=node_name, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + self._save_assignment(newnode) + return newnode + + def visit_augassign(self, node: "ast.AugAssign", parent: NodeNG) -> nodes.AugAssign: + """visit a AugAssign node by returning a fresh instance of it""" + newnode = nodes.AugAssign( + op=self._parser_module.bin_op_classes[type(node.op)] + "=", + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.target, newnode), self.visit(node.value, newnode) + ) + return newnode + + def visit_binop(self, node: "ast.BinOp", parent: NodeNG) -> nodes.BinOp: + """visit a BinOp node by returning a fresh instance of it""" + newnode = nodes.BinOp( + op=self._parser_module.bin_op_classes[type(node.op)], + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.left, newnode), self.visit(node.right, newnode) + ) + return newnode + + def visit_boolop(self, node: "ast.BoolOp", parent: NodeNG) -> nodes.BoolOp: + """visit a BoolOp node by returning a fresh instance of it""" + newnode = nodes.BoolOp( + op=self._parser_module.bool_op_classes[type(node.op)], + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.values]) + return newnode + + def visit_break(self, node: "ast.Break", parent: NodeNG) -> nodes.Break: + """visit a Break node by returning a fresh instance of it""" + return nodes.Break( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + def visit_call(self, node: "ast.Call", parent: NodeNG) -> nodes.Call: + """visit a CallFunc node by returning a fresh instance of it""" + newnode = nodes.Call( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + func=self.visit(node.func, newnode), + args=[self.visit(child, newnode) for child in node.args], + keywords=[self.visit(child, newnode) for child in node.keywords], + ) + return newnode + + def visit_classdef( + self, node: "ast.ClassDef", parent: NodeNG, newstyle: bool = True + ) -> nodes.ClassDef: + """visit a ClassDef node to become astroid""" + node, doc_ast_node = self._get_doc(node) + newnode = nodes.ClassDef( + name=node.name, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno 
and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + metaclass = None + for keyword in node.keywords: + if keyword.arg == "metaclass": + metaclass = self.visit(keyword, newnode).value + break + decorators = self.visit_decorators(node, newnode) + newnode.postinit( + [self.visit(child, newnode) for child in node.bases], + [self.visit(child, newnode) for child in node.body], + decorators, + newstyle, + metaclass, + [ + self.visit(kwd, newnode) + for kwd in node.keywords + if kwd.arg != "metaclass" + ], + position=self._get_position_info(node, newnode), + doc_node=self.visit(doc_ast_node, newnode), + ) + self._fix_doc_node_position(newnode) + return newnode + + def visit_continue(self, node: "ast.Continue", parent: NodeNG) -> nodes.Continue: + """visit a Continue node by returning a fresh instance of it""" + return nodes.Continue( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + def visit_compare(self, node: "ast.Compare", parent: NodeNG) -> nodes.Compare: + """visit a Compare node by returning a fresh instance of it""" + newnode = nodes.Compare( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.left, newnode), + [ + ( + self._parser_module.cmp_op_classes[op.__class__], + self.visit(expr, newnode), + ) + for (op, expr) in zip(node.ops, node.comparators) + ], + ) + return newnode + + def visit_comprehension( + self, node: "ast.comprehension", parent: NodeNG + ) -> nodes.Comprehension: + """visit a Comprehension node by returning a fresh instance of it""" + newnode = nodes.Comprehension(parent) + newnode.postinit( + self.visit(node.target, newnode), + self.visit(node.iter, newnode), + [self.visit(child, newnode) for child in node.ifs], + bool(node.is_async), + ) + return newnode + + def visit_decorators( + self, + node: Union["ast.ClassDef", "ast.FunctionDef", "ast.AsyncFunctionDef"], + parent: NodeNG, + ) -> Optional[nodes.Decorators]: + """visit a Decorators node by returning a fresh instance of it + + Note: Method not called by 'visit' + """ + if not node.decorator_list: + return None + # /!\ node is actually an _ast.FunctionDef node while + # parent is an astroid.nodes.FunctionDef node + if sys.version_info >= (3, 8): + # Set the line number of the first decorator for Python 3.8+. 
+ lineno = node.decorator_list[0].lineno + end_lineno = node.decorator_list[-1].end_lineno + end_col_offset = node.decorator_list[-1].end_col_offset + else: + lineno = node.lineno + end_lineno = None + end_col_offset = None + newnode = nodes.Decorators( + lineno=lineno, + col_offset=node.col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.decorator_list]) + return newnode + + def visit_delete(self, node: "ast.Delete", parent: NodeNG) -> nodes.Delete: + """visit a Delete node by returning a fresh instance of it""" + newnode = nodes.Delete( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.targets]) + return newnode + + def _visit_dict_items( + self, node: "ast.Dict", parent: NodeNG, newnode: nodes.Dict + ) -> Generator[Tuple[NodeNG, NodeNG], None, None]: + for key, value in zip(node.keys, node.values): + rebuilt_key: NodeNG + rebuilt_value = self.visit(value, newnode) + if not key: + # Extended unpacking + rebuilt_key = nodes.DictUnpack( + lineno=rebuilt_value.lineno, + col_offset=rebuilt_value.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(rebuilt_value, "end_lineno", None), + end_col_offset=getattr(rebuilt_value, "end_col_offset", None), + parent=parent, + ) + else: + rebuilt_key = self.visit(key, newnode) + yield rebuilt_key, rebuilt_value + + def visit_dict(self, node: "ast.Dict", parent: NodeNG) -> nodes.Dict: + """visit a Dict node by returning a fresh instance of it""" + newnode = nodes.Dict( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + items = list(self._visit_dict_items(node, parent, newnode)) + newnode.postinit(items) + return newnode + + def visit_dictcomp(self, node: "ast.DictComp", parent: NodeNG) -> nodes.DictComp: + """visit a DictComp node by returning a fresh instance of it""" + newnode = nodes.DictComp( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.key, newnode), + self.visit(node.value, newnode), + [self.visit(child, newnode) for child in node.generators], + ) + return newnode + + def visit_expr(self, node: "ast.Expr", parent: NodeNG) -> nodes.Expr: + """visit a Expr node by returning a fresh instance of it""" + newnode = nodes.Expr( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_excepthandler( + self, node: "ast.ExceptHandler", parent: NodeNG + ) -> nodes.ExceptHandler: + """visit an ExceptHandler node by returning a fresh instance of it""" + newnode = nodes.ExceptHandler( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, 
"end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.type, newnode), + self.visit_assignname(node, newnode, node.name), + [self.visit(child, newnode) for child in node.body], + ) + return newnode + + @overload + def _visit_for( + self, cls: Type[nodes.For], node: "ast.For", parent: NodeNG + ) -> nodes.For: + ... + + @overload + def _visit_for( + self, cls: Type[nodes.AsyncFor], node: "ast.AsyncFor", parent: NodeNG + ) -> nodes.AsyncFor: + ... + + def _visit_for( + self, cls: Type[T_For], node: Union["ast.For", "ast.AsyncFor"], parent: NodeNG + ) -> T_For: + """visit a For node by returning a fresh instance of it""" + col_offset = node.col_offset + if IS_PYPY and not PY39_PLUS and isinstance(node, ast.AsyncFor) and self._data: + # pylint: disable-next=unsubscriptable-object + col_offset = self._data[node.lineno - 1].index("async") + + newnode = cls( + lineno=node.lineno, + col_offset=col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + type_annotation = self.check_type_comment(node, parent=newnode) + newnode.postinit( + target=self.visit(node.target, newnode), + iter=self.visit(node.iter, newnode), + body=[self.visit(child, newnode) for child in node.body], + orelse=[self.visit(child, newnode) for child in node.orelse], + type_annotation=type_annotation, + ) + return newnode + + def visit_for(self, node: "ast.For", parent: NodeNG) -> nodes.For: + return self._visit_for(nodes.For, node, parent) + + def visit_importfrom( + self, node: "ast.ImportFrom", parent: NodeNG + ) -> nodes.ImportFrom: + """visit an ImportFrom node by returning a fresh instance of it""" + names = [(alias.name, alias.asname) for alias in node.names] + newnode = nodes.ImportFrom( + fromname=node.module or "", + names=names, + level=node.level or None, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + # store From names to add them to locals after building + self._import_from_nodes.append(newnode) + return newnode + + @overload + def _visit_functiondef( + self, cls: Type[nodes.FunctionDef], node: "ast.FunctionDef", parent: NodeNG + ) -> nodes.FunctionDef: + ... + + @overload + def _visit_functiondef( + self, + cls: Type[nodes.AsyncFunctionDef], + node: "ast.AsyncFunctionDef", + parent: NodeNG, + ) -> nodes.AsyncFunctionDef: + ... + + def _visit_functiondef( + self, + cls: Type[T_Function], + node: Union["ast.FunctionDef", "ast.AsyncFunctionDef"], + parent: NodeNG, + ) -> T_Function: + """visit an FunctionDef node to become astroid""" + self._global_names.append({}) + node, doc_ast_node = self._get_doc(node) + + lineno = node.lineno + if PY38_PLUS and node.decorator_list: + # Python 3.8 sets the line number of a decorated function + # to be the actual line number of the function, but the + # previous versions expected the decorator's line number instead. + # We reset the function's line number to that of the + # first decorator to maintain backward compatibility. + # It's not ideal but this discrepancy was baked into + # the framework for *years*. 
+ lineno = node.decorator_list[0].lineno + + newnode = cls( + name=node.name, + lineno=lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + decorators = self.visit_decorators(node, newnode) + returns: Optional[NodeNG] + if node.returns: + returns = self.visit(node.returns, newnode) + else: + returns = None + + type_comment_args = type_comment_returns = None + type_comment_annotation = self.check_function_type_comment(node, newnode) + if type_comment_annotation: + type_comment_returns, type_comment_args = type_comment_annotation + newnode.postinit( + args=self.visit(node.args, newnode), + body=[self.visit(child, newnode) for child in node.body], + decorators=decorators, + returns=returns, + type_comment_returns=type_comment_returns, + type_comment_args=type_comment_args, + position=self._get_position_info(node, newnode), + doc_node=self.visit(doc_ast_node, newnode), + ) + if IS_PYPY and PY36 and newnode.position: + # PyPy: col_offset in Python 3.6 doesn't include 'async', + # use position.col_offset instead. + newnode.col_offset = newnode.position.col_offset + self._fix_doc_node_position(newnode) + self._global_names.pop() + return newnode + + def visit_functiondef( + self, node: "ast.FunctionDef", parent: NodeNG + ) -> nodes.FunctionDef: + return self._visit_functiondef(nodes.FunctionDef, node, parent) + + def visit_generatorexp( + self, node: "ast.GeneratorExp", parent: NodeNG + ) -> nodes.GeneratorExp: + """visit a GeneratorExp node by returning a fresh instance of it""" + newnode = nodes.GeneratorExp( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.elt, newnode), + [self.visit(child, newnode) for child in node.generators], + ) + return newnode + + def visit_attribute( + self, node: "ast.Attribute", parent: NodeNG + ) -> Union[nodes.Attribute, nodes.AssignAttr, nodes.DelAttr]: + """visit an Attribute node by returning a fresh instance of it""" + context = self._get_context(node) + newnode: Union[nodes.Attribute, nodes.AssignAttr, nodes.DelAttr] + if context == Context.Del: + # FIXME : maybe we should reintroduce and visit_delattr ? + # for instance, deactivating assign_ctx + newnode = nodes.DelAttr( + attrname=node.attr, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + elif context == Context.Store: + # pylint: disable=redefined-variable-type + newnode = nodes.AssignAttr( + attrname=node.attr, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + # Prohibit a local save if we are in an ExceptHandler. 
+ if not isinstance(parent, nodes.ExceptHandler): + # mypy doesn't recognize that newnode has to be AssignAttr because it doesn't support ParamSpec + # See https://github.com/python/mypy/issues/8645 + self._delayed_assattr.append(newnode) # type: ignore[arg-type] + else: + newnode = nodes.Attribute( + attrname=node.attr, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_global(self, node: "ast.Global", parent: NodeNG) -> nodes.Global: + """visit a Global node to become astroid""" + newnode = nodes.Global( + names=node.names, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + if self._global_names: # global at the module level, no effect + for name in node.names: + self._global_names[-1].setdefault(name, []).append(newnode) + return newnode + + def visit_if(self, node: "ast.If", parent: NodeNG) -> nodes.If: + """visit an If node by returning a fresh instance of it""" + newnode = nodes.If( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.test, newnode), + [self.visit(child, newnode) for child in node.body], + [self.visit(child, newnode) for child in node.orelse], + ) + return newnode + + def visit_ifexp(self, node: "ast.IfExp", parent: NodeNG) -> nodes.IfExp: + """visit a IfExp node by returning a fresh instance of it""" + newnode = nodes.IfExp( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.test, newnode), + self.visit(node.body, newnode), + self.visit(node.orelse, newnode), + ) + return newnode + + def visit_import(self, node: "ast.Import", parent: NodeNG) -> nodes.Import: + """visit a Import node by returning a fresh instance of it""" + names = [(alias.name, alias.asname) for alias in node.names] + newnode = nodes.Import( + names=names, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + # save import names in parent's locals: + for (name, asname) in newnode.names: + name = asname or name + parent.set_local(name.split(".")[0], newnode) + return newnode + + def visit_joinedstr(self, node: "ast.JoinedStr", parent: NodeNG) -> nodes.JoinedStr: + newnode = nodes.JoinedStr( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.values]) + return newnode + + def visit_formattedvalue( + self, node: "ast.FormattedValue", parent: NodeNG + ) -> nodes.FormattedValue: + newnode = nodes.FormattedValue( + lineno=node.lineno, + col_offset=node.col_offset, + # 
end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.value, newnode), + node.conversion, + self.visit(node.format_spec, newnode), + ) + return newnode + + if sys.version_info >= (3, 8): + + def visit_namedexpr( + self, node: "ast.NamedExpr", parent: NodeNG + ) -> nodes.NamedExpr: + newnode = nodes.NamedExpr( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.target, newnode), self.visit(node.value, newnode) + ) + return newnode + + if sys.version_info < (3, 9): + # Not used in Python 3.9+. + def visit_extslice( + self, node: "ast.ExtSlice", parent: nodes.Subscript + ) -> nodes.Tuple: + """visit an ExtSlice node by returning a fresh instance of Tuple""" + # ExtSlice doesn't have lineno or col_offset information + newnode = nodes.Tuple(ctx=Context.Load, parent=parent) + newnode.postinit([self.visit(dim, newnode) for dim in node.dims]) + return newnode + + def visit_index(self, node: "ast.Index", parent: nodes.Subscript) -> NodeNG: + """visit a Index node by returning a fresh instance of NodeNG""" + return self.visit(node.value, parent) + + def visit_keyword(self, node: "ast.keyword", parent: NodeNG) -> nodes.Keyword: + """visit a Keyword node by returning a fresh instance of it""" + newnode = nodes.Keyword( + arg=node.arg, + # position attributes added in 3.9 + lineno=getattr(node, "lineno", None), + col_offset=getattr(node, "col_offset", None), + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_lambda(self, node: "ast.Lambda", parent: NodeNG) -> nodes.Lambda: + """visit a Lambda node by returning a fresh instance of it""" + newnode = nodes.Lambda( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.args, newnode), self.visit(node.body, newnode)) + return newnode + + def visit_list(self, node: "ast.List", parent: NodeNG) -> nodes.List: + """visit a List node by returning a fresh instance of it""" + context = self._get_context(node) + newnode = nodes.List( + ctx=context, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.elts]) + return newnode + + def visit_listcomp(self, node: "ast.ListComp", parent: NodeNG) -> nodes.ListComp: + """visit a ListComp node by returning a fresh instance of it""" + newnode = nodes.ListComp( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.elt, newnode), + [self.visit(child, newnode) for child in node.generators], + ) + return newnode + + def visit_name( + self, node: "ast.Name", parent: NodeNG + ) -> 
Union[nodes.Name, nodes.AssignName, nodes.DelName]: + """visit a Name node by returning a fresh instance of it""" + context = self._get_context(node) + newnode: Union[nodes.Name, nodes.AssignName, nodes.DelName] + if context == Context.Del: + newnode = nodes.DelName( + name=node.id, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + elif context == Context.Store: + # pylint: disable=redefined-variable-type + newnode = nodes.AssignName( + name=node.id, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + else: + newnode = nodes.Name( + name=node.id, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + # XXX REMOVE me : + if context in (Context.Del, Context.Store): # 'Aug' ?? + newnode = cast(Union[nodes.AssignName, nodes.DelName], newnode) + self._save_assignment(newnode) + return newnode + + def visit_nonlocal(self, node: "ast.Nonlocal", parent: NodeNG) -> nodes.Nonlocal: + """visit a Nonlocal node and return a new instance of it""" + return nodes.Nonlocal( + names=node.names, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + def visit_constant(self, node: "ast.Constant", parent: NodeNG) -> nodes.Const: + """visit a Constant node by returning a fresh instance of Const""" + return nodes.Const( + value=node.value, + kind=node.kind, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + if sys.version_info < (3, 8): + # Not used in Python 3.8+. 
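+        # Since Python 3.8 the parser emits ``ast.Constant`` for these literal
+        # nodes (handled by ``visit_constant`` above), so the handlers below
+        # are only reachable on pre-3.8 interpreters.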
+ def visit_ellipsis(self, node: "ast.Ellipsis", parent: NodeNG) -> nodes.Const: + """visit an Ellipsis node by returning a fresh instance of Const""" + return nodes.Const( + value=Ellipsis, + lineno=node.lineno, + col_offset=node.col_offset, + parent=parent, + ) + + def visit_nameconstant( + self, node: "ast.NameConstant", parent: NodeNG + ) -> nodes.Const: + # For singleton values True / False / None + return nodes.Const( + node.value, + node.lineno, + node.col_offset, + parent, + ) + + def visit_str( + self, node: Union["ast.Str", "ast.Bytes"], parent: NodeNG + ) -> nodes.Const: + """visit a String/Bytes node by returning a fresh instance of Const""" + return nodes.Const( + node.s, + node.lineno, + node.col_offset, + parent, + ) + + visit_bytes = visit_str + + def visit_num(self, node: "ast.Num", parent: NodeNG) -> nodes.Const: + """visit a Num node by returning a fresh instance of Const""" + return nodes.Const( + node.n, + node.lineno, + node.col_offset, + parent, + ) + + def visit_pass(self, node: "ast.Pass", parent: NodeNG) -> nodes.Pass: + """visit a Pass node by returning a fresh instance of it""" + return nodes.Pass( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + def visit_raise(self, node: "ast.Raise", parent: NodeNG) -> nodes.Raise: + """visit a Raise node by returning a fresh instance of it""" + newnode = nodes.Raise( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + # no traceback; anyway it is not used in Pylint + newnode.postinit( + exc=self.visit(node.exc, newnode), + cause=self.visit(node.cause, newnode), + ) + return newnode + + def visit_return(self, node: "ast.Return", parent: NodeNG) -> nodes.Return: + """visit a Return node by returning a fresh instance of it""" + newnode = nodes.Return( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + if node.value is not None: + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_set(self, node: "ast.Set", parent: NodeNG) -> nodes.Set: + """visit a Set node by returning a fresh instance of it""" + newnode = nodes.Set( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.elts]) + return newnode + + def visit_setcomp(self, node: "ast.SetComp", parent: NodeNG) -> nodes.SetComp: + """visit a SetComp node by returning a fresh instance of it""" + newnode = nodes.SetComp( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.elt, newnode), + [self.visit(child, newnode) for child in node.generators], + ) + return newnode + + def visit_slice(self, node: "ast.Slice", parent: nodes.Subscript) -> nodes.Slice: + """visit a Slice node by returning a fresh 
instance of it""" + newnode = nodes.Slice( + # position attributes added in 3.9 + lineno=getattr(node, "lineno", None), + col_offset=getattr(node, "col_offset", None), + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + lower=self.visit(node.lower, newnode), + upper=self.visit(node.upper, newnode), + step=self.visit(node.step, newnode), + ) + return newnode + + def visit_subscript(self, node: "ast.Subscript", parent: NodeNG) -> nodes.Subscript: + """visit a Subscript node by returning a fresh instance of it""" + context = self._get_context(node) + newnode = nodes.Subscript( + ctx=context, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.value, newnode), self.visit(node.slice, newnode) + ) + return newnode + + def visit_starred(self, node: "ast.Starred", parent: NodeNG) -> nodes.Starred: + """visit a Starred node and return a new instance of it""" + context = self._get_context(node) + newnode = nodes.Starred( + ctx=context, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_tryexcept(self, node: "ast.Try", parent: NodeNG) -> nodes.TryExcept: + """visit a TryExcept node by returning a fresh instance of it""" + if sys.version_info >= (3, 8): + # TryExcept excludes the 'finally' but that will be included in the + # end_lineno from 'node'. Therefore, we check all non 'finally' + # children to find the correct end_lineno and column. 
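+            # The ``reversed(...)``/``break`` pattern below just takes the
+            # position of the last non-'finally' child, keeping the node's own
+            # values as a fallback when there are no such children.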
+ end_lineno = node.end_lineno + end_col_offset = node.end_col_offset + all_children: List["ast.AST"] = [*node.body, *node.handlers, *node.orelse] + for child in reversed(all_children): + end_lineno = child.end_lineno + end_col_offset = child.end_col_offset + break + newnode = nodes.TryExcept( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=end_lineno, + end_col_offset=end_col_offset, + parent=parent, + ) + else: + newnode = nodes.TryExcept(node.lineno, node.col_offset, parent) + newnode.postinit( + [self.visit(child, newnode) for child in node.body], + [self.visit(child, newnode) for child in node.handlers], + [self.visit(child, newnode) for child in node.orelse], + ) + return newnode + + def visit_try( + self, node: "ast.Try", parent: NodeNG + ) -> Union[nodes.TryExcept, nodes.TryFinally, None]: + # python 3.3 introduce a new Try node replacing + # TryFinally/TryExcept nodes + if node.finalbody: + newnode = nodes.TryFinally( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + body: List[Union[NodeNG, nodes.TryExcept]] + if node.handlers: + body = [self.visit_tryexcept(node, newnode)] + else: + body = [self.visit(child, newnode) for child in node.body] + newnode.postinit(body, [self.visit(n, newnode) for n in node.finalbody]) + return newnode + if node.handlers: + return self.visit_tryexcept(node, parent) + return None + + def visit_tuple(self, node: "ast.Tuple", parent: NodeNG) -> nodes.Tuple: + """visit a Tuple node by returning a fresh instance of it""" + context = self._get_context(node) + newnode = nodes.Tuple( + ctx=context, + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit([self.visit(child, newnode) for child in node.elts]) + return newnode + + def visit_unaryop(self, node: "ast.UnaryOp", parent: NodeNG) -> nodes.UnaryOp: + """visit a UnaryOp node by returning a fresh instance of it""" + newnode = nodes.UnaryOp( + op=self._parser_module.unary_op_classes[node.op.__class__], + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit(self.visit(node.operand, newnode)) + return newnode + + def visit_while(self, node: "ast.While", parent: NodeNG) -> nodes.While: + """visit a While node by returning a fresh instance of it""" + newnode = nodes.While( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + newnode.postinit( + self.visit(node.test, newnode), + [self.visit(child, newnode) for child in node.body], + [self.visit(child, newnode) for child in node.orelse], + ) + return newnode + + @overload + def _visit_with( + self, cls: Type[nodes.With], node: "ast.With", parent: NodeNG + ) -> nodes.With: + ... + + @overload + def _visit_with( + self, cls: Type[nodes.AsyncWith], node: "ast.AsyncWith", parent: NodeNG + ) -> nodes.AsyncWith: + ... 
+ + def _visit_with( + self, + cls: Type[T_With], + node: Union["ast.With", "ast.AsyncWith"], + parent: NodeNG, + ) -> T_With: + col_offset = node.col_offset + if IS_PYPY and not PY39_PLUS and isinstance(node, ast.AsyncWith) and self._data: + # pylint: disable-next=unsubscriptable-object + col_offset = self._data[node.lineno - 1].index("async") + + newnode = cls( + lineno=node.lineno, + col_offset=col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + + def visit_child(child: "ast.withitem") -> Tuple[NodeNG, Optional[NodeNG]]: + expr = self.visit(child.context_expr, newnode) + var = self.visit(child.optional_vars, newnode) + return expr, var + + type_annotation = self.check_type_comment(node, parent=newnode) + newnode.postinit( + items=[visit_child(child) for child in node.items], + body=[self.visit(child, newnode) for child in node.body], + type_annotation=type_annotation, + ) + return newnode + + def visit_with(self, node: "ast.With", parent: NodeNG) -> NodeNG: + return self._visit_with(nodes.With, node, parent) + + def visit_yield(self, node: "ast.Yield", parent: NodeNG) -> NodeNG: + """visit a Yield node by returning a fresh instance of it""" + newnode = nodes.Yield( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + if node.value is not None: + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + def visit_yieldfrom(self, node: "ast.YieldFrom", parent: NodeNG) -> NodeNG: + newnode = nodes.YieldFrom( + lineno=node.lineno, + col_offset=node.col_offset, + # end_lineno and end_col_offset added in 3.8 + end_lineno=getattr(node, "end_lineno", None), + end_col_offset=getattr(node, "end_col_offset", None), + parent=parent, + ) + if node.value is not None: + newnode.postinit(self.visit(node.value, newnode)) + return newnode + + if sys.version_info >= (3, 10): + + def visit_match(self, node: "ast.Match", parent: NodeNG) -> nodes.Match: + newnode = nodes.Match( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + newnode.postinit( + subject=self.visit(node.subject, newnode), + cases=[self.visit(case, newnode) for case in node.cases], + ) + return newnode + + def visit_matchcase( + self, node: "ast.match_case", parent: NodeNG + ) -> nodes.MatchCase: + newnode = nodes.MatchCase(parent=parent) + newnode.postinit( + pattern=self.visit(node.pattern, newnode), + guard=self.visit(node.guard, newnode), + body=[self.visit(child, newnode) for child in node.body], + ) + return newnode + + def visit_matchvalue( + self, node: "ast.MatchValue", parent: NodeNG + ) -> nodes.MatchValue: + newnode = nodes.MatchValue( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + newnode.postinit(value=self.visit(node.value, newnode)) + return newnode + + def visit_matchsingleton( + self, node: "ast.MatchSingleton", parent: NodeNG + ) -> nodes.MatchSingleton: + return nodes.MatchSingleton( + value=node.value, + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + + def visit_matchsequence( + self, node: "ast.MatchSequence", parent: NodeNG + ) 
-> nodes.MatchSequence: + newnode = nodes.MatchSequence( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + newnode.postinit( + patterns=[self.visit(pattern, newnode) for pattern in node.patterns] + ) + return newnode + + def visit_matchmapping( + self, node: "ast.MatchMapping", parent: NodeNG + ) -> nodes.MatchMapping: + newnode = nodes.MatchMapping( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + # Add AssignName node for 'node.name' + # https://bugs.python.org/issue43994 + newnode.postinit( + keys=[self.visit(child, newnode) for child in node.keys], + patterns=[self.visit(pattern, newnode) for pattern in node.patterns], + rest=self.visit_assignname(node, newnode, node.rest), + ) + return newnode + + def visit_matchclass( + self, node: "ast.MatchClass", parent: NodeNG + ) -> nodes.MatchClass: + newnode = nodes.MatchClass( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + newnode.postinit( + cls=self.visit(node.cls, newnode), + patterns=[self.visit(pattern, newnode) for pattern in node.patterns], + kwd_attrs=node.kwd_attrs, + kwd_patterns=[ + self.visit(pattern, newnode) for pattern in node.kwd_patterns + ], + ) + return newnode + + def visit_matchstar( + self, node: "ast.MatchStar", parent: NodeNG + ) -> nodes.MatchStar: + newnode = nodes.MatchStar( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + # Add AssignName node for 'node.name' + # https://bugs.python.org/issue43994 + newnode.postinit(name=self.visit_assignname(node, newnode, node.name)) + return newnode + + def visit_matchas(self, node: "ast.MatchAs", parent: NodeNG) -> nodes.MatchAs: + newnode = nodes.MatchAs( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + # Add AssignName node for 'node.name' + # https://bugs.python.org/issue43994 + newnode.postinit( + pattern=self.visit(node.pattern, newnode), + name=self.visit_assignname(node, newnode, node.name), + ) + return newnode + + def visit_matchor(self, node: "ast.MatchOr", parent: NodeNG) -> nodes.MatchOr: + newnode = nodes.MatchOr( + lineno=node.lineno, + col_offset=node.col_offset, + end_lineno=node.end_lineno, + end_col_offset=node.end_col_offset, + parent=parent, + ) + newnode.postinit( + patterns=[self.visit(pattern, newnode) for pattern in node.patterns] + ) + return newnode diff --git a/myenv/lib/python3.9/site-packages/astroid/scoped_nodes.py b/myenv/lib/python3.9/site-packages/astroid/scoped_nodes.py new file mode 100644 index 0000000..677f892 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/scoped_nodes.py @@ -0,0 +1,33 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +# pylint: disable=unused-import + +import warnings + +from astroid.nodes.scoped_nodes import ( + AsyncFunctionDef, + ClassDef, + ComprehensionScope, + DictComp, + FunctionDef, + GeneratorExp, + Lambda, + ListComp, + LocalsDictNodeNG, + Module, + SetComp, + _is_metaclass, + builtin_lookup, + function_to_method, + get_wrapping_class, +) + +# We cannot 
create a __all__ here because it would create a circular import +# Please remove astroid/scoped_nodes.py|astroid/node_classes.py in autoflake +# exclude when removing this file. +warnings.warn( + "The 'astroid.scoped_nodes' module is deprecated and will be replaced by 'astroid.nodes' in astroid 3.0.0", + DeprecationWarning, +) diff --git a/myenv/lib/python3.9/site-packages/astroid/test_utils.py b/myenv/lib/python3.9/site-packages/astroid/test_utils.py new file mode 100644 index 0000000..2ab7383 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/test_utils.py @@ -0,0 +1,76 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +"""Utility functions for test code that uses astroid ASTs as input.""" +import contextlib +import functools +import sys +import warnings +from typing import Callable, Tuple + +import pytest + +from astroid import manager, nodes, transforms + + +def require_version(minver: str = "0.0.0", maxver: str = "4.0.0") -> Callable: + """Compare version of python interpreter to the given one. + Skip the test if older. + """ + + def parse(python_version: str) -> Tuple[int, ...]: + try: + return tuple(int(v) for v in python_version.split(".")) + except ValueError as e: + msg = f"{python_version} is not a correct version : should be X.Y[.Z]." + raise ValueError(msg) from e + + min_version = parse(minver) + max_version = parse(maxver) + + def check_require_version(f): + current: Tuple[int, int, int] = sys.version_info[:3] + if min_version < current <= max_version: + return f + + version: str = ".".join(str(v) for v in sys.version_info) + + @functools.wraps(f) + def new_f(*args, **kwargs): + if current <= min_version: + pytest.skip(f"Needs Python > {minver}. Current version is {version}.") + elif current > max_version: + pytest.skip(f"Needs Python <= {maxver}. Current version is {version}.") + + return new_f + + return check_require_version + + +def get_name_node(start_from, name, index=0): + return [n for n in start_from.nodes_of_class(nodes.Name) if n.name == name][index] + + +@contextlib.contextmanager +def enable_warning(warning): + warnings.simplefilter("always", warning) + try: + yield + finally: + # Reset it to default value, so it will take + # into account the values from the -W flag. + warnings.simplefilter("default", warning) + + +def brainless_manager(): + m = manager.AstroidManager() + # avoid caching into the AstroidManager borg since we get problems + # with other tests : + m.__dict__ = {} + m._failed_import_hooks = [] + m.astroid_cache = {} + m._mod_file_cache = {} + m._transform = transforms.TransformVisitor() + m.extension_package_whitelist = {} + return m diff --git a/myenv/lib/python3.9/site-packages/astroid/transforms.py b/myenv/lib/python3.9/site-packages/astroid/transforms.py new file mode 100644 index 0000000..2fc8935 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/transforms.py @@ -0,0 +1,88 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import collections +from typing import TYPE_CHECKING + +from astroid.context import _invalidate_cache + +if TYPE_CHECKING: + from astroid import NodeNG + + +class TransformVisitor: + """A visitor for handling transforms. 
+ + The standard approach of using it is to call + :meth:`~visit` with an *astroid* module and the class + will take care of the rest, walking the tree and running the + transforms for each encountered node. + """ + + def __init__(self): + self.transforms = collections.defaultdict(list) + + def _transform(self, node: "NodeNG") -> "NodeNG": + """Call matching transforms for the given node if any and return the + transformed node. + """ + cls = node.__class__ + if cls not in self.transforms: + # no transform registered for this class of node + return node + + transforms = self.transforms[cls] + for transform_func, predicate in transforms: + if predicate is None or predicate(node): + ret = transform_func(node) + # if the transformation function returns something, it's + # expected to be a replacement for the node + if ret is not None: + _invalidate_cache() + node = ret + if ret.__class__ != cls: + # Can no longer apply the rest of the transforms. + break + return node + + def _visit(self, node): + if hasattr(node, "_astroid_fields"): + for name in node._astroid_fields: + value = getattr(node, name) + visited = self._visit_generic(value) + if visited != value: + setattr(node, name, visited) + return self._transform(node) + + def _visit_generic(self, node): + if isinstance(node, list): + return [self._visit_generic(child) for child in node] + if isinstance(node, tuple): + return tuple(self._visit_generic(child) for child in node) + if not node or isinstance(node, str): + return node + + return self._visit(node) + + def register_transform(self, node_class, transform, predicate=None): + """Register `transform(node)` function to be applied on the given + astroid's `node_class` if `predicate` is None or returns true + when called with the node as argument. + + The transform function may return a value which is then used to + substitute the original node in the tree. + """ + self.transforms[node_class].append((transform, predicate)) + + def unregister_transform(self, node_class, transform, predicate=None): + """Unregister the given transform.""" + self.transforms[node_class].remove((transform, predicate)) + + def visit(self, module): + """Walk the given astroid *tree* and transform each encountered node + + Only the nodes which have transforms registered will actually + be replaced or changed. + """ + return self._visit(module) diff --git a/myenv/lib/python3.9/site-packages/astroid/typing.py b/myenv/lib/python3.9/site-packages/astroid/typing.py new file mode 100644 index 0000000..32d01dd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/typing.py @@ -0,0 +1,27 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import sys +from typing import TYPE_CHECKING, Any, Callable + +if TYPE_CHECKING: + from astroid import nodes + from astroid.context import InferenceContext + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + + +class InferenceErrorInfo(TypedDict): + """Store additional Inference error information + raised with StopIteration exception. 
+ """ + + node: "nodes.NodeNG" + context: "InferenceContext | None" + + +InferFn = Callable[..., Any] diff --git a/myenv/lib/python3.9/site-packages/astroid/util.py b/myenv/lib/python3.9/site-packages/astroid/util.py new file mode 100644 index 0000000..b071285 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/astroid/util.py @@ -0,0 +1,147 @@ +# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html +# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE +# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt + +import importlib +import warnings + +import lazy_object_proxy + + +def lazy_descriptor(obj): + class DescriptorProxy(lazy_object_proxy.Proxy): + def __get__(self, instance, owner=None): + return self.__class__.__get__(self, instance) + + return DescriptorProxy(obj) + + +def lazy_import(module_name): + return lazy_object_proxy.Proxy( + lambda: importlib.import_module("." + module_name, "astroid") + ) + + +@object.__new__ +class Uninferable: + """Special inference object, which is returned when inference fails.""" + + def __repr__(self): + return "Uninferable" + + __str__ = __repr__ + + def __getattribute__(self, name): + if name == "next": + raise AttributeError("next method should not be called") + if name.startswith("__") and name.endswith("__"): + return object.__getattribute__(self, name) + if name == "accept": + return object.__getattribute__(self, name) + return self + + def __call__(self, *args, **kwargs): + return self + + def __bool__(self): + return False + + __nonzero__ = __bool__ + + def accept(self, visitor): + return visitor.visit_uninferable(self) + + +class BadOperationMessage: + """Object which describes a TypeError occurred somewhere in the inference chain + + This is not an exception, but a container object which holds the types and + the error which occurred. 
+ """ + + +class BadUnaryOperationMessage(BadOperationMessage): + """Object which describes operational failures on UnaryOps.""" + + def __init__(self, operand, op, error): + self.operand = operand + self.op = op + self.error = error + + @property + def _object_type_helper(self): + helpers = lazy_import("helpers") + return helpers.object_type + + def _object_type(self, obj): + objtype = self._object_type_helper(obj) + if objtype is Uninferable: + return None + + return objtype + + def __str__(self): + if hasattr(self.operand, "name"): + operand_type = self.operand.name + else: + object_type = self._object_type(self.operand) + if hasattr(object_type, "name"): + operand_type = object_type.name + else: + # Just fallback to as_string + operand_type = object_type.as_string() + + msg = "bad operand type for unary {}: {}" + return msg.format(self.op, operand_type) + + +class BadBinaryOperationMessage(BadOperationMessage): + """Object which describes type errors for BinOps.""" + + def __init__(self, left_type, op, right_type): + self.left_type = left_type + self.right_type = right_type + self.op = op + + def __str__(self): + msg = "unsupported operand type(s) for {}: {!r} and {!r}" + return msg.format(self.op, self.left_type.name, self.right_type.name) + + +def _instancecheck(cls, other): + wrapped = cls.__wrapped__ + other_cls = other.__class__ + is_instance_of = wrapped is other_cls or issubclass(other_cls, wrapped) + warnings.warn( + "%r is deprecated and slated for removal in astroid " + "2.0, use %r instead" % (cls.__class__.__name__, wrapped.__name__), + PendingDeprecationWarning, + stacklevel=2, + ) + return is_instance_of + + +def proxy_alias(alias_name, node_type): + """Get a Proxy from the given name to the given node type.""" + proxy = type( + alias_name, + (lazy_object_proxy.Proxy,), + { + "__class__": object.__dict__["__class__"], + "__instancecheck__": _instancecheck, + }, + ) + return proxy(lambda: node_type) + + +def check_warnings_filter() -> bool: + """Return True if any other than the default DeprecationWarning filter is enabled. + + https://docs.python.org/3/library/warnings.html#default-warning-filter + """ + return any( + issubclass(DeprecationWarning, filter[2]) + and filter[0] != "ignore" + and filter[3] != "__main__" + for filter in warnings.filters + ) diff --git a/myenv/lib/python3.9/site-packages/attr/__init__.py b/myenv/lib/python3.9/site-packages/attr/__init__.py new file mode 100644 index 0000000..f95c96d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/__init__.py @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import sys + +from functools import partial + +from . 
import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._version_info import VersionInfo + + +__version__ = "21.4.0" +__version_info__ = VersionInfo._from_version_string(__version__) + +__title__ = "attrs" +__description__ = "Classes Without Boilerplate" +__url__ = "https://www.attrs.org/" +__uri__ = __url__ +__doc__ = __description__ + " <" + __uri__ + ">" + +__author__ = "Hynek Schlawack" +__email__ = "hs@ox.cx" + +__license__ = "MIT" +__copyright__ = "Copyright (c) 2015 Hynek Schlawack" + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + +__all__ = [ + "Attribute", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "evolve", + "exceptions", + "fields", + "fields_dict", + "filters", + "get_run_validators", + "has", + "ib", + "make_class", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + +if sys.version_info[:2] >= (3, 6): + from ._next_gen import define, field, frozen, mutable # noqa: F401 + + __all__.extend(("define", "field", "frozen", "mutable")) diff --git a/myenv/lib/python3.9/site-packages/attr/__init__.pyi b/myenv/lib/python3.9/site-packages/attr/__init__.pyi new file mode 100644 index 0000000..c0a2126 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/__init__.pyi @@ -0,0 +1,484 @@ +import sys + +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._version_info import VersionInfo + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, Attribute[_T], _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[[Attribute[_T], _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List[Attribute[Any]]], List[Attribute[Any]] +] +_CompareWithType = Callable[[Any, Any], bool] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# _make -- + +NOTHING: object + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. 
+if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... + +# Static type inference support via __dataclass_transform__ implemented as per: +# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md +# This annotation must be applied to all overloads of "define" and "attrs" +# +# NOTE: This is a typing construct and does not exist at runtime. Extensions +# wrapping attrs decorators should declare a separate __dataclass_transform__ +# signature in the extension module using the specification linked above to +# provide pyright support. +def __dataclass_transform__( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()), +) -> Callable[[_T], _T]: ... + +class Attribute(Generic[_T]): + name: str + default: Optional[_T] + validator: Optional[_ValidatorType[_T]] + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: Optional[bool] + init: bool + converter: Optional[_ConverterType] + metadata: Dict[Any, Any] + type: Optional[Type[_T]] + kw_only: bool + on_setattr: _OnSetAttrType + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. 
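+# e.g. a call with no ``default`` and a validator typed ``_ValidatorType[int]``
+# selects this overload, so the attribute is inferred as ``int``.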
+@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. 
forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... +@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... 
+@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define +frozen = define # they differ only in their defaults + +# TODO: add support for returning NamedTuple from the mypy plugin +class _Fields(Tuple[Attribute[Any], ...]): + def __getattr__(self, name: str) -> Attribute[Any]: ... + +def fields(cls: type) -> _Fields: ... +def fields_dict(cls: type) -> Dict[str, Attribute[Any]]: ... +def validate(inst: Any) -> None: ... +def resolve_types( + cls: _C, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., +) -> _C: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> bool: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... 
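A small illustrative snippet (not part of the vendored stub; the Point class is hypothetical) exercising the inspection and serialization helpers whose signatures are declared above, namely fields, asdict, astuple, evolve, and has:

import attr

@attr.s(auto_attribs=True, frozen=True)
class Point:
    x: int
    y: int = 0

p = Point(1)
assert [a.name for a in attr.fields(Point)] == ["x", "y"]
assert attr.asdict(p) == {"x": 1, "y": 0}
assert attr.astuple(p) == (1, 0)
assert attr.evolve(p, y=5) == Point(1, 5)
assert attr.has(Point) and not attr.has(int)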
+ +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/myenv/lib/python3.9/site-packages/attr/_cmp.py b/myenv/lib/python3.9/site-packages/attr/_cmp.py new file mode 100644 index 0000000..6cffa4d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_cmp.py @@ -0,0 +1,154 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import functools + +from ._compat import new_class +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and + ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if + at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality + of two objects. + :param Optional[callable] lt: `callable` used to evaluate whether + one object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether + one object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether + one object is greater than another object. + :param Optional[callable] ge: `callable` used to evaluate whether + one object is greater than or equal to another object. + + :param bool require_same_type: When `True`, equality and ordering methods + will return `NotImplemented` if objects are not of the same type. + + :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. + num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = _make_ne() + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body)) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. + if 0 < num_order_functions < 4: + if not has_eq_function: + # functools.total_ordering requires __eq__ to be defined, + # so raise early error here to keep a nice stack. + raise ValueError( + "eq must be define is order to complete ordering from " + "lt, le, gt, ge." + ) + type_ = functools.total_ordering(type_) + + return type_ + + +def _make_init(): + """ + Create __init__ method. + """ + + def __init__(self, value): + """ + Initialize object with *value*. + """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. 
+ """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = "__%s__" % (name,) + method.__doc__ = "Return a %s b. Computed by attrs." % ( + _operation_names[name], + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + for func in self._requirements: + if not func(self, other): + return False + return True + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/myenv/lib/python3.9/site-packages/attr/_cmp.pyi b/myenv/lib/python3.9/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000..e71aaff --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Type + +from . import _CompareWithType + +def cmp_using( + eq: Optional[_CompareWithType], + lt: Optional[_CompareWithType], + le: Optional[_CompareWithType], + gt: Optional[_CompareWithType], + ge: Optional[_CompareWithType], + require_same_type: bool, + class_name: str, +) -> Type: ... diff --git a/myenv/lib/python3.9/site-packages/attr/_compat.py b/myenv/lib/python3.9/site-packages/attr/_compat.py new file mode 100644 index 0000000..dc0cb02 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_compat.py @@ -0,0 +1,261 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import platform +import sys +import threading +import types +import warnings + + +PY2 = sys.version_info[0] == 2 +PYPY = platform.python_implementation() == "PyPy" +PY36 = sys.version_info[:2] >= (3, 6) +HAS_F_STRINGS = PY36 +PY310 = sys.version_info[:2] >= (3, 10) + + +if PYPY or PY36: + ordered_dict = dict +else: + from collections import OrderedDict + + ordered_dict = OrderedDict + + +if PY2: + from collections import Mapping, Sequence + + from UserDict import IterableUserDict + + # We 'bundle' isclass instead of using inspect as importing inspect is + # fairly expensive (order of 10-15 ms for a modern machine in 2016) + def isclass(klass): + return isinstance(klass, (type, types.ClassType)) + + def new_class(name, bases, kwds, exec_body): + """ + A minimal stub of types.new_class that we need for make_class. + """ + ns = {} + exec_body(ns) + + return type(name, bases, ns) + + # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. + TYPE = "type" + + def iteritems(d): + return d.iteritems() + + # Python 2 is bereft of a read-only dict proxy, so we make one! + class ReadOnlyDict(IterableUserDict): + """ + Best-effort read-only dict wrapper. + """ + + def __setitem__(self, key, val): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError( + "'mappingproxy' object does not support item assignment" + ) + + def update(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'update'" + ) + + def __delitem__(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError( + "'mappingproxy' object does not support item deletion" + ) + + def clear(self): + # We gently pretend we're a Python 3 mappingproxy. 
+ raise AttributeError( + "'mappingproxy' object has no attribute 'clear'" + ) + + def pop(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'pop'" + ) + + def popitem(self): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'popitem'" + ) + + def setdefault(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'setdefault'" + ) + + def __repr__(self): + # Override to be identical to the Python 3 version. + return "mappingproxy(" + repr(self.data) + ")" + + def metadata_proxy(d): + res = ReadOnlyDict() + res.data.update(d) # We blocked update, so we have to do it like this. + return res + + def just_warn(*args, **kw): # pragma: no cover + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + +else: # Python 3 and later. + from collections.abc import Mapping, Sequence # noqa + + def just_warn(*args, **kw): + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + warnings.warn( + "Running interpreter doesn't sufficiently support code object " + "introspection. Some features like bare super() or accessing " + "__class__ will not work with slotted classes.", + RuntimeWarning, + stacklevel=2, + ) + + def isclass(klass): + return isinstance(klass, type) + + TYPE = "class" + + def iteritems(d): + return d.items() + + new_class = types.new_class + + def metadata_proxy(d): + return types.MappingProxyType(dict(d)) + + +def make_set_closure_cell(): + """Return a function of two arguments (cell, value) which sets + the value stored in the closure cell `cell` to `value`. + """ + # pypy makes this easy. (It also supports the logic below, but + # why not do the easy/fast thing?) + if PYPY: + + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + + return set_closure_cell + + # Otherwise gotta do it the hard way. + + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + try: + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. + if PY2: + co = set_first_cellvar_to.func_code + else: + co = set_first_cellvar_to.__code__ + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover + + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. + if sys.version_info >= (3, 8): + # CPython 3.8+ has an incompatible CodeType signature + # (added a posonlyargcount argument) but also added + # CodeType.replace() to do this without counting parameters. 
+ set_first_freevar_code = co.replace( + co_cellvars=co.co_freevars, co_freevars=co.co_cellvars + ) + else: + args = [co.co_argcount] + if not PY2: + args.append(co.co_kwonlyargcount) + args.extend( + [ + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + # These two arguments are reversed: + co.co_cellvars, + co.co_freevars, + ] + ) + set_first_freevar_code = types.CodeType(*args) + + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) + + # Make sure it works on this interpreter: + def make_func_with_cell(): + x = None + + def func(): + return x # pragma: no cover + + return func + + if PY2: + cell = make_func_with_cell().func_closure[0] + else: + cell = make_func_with_cell().__closure__[0] + set_closure_cell(cell, 100) + if cell.cell_contents != 100: + raise AssertionError # pragma: no cover + + except Exception: + return just_warn + else: + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() diff --git a/myenv/lib/python3.9/site-packages/attr/_config.py b/myenv/lib/python3.9/site-packages/attr/_config.py new file mode 100644 index 0000000..fc9be29 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_config.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. 
+ """ + return _run_validators diff --git a/myenv/lib/python3.9/site-packages/attr/_funcs.py b/myenv/lib/python3.9/site-packages/attr/_funcs.py new file mode 100644 index 0000000..4c90085 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_funcs.py @@ -0,0 +1,422 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import copy + +from ._compat import iteritems +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the ``attrs`` attribute values of *inst* as a dict. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + :param Optional[callable] value_serializer: A hook that is called for every + attribute or dict key/value. It receives the current instance, field + and value and must return the (updated) value. The hook is run *after* + the optional *filter* has been applied. + + :rtype: return type of *dict_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 If a dict has a collection for a key, it is + serialized as a tuple. 
+ """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + ) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in iteritems(v) + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in iteritems(val) + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the ``attrs`` attribute values of *inst* as a tuple. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. 
+ :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + rv.append( + cf( + [ + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + for j in v + ] + ) + ) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv, + ) + for kk, vv in iteritems(v) + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with ``attrs`` attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. + + :rtype: bool + """ + return getattr(cls, "__attrs_attrs__", None) is not None + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't + be found on *cls*. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. deprecated:: 17.1.0 + Use `attrs.evolve` instead if you can. + This function will not be removed du to the slightly different approach + compared to `attrs.evolve`. + """ + import warnings + + warnings.warn( + "assoc is deprecated and will be removed after 2018/01.", + DeprecationWarning, + stacklevel=2, + ) + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in iteritems(changes): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + raise AttrsAttributeNotFoundError( + "{k} is not an attrs attribute on {cl}.".format( + k=k, cl=new.__class__ + ) + ) + _obj_setattr(new, k, v) + return new + + +def evolve(inst, **changes): + """ + Create a new instance, based on *inst* with *changes* applied. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise TypeError: If *attr_name* couldn't be found in the class + ``__init__``. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 17.1.0 + """ + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. 
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +def resolve_types(cls, globalns=None, localns=None, attribs=None): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in `Attribute`'s *type* + field. In other words, you don't need to resolve your types if you only + use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, e.g. if the name only exists + inside a method, you may pass *globalns* or *localns* to specify other + dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + :param type cls: Class to resolve. + :param Optional[dict] globalns: Dictionary containing global variables. + :param Optional[dict] localns: Dictionary containing local variables. + :param Optional[list] attribs: List of attribs for the given class. + This is necessary when calling from inside a ``field_transformer`` + since *cls* is not an ``attrs`` class yet. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class and you didn't pass any attribs. + :raise NameError: If types cannot be resolved because of missing variables. + + :returns: *cls* so you can use this function also as a class decorator. + Please note that you have to apply it **after** `attrs.define`. That + means the decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + + """ + # Since calling get_type_hints is expensive we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + hints = typing.get_type_hints(cls, globalns=globalns, localns=localns) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _obj_setattr(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/myenv/lib/python3.9/site-packages/attr/_make.py b/myenv/lib/python3.9/site-packages/attr/_make.py new file mode 100644 index 0000000..d46f8a3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_make.py @@ -0,0 +1,3173 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import copy +import inspect +import linecache +import sys +import warnings + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + HAS_F_STRINGS, + PY2, + PY310, + PYPY, + isclass, + iteritems, + metadata_proxy, + new_class, + ordered_dict, + set_closure_cell, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + PythonTooOldError, + UnannotatedAttributeError, +) + + +if not PY2: + import typing + + +# This is used at least twice, so cache it here. 
+_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = ( + " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" +) +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = metadata_proxy({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(object): + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + ``_Nothing`` is a singleton. There is only ever one of it. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + """ + + _singleton = None + + def __new__(cls): + if _Nothing._singleton is None: + _Nothing._singleton = super(_Nothing, cls).__new__(cls) + return _Nothing._singleton + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + def __len__(self): + return 0 # __bool__ for Python 2 + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + if PY2: + # For some reason `type(None)` isn't callable in Python 2, but we don't + # actually need a constructor for None objects, we just need any + # available function that returns None. + def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)): + return _none_constructor, _args + + else: + + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + `attr.s`! + + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. 
+ + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: `callable` or a `list` of `callable`\\ s. + + :param repr: Include this attribute in the generated ``__repr__`` + method. If ``True``, include the attribute; if ``False``, omit it. By + default, the built-in ``repr()`` function is used. To override how the + attribute value is formatted, pass a ``callable`` that takes a single + value and returns a string. Note that the resulting string is used + as-is, i.e. it will be used directly *instead* of calling ``repr()`` + (the default). + :type repr: a `bool` or a `callable` to use a custom function. + + :param eq: If ``True`` (default), include this attribute in the + generated ``__eq__`` and ``__ne__`` methods that check two instances + for equality. To override how the attribute value is compared, + pass a ``callable`` that takes a single value and returns the value + to be compared. + :type eq: a `bool` or a `callable`. + + :param order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. + To override how the attribute value is ordered, + pass a ``callable`` that takes a single value and returns the value + to be ordered. + :type order: a `bool` or a `callable`. + + :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the + same value. Must not be mixed with *eq* or *order*. + :type cmp: a `bool` or a `callable`. + + :param Optional[bool] hash: Include this attribute in the generated + ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This + is the correct behavior according the Python spec. Setting this value + to anything else than ``None`` is *discouraged*. + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + :param callable converter: `callable` that is called by + ``attrs``-generated ``__init__`` methods to convert attribute's value + to the desired format. It is given the passed-in value, and the + returned value will be used as the new value of the attribute. The + value is converted before being passed to the validator, if any. + :param metadata: An arbitrary mapping, to be used by third-party + components. See `extending_metadata`. + :param type: The type of the attribute. In Python 3.6 or greater, the + preferred method to specify the type is using a variable annotation + (see `PEP 526 `_). + This argument is provided for backward compatibility. + Regardless of the approach used, the type will be stored on + ``Attribute.type``. + + Please note that ``attrs`` doesn't do anything with this metadata by + itself. You can use it as part of your own code or for + `static type checking `. + :param kw_only: Make this attribute keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param on_setattr: Allows to overwrite the *on_setattr* setting from + `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. 
+ Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `attr.s`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if factory is not None: + if default is not NOTHING: + raise ValueError( + "The `default` and `factory` arguments are mutually " + "exclusive." + ) + if not callable(factory): + raise ValueError("The `factory` argument must be a callable.") + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + "Exec" the script with the given global (globs) and local (locs) variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs=None): + """ + Create the method with the script given and return the method object. + """ + locs = {} + if globs is None: + globs = {} + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + else: + filename = "{}-{}>".format(base_filename[:-1], count) + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. 
+ + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + _tuple_property_pat.format(index=i, attr_name=attr_name) + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + + Requires Python 3. + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. 
+ for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + *collect_by_mro* is True, collect them in the correct MRO order, otherwise + use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = [(name, ca) for name, ca in iteritems(these)] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. Attribute in question: %r" % (a,) + ) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +if PYPY: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. 
+ """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + +else: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder(object): + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = set(a.name for a in base_attrs) + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). 
+ if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = object.__setattr__ + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in iteritems(self._cls_dict) + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = object.__setattr__ + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = dict() + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overriden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in iteritems(existing_slots) + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + cd["__slots__"] = tuple(slot_names) + + qualname = getattr(self._cls, "__qualname__", None) + if qualname is not None: + cd["__qualname__"] = qualname + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . 
On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # ValueError: Cell is empty + pass + else: + if match: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return tuple(getattr(self, name) for name in state_attr_names) + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + raise ValueError( + "Can't combine custom __setattr__ with on_setattr hooks." + ) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _obj_setattr(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + try: + method.__module__ = self._cls.__module__ + except AttributeError: + pass + + try: + method.__qualname__ = ".".join( + (self._cls.__qualname__, method.__name__) + ) + except AttributeError: + pass + + try: + method.__doc__ = "Method generated by attrs for class %s." % ( + self._cls.__qualname__, + ) + except AttributeError: + pass + + return method + + +_CMP_DEPRECATION = ( + "The usage of `cmp` is deprecated and will be removed on or after " + "2021-06-01. Please use `eq` and `order` instead." +) + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + # cmp takes precedence due to bw-compatibility. 
+ if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signal that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + + auto_detect must be False on Python 2. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. + for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds `dunder + `_\ -methods according to the + specified attributes using `attr.ib` or the *these* argument. + + :param these: A dictionary of name to `attr.ib` mappings. This is + useful to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. + + If *these* is not ``None``, ``attrs`` will *not* search the class body + for attributes and will *not* remove any attributes from it. + + If *these* is an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used. 
+ + :type these: `dict` of `str` to `attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. + :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, + *order*, and *hash* arguments explicitly, assume they are set to + ``True`` **unless any** of the involved methods for one of the + arguments is implemented in the *current* class (i.e. it is *not* + inherited from some base class). + + So for example by implementing ``__eq__`` on a class yourself, + ``attrs`` will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible + ``__ne__`` by default, so it *should* be enough to only implement + ``__eq__`` in most cases). + + .. warning:: + + If you prevent ``attrs`` from creating the ordering methods for you + (``order=False``, e.g. by implementing ``__le__``), it becomes + *your* responsibility to make sure its ordering is sound. The best + way is to use the `functools.total_ordering` decorator. + + + Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, + *cmp*, or *hash* overrides whatever *auto_detect* would determine. + + *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises + an `attrs.exceptions.PythonTooOldError`. + + :param bool repr: Create a ``__repr__`` method with a human readable + representation of ``attrs`` attributes.. + :param bool str: Create a ``__str__`` method that is identical to + ``__repr__``. This is usually not necessary except for + `Exception`\ s. + :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` + and ``__ne__`` methods that check two instances for equality. + + They compare the instances as if they were tuples of their ``attrs`` + attributes if and only if the types of both classes are *identical*! + :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, + ``__gt__``, and ``__ge__`` methods that behave like *eq* above and + allow instances to be ordered. If ``None`` (default) mirror value of + *eq*. + :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* + and *order* to the same value. Must not be mixed with *eq* or *order*. + :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method + is generated according how *eq* and *frozen* are set. + + 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to + None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning the + ``__hash__`` method of the base class will be used (if base class is + ``object``, this means it will fall back to id-based hashing.). + + Although not recommended, you can decide for yourself and force + ``attrs`` to create one (e.g. if the class is immutable even though you + didn't freeze it programmatically) by passing ``True`` or not. Both of + these cases are rather special and should be used carefully. + + See our documentation on `hashing`, Python's documentation on + `object.__hash__`, and the `GitHub issue that led to the default \ + behavior `_ for more + details. + :param bool init: Create a ``__init__`` method that initializes the + ``attrs`` attributes. Leading underscores are stripped for the argument + name. If a ``__attrs_pre_init__`` method exists on the class, it will + be called before the class is initialized. 
If a ``__attrs_post_init__`` + method exists on the class, it will be called after the class is fully + initialized. + + If ``init`` is ``False``, an ``__attrs_init__`` method will be + injected instead. This allows you to define a custom ``__init__`` + method that can do pre-init work such as ``super().__init__()``, + and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. + :param bool slots: Create a `slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the default + dict classes, but have some gotchas you should know about, so we + encourage you to read the `glossary entry `. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + `attr.exceptions.FrozenInstanceError` is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` method + on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor a runtime performance `impact + ` when initializing new instances. In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + :param bool weakref_slot: Make instances weak-referenceable. This has no + effect unless ``slots`` is also enabled. + :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated + attributes (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an `attr.ib` but lacks a type + annotation, an `attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also + works as expected in most cases (see warning below). + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `attr.ib` are **ignored**. + + .. warning:: + For features that use the attribute name to create decorators (e.g. + `validators `), you still *must* assign `attr.ib` to + them. Otherwise Python will either not find the name or try to use + the default value to call e.g. ``validator`` on it. + + These errors can be quite confusing and probably the most common bug + report on our bug tracker. + + .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ + :param bool kw_only: Make all attributes keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param bool cache_hash: Ensure that the object's hash code is computed + only once and stored on the object. If this is set to ``True``, + hashing must be either explicitly or implicitly enabled for this + class. If the hash code is cached, avoid any reassignments of + fields involved in hash code computation or mutations of the objects + those fields point to after object creation. If such changes occur, + the behavior of the object's hash code is undefined. 
+ :param bool auto_exc: If the class subclasses `BaseException` + (which implicitly includes any subclass of any exception), the + following happens to behave like a well-behaved Python exceptions + class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids (N.B. ``attrs`` will + *not* remove existing implementations of ``__hash__`` or the equality + methods. It just won't add own ones.), + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the ``args`` + attribute, + - the value of *str* is ignored leaving ``__str__`` to base classes. + :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` + collects attributes from base classes. The default behavior is + incorrect in certain cases of multiple inheritance. It should be on by + default but is kept off for backward-compatibility. + + See issue `#428 `_ for + more details. + + :param Optional[bool] getstate_setstate: + .. note:: + This is usually only interesting for slotted classes and you should + probably just set *auto_detect* to `True`. + + If `True`, ``__getstate__`` and + ``__setstate__`` are generated and attached to the class. This is + necessary for slotted classes to be pickleable. If left `None`, it's + `True` by default for slotted classes and ``False`` for dict classes. + + If *auto_detect* is `True`, and *getstate_setstate* is left `None`, + and **either** ``__getstate__`` or ``__setstate__`` is detected directly + on the class (i.e. not inherited), it is set to `False` (this is usually + what you want). + + :param on_setattr: A callable that is run whenever the user attempts to set + an attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments + as validators: the instance, the attribute that is being modified, and + the new value. + + If no exception is raised, the attribute is set to the return value of + the callable. + + If a list of callables is passed, they're automatically wrapped in an + `attrs.setters.pipe`. + + :param Optional[callable] field_transformer: + A function that is called with the original class object and all + fields right before ``attrs`` finalizes the class. You can use + this, e.g., to automatically add converters or validators to + fields based on their types. See `transform-fields` for more details. + + :param bool match_args: + If `True` (default), set ``__match_args__`` on the class to support + `PEP 634 `_ (Structural + Pattern Matching). It is a tuple of all positional-only ``__init__`` + parameter names on Python 3.10 and later. Ignored on older Python + versions. + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + `DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq`` and ``__ne__`` never tried to compared subclasses + to each other. + .. 
versionchanged:: 19.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider + subclasses comparable anymore. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + .. versionadded:: 19.1.0 *auto_exc* + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + """ + if auto_detect and PY2: + raise PythonTooOldError( + "auto_detect only works on Python 3 and later." + ) + + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + hash_ = hash # work around the lack of nonlocal + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + + if getattr(cls, "__class__", None) is None: + raise TypeError("attrs only works with new-style classes.") + + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + raise ValueError("Can't freeze a class with a custom __setattr__.") + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + if ( + hash_ is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + else: + hash = hash_ + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." 
+ ) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +if PY2: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return ( + getattr(cls.__setattr__, "__module__", None) + == _frozen_setattrs.__module__ + and cls.__setattr__.__name__ == _frozen_setattrs.__name__ + ) + +else: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ == _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. + """ + unique_filename = "".format( + func_name, + cls.__module__, + getattr(cls, "__qualname__", cls.__name__), + ) + return unique_filename + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + if not PY2: + hash_def += ", *" + + hash_def += ( + ", _cache_wrapper=" + + "__import__('attr._make')._make._CacheHashWrapper):" + ) + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + " %d," % (type_hash,), + ] + ) + + for a in attrs: + method_lines.append(indent + " self.%s," % a.name) + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) + if frozen: + append_hash_computation_lines( + "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + "self.%s = " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab + "return self.%s" % _hash_cache_field) + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. 
+ """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = "_%s_key" % (a.name,) + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + " %s(self.%s)," + % ( + cmp_name, + a.name, + ) + ) + others.append( + " %s(other.%s)," + % ( + cmp_name, + a.name, + ) + ) + else: + lines.append(" self.%s," % (a.name,)) + others.append(" other.%s," % (a.name,)) + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +if HAS_F_STRINGS: + + def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r + for name, r, _ in attr_names_with_reprs + if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name + if i + else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = ( + '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + ) + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + " return f'%s(%s)'" % (cls_name_fragment, repr_fragment), + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + +else: + + def _make_repr(attrs, ns, _): + """ + Make a repr method that includes relevant *attrs*, adding *ns* to the + full name. + """ + + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, repr if a.repr is True else a.repr) + for a in attrs + if a.repr is not False + ) + + def __repr__(self): + """ + Automatically created by attrs. + """ + try: + already_repring = _compat.repr_context.already_repring + except AttributeError: + already_repring = set() + _compat.repr_context.already_repring = already_repring + + if id(self) in already_repring: + return "..." + real_cls = self.__class__ + if ns is None: + qualname = getattr(real_cls, "__qualname__", None) + if qualname is not None: # pragma: no cover + # This case only happens on Python 3.5 and 3.6. We exclude + # it from coverage, because we don't want to slow down our + # test suite by running them under coverage too for this + # one line. + class_name = qualname.rsplit(">.", 1)[-1] + else: + class_name = real_cls.__name__ + else: + class_name = ns + "." + real_cls.__name__ + + # Since 'self' remains on the stack (i.e.: strongly referenced) + # for the duration of this call, it's safe to depend on id(...) + # stability, and not need to track the instance and therefore + # worry about properties like weakref- or hash-ability. + already_repring.add(id(self)) + try: + result = [class_name, "("] + first = True + for name, attr_repr in attr_names_with_reprs: + if first: + first = False + else: + result.append(", ") + result.extend( + (name, "=", attr_repr(getattr(self, name, NOTHING))) + ) + return "".join(result) + ")" + finally: + already_repring.remove(id(self)) + + return __repr__ + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of ``attrs`` attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. 
+ """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of ``attrs`` attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: an ordered dict where keys are attribute names and values are + `attrs.Attribute`\\ s. This will be a `dict` if it's + naturally ordered like on Python 3.6+ or an + :class:`~collections.OrderedDict` otherwise. + + .. versionadded:: 18.1.0 + """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return ordered_dict(((a.name, a) for a in attrs)) + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with ``attrs`` attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr"] = _obj_setattr + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. 
+ """ + return "_setattr('%s', %s)" % (attr_name, value_var) + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr('%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return "self.%s = %s" % (attr_name, value) + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +if PY2: + + def _unpack_kw_only_py2(attr_name, default=None): + """ + Unpack *attr_name* from _kw_only dict. + """ + if default is not None: + arg_default = ", %s" % default + else: + arg_default = "" + return "%s = _kw_only.pop('%s'%s)" % ( + attr_name, + attr_name, + arg_default, + ) + + def _unpack_kw_only_lines_py2(kw_only_args): + """ + Unpack all *kw_only_args* from _kw_only dict and handle errors. + + Given a list of strings "{attr_name}" and "{attr_name}={default}" + generates list of lines of code that pop attrs from _kw_only dict and + raise TypeError similar to builtin if required attr is missing or + extra key is passed. + + >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"]))) + try: + a = _kw_only.pop('a') + b = _kw_only.pop('b', 42) + except KeyError as _key_error: + raise TypeError( + ... + if _kw_only: + raise TypeError( + ... + """ + lines = ["try:"] + lines.extend( + " " + _unpack_kw_only_py2(*arg.split("=")) + for arg in kw_only_args + ) + lines += """\ +except KeyError as _key_error: + raise TypeError( + '__init__() missing required keyword-only argument: %s' % _key_error + ) +if _kw_only: + raise TypeError( + '__init__() got an unexpected keyword argument %r' + % next(iter(_kw_only)) + ) +""".split( + "\n" + ) + return lines + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. + # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. 
+ # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return "_inst_dict['%s'] = %s" % (attr_name, value_var) + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + arg_name = a.name.lstrip("_") + + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = "%s=NOTHING" % (arg_name,) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append("if %s is not NOTHING:" % (arg_name,)) + + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self 
+ ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None and not PY2: + # Try to get the type from the converter. + sig = None + try: + sig = inspect.signature(a.converter) + except (ValueError, TypeError): # inspect failed + pass + if sig: + sig_params = list(sig.parameters.values()) + if ( + sig_params + and sig_params[0].annotation + is not inspect.Parameter.empty + ): + annotations[arg_name] = sig_params[0].annotation + + if attrs_to_validate: # we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append( + " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) + ) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr('%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join("self." + a.name for a in attrs if a.init) + + lines.append("BaseException.__init__(self, %s)" % (vals,)) + + args = ", ".join(args) + if kw_only_args: + if PY2: + lines = _unpack_kw_only_lines_py2(kw_only_args) + lines + + args += "%s**_kw_only" % (", " if args else "",) # leading comma + else: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + return ( + """\ +def {init_name}(self, {args}): + {lines} +""".format( + init_name=("__attrs_init__" if attrs_init else "__init__"), + args=args, + lines="\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +class Attribute(object): + """ + *Read-only* representation of an attribute. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. 
See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + metadata_proxy(metadata) + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict + ) + + @property + def cmp(self): + """ + Simulate the presence of a cmp attribute and warn. + """ + warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) + + return self.eq and self.order + + # Don't use attr.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attr.evolve` but that function does not work + with ``Attribute``. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. 
+ """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + metadata_proxy(value) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr(object): + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + ) + ) + ( + Attribute( + name="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory(object): + """ + Stores a factory callable. 
+ + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + """ + `Factory` is part of the default machinery so if we want a default + value here, we have to implement it ourselves. + """ + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + + If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = dict((a, attrib()) for a in attrs) + else: + raise TypeError("attrs argument must be a dict or a list.") + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + # We do it here for proper warnings with meaningful stacklevel. 
+ cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator(object): + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not PY2: + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type. + sig = None + try: + sig = inspect.signature(converters[0]) + except (ValueError, TypeError): # inspect failed + pass + if sig: + params = list(sig.parameters.values()) + if ( + params + and params[0].annotation is not inspect.Parameter.empty + ): + pipe_converter.__annotations__["val"] = params[ + 0 + ].annotation + # Get return type. + sig = None + try: + sig = inspect.signature(converters[-1]) + except (ValueError, TypeError): # inspect failed + pass + if sig and sig.return_annotation is not inspect.Signature().empty: + pipe_converter.__annotations__[ + "return" + ] = sig.return_annotation + + return pipe_converter diff --git a/myenv/lib/python3.9/site-packages/attr/_next_gen.py b/myenv/lib/python3.9/site-packages/attr/_next_gen.py new file mode 100644 index 0000000..0682536 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_next_gen.py @@ -0,0 +1,216 @@ +# SPDX-License-Identifier: MIT + +""" +These are Python 3.6+-only and keyword-only APIs that call `attr.s` and +`attr.ib` with different default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + NOTHING, + _frozen_setattrs, + _ng_default_on_setattr, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + Define an ``attrs`` class. 
+ + Differences to the classic `attr.s` that it uses underneath: + + - Automatically detect whether or not *auto_attribs* should be `True` + (c.f. *auto_attribs* parameter). + - If *frozen* is `False`, run converters and validators when setting an + attribute by default. + - *slots=True* (see :term:`slotted classes` for potentially surprising + behaviors) + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - *match_args=True* + - Some options that were only relevant on Python 2 or were kept around for + backwards-compatibility have been removed. + + Please note that these are all defaults and you can change them as you + wish. + + :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves + exactly like `attr.s`. If left `None`, `attr.s` will try to guess: + + 1. If any attributes are annotated and no unannotated `attrs.fields`\ s + are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.fields`\ s. + + For now, please refer to `attr.s` for the rest of the parameters. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + raise ValueError( + "Frozen classes can't use on_setattr " + "(frozen-ness was inherited)." + ) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. 
versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/myenv/lib/python3.9/site-packages/attr/_version_info.py b/myenv/lib/python3.9/site-packages/attr/_version_info.py new file mode 100644 index 0000000..cdaeec3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_version_info.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo(object): + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/myenv/lib/python3.9/site-packages/attr/_version_info.pyi b/myenv/lib/python3.9/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000..45ced08 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... 
+ @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/myenv/lib/python3.9/site-packages/attr/converters.py b/myenv/lib/python3.9/site-packages/attr/converters.py new file mode 100644 index 0000000..1fb6c05 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/converters.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + +from __future__ import absolute_import, division, print_function + +from ._compat import PY2 +from ._make import NOTHING, Factory, pipe + + +if not PY2: + import inspect + import typing + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + if not PY2: + sig = None + try: + sig = inspect.signature(converter) + except (ValueError, TypeError): # inspect failed + pass + if sig: + params = list(sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + optional_converter.__annotations__["val"] = typing.Optional[ + params[0].annotation + ] + if sig.return_annotation is not inspect.Signature.empty: + optional_converter.__annotations__["return"] = typing.Optional[ + sig.return_annotation + ] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. 
versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + raise ValueError("Cannot convert value to bool: {}".format(val)) diff --git a/myenv/lib/python3.9/site-packages/attr/converters.pyi b/myenv/lib/python3.9/site-packages/attr/converters.pyi new file mode 100644 index 0000000..0f58088 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, Optional, TypeVar, overload + +from . import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... diff --git a/myenv/lib/python3.9/site-packages/attr/exceptions.py b/myenv/lib/python3.9/site-packages/attr/exceptions.py new file mode 100644 index 0000000..b2f1edc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/exceptions.py @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An ``attrs`` function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-``attrs`` class has been passed into an ``attrs`` function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set using ``attr.ib()`` and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type + annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an ``attrs`` feature that requires a newer Python + version. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A ``attr.ib()`` requiring a callable has been set with a value + that is not callable. + + .. 
versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/myenv/lib/python3.9/site-packages/attr/exceptions.pyi b/myenv/lib/python3.9/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000..f268011 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/myenv/lib/python3.9/site-packages/attr/filters.py b/myenv/lib/python3.9/site-packages/attr/filters.py new file mode 100644 index 0000000..a1978a8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/filters.py @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from __future__ import absolute_import, division, print_function + +from ._compat import isclass +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isclass(cls)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. + :type what: `list` of `type` or `attrs.Attribute`\\ s + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def include_(attribute, value): + return value.__class__ in cls or attribute in attrs + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes or `attrs.Attribute`\\ s. + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def exclude_(attribute, value): + return value.__class__ not in cls and attribute not in attrs + + return exclude_ diff --git a/myenv/lib/python3.9/site-packages/attr/filters.pyi b/myenv/lib/python3.9/site-packages/attr/filters.pyi new file mode 100644 index 0000000..9938668 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any, Union + +from . import Attribute, _FilterType + +def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/myenv/lib/python3.9/site-packages/attr/py.typed b/myenv/lib/python3.9/site-packages/attr/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/attr/setters.py b/myenv/lib/python3.9/site-packages/attr/setters.py new file mode 100644 index 0000000..b1cbb5d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/setters.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + +from __future__ import absolute_import, division, print_function + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. 
versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +NO_OP = object() +""" +Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. + +Does not work in `pipe` or within lists. + +.. versionadded:: 20.1.0 +""" diff --git a/myenv/lib/python3.9/site-packages/attr/setters.pyi b/myenv/lib/python3.9/site-packages/attr/setters.pyi new file mode 100644 index 0000000..3f5603c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/setters.pyi @@ -0,0 +1,19 @@ +from typing import Any, NewType, NoReturn, TypeVar, cast + +from . import Attribute, _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/myenv/lib/python3.9/site-packages/attr/validators.py b/myenv/lib/python3.9/site-packages/attr/validators.py new file mode 100644 index 0000000..0b0c834 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/validators.py @@ -0,0 +1,561 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + +from __future__ import absolute_import, division, print_function + +import operator +import re + +from contextlib import contextmanager + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .exceptions import NotCallableError + + +try: + Pattern = re.Pattern +except AttributeError: # Python <3.7 lacks a Pattern type. + Pattern = type(re.compile("")) + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "optional", + "provides", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + :param disabled: If ``True``, disable running all validators. + :type disabled: bool + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + :return: ``True`` if validators are currently disabled. + :rtype: bool + + .. 
versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, hash=True) +class _InstanceOfValidator(object): + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + raise TypeError( + "'{name}' must be {type!r} (got {value!r} that is a " + "{actual!r}).".format( + name=attr.name, + type=self.type, + actual=value.__class__, + value=value, + ), + attr, + self.type, + value, + ) + + def __repr__(self): + return "<instance_of validator for type {type!r}>".format( + type=self.type + ) + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + :param type: The type to check for. + :type type: type or tuple of types + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected type, and the value it + got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator(object): + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + raise ValueError( + "'{name}' must match regex {pattern!r}" + " ({value!r} doesn't)".format( + name=attr.name, pattern=self.pattern.pattern, value=value + ), + attr, + self.pattern, + value, + ) + + def __repr__(self): + return "<matches_re validator for pattern {pattern!r}>".format( + pattern=self.pattern + ) + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called + with a string that doesn't match *regex*. + + :param regex: a regex string or precompiled pattern to match against + :param int flags: flags that will be passed to the underlying re function + (default 0) + :param callable func: which underlying `re` function to call (options + are `re.fullmatch`, `re.search`, `re.match`, default + is ``None`` which means either `re.fullmatch` or an emulation of + it on Python 2). For performance reasons, they won't be used directly + but on a pre-`re.compile`\ ed pattern. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+ """ + fullmatch = getattr(re, "fullmatch", None) + valid_funcs = (fullmatch, None, re.search, re.match) + if func not in valid_funcs: + raise ValueError( + "'func' must be one of {}.".format( + ", ".join( + sorted( + e and e.__name__ or "None" for e in set(valid_funcs) + ) + ) + ) + ) + + if isinstance(regex, Pattern): + if flags: + raise TypeError( + "'flags' can only be used with a string pattern; " + "pass flags to re.compile() instead" + ) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + elif fullmatch: + match_func = pattern.fullmatch + else: # Python 2 fullmatch emulation (https://bugs.python.org/issue16203) + pattern = re.compile( + r"(?:{})\Z".format(pattern.pattern), pattern.flags + ) + match_func = pattern.match + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, hash=True) +class _ProvidesValidator(object): + interface = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.interface.providedBy(value): + raise TypeError( + "'{name}' must provide {interface!r} which {value!r} " + "doesn't.".format( + name=attr.name, interface=self.interface, value=value + ), + attr, + self.interface, + value, + ) + + def __repr__(self): + return "".format( + interface=self.interface + ) + + +def provides(interface): + """ + A validator that raises a `TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param interface: The interface to check for. + :type interface: ``zope.interface.Interface`` + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected interface, and the + value it got. + """ + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator(object): + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return "".format( + what=repr(self.validator) + ) + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. + + :param validator: A validator (or a list of validators) that is used for + non-``None`` values. + :type validator: callable or `list` of callables. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + """ + if isinstance(validator, list): + return _OptionalValidator(_AndValidator(validator)) + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator(object): + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})".format( + name=attr.name, options=self.options, value=value + ) + ) + + def __repr__(self): + return "".format( + options=self.options + ) + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called + with a value that does not belong in the options provided. 
The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, `enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type `attrs.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + """ + return _InValidator(options) + + +@attrs(repr=False, slots=False, hash=True) +class _IsCallableValidator(object): + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "<is_callable validator>" + + +def is_callable(): + """ + A validator that raises a `attr.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. + + .. versionadded:: 19.1.0 + + :raises `attr.exceptions.NotCallableError`: With a human readable error + message containing the attribute (`attrs.Attribute`) name, + and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, hash=True) +class _DeepIterable(object): + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else " {iterable!r}".format(iterable=self.iterable_validator) + ) + return ( + "<deep_iterable validator for{iterable_identifier}" + " iterables of {member!r}>" + ).format( + iterable_identifier=iterable_identifier, + member=self.member_validator, + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + :param member_validator: Validator to apply to iterable members + :param iterable_validator: Validator to apply to iterable itself + (optional) + + .. versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, hash=True) +class _DeepMapping(object): + key_validator = attrib(validator=is_callable()) + value_validator = attrib(validator=is_callable()) + mapping_validator = attrib(default=None, validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + self.key_validator(inst, attr, key) + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return ( + "<deep_mapping validator for objects mapping {key!r} to {value!r}>" + ).format(key=self.key_validator, value=self.value_validator) + + +def deep_mapping(key_validator, value_validator, mapping_validator=None): + """ + A validator that performs deep validation of a dictionary. + + :param key_validator: Validator to apply to dictionary keys + :param value_validator: Validator to apply to dictionary values + :param mapping_validator: Validator to apply to top-level mapping + attribute (optional) + + ..
versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator(object): + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.compare_func(value, self.bound): + raise ValueError( + "'{name}' must be {op} {bound}: {value}".format( + name=attr.name, + op=self.compare_op, + bound=self.bound, + value=value, + ) + ) + + def __repr__(self): + return "<Validator for x {op} {bound}>".format( + op=self.compare_op, bound=self.bound + ) + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number larger or equal to *val*. + + :param val: Exclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number greater than *val*. + + :param val: Inclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller than *val*. + + :param val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller or equal to *val*. + + :param val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator(object): + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) > self.max_length: + raise ValueError( + "Length of '{name}' must be <= {max}: {len}".format( + name=attr.name, max=self.max_length, len=len(value) + ) + ) + + def __repr__(self): + return "<max_len validator for {max}>".format(max=self.max_length) + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + :param int length: Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) diff --git a/myenv/lib/python3.9/site-packages/attr/validators.pyi b/myenv/lib/python3.9/site-packages/attr/validators.pyi new file mode 100644 index 0000000..5e00b85 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attr/validators.pyi @@ -0,0 +1,78 @@ +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + List, + Mapping, + Match, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +from . import _ValidatorType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
+@overload +def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2]] +) -> _ValidatorType[Union[_T1, _T2]]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... +@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Union[Pattern[AnyStr], AnyStr], + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorType[_T], + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... +def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/AUTHORS.rst b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/AUTHORS.rst new file mode 100644 index 0000000..f14ef6c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/AUTHORS.rst @@ -0,0 +1,11 @@ +Credits +======= + +``attrs`` is written and maintained by `Hynek Schlawack `_. + +The development is kindly supported by `Variomedia AG `_. + +A full list of contributors can be found in `GitHub's overview `_. + +It’s the spiritual successor of `characteristic `_ and aspires to fix some of its clunkiness and unfortunate decisions. +Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/LICENSE new file mode 100644 index 0000000..7ae3df9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/METADATA new file mode 100644 index 0000000..aa327d5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/METADATA @@ -0,0 +1,232 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 21.4.0 +Summary: Classes Without Boilerplate +Home-page: https://www.attrs.org/ +Author: Hynek Schlawack +Author-email: hs@ox.cx +Maintainer: Hynek Schlawack +Maintainer-email: hs@ox.cx +License: MIT +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues +Project-URL: Source Code, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Project-URL: Ko-fi, https://ko-fi.com/the_hynek +Keywords: class,attribute,boilerplate +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: AUTHORS.rst +Provides-Extra: dev +Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'dev' +Requires-Dist: hypothesis ; extra == 'dev' +Requires-Dist: pympler ; extra == 'dev' +Requires-Dist: pytest (>=4.3.0) ; extra == 'dev' +Requires-Dist: six ; extra == 'dev' +Requires-Dist: mypy ; extra == 'dev' +Requires-Dist: pytest-mypy-plugins ; extra == 'dev' +Requires-Dist: zope.interface ; extra == 'dev' +Requires-Dist: furo ; extra == 'dev' +Requires-Dist: sphinx ; extra == 'dev' +Requires-Dist: sphinx-notfound-page ; extra == 'dev' +Requires-Dist: pre-commit ; extra == 'dev' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: zope.interface ; extra == 'docs' +Requires-Dist: sphinx-notfound-page ; extra == 'docs' +Provides-Extra: tests 
+Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests' +Requires-Dist: hypothesis ; extra == 'tests' +Requires-Dist: pympler ; extra == 'tests' +Requires-Dist: pytest (>=4.3.0) ; extra == 'tests' +Requires-Dist: six ; extra == 'tests' +Requires-Dist: mypy ; extra == 'tests' +Requires-Dist: pytest-mypy-plugins ; extra == 'tests' +Requires-Dist: zope.interface ; extra == 'tests' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests' +Provides-Extra: tests_no_zope +Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests_no_zope' +Requires-Dist: hypothesis ; extra == 'tests_no_zope' +Requires-Dist: pympler ; extra == 'tests_no_zope' +Requires-Dist: pytest (>=4.3.0) ; extra == 'tests_no_zope' +Requires-Dist: six ; extra == 'tests_no_zope' +Requires-Dist: mypy ; extra == 'tests_no_zope' +Requires-Dist: pytest-mypy-plugins ; extra == 'tests_no_zope' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests_no_zope' + + +.. image:: https://www.attrs.org/en/stable/_static/attrs_logo.png + :alt: attrs logo + :align: center + + +``attrs`` is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka `dunder methods `_). +`Trusted by NASA `_ for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + +.. teaser-end + +For that, it gives you a class decorator and a way to declaratively define the attributes on that class: + +.. -code-begin- + +.. code-block:: pycon + + >>> from attrs import asdict, define, make_class, Factory + + >>> @define + ... class SomeClass: + ... a_number: int = 42 + ... list_of_numbers: list[int] = Factory(list) + ... + ... def hard_math(self, another_number): + ... return self.a_number + sum(self.list_of_numbers) * another_number + + + >>> sc = SomeClass(1, [1, 2, 3]) + >>> sc + SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + + >>> sc.hard_math(3) + 19 + >>> sc == SomeClass(1, [1, 2, 3]) + True + >>> sc != SomeClass(2, [3, 2, 1]) + True + + >>> asdict(sc) + {'a_number': 1, 'list_of_numbers': [1, 2, 3]} + + >>> SomeClass() + SomeClass(a_number=42, list_of_numbers=[]) + + >>> C = make_class("C", ["a", "b"]) + >>> C("foo", "bar") + C(a='foo', b='bar') + + +After *declaring* your attributes ``attrs`` gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable ``__repr__``, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +**Hate type annotations**!? +No problem! +Types are entirely **optional** with ``attrs``. +Simply assign ``attrs.field()`` to the attributes instead of annotating them with types. + +---- + +This example uses ``attrs``'s modern APIs that have been introduced in version 20.1.0, and the ``attrs`` package import name that has been added in version 21.3.0. +The classic APIs (``@attr.s``, ``attr.ib``, plus their serious business aliases) and the ``attr`` package import name will remain **indefinitely**. + +Please check out `On The Core API Names `_ for a more in-depth explanation. + + +Data Classes +============ + +On the tin, ``attrs`` might remind you of ``dataclasses`` (and indeed, ``dataclasses`` are a descendant of ``attrs``). +In practice it does a lot more and is more flexible.
+For instance it allows you to define `special handling of NumPy arrays for equality checks `_, or allows more ways to `plug into the initialization process `_. + +For more details, please refer to our `comparison page `_. + + +.. -getting-help- + +Getting Help +============ + +Please use the ``python-attrs`` tag on `Stack Overflow `_ to get help. + +Answering questions of your fellow developers is also a great way to help the project! + + +.. -project-information- + +Project Information +=================== + +``attrs`` is released under the `MIT `_ license, +its documentation lives at `Read the Docs `_, +the code on `GitHub `_, +and the latest release on `PyPI `_. +It’s rigorously tested on Python 2.7, 3.5+, and PyPy. + +We collect information on **third-party extensions** in our `wiki `_. +Feel free to browse and add your own! + +If you'd like to contribute to ``attrs`` you're most welcome and we've written `a little guide `_ to get you started! + + +``attrs`` for Enterprise +------------------------ + +Available as part of the Tidelift Subscription. + +The maintainers of ``attrs`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. +`Learn more. `_ + + +Release Information +=================== + +21.4.0 (2021-12-29) +------------------- + +Changes +^^^^^^^ + +- Fixed the test suite on PyPy3.8 where ``cloudpickle`` does not work. + `#892 `_ +- Fixed ``coverage report`` for projects that use ``attrs`` and don't set a ``--source``. + `#895 `_, + `#896 `_ + +`Full changelog `_. + +Credits +======= + +``attrs`` is written and maintained by `Hynek Schlawack `_. + +The development is kindly supported by `Variomedia AG `_. + +A full list of contributors can be found in `GitHub's overview `_. + +It’s the spiritual successor of `characteristic `_ and aspires to fix some of its clunkiness and unfortunate decisions. +Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay?
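The vendored modules above fit together through this modern API: ``field()`` attaches a validator and an optional converter to each attribute, and because mutable ``@define`` classes install ``_ng_default_on_setattr`` (see ``_next_gen.py`` earlier in this diff), both run again on every assignment. Below is a minimal sketch of that flow, using only names exported by the files added here; the ``User`` class and its attributes are illustrative, not part of the package:

.. code-block:: python

    from attrs import asdict, converters, define, field, validators

    @define
    class User:
        # _InstanceOfValidator raises TypeError for values of the wrong type.
        name: str = field(validator=validators.instance_of(str))
        # matches_re() pre-compiles the pattern; the default func is re.fullmatch.
        email: str = field(validator=validators.matches_re(r"[^@]+@[^@]+"))
        # to_bool() maps strings such as "yes" / "off" to real booleans.
        active: bool = field(default=True, converter=converters.to_bool)

    user = User("Ada", "ada@example.org", "yes")
    assert user.active is True
    assert asdict(user) == {
        "name": "Ada",
        "email": "ada@example.org",
        "active": True,
    }

    # Mutable @define classes validate and convert on_setattr by default,
    # so an invalid assignment fails immediately.
    user.email = "not-an-email"  # raises ValueError from _MatchesReValidator

The on-assignment behaviour comes from ``define``'s ``wrap`` helper, which wires ``_ng_default_on_setattr`` into every mutable class unless a frozen base class disables it.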
+ + diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/RECORD new file mode 100644 index 0000000..896e259 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/RECORD @@ -0,0 +1,37 @@ +attr/__init__.py,sha256=_zhJ4O8Q5KR5gaIrjX73vkR5nA6NjfpMGXQChEdNljI,1667 +attr/__init__.pyi,sha256=ubRkstoRHPpQN17iA0OCh8waIwZ5NeJgbz0lwI8XUjY,15100 +attr/_cmp.py,sha256=JP0N7OIyTqIR3prUDfMZOR4DV4tlV_xXf39-bQg7xOo,4165 +attr/_cmp.pyi,sha256=oyjJVytrwwkUJOoe332IiYzp6pCVZEKKcKveH-ev604,317 +attr/_compat.py,sha256=i8u27AAK_4SzQnmTf3aliGV27UdYbJxdZ-O0tOHbLU8,8396 +attr/_config.py,sha256=aj1Lh8t2CuVa5nSxgCrLQtg_ZSdO8ZKeNJQd6RvpIp8,892 +attr/_funcs.py,sha256=sm_D12y2IyRW_bCnR7M-O7U5qHaieXr0BzINwJ7_K38,14753 +attr/_make.py,sha256=D05j0_ckcVIRFn2xHch5SPUCwh3t7WpeFj-3Ku9SocQ,102736 +attr/_next_gen.py,sha256=s5jCsVEQ4IhOjAykP4N0ETaWpg0RsgQttMvEZErUrhQ,5752 +attr/_version_info.py,sha256=sxD9yNai0jGbur_-RGEQHbgV2YX5_5G9PhrhBA5pA54,2194 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=uiiWTz8GLJe8I1Ty7UICK1DegVUnqHTXbOSnar7g7Nk,4078 +attr/converters.pyi,sha256=MQo7iEzPNVoFpKqD30sVwgVpdNoIeSCF2nsXvoxLZ-Y,416 +attr/exceptions.py,sha256=BMg7AljkJnvG-irMwL2TBHYlaLBXhSKnzoEWo4e42Zw,1981 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=JGZgvPGkdOfttkoL6XhXS6ZCoaVV5nZ8GCYeZNUN_mE,1124 +attr/filters.pyi,sha256=_Sm80jGySETX_Clzdkon5NHVjQWRl3Y3liQKZX1czXc,215 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=rH_UtQuHgQEC7hfZyMO_SJW0R1Gus7-a83U8igZfqs8,1466 +attr/setters.pyi,sha256=7dM10rqpQVDW0y-iJUnq8rabdO5Wx2Sbo5LwNa0IXl0,573 +attr/validators.py,sha256=jVE9roaSOmTf0dJNSLHNaQNilkrlzc3pNNBKmv0g7pk,15966 +attr/validators.pyi,sha256=adn6rNbIXmRXlg_FKrTmWj0dOX0vKTsGG82Jd3YcJbQ,2268 +attrs/__init__.py,sha256=CeyxLGVViAEKKsLOLaif8vF3vs1a28vsrRVLv7eMEgM,1109 +attrs/__init__.pyi,sha256=57aCxUJukK9lZlrUgk9RuWiBiPY5DzDKJAJkhbrStYw,1982 +attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70 +attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70 +attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67 +attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70 +attrs-21.4.0.dist-info/AUTHORS.rst,sha256=wsqCNbGz_mklcJrt54APIZHZpoTIJLkXqEhhn4Nd8hc,752 +attrs-21.4.0.dist-info/LICENSE,sha256=v2WaKLSSQGAvVrvfSQy-LsUJsVuY-Z17GaUsdA4yeGM,1082 +attrs-21.4.0.dist-info/METADATA,sha256=WwgR4MfxE55PpGGv21UOEOEtXZGCqwekfXYg-JgA5HY,9810 +attrs-21.4.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +attrs-21.4.0.dist-info/top_level.txt,sha256=AGbmKnOtYpdkLRsDRQVSBIwfL32pAQ6BSo1mt-BxI7M,11 +attrs-21.4.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +attrs-21.4.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/WHEEL new file mode 100644 index 0000000..0b18a28 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git 
a/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/top_level.txt new file mode 100644 index 0000000..eca8ba9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs-21.4.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +attr +attrs diff --git a/myenv/lib/python3.9/site-packages/attrs/__init__.py b/myenv/lib/python3.9/site-packages/attrs/__init__.py new file mode 100644 index 0000000..a704b8b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/__init__.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + Factory, + __author__, + __copyright__, + __description__, + __doc__, + __email__, + __license__, + __title__, + __url__, + __version__, + __version_info__, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] diff --git a/myenv/lib/python3.9/site-packages/attrs/__init__.pyi b/myenv/lib/python3.9/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000..7426fa5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/__init__.pyi @@ -0,0 +1,63 @@ +from typing import ( + Any, + Callable, + Dict, + Mapping, + Optional, + Sequence, + Tuple, + Type, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. +from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import _FilterType +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import define as define +from attr import evolve as evolve +from attr import Factory as Factory +from attr import exceptions as exceptions +from attr import field as field +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import frozen as frozen +from attr import has as has +from attr import make_class as make_class +from attr import mutable as mutable +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators + +# TODO: see definition of attr.asdict/astuple +def asdict( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: bool = ..., +) -> Dict[str, Any]: ... 
+ +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... diff --git a/myenv/lib/python3.9/site-packages/attrs/converters.py b/myenv/lib/python3.9/site-packages/attrs/converters.py new file mode 100644 index 0000000..edfa8d3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa diff --git a/myenv/lib/python3.9/site-packages/attrs/exceptions.py b/myenv/lib/python3.9/site-packages/attrs/exceptions.py new file mode 100644 index 0000000..bd9efed --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa diff --git a/myenv/lib/python3.9/site-packages/attrs/filters.py b/myenv/lib/python3.9/site-packages/attrs/filters.py new file mode 100644 index 0000000..5295900 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa diff --git a/myenv/lib/python3.9/site-packages/attrs/py.typed b/myenv/lib/python3.9/site-packages/attrs/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/attrs/setters.py b/myenv/lib/python3.9/site-packages/attrs/setters.py new file mode 100644 index 0000000..9b50770 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa diff --git a/myenv/lib/python3.9/site-packages/attrs/validators.py b/myenv/lib/python3.9/site-packages/attrs/validators.py new file mode 100644 index 0000000..ab2c9b3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/LICENSE new file mode 100644 index 0000000..11069ed --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/METADATA b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/METADATA new file mode 100644 index 0000000..1322efb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/METADATA @@ -0,0 +1,272 @@ +Metadata-Version: 2.1 +Name: bcrypt +Version: 3.2.2 +Summary: Modern password hashing for your software and your servers +Home-page: https://github.com/pyca/bcrypt/ +Author: The Python Cryptographic Authority developers +Author-email: cryptography-dev@python.org +License: Apache License, Version 2.0 +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: cffi (>=1.1) +Provides-Extra: tests +Requires-Dist: pytest (!=3.3.0,>=3.2.1) ; extra == 'tests' +Provides-Extra: typecheck +Requires-Dist: mypy ; extra == 'typecheck' + +bcrypt +====== + +.. image:: https://img.shields.io/pypi/v/bcrypt.svg + :target: https://pypi.org/project/bcrypt/ + :alt: Latest Version + +.. image:: https://github.com/pyca/bcrypt/workflows/CI/badge.svg?branch=main + :target: https://github.com/pyca/bcrypt/actions?query=workflow%3ACI+branch%3Amain + +Good password hashing for your software and your servers + + +Installation +============ + +To install bcrypt, simply: + +.. code:: bash + + $ pip install bcrypt + +Note that bcrypt should build very easily on Linux provided you have a C compiler, headers for Python (if you're not using pypy), and headers for the libffi libraries available on your system. + +For Debian and Ubuntu, the following command will ensure that the required dependencies are installed: + +.. code:: bash + + $ sudo apt-get install build-essential libffi-dev python-dev + +For Fedora and RHEL-derivatives, the following command will ensure that the required dependencies are installed: + +.. code:: bash + + $ sudo yum install gcc libffi-devel python-devel + +For Alpine, the following command will ensure that the required dependencies are installed: + +.. code:: bash + + $ apk add --update musl-dev gcc libffi-dev + + +Alternatives +============ + +While bcrypt remains a good choice for password storage depending on your specific use case you may also want to consider using scrypt (either via `standard library`_ or `cryptography`_) or argon2id via `argon2_cffi`_. + +Changelog +========= + +3.2.2 +----- + +* Fixed packaging of ``py.typed`` files in wheels so that ``mypy`` works. + +3.2.1 +----- + +* Added support for compilation on z/OS +* The next release of ``bcrypt`` with be 4.0 and it will require Rust at + compile time, for users building from source. There will be no additional + requirement for users who are installing from wheels. Users on most + platforms will be able to obtain a wheel by making sure they have an up to + date ``pip``. The minimum supported Rust version will be 1.56.0. 
+* This will be the final release for which we ship ``manylinux2010`` wheels. + Going forward the minimum supported manylinux ABI for our wheels will be + ``manylinux2014``. The vast majority of users will continue to receive + ``manylinux`` wheels provided they have an up to date ``pip``. + + +3.2.0 +----- + +* Added typehints for library functions. +* Dropped support for Python versions less than 3.6 (2.7, 3.4, 3.5). +* Shipped ``abi3`` Windows wheels (requires pip >= 20). + +3.1.7 +----- + +* Set a ``setuptools`` lower bound for PEP517 wheel building. +* We no longer distribute 32-bit ``manylinux1`` wheels. Continuing to produce + them was a maintenance burden. + +3.1.6 +----- + +* Added support for compilation on Haiku. + +3.1.5 +----- + +* Added support for compilation on AIX. +* Dropped Python 2.6 and 3.3 support. +* Switched to using ``abi3`` wheels for Python 3. If you are not getting a + wheel on a compatible platform please upgrade your ``pip`` version. + +3.1.4 +----- + +* Fixed compilation with mingw and on illumos. + +3.1.3 +----- +* Fixed a compilation issue on Solaris. +* Added a warning when using too few rounds with ``kdf``. + +3.1.2 +----- +* Fixed a compile issue affecting big endian platforms. +* Fixed invalid escape sequence warnings on Python 3.6. +* Fixed building in non-UTF8 environments on Python 2. + +3.1.1 +----- +* Resolved a ``UserWarning`` when used with ``cffi`` 1.8.3. + +3.1.0 +----- +* Added support for ``checkpw``, a convenience method for verifying a password. +* Ensure that you get a ``$2y$`` hash when you input a ``$2y$`` salt. +* Fixed a regression where ``$2a`` hashes were vulnerable to a wraparound bug. +* Fixed compilation under Alpine Linux. + +3.0.0 +----- +* Switched the C backend to code obtained from the OpenBSD project rather than + openwall. +* Added support for ``bcrypt_pbkdf`` via the ``kdf`` function. + +2.0.0 +----- +* Added support for an adjustible prefix when calling ``gensalt``. +* Switched to CFFI 1.0+ + +Usage +----- + +Password Hashing +~~~~~~~~~~~~~~~~ + +Hashing and then later checking that a password matches the previous hashed +password is very simple: + +.. code:: pycon + + >>> import bcrypt + >>> password = b"super secret password" + >>> # Hash a password for the first time, with a randomly-generated salt + >>> hashed = bcrypt.hashpw(password, bcrypt.gensalt()) + >>> # Check that an unhashed password matches one that has previously been + >>> # hashed + >>> if bcrypt.checkpw(password, hashed): + ... print("It Matches!") + ... else: + ... print("It Does not Match :(") + +KDF +~~~ + +As of 3.0.0 ``bcrypt`` now offers a ``kdf`` function which does ``bcrypt_pbkdf``. +This KDF is used in OpenSSH's newer encrypted private key format. + +.. code:: pycon + + >>> import bcrypt + >>> key = bcrypt.kdf( + ... password=b'password', + ... salt=b'salt', + ... desired_key_bytes=32, + ... rounds=100) + + +Adjustable Work Factor +~~~~~~~~~~~~~~~~~~~~~~ +One of bcrypt's features is an adjustable logarithmic work factor. To adjust +the work factor merely pass the desired number of rounds to +``bcrypt.gensalt(rounds=12)`` which defaults to 12): + +.. code:: pycon + + >>> import bcrypt + >>> password = b"super secret password" + >>> # Hash a password for the first time, with a certain number of rounds + >>> hashed = bcrypt.hashpw(password, bcrypt.gensalt(14)) + >>> # Check that a unhashed password matches one that has previously been + >>> # hashed + >>> if bcrypt.checkpw(password, hashed): + ... print("It Matches!") + ... else: + ... 
print("It Does not Match :(") + + +Adjustable Prefix +~~~~~~~~~~~~~~~~~ + +Another one of bcrypt's features is an adjustable prefix to let you define what +libraries you'll remain compatible with. To adjust this, pass either ``2a`` or +``2b`` (the default) to ``bcrypt.gensalt(prefix=b"2b")`` as a bytes object. + +As of 3.0.0 the ``$2y$`` prefix is still supported in ``hashpw`` but deprecated. + +Maximum Password Length +~~~~~~~~~~~~~~~~~~~~~~~ + +The bcrypt algorithm only handles passwords up to 72 characters, any characters +beyond that are ignored. To work around this, a common approach is to hash a +password with a cryptographic hash (such as ``sha256``) and then base64 +encode it to prevent NULL byte problems before hashing the result with +``bcrypt``: + +.. code:: pycon + + >>> password = b"an incredibly long password" * 10 + >>> hashed = bcrypt.hashpw( + ... base64.b64encode(hashlib.sha256(password).digest()), + ... bcrypt.gensalt() + ... ) + +Compatibility +------------- + +This library should be compatible with py-bcrypt and it will run on Python +3.6+, and PyPy 3. + +C Code +------ + +This library uses code from OpenBSD. + +Security +-------- + +``bcrypt`` follows the `same security policy as cryptography`_, if you +identify a vulnerability, we ask you to contact us privately. + +.. _`same security policy as cryptography`: https://cryptography.io/en/latest/security.html +.. _`standard library`: https://docs.python.org/3/library/hashlib.html#hashlib.scrypt +.. _`argon2_cffi`: https://argon2-cffi.readthedocs.io +.. _`cryptography`: https://cryptography.io/en/latest/hazmat/primitives/key-derivation-functions/#cryptography.hazmat.primitives.kdf.scrypt.Scrypt + + diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/RECORD b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/RECORD new file mode 100644 index 0000000..eb0ddf5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/RECORD @@ -0,0 +1,11 @@ +bcrypt/__about__.py,sha256=y3-LY1hqQ1KAVagHmhL_-VDEDf4r3LWt4HnGi6OCGmk,1320 +bcrypt/__init__.py,sha256=tr7xBv0w9ajqIHf8537qirLqM_bGOGM_lIQMjFd1PfI,5587 +bcrypt/_bcrypt.abi3.so,sha256=dSpfL4RVrKuQDWuXmWOuWIJrU5kgFnBZl8l1j1_78d8,156671 +bcrypt/_bcrypt.pyi,sha256=TGNrzZD1NWdyeWGGJ2i3tj_HWg_YovqF-WdI7otmiIg,47 +bcrypt/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bcrypt-3.2.2.dist-info/LICENSE,sha256=gXPVwptPlW1TJ4HSuG5OMPg-a3h43OGMkZRR1rpwfJA,10850 +bcrypt-3.2.2.dist-info/METADATA,sha256=cTPRtd-8x91DRNDRQwvtj4_cic98Cqx4qTgCCS29NGc,8342 +bcrypt-3.2.2.dist-info/WHEEL,sha256=4Pc_qD0VhjiuSiOgv99UcgTmp8ljuSMnWHBjm4-tsjo,114 +bcrypt-3.2.2.dist-info/top_level.txt,sha256=igJttN6fNWPEzk4lnCMzlitVT_1PlLVJzxzogMWGARU,15 +bcrypt-3.2.2.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +bcrypt-3.2.2.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/WHEEL new file mode 100644 index 0000000..b254a18 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: false +Tag: cp36-abi3-macosx_10_10_universal2 + diff --git a/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/top_level.txt new file mode 100644 index 0000000..9fd6492 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt-3.2.2.dist-info/top_level.txt @@ -0,0 +1,2 @@ 
+_bcrypt +bcrypt diff --git a/myenv/lib/python3.9/site-packages/bcrypt/__about__.py b/myenv/lib/python3.9/site-packages/bcrypt/__about__.py new file mode 100644 index 0000000..8bcee8d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt/__about__.py @@ -0,0 +1,41 @@ +# Author:: Donald Stufft () +# Copyright:: Copyright (c) 2013 Donald Stufft +# License:: Apache License, Version 2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "bcrypt" +__summary__ = "Modern password hashing for your software and your servers" +__uri__ = "https://github.com/pyca/bcrypt/" + +__version__ = "3.2.2" + +__author__ = "The Python Cryptographic Authority developers" +__email__ = "cryptography-dev@python.org" + +__license__ = "Apache License, Version 2.0" +__copyright__ = "Copyright 2013-2022 {0}".format(__author__) diff --git a/myenv/lib/python3.9/site-packages/bcrypt/__init__.py b/myenv/lib/python3.9/site-packages/bcrypt/__init__.py new file mode 100644 index 0000000..a9aae84 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt/__init__.py @@ -0,0 +1,171 @@ +# Author:: Donald Stufft () +# Copyright:: Copyright (c) 2013 Donald Stufft +# License:: Apache License, Version 2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division + +import hmac +import os +import re +import warnings + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) +from . 
import _bcrypt # noqa: I100 + + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", + "gensalt", + "hashpw", + "kdf", + "checkpw", +] + + +_normalize_re = re.compile(rb"^\$2y\$") + + +def gensalt(rounds: int = 12, prefix: bytes = b"2b") -> bytes: + if prefix not in (b"2a", b"2b"): + raise ValueError("Supported prefixes are b'2a' or b'2b'") + + if rounds < 4 or rounds > 31: + raise ValueError("Invalid rounds") + + salt = os.urandom(16) + output = _bcrypt.ffi.new("char[]", 30) + _bcrypt.lib.encode_base64(output, salt, len(salt)) + + return ( + b"$" + + prefix + + b"$" + + ("%2.2u" % rounds).encode("ascii") + + b"$" + + _bcrypt.ffi.string(output) + ) + + +def hashpw(password: bytes, salt: bytes) -> bytes: + if isinstance(password, str) or isinstance(salt, str): + raise TypeError("Strings must be encoded before hashing") + + if b"\x00" in password: + raise ValueError("password may not contain NUL bytes") + + # bcrypt originally suffered from a wraparound bug: + # http://www.openwall.com/lists/oss-security/2012/01/02/4 + # This bug was corrected in the OpenBSD source by truncating inputs to 72 + # bytes on the updated prefix $2b$, but leaving $2a$ unchanged for + # compatibility. However, pyca/bcrypt 2.0.0 *did* correctly truncate inputs + # on $2a$, so we do it here to preserve compatibility with 2.0.0 + password = password[:72] + + # When the original 8bit bug was found the original library we supported + # added a new prefix, $2y$, that fixes it. This prefix is exactly the same + # as the $2b$ prefix added by OpenBSD other than the name. Since the + # OpenBSD library does not support the $2y$ prefix, if the salt given to us + # is for the $2y$ prefix, we'll just mugne it so that it's a $2b$ prior to + # passing it into the C library. + original_salt, salt = salt, _normalize_re.sub(b"$2b$", salt) + + hashed = _bcrypt.ffi.new("char[]", 128) + retval = _bcrypt.lib.bcrypt_hashpass(password, salt, hashed, len(hashed)) + + if retval != 0: + raise ValueError("Invalid salt") + + # Now that we've gotten our hashed password, we want to ensure that the + # prefix we return is the one that was passed in, so we'll use the prefix + # from the original salt and concatenate that with the return value (minus + # the return value's prefix). This will ensure that if someone passed in a + # salt with a $2y$ prefix, that they get back a hash with a $2y$ prefix + # even though we munged it to $2b$. 
+ return original_salt[:4] + _bcrypt.ffi.string(hashed)[4:] + + +def checkpw(password: bytes, hashed_password: bytes) -> bool: + if isinstance(password, str) or isinstance(hashed_password, str): + raise TypeError("Strings must be encoded before checking") + + if b"\x00" in password or b"\x00" in hashed_password: + raise ValueError( + "password and hashed_password may not contain NUL bytes" + ) + + ret = hashpw(password, hashed_password) + return hmac.compare_digest(ret, hashed_password) + + +def kdf( + password: bytes, + salt: bytes, + desired_key_bytes: int, + rounds: int, + ignore_few_rounds: bool = False, +) -> bytes: + if isinstance(password, str) or isinstance(salt, str): + raise TypeError("Strings must be encoded before hashing") + + if len(password) == 0 or len(salt) == 0: + raise ValueError("password and salt must not be empty") + + if desired_key_bytes <= 0 or desired_key_bytes > 512: + raise ValueError("desired_key_bytes must be 1-512") + + if rounds < 1: + raise ValueError("rounds must be 1 or more") + + if rounds < 50 and not ignore_few_rounds: + # They probably think bcrypt.kdf()'s rounds parameter is logarithmic, + # expecting this value to be slow enough (it probably would be if this + # were bcrypt). Emit a warning. + warnings.warn( + ( + "Warning: bcrypt.kdf() called with only {0} round(s). " + "This few is not secure: the parameter is linear, like PBKDF2." + ).format(rounds), + UserWarning, + stacklevel=2, + ) + + key = _bcrypt.ffi.new("uint8_t[]", desired_key_bytes) + res = _bcrypt.lib.bcrypt_pbkdf( + password, len(password), salt, len(salt), key, len(key), rounds + ) + _bcrypt_assert(res == 0) + + return _bcrypt.ffi.buffer(key, desired_key_bytes)[:] + + +def _bcrypt_assert(ok: bool) -> None: + if not ok: + raise SystemError("bcrypt assertion failed") diff --git a/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.abi3.so b/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.abi3.so new file mode 100755 index 0000000..5712368 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.abi3.so differ diff --git a/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.pyi b/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.pyi new file mode 100644 index 0000000..329382a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/bcrypt/_bcrypt.pyi @@ -0,0 +1,4 @@ +import typing + +ffi: typing.Any +lib: typing.Any diff --git a/myenv/lib/python3.9/site-packages/bcrypt/py.typed b/myenv/lib/python3.9/site-packages/bcrypt/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/AUTHORS.md b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/AUTHORS.md new file mode 100644 index 0000000..8d112ea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/AUTHORS.md @@ -0,0 +1,185 @@ +# Authors + +Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). + +Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), +[Carl Meyer](mailto:carl@oddbird.net), +[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), +[Mika Naylor](mailto:mail@autophagy.io), +[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), +[Cooper Lees](mailto:me@cooperlees.com), and Richard Si. 
+ +Multiple contributions by: + +- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) +- [Adam Johnson](mailto:me@adamj.eu) +- [Adam Williamson](mailto:adamw@happyassassin.net) +- [Alexander Huynh](mailto:github@grande.coffee) +- [Alex Vandiver](mailto:github@chmrr.net) +- [Allan Simon](mailto:allan.simon@supinfo.com) +- Anders-Petter Ljungquist +- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) +- [Andrew Zhou](mailto:andrewfzhou@gmail.com) +- [Andrey](mailto:dyuuus@yandex.ru) +- [Andy Freeland](mailto:andy@andyfreeland.net) +- [Anthony Sottile](mailto:asottile@umich.edu) +- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) +- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) +- [Artem Malyshev](mailto:proofit404@gmail.com) +- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) +- [Augie Fackler](mailto:raf@durin42.com) +- [Aviskar KC](mailto:aviskarkc10@gmail.com) +- Batuhan Taşkaya +- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) +- [Benjamin Woodruff](mailto:github@benjam.info) +- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) +- [Brandt Bucher](mailto:brandtbucher@gmail.com) +- [Brett Cannon](mailto:brett@python.org) +- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) +- [Bryan Forbes](mailto:bryan@reigndropsfall.net) +- [Calum Lind](mailto:calumlind@gmail.com) +- [Charles](mailto:peacech@gmail.com) +- Charles Reid +- [Christian Clauss](mailto:cclauss@bluewin.ch) +- [Christian Heimes](mailto:christian@python.org) +- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) +- [Chris Rose](mailto:offline@offby1.net) +- Codey Oxley +- [Cong](mailto:congusbongus@gmail.com) +- [Cooper Ry Lees](mailto:me@cooperlees.com) +- [Dan Davison](mailto:dandavison7@gmail.com) +- [Daniel Hahler](mailto:github@thequod.de) +- [Daniel M. Capella](mailto:polycitizen@gmail.com) +- Daniele Esposti +- [David Hotham](mailto:david.hotham@metaswitch.com) +- [David Lukes](mailto:dafydd.lukes@gmail.com) +- [David Szotten](mailto:davidszotten@gmail.com) +- [Denis Laxalde](mailto:denis@laxalde.org) +- [Douglas Thor](mailto:dthor@transphormusa.com) +- dylanjblack +- [Eli Treuherz](mailto:eli@treuherz.com) +- [Emil Hessman](mailto:emil@hessman.se) +- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) +- [Florent Thiery](mailto:fthiery@gmail.com) +- Francisco +- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) +- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) +- [Gregory P. 
Smith](mailto:greg@krypto.org) +- Gustavo Camargo +- hauntsaninja +- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com) +- [Hassan Abouelela](mailto:hassan@hassanamr.com) +- [Heaford](mailto:dan@heaford.com) +- [Hugo Barrera](mailto::hugo@barrera.io) +- Hugo van Kemenade +- [Hynek Schlawack](mailto:hs@ox.cx) +- [Ivan Katanić](mailto:ivan.katanic@gmail.com) +- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) +- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) +- [Jan Hnátek](mailto:jan.hnatek@gmail.com) +- [Jason Fried](mailto:me@jasonfried.info) +- [Jason Friedland](mailto:jason@friedland.id.au) +- [jgirardet](mailto:ijkl@netc.fr) +- Jim Brännlund +- [Jimmy Jia](mailto:tesrin@gmail.com) +- [Joe Antonakakis](mailto:jma353@cornell.edu) +- [Jon Dufresne](mailto:jon.dufresne@gmail.com) +- [Jonas Obrist](mailto:ojiidotch@gmail.com) +- [Jonty Wareing](mailto:jonty@jonty.co.uk) +- [Jose Nazario](mailto:jose.monkey.org@gmail.com) +- [Joseph Larson](mailto:larson.joseph@gmail.com) +- [Josh Bode](mailto:joshbode@fastmail.com) +- [Josh Holland](mailto:anowlcalledjosh@gmail.com) +- [Joshua Cannon](mailto:joshdcannon@gmail.com) +- [José Padilla](mailto:jpadilla@webapplicate.com) +- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) +- [kaiix](mailto:kvn.hou@gmail.com) +- [Katie McLaughlin](mailto:katie@glasnt.com) +- Katrin Leinweber +- [Keith Smiley](mailto:keithbsmiley@gmail.com) +- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) +- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) +- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) +- [Kyle Sunden](mailto:sunden@wisc.edu) +- Lawrence Chan +- [Linus Groh](mailto:mail@linusgroh.de) +- [Loren Carvalho](mailto:comradeloren@gmail.com) +- [Luka Sterbic](mailto:luka.sterbic@gmail.com) +- [LukasDrude](mailto:mail@lukas-drude.de) +- Mahmoud Hossam +- Mariatta +- [Matt VanEseltine](mailto:vaneseltine@gmail.com) +- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) +- [Matthew Walster](mailto:matthew@walster.org) +- Max Smolens +- [Michael Aquilina](mailto:michaelaquilina@gmail.com) +- [Michael Flaxman](mailto:michael.flaxman@gmail.com) +- [Michael J. 
Sullivan](mailto:sully@msully.net) +- [Michael McClimon](mailto:michael@mcclimon.org) +- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) +- [Mike](mailto:roshi@fedoraproject.org) +- [mikehoyio](mailto:mikehoy@gmail.com) +- [Min ho Kim](mailto:minho42@gmail.com) +- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) +- MomIsBestFriend +- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) +- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) +- [Neraste](mailto:neraste.herr10@gmail.com) +- [Nikolaus Waxweiler](mailto:madigens@gmail.com) +- [Ofek Lev](mailto:ofekmeister@gmail.com) +- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) +- [otstrel](mailto:otstrel@gmail.com) +- [Pablo Galindo](mailto:Pablogsal@gmail.com) +- [Paul Ganssle](mailto:p.ganssle@gmail.com) +- [Paul Meinhardt](mailto:mnhrdt@gmail.com) +- [Peter Bengtsson](mailto:mail@peterbe.com) +- [Peter Grayson](mailto:pete@jpgrayson.net) +- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) +- pmacosta +- [Quentin Pradet](mailto:quentin@pradet.me) +- [Ralf Schmitt](mailto:ralf@systemexit.de) +- [Ramón Valles](mailto:mroutis@protonmail.com) +- [Richard Fearn](mailto:richardfearn@gmail.com) +- [Rishikesh Jha](mailto:rishijha424@gmail.com) +- [Rupert Bedford](mailto:rupert@rupertb.com) +- Russell Davis +- [Rémi Verschelde](mailto:rverschelde@gmail.com) +- [Sami Salonen](mailto:sakki@iki.fi) +- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) +- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) +- Sergi +- [Scott Stevenson](mailto:scott@stevenson.io) +- Shantanu +- [shaoran](mailto:shaoran@sakuranohana.org) +- [Shinya Fujino](mailto:shf0811@gmail.com) +- springstan +- [Stavros Korokithakis](mailto:hi@stavros.io) +- [Stephen Rosen](mailto:sirosen@globus.org) +- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) +- [Sunil Kapil](mailto:snlkapil@gmail.com) +- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) +- [Tal Amuyal](mailto:TalAmuyal@gmail.com) +- [Terrance](mailto:git@terrance.allofti.me) +- [Thom Lu](mailto:thomas.c.lu@gmail.com) +- [Thomas Grainger](mailto:tagrain@gmail.com) +- [Tim Gates](mailto:tim.gates@iress.com) +- [Tim Swast](mailto:swast@google.com) +- [Timo](mailto:timo_tk@hotmail.com) +- Toby Fleming +- [Tom Christie](mailto:tom@tomchristie.com) +- [Tony Narlock](mailto:tony@git-pull.com) +- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) +- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) +- [Tzu-ping Chung](mailto:uranusjr@gmail.com) +- [Utsav Shah](mailto:ukshah2@illinois.edu) +- utsav-dbx +- vezeli +- [Ville Skyttä](mailto:ville.skytta@iki.fi) +- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) +- [Vlad Emelianov](mailto:volshebnyi@gmail.com) +- [williamfzc](mailto:178894043@qq.com) +- [wouter bolsterlee](mailto:wouter@bolsterl.ee) +- Yazdan +- [Yngve Høiseth](mailto:yngve@hoiseth.net) +- [Yurii Karabas](mailto:1998uriyyo@gmail.com) +- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/LICENSE new file mode 100644 index 0000000..7a9b891 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/LICENSE 
@@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Łukasz Langa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/METADATA new file mode 100644 index 0000000..7a2127f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/METADATA @@ -0,0 +1,1112 @@ +Metadata-Version: 2.1 +Name: black +Version: 21.12b0 +Summary: The uncompromising code formatter. +Home-page: https://github.com/psf/black +Author: Łukasz Langa +Author-email: lukasz@langa.pl +License: MIT +Project-URL: Changelog, https://github.com/psf/black/blob/main/CHANGES.md +Keywords: automation formatter yapf autopep8 pyfmt gofmt rustfmt +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Requires-Python: >=3.6.2 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS.md +Requires-Dist: click (>=7.1.2) +Requires-Dist: platformdirs (>=2) +Requires-Dist: tomli (<2.0.0,>=0.2.6) +Requires-Dist: pathspec (<1,>=0.9.0) +Requires-Dist: typing-extensions (>=3.10.0.0) +Requires-Dist: mypy-extensions (>=0.4.3) +Requires-Dist: dataclasses (>=0.6) ; python_version < "3.7" +Requires-Dist: typed-ast (>=1.4.2) ; python_version < "3.8" and implementation_name == "cpython" +Requires-Dist: typing-extensions (!=3.10.0.1) ; python_version >= "3.10" +Provides-Extra: colorama +Requires-Dist: colorama (>=0.4.3) ; extra == 'colorama' +Provides-Extra: d +Requires-Dist: aiohttp (>=3.7.4) ; extra == 'd' +Provides-Extra: jupyter +Requires-Dist: ipython (>=7.8.0) ; extra == 'jupyter' +Requires-Dist: tokenize-rt (>=3.2.0) ; extra == 'jupyter' +Provides-Extra: python2 +Requires-Dist: typed-ast (>=1.4.3) ; extra == 'python2' +Provides-Extra: uvloop +Requires-Dist: uvloop (>=0.15.2) ; extra == 'uvloop' + +![Black 
Logo](https://raw.githubusercontent.com/psf/black/main/docs/_static/logo2-readme.png) + +The Uncompromising Code Formatter + +[Badge row: Actions Status, Documentation Status, Coverage Status, License: MIT, PyPI, Downloads, conda-forge, Code style: black]
+ +> “Any color you like.” + +_Black_ is the uncompromising Python code formatter. By using it, you agree to cede +control over minutiae of hand-formatting. In return, _Black_ gives you speed, +determinism, and freedom from `pycodestyle` nagging about formatting. You will save time +and mental energy for more important matters. + +Blackened code looks the same regardless of the project you're reading. Formatting +becomes transparent after a while and you can focus on the content instead. + +_Black_ makes code review faster by producing the smallest diffs possible. + +Try it out now using the [Black Playground](https://black.vercel.app). Watch the +[PyCon 2019 talk](https://youtu.be/esZLCuWs_2Y) to learn more. + +--- + +**[Read the documentation on ReadTheDocs!](https://black.readthedocs.io/en/stable)** + +--- + +## Installation and usage + +### Installation + +_Black_ can be installed by running `pip install black`. It requires Python 3.6.2+ to +run. If you want to format Python 2 code as well, install with +`pip install black[python2]`. If you want to format Jupyter Notebooks, install with +`pip install black[jupyter]`. + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+git://github.com/psf/black` + +### Usage + +To get started right away with sensible defaults: + +```sh +black {source_file_or_directory} +``` + +You can run _Black_ as a package if running it as a script doesn't work: + +```sh +python -m black {source_file_or_directory} +``` + +Further information can be found in our docs: + +- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) + +### NOTE: This is a beta product + +_Black_ is already [successfully used](https://github.com/psf/black#used-by) by many +projects, small and big. Black has a comprehensive test suite, with efficient parallel +tests, and our own auto formatting and parallel Continuous Integration runner. However, +_Black_ is still beta. Things will probably be wonky for a while. This is made explicit +by the "Beta" trove classifier, as well as by the "b" in the version number. What this +means for you is that **until the formatter becomes stable, you should expect some +formatting to change in the future**. That being said, no drastic stylistic changes are +planned, mostly responses to bug reports. + +Also, as a safety measure which slows down processing, _Black_ will check that the +reformatted code still produces a valid AST that is effectively equivalent to the +original (see the +[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#ast-before-and-after-formatting) +section for details). If you're feeling confident, use `--fast`. + +## The _Black_ code style + +_Black_ is a PEP 8 compliant opinionated formatter. _Black_ reformats entire files in +place. Style configuration options are deliberately limited and rarely added. It doesn't +take previous formatting into account (see [Pragmatism](#pragmatism) for exceptions). + +Our documentation covers the current _Black_ code style, but planned changes to it are +also documented. They're both worth taking a look: + +- [The _Black_ Code Style: Current style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) +- [The _Black_ Code Style: Future style](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html) + +Please refer to this document before submitting an issue. What seems like a bug might be +intended behaviour. 
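To make the style description above concrete, here is a small hand-written before/after sketch (not taken from Black's documentation; exact output may vary slightly between Black versions):

```python
# Before formatting (what a contributor might write)
records=[ {'name':'alpha'},{'name':'beta'} ]
names=sorted( [r[ 'name' ] for r in records],reverse = True )

# After running `black`: double quotes, normalized spacing, identical semantics
records = [{"name": "alpha"}, {"name": "beta"}]
names = sorted([r["name"] for r in records], reverse=True)
```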
+ +### Pragmatism + +Early versions of _Black_ used to be absolutist in some respects. They took after its +initial author. This was fine at the time as it made the implementation simpler and +there were not many users anyway. Not many edge cases were reported. As a mature tool, +_Black_ does make some exceptions to rules it otherwise holds. + +- [The _Black_ code style: Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism) + +Please refer to this document before submitting an issue just like with the document +above. What seems like a bug might be intended behaviour. + +## Configuration + +_Black_ is able to read project-specific default values for its command line options +from a `pyproject.toml` file. This is especially useful for specifying custom +`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your +project. + +You can find more details in our documentation: + +- [The basics: Configuration via a file](https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-via-a-file) + +And if you're looking for more general configuration documentation: + +- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) + +**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is +"No". _Black_ is all about sensible defaults. Applying those defaults will have your +code in compliance with many other _Black_ formatted projects. + +## Used by + +The following notable open-source projects trust _Black_ with enforcing a consistent +code style: pytest, tox, Pyramid, Django Channels, Hypothesis, attrs, SQLAlchemy, +Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), pandas, Pillow, +Twisted, LocalStack, every Datadog Agent Integration, Home Assistant, Zulip, Kedro, and +many more. + +The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Mozilla, Quora, +Duolingo, QuantumBlack, Tesla. + +Are we missing anyone? Let us know. + +## Testimonials + +**Mike Bayer**, [author of `SQLAlchemy`](https://www.sqlalchemy.org/): + +> I can't think of any single tool in my entire programming career that has given me a +> bigger productivity increase by its introduction. I can now do refactorings in about +> 1% of the keystrokes that it would have taken me previously when we had no way for +> code to format itself. + +**Dusty Phillips**, +[writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips): + +> _Black_ is opinionated so you don't have to be. + +**Hynek Schlawack**, [creator of `attrs`](https://www.attrs.org/), core developer of +Twisted and CPython: + +> An auto-formatter that doesn't suck is all I want for Xmas! + +**Carl Meyer**, [Django](https://www.djangoproject.com/) core developer: + +> At least the name is good. + +**Kenneth Reitz**, creator of [`requests`](http://python-requests.org/) and +[`pipenv`](https://readthedocs.org/projects/pipenv/): + +> This vastly improves the formatting of our code. Thanks a ton! + +## Show your style + +Use the badge in your project's README.md: + +```md +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +``` + +Using the badge in README.rst: + +``` +.. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +``` + +Looks like this: +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +## License + +MIT + +## Contributing + +Welcome! Happy to see you willing to make the project better. You can get started by +reading this: + +- [Contributing: The basics](https://black.readthedocs.io/en/latest/contributing/the_basics.html) + +You can also take a look at the rest of the contributing docs or talk with the +developers: + +- [Contributing documentation](https://black.readthedocs.io/en/latest/contributing/index.html) +- [Chat on Discord](https://discord.gg/RtVdv86PrH) + +## Change log + +The log has become rather long. It moved to its own file. + +See [CHANGES](https://black.readthedocs.io/en/latest/change_log.html). + +## Authors + +The author list is quite long nowadays, so it lives in its own file. + +See [AUTHORS.md](./AUTHORS.md) + +## Code of Conduct + +Everyone participating in the _Black_ project, and in particular in the issue tracker, +pull requests, and social media activity, is expected to treat other people with respect +and more generally to follow the guidelines articulated in the +[Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/). + +At the same time, humor is encouraged. In fact, basic familiarity with Monty Python's +Flying Circus is expected. We are not savages. + +And if you _really_ need to slap somebody, do it with a fish while dancing. + + +# Change Log + +## 21.12b0 + +### _Black_ + +- Fix determination of f-string expression spans (#2654) +- Fix bad formatting of error messages about EOF in multi-line statements (#2343) +- Functions and classes in blocks now have more consistent surrounding spacing (#2472) + +#### Jupyter Notebook support + +- Cell magics are now only processed if they are known Python cell magics. Earlier, all + cell magics were tokenized, leading to possible indentation errors e.g. with + `%%writefile`. (#2630) +- Fix assignment to environment variables in Jupyter Notebooks (#2642) + +#### Python 3.10 support + +- Point users to using `--target-version py310` if we detect 3.10-only syntax (#2668) +- Fix `match` statements with open sequence subjects, like `match a, b:` or + `match a, *b:` (#2639) (#2659) +- Fix `match`/`case` statements that contain `match`/`case` soft keywords multiple + times, like `match re.match()` (#2661) +- Fix `case` statements with an inline body (#2665) +- Fix styling of starred expressions inside `match` subject (#2667) +- Fix parser error location on invalid syntax in a `match` statement (#2649) +- Fix Python 3.10 support on platforms without ProcessPoolExecutor (#2631) +- Improve parsing performance on code that uses `match` under `--target-version py310` + up to ~50% (#2670) + +### Packaging + +- Remove dependency on `regex` (#2644) (#2663) + +## 21.11b1 + +### _Black_ + +- Bumped regex version minimum to 2021.4.4 to fix Pattern class usage (#2621) + +## 21.11b0 + +### _Black_ + +- Warn about Python 2 deprecation in more cases by improving Python 2 only syntax + detection (#2592) +- Add experimental PyPy support (#2559) +- Add partial support for the match statement. 
As it's experimental, it's only enabled + when `--target-version py310` is explicitly specified (#2586) +- Add support for parenthesized with (#2586) +- Declare support for Python 3.10 for running Black (#2562) + +### Integrations + +- Fixed vim plugin with Python 3.10 by removing deprecated distutils import (#2610) +- The vim plugin now parses `skip_magic_trailing_comma` from pyproject.toml (#2613) + +## 21.10b0 + +### _Black_ + +- Document stability policy, that will apply for non-beta releases (#2529) +- Add new `--workers` parameter (#2514) +- Fixed feature detection for positional-only arguments in lambdas (#2532) +- Bumped typed-ast version minimum to 1.4.3 for 3.10 compatibility (#2519) +- Fixed a Python 3.10 compatibility issue where the loop argument was still being passed + even though it has been removed (#2580) +- Deprecate Python 2 formatting support (#2523) + +### _Blackd_ + +- Remove dependency on aiohttp-cors (#2500) +- Bump required aiohttp version to 3.7.4 (#2509) + +### _Black-Primer_ + +- Add primer support for --projects (#2555) +- Print primer summary after individual failures (#2570) + +### Integrations + +- Allow to pass `target_version` in the vim plugin (#1319) +- Install build tools in docker file and use multi-stage build to keep the image size + down (#2582) + +## 21.9b0 + +### Packaging + +- Fix missing modules in self-contained binaries (#2466) +- Fix missing toml extra used during installation (#2475) + +## 21.8b0 + +### _Black_ + +- Add support for formatting Jupyter Notebook files (#2357) +- Move from `appdirs` dependency to `platformdirs` (#2375) +- Present a more user-friendly error if .gitignore is invalid (#2414) +- The failsafe for accidentally added backslashes in f-string expressions has been + hardened to handle more edge cases during quote normalization (#2437) +- Avoid changing a function return type annotation's type to a tuple by adding a + trailing comma (#2384) +- Parsing support has been added for unparenthesized walruses in set literals, set + comprehensions, and indices (#2447). +- Pin `setuptools-scm` build-time dependency version (#2457) +- Exclude typing-extensions version 3.10.0.1 due to it being broken on Python 3.10 + (#2460) + +### _Blackd_ + +- Replace sys.exit(-1) with raise ImportError as it plays more nicely with tools that + scan installed packages (#2440) + +### Integrations + +- The provided pre-commit hooks no longer specify `language_version` to avoid overriding + `default_language_version` (#2430) + +## 21.7b0 + +### _Black_ + +- Configuration files using TOML features higher than spec v0.5.0 are now supported + (#2301) +- Add primer support and test for code piped into black via STDIN (#2315) +- Fix internal error when `FORCE_OPTIONAL_PARENTHESES` feature is enabled (#2332) +- Accept empty stdin (#2346) +- Provide a more useful error when parsing fails during AST safety checks (#2304) + +### Docker + +- Add new `latest_release` tag automation to follow latest black release on docker + images (#2374) + +### Integrations + +- The vim plugin now searches upwards from the directory containing the current buffer + instead of the current working directory for pyproject.toml. 
(#1871) +- The vim plugin now reads the correct string normalization option in pyproject.toml + (#1869) +- The vim plugin no longer crashes Black when there's boolean values in pyproject.toml + (#1869) + +## 21.6b0 + +### _Black_ + +- Fix failure caused by `fmt: skip` and indentation (#2281) +- Account for += assignment when deciding whether to split string (#2312) +- Correct max string length calculation when there are string operators (#2292) +- Fixed option usage when using the `--code` flag (#2259) +- Do not call `uvloop.install()` when _Black_ is used as a library (#2303) +- Added `--required-version` option to require a specific version to be running (#2300) +- Fix incorrect custom breakpoint indices when string group contains fake f-strings + (#2311) +- Fix regression where `R` prefixes would be lowercased for docstrings (#2285) +- Fix handling of named escapes (`\N{...}`) when `--experimental-string-processing` is + used (#2319) + +### Integrations + +- The official Black action now supports choosing what version to use, and supports the + major 3 OSes. (#1940) + +## 21.5b2 + +### _Black_ + +- A space is no longer inserted into empty docstrings (#2249) +- Fix handling of .gitignore files containing non-ASCII characters on Windows (#2229) +- Respect `.gitignore` files in all levels, not only `root/.gitignore` file (apply + `.gitignore` rules like `git` does) (#2225) +- Restored compatibility with Click 8.0 on Python 3.6 when LANG=C used (#2227) +- Add extra uvloop install + import support if in python env (#2258) +- Fix --experimental-string-processing crash when matching parens are not found (#2283) +- Make sure to split lines that start with a string operator (#2286) +- Fix regular expression that black uses to identify f-expressions (#2287) + +### _Blackd_ + +- Add a lower bound for the `aiohttp-cors` dependency. Only 0.4.0 or higher is + supported. (#2231) + +### Packaging + +- Release self-contained x86_64 MacOS binaries as part of the GitHub release pipeline + (#2198) +- Always build binaries with the latest available Python (#2260) + +### Documentation + +- Add discussion of magic comments to FAQ page (#2272) +- `--experimental-string-processing` will be enabled by default in the future (#2273) +- Fix typos discovered by codespell (#2228) +- Fix Vim plugin installation instructions. (#2235) +- Add new Frequently Asked Questions page (#2247) +- Fix encoding + symlink issues preventing proper build on Windows (#2262) + +## 21.5b1 + +### _Black_ + +- Refactor `src/black/__init__.py` into many files (#2206) + +### Documentation + +- Replaced all remaining references to the + [`master`](https://github.com/psf/black/tree/main) branch with the + [`main`](https://github.com/psf/black/tree/main) branch. Some additional changes in + the source code were also made. (#2210) +- Sigificantly reorganized the documentation to make much more sense. Check them out by + heading over to [the stable docs on RTD](https://black.readthedocs.io/en/stable/). + (#2174) + +## 21.5b0 + +### _Black_ + +- Set `--pyi` mode if `--stdin-filename` ends in `.pyi` (#2169) +- Stop detecting target version as Python 3.9+ with pre-PEP-614 decorators that are + being called but with no arguments (#2182) + +### _Black-Primer_ + +- Add `--no-diff` to black-primer to suppress formatting changes (#2187) + +## 21.4b2 + +### _Black_ + +- Fix crash if the user configuration directory is inaccessible. 
(#2158) + +- Clarify + [circumstances](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism) + in which _Black_ may change the AST (#2159) + +- Allow `.gitignore` rules to be overridden by specifying `exclude` in `pyproject.toml` + or on the command line. (#2170) + +### _Packaging_ + +- Install `primer.json` (used by `black-primer` by default) with black. (#2154) + +## 21.4b1 + +### _Black_ + +- Fix crash on docstrings ending with "\\ ". (#2142) + +- Fix crash when atypical whitespace is cleaned out of dostrings (#2120) + +- Reflect the `--skip-magic-trailing-comma` and `--experimental-string-processing` flags + in the name of the cache file. Without this fix, changes in these flags would not take + effect if the cache had already been populated. (#2131) + +- Don't remove necessary parentheses from assignment expression containing assert / + return statements. (#2143) + +### _Packaging_ + +- Bump pathspec to >= 0.8.1 to solve invalid .gitignore exclusion handling + +## 21.4b0 + +### _Black_ + +- Fixed a rare but annoying formatting instability created by the combination of + optional trailing commas inserted by `Black` and optional parentheses looking at + pre-existing "magic" trailing commas. This fixes issue #1629 and all of its many many + duplicates. (#2126) + +- `Black` now processes one-line docstrings by stripping leading and trailing spaces, + and adding a padding space when needed to break up """". (#1740) + +- `Black` now cleans up leading non-breaking spaces in comments (#2092) + +- `Black` now respects `--skip-string-normalization` when normalizing multiline + docstring quotes (#1637) + +- `Black` no longer removes all empty lines between non-function code and decorators + when formatting typing stubs. Now `Black` enforces a single empty line. (#1646) + +- `Black` no longer adds an incorrect space after a parenthesized assignment expression + in if/while statements (#1655) + +- Added `--skip-magic-trailing-comma` / `-C` to avoid using trailing commas as a reason + to split lines (#1824) + +- fixed a crash when PWD=/ on POSIX (#1631) + +- fixed "I/O operation on closed file" when using --diff (#1664) + +- Prevent coloured diff output being interleaved with multiple files (#1673) + +- Added support for PEP 614 relaxed decorator syntax on python 3.9 (#1711) + +- Added parsing support for unparenthesized tuples and yield expressions in annotated + assignments (#1835) + +- added `--extend-exclude` argument (PR #2005) + +- speed up caching by avoiding pathlib (#1950) + +- `--diff` correctly indicates when a file doesn't end in a newline (#1662) + +- Added `--stdin-filename` argument to allow stdin to respect `--force-exclude` rules + (#1780) + +- Lines ending with `fmt: skip` will now be not formatted (#1800) + +- PR #2053: Black no longer relies on typed-ast for Python 3.8 and higher + +- PR #2053: Python 2 support is now optional, install with + `python3 -m pip install black[python2]` to maintain support. 
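The `--skip-magic-trailing-comma` / `-C` entry above is easiest to see through the library entry point that the vendored `black/__init__.py` later in this diff documents (`format_str` plus `Mode`). A sketch under the assumption that the wheel added by this diff is importable (for example with the `myenv` virtualenv active); the sample literal is invented.

```python
# Sketch of the "magic trailing comma" behaviour, using the format_str/Mode API
# shown in the vendored black/__init__.py; the sample literal is invented.
import black

SRC = "numbers = [1, 2, 3,]\n"

# Default mode: the pre-existing trailing comma makes Black keep one item per line.
print(black.format_str(SRC, mode=black.Mode()))

# Equivalent of --skip-magic-trailing-comma: the literal is collapsed back onto
# a single line instead.
print(black.format_str(SRC, mode=black.Mode(magic_trailing_comma=False)))
```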
+ +- Exclude `venv` directory by default (#1683) + +- Fixed "Black produced code that is not equivalent to the source" when formatting + Python 2 docstrings (#2037) + +### _Packaging_ + +- Self-contained native _Black_ binaries are now provided for releases via GitHub + Releases (#1743) + +## 20.8b1 + +### _Packaging_ + +- explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions + older than 7.0 + +## 20.8b0 + +### _Black_ + +- re-implemented support for explicit trailing commas: now it works consistently within + any bracket pair, including nested structures (#1288 and duplicates) + +- `Black` now reindents docstrings when reindenting code around it (#1053) + +- `Black` now shows colored diffs (#1266) + +- `Black` is now packaged using 'py3' tagged wheels (#1388) + +- `Black` now supports Python 3.8 code, e.g. star expressions in return statements + (#1121) + +- `Black` no longer normalizes capital R-string prefixes as those have a + community-accepted meaning (#1244) + +- `Black` now uses exit code 2 when specified configuration file doesn't exit (#1361) + +- `Black` now works on AWS Lambda (#1141) + +- added `--force-exclude` argument (#1032) + +- removed deprecated `--py36` option (#1236) + +- fixed `--diff` output when EOF is encountered (#526) + +- fixed `# fmt: off` handling around decorators (#560) + +- fixed unstable formatting with some `# type: ignore` comments (#1113) + +- fixed invalid removal on organizing brackets followed by indexing (#1575) + +- introduced `black-primer`, a CI tool that allows us to run regression tests against + existing open source users of Black (#1402) + +- introduced property-based fuzzing to our test suite based on Hypothesis and + Hypothersmith (#1566) + +- implemented experimental and disabled by default long string rewrapping (#1132), + hidden under a `--experimental-string-processing` flag while it's being worked on; + this is an undocumented and unsupported feature, you lose Internet points for + depending on it (#1609) + +### Vim plugin + +- prefer virtualenv packages over global packages (#1383) + +## 19.10b0 + +- added support for PEP 572 assignment expressions (#711) + +- added support for PEP 570 positional-only arguments (#943) + +- added support for async generators (#593) + +- added support for pre-splitting collections by putting an explicit trailing comma + inside (#826) + +- added `black -c` as a way to format code passed from the command line (#761) + +- --safe now works with Python 2 code (#840) + +- fixed grammar selection for Python 2-specific code (#765) + +- fixed feature detection for trailing commas in function definitions and call sites + (#763) + +- `# fmt: off`/`# fmt: on` comment pairs placed multiple times within the same block of + code now behave correctly (#1005) + +- _Black_ no longer crashes on Windows machines with more than 61 cores (#838) + +- _Black_ no longer crashes on standalone comments prepended with a backslash (#767) + +- _Black_ no longer crashes on `from` ... 
`import` blocks with comments (#829) + +- _Black_ no longer crashes on Python 3.7 on some platform configurations (#494) + +- _Black_ no longer fails on comments in from-imports (#671) + +- _Black_ no longer fails when the file starts with a backslash (#922) + +- _Black_ no longer merges regular comments with type comments (#1027) + +- _Black_ no longer splits long lines that contain type comments (#997) + +- removed unnecessary parentheses around `yield` expressions (#834) + +- added parentheses around long tuples in unpacking assignments (#832) + +- added parentheses around complex powers when they are prefixed by a unary operator + (#646) + +- fixed bug that led _Black_ format some code with a line length target of 1 (#762) + +- _Black_ no longer introduces quotes in f-string subexpressions on string boundaries + (#863) + +- if _Black_ puts parenthesis around a single expression, it moves comments to the + wrapped expression instead of after the brackets (#872) + +- `blackd` now returns the version of _Black_ in the response headers (#1013) + +- `blackd` can now output the diff of formats on source code when the `X-Diff` header is + provided (#969) + +## 19.3b0 + +- new option `--target-version` to control which Python versions _Black_-formatted code + should target (#618) + +- deprecated `--py36` (use `--target-version=py36` instead) (#724) + +- _Black_ no longer normalizes numeric literals to include `_` separators (#696) + +- long `del` statements are now split into multiple lines (#698) + +- type comments are no longer mangled in function signatures + +- improved performance of formatting deeply nested data structures (#509) + +- _Black_ now properly formats multiple files in parallel on Windows (#632) + +- _Black_ now creates cache files atomically which allows it to be used in parallel + pipelines (like `xargs -P8`) (#673) + +- _Black_ now correctly indents comments in files that were previously formatted with + tabs (#262) + +- `blackd` now supports CORS (#622) + +## 18.9b0 + +- numeric literals are now formatted by _Black_ (#452, #461, #464, #469): + + - numeric literals are normalized to include `_` separators on Python 3.6+ code + + - added `--skip-numeric-underscore-normalization` to disable the above behavior and + leave numeric underscores as they were in the input + + - code with `_` in numeric literals is recognized as Python 3.6+ + + - most letters in numeric literals are lowercased (e.g., in `1e10`, `0x01`) + + - hexadecimal digits are always uppercased (e.g. 
`0xBADC0DE`) + +- added `blackd`, see + [its documentation](https://github.com/psf/black/blob/18.9b0/README.md#blackd) for + more info (#349) + +- adjacent string literals are now correctly split into multiple lines (#463) + +- trailing comma is now added to single imports that don't fit on a line (#250) + +- cache is now populated when `--check` is successful for a file which speeds up + consecutive checks of properly formatted unmodified files (#448) + +- whitespace at the beginning of the file is now removed (#399) + +- fixed mangling [pweave](http://mpastell.com/pweave/) and + [Spyder IDE](https://www.spyder-ide.org/) special comments (#532) + +- fixed unstable formatting when unpacking big tuples (#267) + +- fixed parsing of `__future__` imports with renames (#389) + +- fixed scope of `# fmt: off` when directly preceding `yield` and other nodes (#385) + +- fixed formatting of lambda expressions with default arguments (#468) + +- fixed `async for` statements: _Black_ no longer breaks them into separate lines (#372) + +- note: the Vim plugin stopped registering `,=` as a default chord as it turned out to + be a bad idea (#415) + +## 18.6b4 + +- hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371) + +## 18.6b3 + +- typing stub files (`.pyi`) now have blank lines added after constants (#340) + +- `# fmt: off` and `# fmt: on` are now much more dependable: + + - they now work also within bracket pairs (#329) + + - they now correctly work across function/class boundaries (#335) + + - they now work when an indentation block starts with empty lines or misaligned + comments (#334) + +- made Click not fail on invalid environments; note that Click is right but the + likelihood we'll need to access non-ASCII file paths when dealing with Python source + code is low (#277) + +- fixed improper formatting of f-strings with quotes inside interpolated expressions + (#322) + +- fixed unnecessary slowdown when long list literals where found in a file + +- fixed unnecessary slowdown on AST nodes with very many siblings + +- fixed cannibalizing backslashes during string normalization + +- fixed a crash due to symbolic links pointing outside of the project directory (#338) + +## 18.6b2 + +- added `--config` (#65) + +- added `-h` equivalent to `--help` (#316) + +- fixed improper unmodified file caching when `-S` was used + +- fixed extra space in string unpacking (#305) + +- fixed formatting of empty triple quoted strings (#313) + +- fixed unnecessary slowdown in comment placement calculation on lines without comments + +## 18.6b1 + +- hotfix: don't output human-facing information on stdout (#299) + +- hotfix: don't output cake emoji on non-zero return code (#300) + +## 18.6b0 + +- added `--include` and `--exclude` (#270) + +- added `--skip-string-normalization` (#118) + +- added `--verbose` (#283) + +- the header output in `--diff` now actually conforms to the unified diff spec + +- fixed long trivial assignments being wrapped in unnecessary parentheses (#273) + +- fixed unnecessary parentheses when a line contained multiline strings (#232) + +- fixed stdin handling not working correctly if an old version of Click was used (#276) + +- _Black_ now preserves line endings when formatting a file in place (#258) + +## 18.5b1 + +- added `--pyi` (#249) + +- added `--py36` (#249) + +- Python grammar pickle caches are stored with the formatting caches, making _Black_ + work in environments where site-packages is not user-writable (#192) + +- _Black_ now enforces a PEP 257 empty line after 
a class-level docstring (and/or + fields) and the first method + +- fixed invalid code produced when standalone comments were present in a trailer that + was omitted from line splitting on a large expression (#237) + +- fixed optional parentheses being removed within `# fmt: off` sections (#224) + +- fixed invalid code produced when stars in very long imports were incorrectly wrapped + in optional parentheses (#234) + +- fixed unstable formatting when inline comments were moved around in a trailer that was + omitted from line splitting on a large expression (#238) + +- fixed extra empty line between a class declaration and the first method if no class + docstring or fields are present (#219) + +- fixed extra empty line between a function signature and an inner function or inner + class (#196) + +## 18.5b0 + +- call chains are now formatted according to the + [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67) + +- data structure literals (tuples, lists, dictionaries, and sets) are now also always + exploded like imports when they don't fit in a single line (#152) + +- slices are now formatted according to PEP 8 (#178) + +- parentheses are now also managed automatically on the right-hand side of assignments + and return statements (#140) + +- math operators now use their respective priorities for delimiting multiline + expressions (#148) + +- optional parentheses are now omitted on expressions that start or end with a bracket + and only contain a single operator (#177) + +- empty parentheses in a class definition are now removed (#145, #180) + +- string prefixes are now standardized to lowercase and `u` is removed on Python 3.6+ + only code and Python 2.7+ code with the `unicode_literals` future import (#188, #198, + #199) + +- typing stub files (`.pyi`) are now formatted in a style that is consistent with PEP + 484 (#207, #210) + +- progress when reformatting many files is now reported incrementally + +- fixed trailers (content with brackets) being unnecessarily exploded into their own + lines after a dedented closing bracket (#119) + +- fixed an invalid trailing comma sometimes left in imports (#185) + +- fixed non-deterministic formatting when multiple pairs of removable parentheses were + used (#183) + +- fixed multiline strings being unnecessarily wrapped in optional parentheses in long + assignments (#215) + +- fixed not splitting long from-imports with only a single name + +- fixed Python 3.6+ file discovery by also looking at function calls with unpacking. + This fixed non-deterministic formatting if trailing commas where used both in function + signatures with stars and function calls with stars but the former would be + reformatted to a single line. 
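The 18.5b0 note about data-structure literals being exploded like imports when they do not fit can be reproduced with the same `format_str`/`Mode` API from the vendored package; the dictionary and the 40-character line-length target below are invented for the example.

```python
# Sketch of the 18.5b0 "exploded like imports" behaviour with a small line-length
# target; the dictionary and the 40-character limit are invented for the example.
import black

SRC = 'config = {"line_length": 88, "target_versions": ["py36"], "is_pyi": False}\n'

# At the default 88-character limit this stays on one line; at 40 characters the
# dictionary is split with one key per line and a trailing comma added.
print(black.format_str(SRC, mode=black.Mode(line_length=40)))
```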
+ +- fixed crash on dealing with optional parentheses (#193) + +- fixed "is", "is not", "in", and "not in" not considered operators for splitting + purposes + +- fixed crash when dead symlinks where encountered + +## 18.4a4 + +- don't populate the cache on `--check` (#175) + +## 18.4a3 + +- added a "cache"; files already reformatted that haven't changed on disk won't be + reformatted again (#109) + +- `--check` and `--diff` are no longer mutually exclusive (#149) + +- generalized star expression handling, including double stars; this fixes + multiplication making expressions "unsafe" for trailing commas (#132) + +- _Black_ no longer enforces putting empty lines behind control flow statements (#90) + +- _Black_ now splits imports like "Mode 3 + trailing comma" of isort (#127) + +- fixed comment indentation when a standalone comment closes a block (#16, #32) + +- fixed standalone comments receiving extra empty lines if immediately preceding a + class, def, or decorator (#56, #154) + +- fixed `--diff` not showing entire path (#130) + +- fixed parsing of complex expressions after star and double stars in function calls + (#2) + +- fixed invalid splitting on comma in lambda arguments (#133) + +- fixed missing splits of ternary expressions (#141) + +## 18.4a2 + +- fixed parsing of unaligned standalone comments (#99, #112) + +- fixed placement of dictionary unpacking inside dictionary literals (#111) + +- Vim plugin now works on Windows, too + +- fixed unstable formatting when encountering unnecessarily escaped quotes in a string + (#120) + +## 18.4a1 + +- added `--quiet` (#78) + +- added automatic parentheses management (#4) + +- added [pre-commit](https://pre-commit.com) integration (#103, #104) + +- fixed reporting on `--check` with multiple files (#101, #102) + +- fixed removing backslash escapes from raw strings (#100, #105) + +## 18.4a0 + +- added `--diff` (#87) + +- add line breaks before all delimiters, except in cases like commas, to better comply + with PEP 8 (#73) + +- standardize string literals to use double quotes (almost) everywhere (#75) + +- fixed handling of standalone comments within nested bracketed expressions; _Black_ + will no longer produce super long lines or put all standalone comments at the end of + the expression (#22) + +- fixed 18.3a4 regression: don't crash and burn on empty lines with trailing whitespace + (#80) + +- fixed 18.3a4 regression: `# yapf: disable` usage as trailing comment would cause + _Black_ to not emit the rest of the file (#95) + +- when CTRL+C is pressed while formatting many files, _Black_ no longer freaks out with + a flurry of asyncio-related exceptions + +- only allow up to two empty lines on module level and only single empty lines within + functions (#74) + +## 18.3a4 + +- `# fmt: off` and `# fmt: on` are implemented (#5) + +- automatic detection of deprecated Python 2 forms of print statements and exec + statements in the formatted file (#49) + +- use proper spaces for complex expressions in default values of typed function + arguments (#60) + +- only return exit code 1 when --check is used (#50) + +- don't remove single trailing commas from square bracket indexing (#59) + +- don't omit whitespace if the previous factor leaf wasn't a math operator (#55) + +- omit extra space in kwarg unpacking if it's the first argument (#46) + +- omit extra space in + [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute) + (#68) + +## 18.3a3 + +- don't remove single empty lines outside of bracketed 
expressions (#19) + +- added ability to pipe formatting from stdin to stdin (#25) + +- restored ability to format code with legacy usage of `async` as a name (#20, #42) + +- even better handling of numpy-style array indexing (#33, again) + +## 18.3a2 + +- changed positioning of binary operators to occur at beginning of lines instead of at + the end, following + [a recent change to PEP 8](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b) + (#21) + +- ignore empty bracket pairs while splitting. This avoids very weirdly looking + formattings (#34, #35) + +- remove a trailing comma if there is a single argument to a call + +- if top level functions were separated by a comment, don't put four empty lines after + the upper function + +- fixed unstable formatting of newlines with imports + +- fixed unintentional folding of post scriptum standalone comments into last statement + if it was a simple statement (#18, #28) + +- fixed missing space in numpy-style array indexing (#33) + +- fixed spurious space after star-based unary expressions (#31) + +## 18.3a1 + +- added `--check` + +- only put trailing commas in function signatures and calls if it's safe to do so. If + the file is Python 3.6+ it's always safe, otherwise only safe if there are no `*args` + or `**kwargs` used in the signature or call. (#8) + +- fixed invalid spacing of dots in relative imports (#6, #13) + +- fixed invalid splitting after comma on unpacked variables in for-loops (#23) + +- fixed spurious space in parenthesized set expressions (#7) + +- fixed spurious space after opening parentheses and in default arguments (#14, #17) + +- fixed spurious space after unary operators when the operand was a complex expression + (#15) + +## 18.3a0 + +- first published version, Happy 🍰 Day 2018! 
+ +- alpha quality + +- date-versioned (see: ) + + diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/RECORD new file mode 100644 index 0000000..53b0b4f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/RECORD @@ -0,0 +1,54 @@ +../../../bin/black,sha256=OxSf28OqvHfdamdUy6uj720fnq5YrNowAKX6rxRn1zo,261 +../../../bin/black-primer,sha256=83JdAPbcZRtxyDvx62xZ8PXPONS3ARcT1QGohKYEIZc,256 +../../../bin/blackd,sha256=4Ql34teLrKgP_I07BOQpKyMdTOKTbk8sTD19Js_CEZY,262 +_black_version.py,sha256=J13gsXjkb0PeiRMcxI_ff48THu--ndPguUjbwdQKirA,20 +black/__init__.py,sha256=WfrijLeAboMaGIz1VvQoxg3K0925OS3EjtNGedtQesg,44979 +black/__main__.py,sha256=mogeA4o9zt4w-ufKvaQjSEhtSgQkcMVLK9ChvdB5wH8,47 +black/brackets.py,sha256=4YGgFwSDk2hdzHaJTPF_JWoo_hdmnTGTwN7aQMWjogo,10761 +black/cache.py,sha256=fG6R1gIB7laUKJmJGGDLbvGyXV-mVV6d8OyY7sN4YnY,2342 +black/comments.py,sha256=6ybPXvmmQ7pcbNInMuUKjiP8zZlMOis5O-uUC_hMAxI,10197 +black/concurrency.py,sha256=MDDmc2LKcbai2scV2vYIas4eBmLhoThFqo4mPQcAuew,1829 +black/const.py,sha256=5jQVHjBmOqRai7fDheI-16JruvI4Nj6_7yBa6GCHMoM,249 +black/debug.py,sha256=BblF9L88hNGbuVo-EH6SzHo5jgBbFgWHyLL14zhlO3Y,1595 +black/files.py,sha256=BphFBRqiyN_l0evZZoDqjhvRTkJWB_Z3MzGGJdomceY,8448 +black/handle_ipynb_magics.py,sha256=s40Co89epSnHVz9MfdloZqWCmUiKKU7Buma9j-E5yss,13534 +black/linegen.py,sha256=znJY9M7CUF7HPacQUcnGDW6j2CNLDgVhhVSypoHEZGQ,41093 +black/lines.py,sha256=rPVcLxYuQVNeb-VOiB13QBJP04Qno6KghQuBPnv-yVY,26879 +black/mode.py,sha256=990qXazXHjHIsX33oin5Ps7K3mVQP9yqMIuzJLhW0bs,4699 +black/nodes.py,sha256=06hQf4EX3yRhnz5wvDfZN6vRPSlNCOtlxe_IZj2IkEE,24373 +black/numerics.py,sha256=zg7l1_XOtFNfcWnEd12sTsQsilAEtI3o7Rt6osdK3m4,1843 +black/output.py,sha256=sg1AEcMQ0kcS0gyD9jPESo34wpfKAd074NfcTdvNLkc,3495 +black/parsing.py,sha256=Ixr_gXf0AEC5BENp_rU4euewJ2sqO-njhwqVrJFtP6Q,10410 +black/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +black/report.py,sha256=iq4YS8A_F5zBp-HyohzNGL0w5PEJJrAnOUvOg7ddx0s,3380 +black/rusty.py,sha256=JAZPP4tU8U_WyRMQerygQ65Zthb4dM10yvtX8vjHUK4,557 +black/strings.py,sha256=ogWmRbtiIbnSwTlfIxNg3UxWYer06Y45exBFN7S_1r0,7924 +black/trans.py,sha256=v3Yj0TNSabiUbS9C6uGq7mDeAPHC2hzmsC3mvE8MXd0,76431 +black_primer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +black_primer/cli.py,sha256=V7y_LxYRWHwMWj00bqhXVeRkv97uJcO3OU-3yxO6iys,4847 +black_primer/lib.py,sha256=1IH7o7nLKlnXxRUSQ2Y0SIlqkBd9NF-3pEZb57SHGUg,13941 +black_primer/primer.json,sha256=9DFXpSGbEQNx9lUk6vnYJR3nvzwv9ImeIWfL3WRD3as,6783 +blackd/__init__.py,sha256=S_AGDXsgGUN9ZdhDx-6pPxOimtqlb2x6gpXwADEJ3OQ,7104 +blackd/middlewares.py,sha256=FDA9ve5O1YptredJmHODWa0FPElVRSUkuI-LXlqtmqQ,1207 +blib2to3/Grammar.txt,sha256=Ek2B-mvc4uoMstW1GvYcYlqXLsVo0ianhfZ-L6121Q0,11163 +blib2to3/PatternGrammar.txt,sha256=7lul2ztnIqDi--JWDrwciD5yMo75w7TaHHxdHMZJvOM,793 +blib2to3/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 +blib2to3/pygram.py,sha256=aY7mifSN3ddqJXtEHg0oQC-nf1b87gxuht9wrVbOaiI,5463 +blib2to3/pytree.py,sha256=jaHZDtPnUQMPmTagt26xuJ8BPGqg0lFslQHgXrOogFE,31964 +blib2to3/pgen2/__init__.py,sha256=hY6w9QUzvTvRb-MoFfd_q_7ZLt6IUHC2yxWCfsZupQA,143 +blib2to3/pgen2/conv.py,sha256=wGS0vapMV81PvxisAentOrXCZX1IlCFulQDW5Ha9Ueo,9607 +blib2to3/pgen2/driver.py,sha256=ysH9xh3jCM-K0UBiZoZIOz8E-wvgTHPnO_SCOH3r2wA,10774 +blib2to3/pgen2/grammar.py,sha256=ohZ3ZwaIGTfVz90fWRKazCYxJWVq-ZB-Ht3hItar31k,6791 +blib2to3/pgen2/literals.py,sha256=vbzw1RX0NvATBR971WwRNtp68fsXky4-pV4cBZ02_9E,1628 
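For readers unfamiliar with the `RECORD` file being added here: each line has the form `path,sha256=<digest>,<size>`, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped and the size is in bytes (this is the standard installed-wheel layout, not something specific to this diff). A hedged sketch of checking one such entry; the helper name and the commented usage are illustrative, not part of this repository.

```python
# Sketch of verifying a single RECORD line of the form "path,sha256=<b64>,<size>".
# Entries with an empty hash (such as the RECORD file's own line) are not handled.
import base64
import hashlib
from pathlib import Path


def record_entry_matches(entry: str, site_packages: Path) -> bool:
    """Return True if the named file still has the recorded digest and size."""
    path, hash_field, size = entry.rsplit(",", 2)
    algorithm, _, expected = hash_field.partition("=")
    data = (site_packages / path).read_bytes()
    digest = hashlib.new(algorithm, data).digest()
    # RECORD digests use URL-safe base64 without the trailing '=' padding.
    encoded = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return encoded == expected and len(data) == int(size)


# Hypothetical usage (entry abbreviated):
# record_entry_matches("black/const.py,sha256=...,249",
#                      Path("myenv/lib/python3.9/site-packages"))
```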
+blib2to3/pgen2/parse.py,sha256=RderMwR6uCnMI395EAgeCUW6Fw-gHrF7c3UVE-p_r88,13083 +blib2to3/pgen2/pgen.py,sha256=8gkqFARDLVp8tdpjxjJEP1YytCZvNrLl5tuK6SdM_nc,15491 +blib2to3/pgen2/token.py,sha256=eDZWhONvSUNGsWYzHhgBd1pIQ2HlVPP7aon0S1CLuKE,1919 +blib2to3/pgen2/tokenize.py,sha256=P41EcFKYe0bH-EaaQY2KzsWkbz-wIqSUvoUA5kgobBI,22842 +black-21.12b0.dist-info/AUTHORS.md,sha256=QkFLz3GyG9FOsxzCSs5dFyRO8VfNZ4MooKSQtK-tIgU,7699 +black-21.12b0.dist-info/LICENSE,sha256=nAQo8MO0d5hQz1vZbhGqqK_HLUqG1KNiI9erouWNbgA,1080 +black-21.12b0.dist-info/METADATA,sha256=sP67E_PJhjq7Fc4i-Guw9qfDJflioPMn8FFwqIhS468,39684 +black-21.12b0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +black-21.12b0.dist-info/entry_points.txt,sha256=YLrV9zqlrKNpCDWkEerYGVwQlNq0Ib4g9LHgGcaqBww,116 +black-21.12b0.dist-info/top_level.txt,sha256=jGkYA_03ZBzJOsfB7YnyswRCARxXZRcCbzc3pugKwew,50 +black-21.12b0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +black-21.12b0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/entry_points.txt new file mode 100644 index 0000000..faa86e5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +black = black:patched_main +black-primer = black_primer.cli:main +blackd = blackd:patched_main [d] + diff --git a/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/top_level.txt new file mode 100644 index 0000000..edff6bd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black-21.12b0.dist-info/top_level.txt @@ -0,0 +1,5 @@ +_black_version +black +black_primer +blackd +blib2to3 diff --git a/myenv/lib/python3.9/site-packages/black/__init__.py b/myenv/lib/python3.9/site-packages/black/__init__.py new file mode 100644 index 0000000..1923c06 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/__init__.py @@ -0,0 +1,1377 @@ +import asyncio +from json.decoder import JSONDecodeError +import json +from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor +from contextlib import contextmanager +from datetime import datetime +from enum import Enum +import io +from multiprocessing import Manager, freeze_support +import os +from pathlib import Path +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError +import re +import signal +import sys +import tokenize +import traceback +from typing import ( + Any, + Dict, + Generator, + Iterator, + List, + MutableMapping, + Optional, + Pattern, + Set, + Sized, + Tuple, + Union, +) + +import click +from dataclasses import replace +from mypy_extensions import mypyc_attr + +from black.const import DEFAULT_LINE_LENGTH, DEFAULT_INCLUDES, DEFAULT_EXCLUDES +from black.const import STDIN_PLACEHOLDER +from black.nodes import STARS, syms, is_simple_decorator_expression +from black.lines import Line, EmptyLineTracker +from black.linegen import transform_line, LineGenerator, LN +from black.comments import normalize_fmt_off +from black.mode import Mode, 
TargetVersion +from black.mode import Feature, supports_feature, VERSION_TO_FEATURES +from black.cache import read_cache, write_cache, get_cache_info, filter_cached, Cache +from black.concurrency import cancel, shutdown, maybe_install_uvloop +from black.output import dump_to_file, ipynb_diff, diff, color_diff, out, err +from black.report import Report, Changed, NothingChanged +from black.files import find_project_root, find_pyproject_toml, parse_pyproject_toml +from black.files import gen_python_files, get_gitignore, normalize_path_maybe_ignore +from black.files import wrap_stream_for_windows +from black.parsing import InvalidInput # noqa F401 +from black.parsing import lib2to3_parse, parse_ast, stringify_ast +from black.handle_ipynb_magics import ( + mask_cell, + unmask_cell, + remove_trailing_semicolon, + put_trailing_semicolon_back, + TRANSFORMED_MAGICS, + PYTHON_CELL_MAGICS, + jupyter_dependencies_are_installed, +) + + +# lib2to3 fork +from blib2to3.pytree import Node, Leaf +from blib2to3.pgen2 import token + +from _black_version import version as __version__ + +COMPILED = Path(__file__).suffix in (".pyd", ".so") + +# types +FileContent = str +Encoding = str +NewLine = str + + +class WriteBack(Enum): + NO = 0 + YES = 1 + DIFF = 2 + CHECK = 3 + COLOR_DIFF = 4 + + @classmethod + def from_configuration( + cls, *, check: bool, diff: bool, color: bool = False + ) -> "WriteBack": + if check and not diff: + return cls.CHECK + + if diff and color: + return cls.COLOR_DIFF + + return cls.DIFF if diff else cls.YES + + +# Legacy name, left for integrations. +FileMode = Mode + +DEFAULT_WORKERS = os.cpu_count() + + +def read_pyproject_toml( + ctx: click.Context, param: click.Parameter, value: Optional[str] +) -> Optional[str]: + """Inject Black configuration from "pyproject.toml" into defaults in `ctx`. + + Returns the path to a successfully found and read configuration file, None + otherwise. + """ + if not value: + value = find_pyproject_toml(ctx.params.get("src", ())) + if value is None: + return None + + try: + config = parse_pyproject_toml(value) + except (OSError, ValueError) as e: + raise click.FileError( + filename=value, hint=f"Error reading configuration file: {e}" + ) from None + + if not config: + return None + else: + # Sanitize the values to be Click friendly. For more information please see: + # https://github.com/psf/black/issues/1458 + # https://github.com/pallets/click/issues/1567 + config = { + k: str(v) if not isinstance(v, (list, dict)) else v + for k, v in config.items() + } + + target_version = config.get("target_version") + if target_version is not None and not isinstance(target_version, list): + raise click.BadOptionUsage( + "target-version", "Config key target-version must be a list" + ) + + default_map: Dict[str, Any] = {} + if ctx.default_map: + default_map.update(ctx.default_map) + default_map.update(config) + + ctx.default_map = default_map + return value + + +def target_version_option_callback( + c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...] +) -> List[TargetVersion]: + """Compute the target versions from a --target-version flag. + + This is its own function because mypy couldn't infer the type correctly + when it was a lambda, causing mypyc trouble. + """ + return [TargetVersion[val.upper()] for val in v] + + +def re_compile_maybe_verbose(regex: str) -> Pattern[str]: + """Compile a regular expression string in `regex`. + + If it contains newlines, use verbose mode. 
+ """ + if "\n" in regex: + regex = "(?x)" + regex + compiled: Pattern[str] = re.compile(regex) + return compiled + + +def validate_regex( + ctx: click.Context, + param: click.Parameter, + value: Optional[str], +) -> Optional[Pattern[str]]: + try: + return re_compile_maybe_verbose(value) if value is not None else None + except re.error: + raise click.BadParameter("Not a valid regular expression") from None + + +@click.command( + context_settings={"help_option_names": ["-h", "--help"]}, + # While Click does set this field automatically using the docstring, mypyc + # (annoyingly) strips 'em so we need to set it here too. + help="The uncompromising code formatter.", +) +@click.option("-c", "--code", type=str, help="Format the code passed in as a string.") +@click.option( + "-l", + "--line-length", + type=int, + default=DEFAULT_LINE_LENGTH, + help="How many characters per line to allow.", + show_default=True, +) +@click.option( + "-t", + "--target-version", + type=click.Choice([v.name.lower() for v in TargetVersion]), + callback=target_version_option_callback, + multiple=True, + help=( + "Python versions that should be supported by Black's output. [default: per-file" + " auto-detection]" + ), +) +@click.option( + "--pyi", + is_flag=True, + help=( + "Format all input files like typing stubs regardless of file extension (useful" + " when piping source on standard input)." + ), +) +@click.option( + "--ipynb", + is_flag=True, + help=( + "Format all input files like Jupyter Notebooks regardless of file extension " + "(useful when piping source on standard input)." + ), +) +@click.option( + "-S", + "--skip-string-normalization", + is_flag=True, + help="Don't normalize string quotes or prefixes.", +) +@click.option( + "-C", + "--skip-magic-trailing-comma", + is_flag=True, + help="Don't use trailing commas as a reason to split lines.", +) +@click.option( + "--experimental-string-processing", + is_flag=True, + hidden=True, + help=( + "Experimental option that performs more normalization on string literals." + " Currently disabled because it leads to some crashes." + ), +) +@click.option( + "--check", + is_flag=True, + help=( + "Don't write the files back, just return the status. Return code 0 means" + " nothing would change. Return code 1 means some files would be reformatted." + " Return code 123 means there was an internal error." + ), +) +@click.option( + "--diff", + is_flag=True, + help="Don't write the files back, just output a diff for each file on stdout.", +) +@click.option( + "--color/--no-color", + is_flag=True, + help="Show colored diff. Only applies when `--diff` is given.", +) +@click.option( + "--fast/--safe", + is_flag=True, + help="If --fast given, skip temporary sanity checks. [default: --safe]", +) +@click.option( + "--required-version", + type=str, + help=( + "Require a specific version of Black to be running (useful for unifying results" + " across many environments e.g. with a pyproject.toml file)." + ), +) +@click.option( + "--include", + type=str, + default=DEFAULT_INCLUDES, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " included on recursive searches. An empty value means all files are included" + " regardless of the name. Use forward slashes for directories on all platforms" + " (Windows, too). Exclusions are calculated first, inclusions later." 
+ ), + show_default=True, +) +@click.option( + "--exclude", + type=str, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " excluded on recursive searches. An empty value means no paths are excluded." + " Use forward slashes for directories on all platforms (Windows, too)." + " Exclusions are calculated first, inclusions later. [default:" + f" {DEFAULT_EXCLUDES}]" + ), + show_default=False, +) +@click.option( + "--extend-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but adds additional files and directories on top of the" + " excluded ones. (Useful if you simply want to add to the default)" + ), +) +@click.option( + "--force-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but files and directories matching this regex will be " + "excluded even when they are passed explicitly as arguments." + ), +) +@click.option( + "--stdin-filename", + type=str, + help=( + "The name of the file when passing it through stdin. Useful to make " + "sure Black will respect --force-exclude option on some " + "editors that rely on using stdin." + ), +) +@click.option( + "-W", + "--workers", + type=click.IntRange(min=1), + default=DEFAULT_WORKERS, + show_default=True, + help="Number of parallel workers", +) +@click.option( + "-q", + "--quiet", + is_flag=True, + help=( + "Don't emit non-error messages to stderr. Errors are still emitted; silence" + " those with 2>/dev/null." + ), +) +@click.option( + "-v", + "--verbose", + is_flag=True, + help=( + "Also emit messages to stderr about files that were not changed or were ignored" + " due to exclusion patterns." + ), +) +@click.version_option( + version=__version__, + message=f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})", +) +@click.argument( + "src", + nargs=-1, + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True + ), + is_eager=True, + metavar="SRC ...", +) +@click.option( + "--config", + type=click.Path( + exists=True, + file_okay=True, + dir_okay=False, + readable=True, + allow_dash=False, + path_type=str, + ), + is_eager=True, + callback=read_pyproject_toml, + help="Read configuration from FILE path.", +) +@click.pass_context +def main( + ctx: click.Context, + code: Optional[str], + line_length: int, + target_version: List[TargetVersion], + check: bool, + diff: bool, + color: bool, + fast: bool, + pyi: bool, + ipynb: bool, + skip_string_normalization: bool, + skip_magic_trailing_comma: bool, + experimental_string_processing: bool, + quiet: bool, + verbose: bool, + required_version: Optional[str], + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + stdin_filename: Optional[str], + workers: int, + src: Tuple[str, ...], + config: Optional[str], +) -> None: + """The uncompromising code formatter.""" + if config and verbose: + out(f"Using configuration from {config}.", bold=False, fg="blue") + + error_msg = "Oh no! 💥 💔 💥" + if required_version and required_version != __version__: + err( + f"{error_msg} The required version `{required_version}` does not match" + f" the running version `{__version__}`!" + ) + ctx.exit(1) + if ipynb and pyi: + err("Cannot pass both `pyi` and `ipynb` flags!") + ctx.exit(1) + + write_back = WriteBack.from_configuration(check=check, diff=diff, color=color) + if target_version: + versions = set(target_version) + else: + # We'll autodetect later. 
+ versions = set() + mode = Mode( + target_versions=versions, + line_length=line_length, + is_pyi=pyi, + is_ipynb=ipynb, + string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + experimental_string_processing=experimental_string_processing, + ) + + if code is not None: + # Run in quiet mode by default with -c; the extra output isn't useful. + # You can still pass -v to get verbose output. + quiet = True + + report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) + + if code is not None: + reformat_code( + content=code, fast=fast, write_back=write_back, mode=mode, report=report + ) + else: + try: + sources = get_sources( + ctx=ctx, + src=src, + quiet=quiet, + verbose=verbose, + include=include, + exclude=exclude, + extend_exclude=extend_exclude, + force_exclude=force_exclude, + report=report, + stdin_filename=stdin_filename, + ) + except GitWildMatchPatternError: + ctx.exit(1) + + path_empty( + sources, + "No Python files are present to be formatted. Nothing to do 😴", + quiet, + verbose, + ctx, + ) + + if len(sources) == 1: + reformat_one( + src=sources.pop(), + fast=fast, + write_back=write_back, + mode=mode, + report=report, + ) + else: + reformat_many( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + workers=workers, + ) + + if verbose or not quiet: + out(error_msg if report.return_code else "All done! ✨ 🍰 ✨") + if code is None: + click.echo(str(report), err=True) + ctx.exit(report.return_code) + + +def get_sources( + *, + ctx: click.Context, + src: Tuple[str, ...], + quiet: bool, + verbose: bool, + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: "Report", + stdin_filename: Optional[str], +) -> Set[Path]: + """Compute the set of files to be formatted.""" + + root = find_project_root(src) + sources: Set[Path] = set() + path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx) + + if exclude is None: + exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) + gitignore = get_gitignore(root) + else: + gitignore = None + + for s in src: + if s == "-" and stdin_filename: + p = Path(stdin_filename) + is_stdin = True + else: + p = Path(s) + is_stdin = False + + if is_stdin or p.is_file(): + normalized_path = normalize_path_maybe_ignore(p, root, report) + if normalized_path is None: + continue + + normalized_path = "/" + normalized_path + # Hard-exclude any files that matches the `--force-exclude` regex. 
+ if force_exclude: + force_exclude_match = force_exclude.search(normalized_path) + else: + force_exclude_match = None + if force_exclude_match and force_exclude_match.group(0): + report.path_ignored(p, "matches the --force-exclude regular expression") + continue + + if is_stdin: + p = Path(f"{STDIN_PLACEHOLDER}{str(p)}") + + if p.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + + sources.add(p) + elif p.is_dir(): + sources.update( + gen_python_files( + p.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore, + verbose=verbose, + quiet=quiet, + ) + ) + elif s == "-": + sources.add(p) + else: + err(f"invalid path: {s}") + return sources + + +def path_empty( + src: Sized, msg: str, quiet: bool, verbose: bool, ctx: click.Context +) -> None: + """ + Exit if there is no `src` provided for formatting + """ + if not src: + if verbose or not quiet: + out(msg) + ctx.exit(0) + + +def reformat_code( + content: str, fast: bool, write_back: WriteBack, mode: Mode, report: Report +) -> None: + """ + Reformat and print out `content` without spawning child processes. + Similar to `reformat_one`, but for string content. + + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + path = Path("") + try: + changed = Changed.NO + if format_stdin_to_stdout( + content=content, fast=fast, write_back=write_back, mode=mode + ): + changed = Changed.YES + report.done(path, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(path, str(exc)) + + +def reformat_one( + src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report" +) -> None: + """Reformat a single file under `src` without spawning child processes. + + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + try: + changed = Changed.NO + + if str(src) == "-": + is_stdin = True + elif str(src).startswith(STDIN_PLACEHOLDER): + is_stdin = True + # Use the original name again in case we want to print something + # to the user + src = Path(str(src)[len(STDIN_PLACEHOLDER) :]) + else: + is_stdin = False + + if is_stdin: + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode): + changed = Changed.YES + else: + cache: Cache = {} + if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + cache = read_cache(mode) + res_src = src.resolve() + res_src_s = str(res_src) + if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src): + changed = Changed.CACHED + if changed is not Changed.CACHED and format_file_in_place( + src, fast=fast, write_back=write_back, mode=mode + ): + changed = Changed.YES + if (write_back is WriteBack.YES and changed is not Changed.CACHED) or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + write_cache(cache, [src], mode) + report.done(src, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(src, str(exc)) + + +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. 
~ichard26 +@mypyc_attr(patchable=True) +def reformat_many( + sources: Set[Path], + fast: bool, + write_back: WriteBack, + mode: Mode, + report: "Report", + workers: Optional[int], +) -> None: + """Reformat multiple files using a ProcessPoolExecutor.""" + executor: Executor + loop = asyncio.get_event_loop() + worker_count = workers if workers is not None else DEFAULT_WORKERS + if sys.platform == "win32": + # Work around https://bugs.python.org/issue26903 + assert worker_count is not None + worker_count = min(worker_count, 60) + try: + executor = ProcessPoolExecutor(max_workers=worker_count) + except (ImportError, NotImplementedError, OSError): + # we arrive here if the underlying system does not support multi-processing + # like in AWS Lambda or Termux, in which case we gracefully fallback to + # a ThreadPoolExecutor with just a single worker (more workers would not do us + # any good due to the Global Interpreter Lock) + executor = ThreadPoolExecutor(max_workers=1) + + try: + loop.run_until_complete( + schedule_formatting( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + loop=loop, + executor=executor, + ) + ) + finally: + shutdown(loop) + if executor is not None: + executor.shutdown() + + +async def schedule_formatting( + sources: Set[Path], + fast: bool, + write_back: WriteBack, + mode: Mode, + report: "Report", + loop: asyncio.AbstractEventLoop, + executor: Executor, +) -> None: + """Run formatting of `sources` in parallel using the provided `executor`. + + (Use ProcessPoolExecutors for actual parallelism.) + + `write_back`, `fast`, and `mode` options are passed to + :func:`format_file_in_place`. + """ + cache: Cache = {} + if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + cache = read_cache(mode) + sources, cached = filter_cached(cache, sources) + for src in sorted(cached): + report.done(src, Changed.CACHED) + if not sources: + return + + cancelled = [] + sources_to_cache = [] + lock = None + if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + # For diff output, we need locks to ensure we don't interleave output + # from different processes. + manager = Manager() + lock = manager.Lock() + tasks = { + asyncio.ensure_future( + loop.run_in_executor( + executor, format_file_in_place, src, fast, mode, write_back, lock + ) + ): src + for src in sorted(sources) + } + pending = tasks.keys() + try: + loop.add_signal_handler(signal.SIGINT, cancel, pending) + loop.add_signal_handler(signal.SIGTERM, cancel, pending) + except NotImplementedError: + # There are no good alternatives for these on Windows. + pass + while pending: + done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + for task in done: + src = tasks.pop(task) + if task.cancelled(): + cancelled.append(task) + elif task.exception(): + report.failed(src, str(task.exception())) + else: + changed = Changed.YES if task.result() else Changed.NO + # If the file was written back or was successfully checked as + # well-formatted, store this information in the cache. 
+ if write_back is WriteBack.YES or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + sources_to_cache.append(src) + report.done(src, changed) + if cancelled: + if sys.version_info >= (3, 7): + await asyncio.gather(*cancelled, return_exceptions=True) + else: + await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) + if sources_to_cache: + write_cache(cache, sources_to_cache, mode) + + +def format_file_in_place( + src: Path, + fast: bool, + mode: Mode, + write_back: WriteBack = WriteBack.NO, + lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy +) -> bool: + """Format file under `src` path. Return True if changed. + + If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted + code to the file. + `mode` and `fast` options are passed to :func:`format_file_contents`. + """ + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + + then = datetime.utcfromtimestamp(src.stat().st_mtime) + with open(src, "rb") as buf: + src_contents, encoding, newline = decode_bytes(buf.read()) + try: + dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) + except NothingChanged: + return False + except JSONDecodeError: + raise ValueError( + f"File '{src}' cannot be parsed as valid Jupyter notebook." + ) from None + + if write_back == WriteBack.YES: + with open(src, "w", encoding=encoding, newline=newline) as f: + f.write(dst_contents) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.utcnow() + src_name = f"{src}\t{then} +0000" + dst_name = f"{src}\t{now} +0000" + if mode.is_ipynb: + diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) + else: + diff_contents = diff(src_contents, dst_contents, src_name, dst_name) + + if write_back == WriteBack.COLOR_DIFF: + diff_contents = color_diff(diff_contents) + + with lock or nullcontext(): + f = io.TextIOWrapper( + sys.stdout.buffer, + encoding=encoding, + newline=newline, + write_through=True, + ) + f = wrap_stream_for_windows(f) + f.write(diff_contents) + f.detach() + + return True + + +def format_stdin_to_stdout( + fast: bool, + *, + content: Optional[str] = None, + write_back: WriteBack = WriteBack.NO, + mode: Mode, +) -> bool: + """Format file on stdin. Return True if changed. + + If content is None, it's read from sys.stdin. + + If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, + write a diff to stdout. The `mode` argument is passed to + :func:`format_file_contents`. 
+ """ + then = datetime.utcnow() + + if content is None: + src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + else: + src, encoding, newline = content, "utf-8", "" + + dst = src + try: + dst = format_file_contents(src, fast=fast, mode=mode) + return True + + except NothingChanged: + return False + + finally: + f = io.TextIOWrapper( + sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True + ) + if write_back == WriteBack.YES: + # Make sure there's a newline after the content + if dst and dst[-1] != "\n": + dst += "\n" + f.write(dst) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.utcnow() + src_name = f"STDIN\t{then} +0000" + dst_name = f"STDOUT\t{now} +0000" + d = diff(src, dst, src_name, dst_name) + if write_back == WriteBack.COLOR_DIFF: + d = color_diff(d) + f = wrap_stream_for_windows(f) + f.write(d) + f.detach() + + +def check_stability_and_equivalence( + src_contents: str, dst_contents: str, *, mode: Mode +) -> None: + """Perform stability and equivalence checks. + + Raise AssertionError if source and destination contents are not + equivalent, or if a second pass of the formatter would format the + content differently. + """ + assert_equivalent(src_contents, dst_contents) + + # Forced second pass to work around optional trailing commas (becoming + # forced trailing commas on pass 2) interacting differently with optional + # parentheses. Admittedly ugly. + dst_contents_pass2 = format_str(dst_contents, mode=mode) + if dst_contents != dst_contents_pass2: + dst_contents = dst_contents_pass2 + assert_equivalent(src_contents, dst_contents, pass_num=2) + assert_stable(src_contents, dst_contents, mode=mode) + # Note: no need to explicitly call `assert_stable` if `dst_contents` was + # the same as `dst_contents_pass2`. + + +def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Reformat contents of a file and return new contents. + + If `fast` is False, additionally confirm that the reformatted code is + valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it. + `mode` is passed to :func:`format_str`. + """ + if not src_contents.strip(): + raise NothingChanged + + if mode.is_ipynb: + dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) + else: + dst_contents = format_str(src_contents, mode=mode) + if src_contents == dst_contents: + raise NothingChanged + + if not fast and not mode.is_ipynb: + # Jupyter notebooks will already have been checked above. + check_stability_and_equivalence(src_contents, dst_contents, mode=mode) + return dst_contents + + +def validate_cell(src: str) -> None: + """Check that cell does not already contain TransformerManager transformations, + or non-Python cell magics, which might cause tokenizer_rt to break because of + indentations. + + If a cell contains ``!ls``, then it'll be transformed to + ``get_ipython().system('ls')``. However, if the cell originally contained + ``get_ipython().system('ls')``, then it would get transformed in the same way: + + >>> TransformerManager().transform_cell("get_ipython().system('ls')") + "get_ipython().system('ls')\n" + >>> TransformerManager().transform_cell("!ls") + "get_ipython().system('ls')\n" + + Due to the impossibility of safely roundtripping in such situations, cells + containing transformed magics will be ignored. 
+ """ + if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS): + raise NothingChanged + if src[:2] == "%%" and src.split()[0][2:] not in PYTHON_CELL_MAGICS: + raise NothingChanged + + +def format_cell(src: str, *, fast: bool, mode: Mode) -> str: + """Format code in given cell of Jupyter notebook. + + General idea is: + + - if cell has trailing semicolon, remove it; + - if cell has IPython magics, mask them; + - format cell; + - reinstate IPython magics; + - reinstate trailing semicolon (if originally present); + - strip trailing newlines. + + Cells with syntax errors will not be processed, as they + could potentially be automagics or multi-line magics, which + are currently not supported. + """ + validate_cell(src) + src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon( + src + ) + try: + masked_src, replacements = mask_cell(src_without_trailing_semicolon) + except SyntaxError: + raise NothingChanged from None + masked_dst = format_str(masked_src, mode=mode) + if not fast: + check_stability_and_equivalence(masked_src, masked_dst, mode=mode) + dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements) + dst = put_trailing_semicolon_back( + dst_without_trailing_semicolon, has_trailing_semicolon + ) + dst = dst.rstrip("\n") + if dst == src: + raise NothingChanged from None + return dst + + +def validate_metadata(nb: MutableMapping[str, Any]) -> None: + """If notebook is marked as non-Python, don't format it. + + All notebook metadata fields are optional, see + https://nbformat.readthedocs.io/en/latest/format_description.html. So + if a notebook has empty metadata, we will try to parse it anyway. + """ + language = nb.get("metadata", {}).get("language_info", {}).get("name", None) + if language is not None and language != "python": + raise NothingChanged from None + + +def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Format Jupyter notebook. + + Operate cell-by-cell, only on code cells, only for Python notebooks. + If the ``.ipynb`` originally had a trailing newline, it'll be preserved. + """ + trailing_newline = src_contents[-1] == "\n" + modified = False + nb = json.loads(src_contents) + validate_metadata(nb) + for cell in nb["cells"]: + if cell.get("cell_type", None) == "code": + try: + src = "".join(cell["source"]) + dst = format_cell(src, fast=fast, mode=mode) + except NothingChanged: + pass + else: + cell["source"] = dst.splitlines(keepends=True) + modified = True + if modified: + dst_contents = json.dumps(nb, indent=1, ensure_ascii=False) + if trailing_newline: + dst_contents = dst_contents + "\n" + return dst_contents + else: + raise NothingChanged + + +def format_str(src_contents: str, *, mode: Mode) -> FileContent: + """Reformat a string and return new contents. + + `mode` determines formatting options, such as how many characters per line are + allowed. Example: + + >>> import black + >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode())) + def f(arg: str = "") -> None: + ... + + A more complex example: + + >>> print( + ... black.format_str( + ... "def f(arg:str='')->None: hey", + ... mode=black.Mode( + ... target_versions={black.TargetVersion.PY36}, + ... line_length=10, + ... string_normalization=False, + ... is_pyi=False, + ... ), + ... ), + ... 
) + def f( + arg: str = '', + ) -> None: + hey + + """ + src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) + dst_contents = [] + future_imports = get_future_imports(src_node) + if mode.target_versions: + versions = mode.target_versions + else: + versions = detect_target_versions(src_node) + + # TODO: fully drop support and this code hopefully in January 2022 :D + if TargetVersion.PY27 in mode.target_versions or versions == {TargetVersion.PY27}: + msg = ( + "DEPRECATION: Python 2 support will be removed in the first stable release " + "expected in January 2022." + ) + err(msg, fg="yellow", bold=True) + + normalize_fmt_off(src_node) + lines = LineGenerator( + mode=mode, + remove_u_prefix="unicode_literals" in future_imports + or supports_feature(versions, Feature.UNICODE_LITERALS), + ) + elt = EmptyLineTracker(is_pyi=mode.is_pyi) + empty_line = Line(mode=mode) + after = 0 + split_line_features = { + feature + for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF} + if supports_feature(versions, feature) + } + for current_line in lines.visit(src_node): + dst_contents.append(str(empty_line) * after) + before, after = elt.maybe_empty_lines(current_line) + dst_contents.append(str(empty_line) * before) + for line in transform_line( + current_line, mode=mode, features=split_line_features + ): + dst_contents.append(str(line)) + return "".join(dst_contents) + + +def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: + """Return a tuple of (decoded_contents, encoding, newline). + + `newline` is either CRLF or LF but `decoded_contents` is decoded with + universal newlines (i.e. only contains LF). + """ + srcbuf = io.BytesIO(src) + encoding, lines = tokenize.detect_encoding(srcbuf.readline) + if not lines: + return "", encoding, "\n" + + newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" + srcbuf.seek(0) + with io.TextIOWrapper(srcbuf, encoding) as tiow: + return tiow.read(), encoding, newline + + +def get_features_used(node: Node) -> Set[Feature]: # noqa: C901 + """Return a set of (relatively) new Python features used in this file. + + Currently looking for: + - f-strings; + - underscores in numeric literals; + - trailing commas after * or ** in function signatures and calls; + - positional only arguments in function signatures and lambdas; + - assignment expression; + - relaxed decorator syntax; + - print / exec statements; + """ + features: Set[Feature] = set() + for n in node.pre_order(): + if n.type == token.STRING: + value_head = n.value[:2] # type: ignore + if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: + features.add(Feature.F_STRINGS) + + elif n.type == token.NUMBER: + assert isinstance(n, Leaf) + if "_" in n.value: + features.add(Feature.NUMERIC_UNDERSCORES) + elif n.value.endswith(("L", "l")): + # Python 2: 10L + features.add(Feature.LONG_INT_LITERAL) + elif len(n.value) >= 2 and n.value[0] == "0" and n.value[1].isdigit(): + # Python 2: 0123; 00123; ... 
+ if not all(char == "0" for char in n.value): + # although we don't want to match 0000 or similar + features.add(Feature.OCTAL_INT_LITERAL) + + elif n.type == token.SLASH: + if n.parent and n.parent.type in { + syms.typedargslist, + syms.arglist, + syms.varargslist, + }: + features.add(Feature.POS_ONLY_ARGUMENTS) + + elif n.type == token.COLONEQUAL: + features.add(Feature.ASSIGNMENT_EXPRESSIONS) + + elif n.type == syms.decorator: + if len(n.children) > 1 and not is_simple_decorator_expression( + n.children[1] + ): + features.add(Feature.RELAXED_DECORATORS) + + elif ( + n.type in {syms.typedargslist, syms.arglist} + and n.children + and n.children[-1].type == token.COMMA + ): + if n.type == syms.typedargslist: + feature = Feature.TRAILING_COMMA_IN_DEF + else: + feature = Feature.TRAILING_COMMA_IN_CALL + + for ch in n.children: + if ch.type in STARS: + features.add(feature) + + if ch.type == syms.argument: + for argch in ch.children: + if argch.type in STARS: + features.add(feature) + + # Python 2 only features (for its deprecation) except for integers, see above + elif n.type == syms.print_stmt: + features.add(Feature.PRINT_STMT) + elif n.type == syms.exec_stmt: + features.add(Feature.EXEC_STMT) + elif n.type == syms.tfpdef: + # def set_position((x, y), value): + # ... + features.add(Feature.AUTOMATIC_PARAMETER_UNPACKING) + elif n.type == syms.except_clause: + # try: + # ... + # except Exception, err: + # ... + if len(n.children) >= 4: + if n.children[-2].type == token.COMMA: + features.add(Feature.COMMA_STYLE_EXCEPT) + elif n.type == syms.raise_stmt: + # raise Exception, "msg" + if len(n.children) >= 4: + if n.children[-2].type == token.COMMA: + features.add(Feature.COMMA_STYLE_RAISE) + elif n.type == token.BACKQUOTE: + # `i'm surprised this ever existed` + features.add(Feature.BACKQUOTE_REPR) + + return features + + +def detect_target_versions(node: Node) -> Set[TargetVersion]: + """Detect the version to target based on the nodes used.""" + features = get_features_used(node) + return { + version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] + } + + +def get_future_imports(node: Node) -> Set[str]: + """Return a set of __future__ imports in the file.""" + imports: Set[str] = set() + + def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: + for child in children: + if isinstance(child, Leaf): + if child.type == token.NAME: + yield child.value + + elif child.type == syms.import_as_name: + orig_name = child.children[0] + assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports" + assert orig_name.type == token.NAME, "Invalid syntax parsing imports" + yield orig_name.value + + elif child.type == syms.import_as_names: + yield from get_imports_from_children(child.children) + + else: + raise AssertionError("Invalid syntax parsing imports") + + for child in node.children: + if child.type != syms.simple_stmt: + break + + first_child = child.children[0] + if isinstance(first_child, Leaf): + # Continue looking if we see a docstring; otherwise stop. 
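To make the hand-off from `get_features_used` to `detect_target_versions` concrete, here is a hedged sketch of the same subset test; it assumes `TargetVersion`, `Feature`, and `VERSION_TO_FEATURES` can be imported from `black.mode`, which the imports elsewhere in this diff suggest but do not show directly:

    from black.mode import TargetVersion, Feature, VERSION_TO_FEATURES

    used = {Feature.F_STRINGS, Feature.ASSIGNMENT_EXPRESSIONS}  # as get_features_used might report
    targets = {v for v in TargetVersion if used <= VERSION_TO_FEATURES[v]}
    # Assignment expressions (:=) only exist from Python 3.8 on, so `targets`
    # is limited to the 3.8-and-newer members of TargetVersion.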
+ if ( + len(child.children) == 2 + and first_child.type == token.STRING + and child.children[1].type == token.NEWLINE + ): + continue + + break + + elif first_child.type == syms.import_from: + module_name = first_child.children[1] + if not isinstance(module_name, Leaf) or module_name.value != "__future__": + break + + imports |= set(get_imports_from_children(first_child.children[3:])) + else: + break + + return imports + + +def assert_equivalent(src: str, dst: str, *, pass_num: int = 1) -> None: + """Raise AssertionError if `src` and `dst` aren't equivalent.""" + try: + src_ast = parse_ast(src) + except Exception as exc: + raise AssertionError( + "cannot use --safe with this file; failed to parse source file." + ) from exc + + try: + dst_ast = parse_ast(dst) + except Exception as exc: + log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) + raise AssertionError( + f"INTERNAL ERROR: Black produced invalid code on pass {pass_num}: {exc}. " + "Please report a bug on https://github.com/psf/black/issues. " + f"This invalid output might be helpful: {log}" + ) from None + + src_ast_str = "\n".join(stringify_ast(src_ast)) + dst_ast_str = "\n".join(stringify_ast(dst_ast)) + if src_ast_str != dst_ast_str: + log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) + raise AssertionError( + "INTERNAL ERROR: Black produced code that is not equivalent to the" + f" source on pass {pass_num}. Please report a bug on " + f"https://github.com/psf/black/issues. This diff might be helpful: {log}" + ) from None + + +def assert_stable(src: str, dst: str, mode: Mode) -> None: + """Raise AssertionError if `dst` reformats differently the second time.""" + newdst = format_str(dst, mode=mode) + if dst != newdst: + log = dump_to_file( + str(mode), + diff(src, dst, "source", "first pass"), + diff(dst, newdst, "first pass", "second pass"), + ) + raise AssertionError( + "INTERNAL ERROR: Black produced different code on the second pass of the" + " formatter. Please report a bug on https://github.com/psf/black/issues." + f" This diff might be helpful: {log}" + ) from None + + +@contextmanager +def nullcontext() -> Iterator[None]: + """Return an empty context manager. + + To be used like `nullcontext` in Python 3.7. + """ + yield + + +def patch_click() -> None: + """Make Click not crash on Python 3.6 with LANG=C. + + On certain misconfigured environments, Python 3 selects the ASCII encoding as the + default which restricts paths that it can access during the lifetime of the + application. Click refuses to work in this scenario by raising a RuntimeError. + + In case of Black the likelihood that non-ASCII characters are going to be used in + file paths is minimal since it's Python source code. Moreover, this crash was + spurious on Python 3.7 thanks to PEP 538 and PEP 540. 
+ """ + try: + from click import core + from click import _unicodefun + except ModuleNotFoundError: + return + + for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None # type: ignore + if hasattr(module, "_verify_python_env"): + module._verify_python_env = lambda: None # type: ignore + + +def patched_main() -> None: + maybe_install_uvloop() + freeze_support() + patch_click() + main() + + +if __name__ == "__main__": + patched_main() diff --git a/myenv/lib/python3.9/site-packages/black/__main__.py b/myenv/lib/python3.9/site-packages/black/__main__.py new file mode 100644 index 0000000..19b810b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/__main__.py @@ -0,0 +1,3 @@ +from black import patched_main + +patched_main() diff --git a/myenv/lib/python3.9/site-packages/black/brackets.py b/myenv/lib/python3.9/site-packages/black/brackets.py new file mode 100644 index 0000000..c5ed4bf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/brackets.py @@ -0,0 +1,334 @@ +"""Builds on top of nodes.py to track brackets.""" + +from dataclasses import dataclass, field +import sys +from typing import Dict, Iterable, List, Optional, Tuple, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from blib2to3.pytree import Leaf, Node +from blib2to3.pgen2 import token + +from black.nodes import syms, is_vararg, VARARGS_PARENTS, UNPACKING_PARENTS +from black.nodes import BRACKET, OPENING_BRACKETS, CLOSING_BRACKETS +from black.nodes import MATH_OPERATORS, COMPARATORS, LOGIC_OPERATORS + +# types +LN = Union[Leaf, Node] +Depth = int +LeafID = int +NodeType = int +Priority = int + + +COMPREHENSION_PRIORITY: Final = 20 +COMMA_PRIORITY: Final = 18 +TERNARY_PRIORITY: Final = 16 +LOGIC_PRIORITY: Final = 14 +STRING_PRIORITY: Final = 12 +COMPARATOR_PRIORITY: Final = 10 +MATH_PRIORITIES: Final = { + token.VBAR: 9, + token.CIRCUMFLEX: 8, + token.AMPER: 7, + token.LEFTSHIFT: 6, + token.RIGHTSHIFT: 6, + token.PLUS: 5, + token.MINUS: 5, + token.STAR: 4, + token.SLASH: 4, + token.DOUBLESLASH: 4, + token.PERCENT: 4, + token.AT: 4, + token.TILDE: 3, + token.DOUBLESTAR: 2, +} +DOT_PRIORITY: Final = 1 + + +class BracketMatchError(Exception): + """Raised when an opening bracket is unable to be matched to a closing bracket.""" + + +@dataclass +class BracketTracker: + """Keeps track of brackets on a line.""" + + depth: int = 0 + bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) + delimiters: Dict[LeafID, Priority] = field(default_factory=dict) + previous: Optional[Leaf] = None + _for_loop_depths: List[int] = field(default_factory=list) + _lambda_argument_depths: List[int] = field(default_factory=list) + invisible: List[Leaf] = field(default_factory=list) + + def mark(self, leaf: Leaf) -> None: + """Mark `leaf` with bracket-related metadata. Keep track of delimiters. + + All leaves receive an int `bracket_depth` field that stores how deep + within brackets a given leaf is. 0 means there are no enclosing brackets + that started on this line. + + If a leaf is itself a closing bracket, it receives an `opening_bracket` + field that it forms a pair with. This is a one-directional link to + avoid reference cycles. + + If a leaf is a delimiter (a token on which Black can split the line if + needed) and it's on depth 0, its `id()` is stored in the tracker's + `delimiters` field. 
+ """ + if leaf.type == token.COMMENT: + return + + self.maybe_decrement_after_for_loop_variable(leaf) + self.maybe_decrement_after_lambda_arguments(leaf) + if leaf.type in CLOSING_BRACKETS: + self.depth -= 1 + try: + opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) + except KeyError as e: + raise BracketMatchError( + "Unable to match a closing bracket to the following opening" + f" bracket: {leaf}" + ) from e + leaf.opening_bracket = opening_bracket + if not leaf.value: + self.invisible.append(leaf) + leaf.bracket_depth = self.depth + if self.depth == 0: + delim = is_split_before_delimiter(leaf, self.previous) + if delim and self.previous is not None: + self.delimiters[id(self.previous)] = delim + else: + delim = is_split_after_delimiter(leaf, self.previous) + if delim: + self.delimiters[id(leaf)] = delim + if leaf.type in OPENING_BRACKETS: + self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf + self.depth += 1 + if not leaf.value: + self.invisible.append(leaf) + self.previous = leaf + self.maybe_increment_lambda_arguments(leaf) + self.maybe_increment_for_loop_variable(leaf) + + def any_open_brackets(self) -> bool: + """Return True if there is an yet unmatched open bracket on the line.""" + return bool(self.bracket_match) + + def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: + """Return the highest priority of a delimiter found on the line. + + Values are consistent with what `is_split_*_delimiter()` return. + Raises ValueError on no delimiters. + """ + return max(v for k, v in self.delimiters.items() if k not in exclude) + + def delimiter_count_with_priority(self, priority: Priority = 0) -> int: + """Return the number of delimiters with the given `priority`. + + If no `priority` is passed, defaults to max priority on the line. + """ + if not self.delimiters: + return 0 + + priority = priority or self.max_delimiter_priority() + return sum(1 for p in self.delimiters.values() if p == priority) + + def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: + """In a for loop, or comprehension, the variables are often unpacks. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `for` and `in`. + """ + if leaf.type == token.NAME and leaf.value == "for": + self.depth += 1 + self._for_loop_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: + """See `maybe_increment_for_loop_variable` above for explanation.""" + if ( + self._for_loop_depths + and self._for_loop_depths[-1] == self.depth + and leaf.type == token.NAME + and leaf.value == "in" + ): + self.depth -= 1 + self._for_loop_depths.pop() + return True + + return False + + def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: + """In a lambda expression, there might be more than one argument. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `lambda` and `:`. 
+ """ + if leaf.type == token.NAME and leaf.value == "lambda": + self.depth += 1 + self._lambda_argument_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: + """See `maybe_increment_lambda_arguments` above for explanation.""" + if ( + self._lambda_argument_depths + and self._lambda_argument_depths[-1] == self.depth + and leaf.type == token.COLON + ): + self.depth -= 1 + self._lambda_argument_depths.pop() + return True + + return False + + def get_open_lsqb(self) -> Optional[Leaf]: + """Return the most recent opening square bracket (if any).""" + return self.bracket_match.get((self.depth - 1, token.RSQB)) + + +def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break after it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break after themselves. + + Higher numbers are higher priority. + """ + if leaf.type == token.COMMA: + return COMMA_PRIORITY + + return 0 + + +def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break before it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break before themselves. + + Higher numbers are higher priority. + """ + if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): + # * and ** might also be MATH_OPERATORS but in this case they are not. + # Don't treat them as a delimiter. + return 0 + + if ( + leaf.type == token.DOT + and leaf.parent + and leaf.parent.type not in {syms.import_from, syms.dotted_name} + and (previous is None or previous.type in CLOSING_BRACKETS) + ): + return DOT_PRIORITY + + if ( + leaf.type in MATH_OPERATORS + and leaf.parent + and leaf.parent.type not in {syms.factor, syms.star_expr} + ): + return MATH_PRIORITIES[leaf.type] + + if leaf.type in COMPARATORS: + return COMPARATOR_PRIORITY + + if ( + leaf.type == token.STRING + and previous is not None + and previous.type == token.STRING + ): + return STRING_PRIORITY + + if leaf.type not in {token.NAME, token.ASYNC}: + return 0 + + if ( + leaf.value == "for" + and leaf.parent + and leaf.parent.type in {syms.comp_for, syms.old_comp_for} + or leaf.type == token.ASYNC + ): + if ( + not isinstance(leaf.prev_sibling, Leaf) + or leaf.prev_sibling.value != "async" + ): + return COMPREHENSION_PRIORITY + + if ( + leaf.value == "if" + and leaf.parent + and leaf.parent.type in {syms.comp_if, syms.old_comp_if} + ): + return COMPREHENSION_PRIORITY + + if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: + return TERNARY_PRIORITY + + if leaf.value == "is": + return COMPARATOR_PRIORITY + + if ( + leaf.value == "in" + and leaf.parent + and leaf.parent.type in {syms.comp_op, syms.comparison} + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "not" + ) + ): + return COMPARATOR_PRIORITY + + if ( + leaf.value == "not" + and leaf.parent + and leaf.parent.type == syms.comp_op + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "is" + ) + ): + return COMPARATOR_PRIORITY + + if leaf.value in LOGIC_OPERATORS and leaf.parent: + return LOGIC_PRIORITY + + return 0 + + +def max_delimiter_priority_in_atom(node: LN) -> Priority: + """Return maximum delimiter priority inside `node`. 
+ + This is specific to atoms with contents contained in a pair of parentheses. + If `node` isn't an atom or there are no enclosing parentheses, returns 0. + """ + if node.type != syms.atom: + return 0 + + first = node.children[0] + last = node.children[-1] + if not (first.type == token.LPAR and last.type == token.RPAR): + return 0 + + bt = BracketTracker() + for c in node.children[1:-1]: + if isinstance(c, Leaf): + bt.mark(c) + else: + for leaf in c.leaves(): + bt.mark(leaf) + try: + return bt.max_delimiter_priority() + + except ValueError: + return 0 diff --git a/myenv/lib/python3.9/site-packages/black/cache.py b/myenv/lib/python3.9/site-packages/black/cache.py new file mode 100644 index 0000000..bca7279 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/cache.py @@ -0,0 +1,83 @@ +"""Caching of formatted files with feature-based invalidation.""" + +import os +import pickle +from pathlib import Path +import tempfile +from typing import Dict, Iterable, Set, Tuple + +from platformdirs import user_cache_dir + +from black.mode import Mode + +from _black_version import version as __version__ + + +# types +Timestamp = float +FileSize = int +CacheInfo = Tuple[Timestamp, FileSize] +Cache = Dict[str, CacheInfo] + + +CACHE_DIR = Path(user_cache_dir("black", version=__version__)) + + +def read_cache(mode: Mode) -> Cache: + """Read the cache if it exists and is well formed. + + If it is not well formed, the call to write_cache later should resolve the issue. + """ + cache_file = get_cache_file(mode) + if not cache_file.exists(): + return {} + + with cache_file.open("rb") as fobj: + try: + cache: Cache = pickle.load(fobj) + except (pickle.UnpicklingError, ValueError, IndexError): + return {} + + return cache + + +def get_cache_file(mode: Mode) -> Path: + return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" + + +def get_cache_info(path: Path) -> CacheInfo: + """Return the information used to check if a file is already formatted or not.""" + stat = path.stat() + return stat.st_mtime, stat.st_size + + +def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: + """Split an iterable of paths in `sources` into two sets. + + The first contains paths of files that modified on disk or are not in the + cache. The other contains paths to non-modified files. 
+ """ + todo, done = set(), set() + for src in sources: + res_src = src.resolve() + if cache.get(str(res_src)) != get_cache_info(res_src): + todo.add(src) + else: + done.add(src) + return todo, done + + +def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: + """Update the cache file.""" + cache_file = get_cache_file(mode) + try: + CACHE_DIR.mkdir(parents=True, exist_ok=True) + new_cache = { + **cache, + **{str(src.resolve()): get_cache_info(src) for src in sources}, + } + with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: + pickle.dump(new_cache, f, protocol=4) + os.replace(f.name, cache_file) + except OSError: + pass diff --git a/myenv/lib/python3.9/site-packages/black/comments.py b/myenv/lib/python3.9/site-packages/black/comments.py new file mode 100644 index 0000000..28b9117 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/comments.py @@ -0,0 +1,277 @@ +import sys +from dataclasses import dataclass +from functools import lru_cache +import re +from typing import Iterator, List, Optional, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from blib2to3.pytree import Node, Leaf +from blib2to3.pgen2 import token + +from black.nodes import first_leaf_column, preceding_leaf, container_of +from black.nodes import STANDALONE_COMMENT, WHITESPACE + +# types +LN = Union[Leaf, Node] + +FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"} +FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} +FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP} +FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} + + +@dataclass +class ProtoComment: + """Describes a piece of syntax that is a comment. + + It's not a :class:`blib2to3.pytree.Leaf` so that: + + * it can be cached (`Leaf` objects should not be reused more than once as + they store their lineno, column, prefix, and parent information); + * `newlines` and `consumed` fields are kept separate from the `value`. This + simplifies handling of special marker comments like ``# fmt: off/on``. + """ + + type: int # token.COMMENT or STANDALONE_COMMENT + value: str # content of the comment + newlines: int # how many newlines before the comment + consumed: int # how many characters of the original leaf's prefix did we consume + + +def generate_comments(leaf: LN) -> Iterator[Leaf]: + """Clean the prefix of the `leaf` and generate comments from it, if any. + + Comments in lib2to3 are shoved into the whitespace prefix. This happens + in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation + move because it does away with modifying the grammar to include all the + possible places in which comments can be placed. + + The sad consequence for us though is that comments don't "belong" anywhere. + This is why this function generates simple parentless Leaf objects for + comments. We simply don't know what the correct parent should be. + + No matter though, we can live without this. We really only need to + differentiate between inline and standalone comments. The latter don't + share the line with any code. + + Inline comments are emitted as regular token.COMMENT leaves. Standalone + are emitted with a fake STANDALONE_COMMENT token identifier. 
+ """ + for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): + yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) + + +@lru_cache(maxsize=4096) +def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: + """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" + result: List[ProtoComment] = [] + if not prefix or "#" not in prefix: + return result + + consumed = 0 + nlines = 0 + ignored_lines = 0 + for index, line in enumerate(re.split("\r?\n", prefix)): + consumed += len(line) + 1 # adding the length of the split '\n' + line = line.lstrip() + if not line: + nlines += 1 + if not line.startswith("#"): + # Escaped newlines outside of a comment are not really newlines at + # all. We treat a single-line comment following an escaped newline + # as a simple trailing comment. + if line.endswith("\\"): + ignored_lines += 1 + continue + + if index == ignored_lines and not is_endmarker: + comment_type = token.COMMENT # simple trailing comment + else: + comment_type = STANDALONE_COMMENT + comment = make_comment(line) + result.append( + ProtoComment( + type=comment_type, value=comment, newlines=nlines, consumed=consumed + ) + ) + nlines = 0 + return result + + +def make_comment(content: str) -> str: + """Return a consistently formatted comment from the given `content` string. + + All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single + space between the hash sign and the content. + + If `content` didn't start with a hash sign, one is provided. + """ + content = content.rstrip() + if not content: + return "#" + + if content[0] == "#": + content = content[1:] + NON_BREAKING_SPACE = " " + if ( + content + and content[0] == NON_BREAKING_SPACE + and not content.lstrip().startswith("type:") + ): + content = " " + content[1:] # Replace NBSP by a simple space + if content and content[0] not in " !:#'%": + content = " " + content + return "#" + content + + +def normalize_fmt_off(node: Node) -> None: + """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" + try_again = True + while try_again: + try_again = convert_one_fmt_off_pair(node) + + +def convert_one_fmt_off_pair(node: Node) -> bool: + """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. + + Returns True if a pair was converted. + """ + for leaf in node.leaves(): + previous_consumed = 0 + for comment in list_comments(leaf.prefix, is_endmarker=False): + if comment.value not in FMT_PASS: + previous_consumed = comment.consumed + continue + # We only want standalone comments. If there's no previous leaf or + # the previous leaf is indentation, it's a standalone comment in + # disguise. + if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT: + prev = preceding_leaf(leaf) + if prev: + if comment.value in FMT_OFF and prev.type not in WHITESPACE: + continue + if comment.value in FMT_SKIP and prev.type in WHITESPACE: + continue + + ignored_nodes = list(generate_ignored_nodes(leaf, comment)) + if not ignored_nodes: + continue + + first = ignored_nodes[0] # Can be a container node with the `leaf`. 
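As a concrete illustration of what the `# fmt: off` / `# fmt: on` handling above protects: everything between the two markers is merged into a single standalone comment leaf and reproduced verbatim, while `# fmt: skip` exempts just the line it sits on. The names below are invented for the example:

    # fmt: off
    identity = [
        1, 0,
        0, 1,
    ]
    # fmt: on

    row = [1,  2,   3]  # fmt: skip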
+ parent = first.parent + prefix = first.prefix + if comment.value in FMT_OFF: + first.prefix = prefix[comment.consumed :] + if comment.value in FMT_SKIP: + first.prefix = "" + hidden_value = "".join(str(n) for n in ignored_nodes) + if comment.value in FMT_OFF: + hidden_value = comment.value + "\n" + hidden_value + if comment.value in FMT_SKIP: + hidden_value += " " + comment.value + if hidden_value.endswith("\n"): + # That happens when one of the `ignored_nodes` ended with a NEWLINE + # leaf (possibly followed by a DEDENT). + hidden_value = hidden_value[:-1] + first_idx: Optional[int] = None + for ignored in ignored_nodes: + index = ignored.remove() + if first_idx is None: + first_idx = index + assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" + assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" + parent.insert_child( + first_idx, + Leaf( + STANDALONE_COMMENT, + hidden_value, + prefix=prefix[:previous_consumed] + "\n" * comment.newlines, + ), + ) + return True + + return False + + +def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]: + """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. + + If comment is skip, returns leaf only. + Stops at the end of the block. + """ + container: Optional[LN] = container_of(leaf) + if comment.value in FMT_SKIP: + prev_sibling = leaf.prev_sibling + if comment.value in leaf.prefix and prev_sibling is not None: + leaf.prefix = leaf.prefix.replace(comment.value, "") + siblings = [prev_sibling] + while ( + "\n" not in prev_sibling.prefix + and prev_sibling.prev_sibling is not None + ): + prev_sibling = prev_sibling.prev_sibling + siblings.insert(0, prev_sibling) + for sibling in siblings: + yield sibling + elif leaf.parent is not None: + yield leaf.parent + return + while container is not None and container.type != token.ENDMARKER: + if is_fmt_on(container): + return + + # fix for fmt: on in children + if contains_fmt_on_at_column(container, leaf.column): + for child in container.children: + if contains_fmt_on_at_column(child, leaf.column): + return + yield child + else: + yield container + container = container.next_sibling + + +def is_fmt_on(container: LN) -> bool: + """Determine whether formatting is switched on within a container. + Determined by whether the last `# fmt:` comment is `on` or `off`. + """ + fmt_on = False + for comment in list_comments(container.prefix, is_endmarker=False): + if comment.value in FMT_ON: + fmt_on = True + elif comment.value in FMT_OFF: + fmt_on = False + return fmt_on + + +def contains_fmt_on_at_column(container: LN, column: int) -> bool: + """Determine if children at a given column have formatting switched on.""" + for child in container.children: + if ( + isinstance(child, Node) + and first_leaf_column(child) == column + or isinstance(child, Leaf) + and child.column == column + ): + if is_fmt_on(child): + return True + + return False + + +def contains_pragma_comment(comment_list: List[Leaf]) -> bool: + """ + Returns: + True iff one of the comments in @comment_list is a pragma used by one + of the more common static analysis tools for python (e.g. mypy, flake8, + pylint). 
+ """ + for comment in comment_list: + if comment.value.startswith(("# type:", "# noqa", "# pylint:")): + return True + + return False diff --git a/myenv/lib/python3.9/site-packages/black/concurrency.py b/myenv/lib/python3.9/site-packages/black/concurrency.py new file mode 100644 index 0000000..24f67b6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/concurrency.py @@ -0,0 +1,57 @@ +import asyncio +import logging +import sys +from typing import Any, Iterable + +from black.output import err + + +def maybe_install_uvloop() -> None: + """If our environment has uvloop installed we use it. + + This is called only from command-line entry points to avoid + interfering with the parent process if Black is used as a library. + + """ + try: + import uvloop + + uvloop.install() + except ImportError: + pass + + +def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: + """asyncio signal handler that cancels all `tasks` and reports to stderr.""" + err("Aborted!") + for task in tasks: + task.cancel() + + +def shutdown(loop: asyncio.AbstractEventLoop) -> None: + """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" + try: + if sys.version_info[:2] >= (3, 7): + all_tasks = asyncio.all_tasks + else: + all_tasks = asyncio.Task.all_tasks + # This part is borrowed from asyncio/runners.py in Python 3.7b2. + to_cancel = [task for task in all_tasks(loop) if not task.done()] + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + if sys.version_info >= (3, 7): + loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) + else: + loop.run_until_complete( + asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) + ) + finally: + # `concurrent.futures.Future` objects cannot be cancelled once they + # are already running. There might be some when the `shutdown()` happened. + # Silence their logger's spew about the event loop being closed. 
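A hedged sketch of how these concurrency helpers fit together around an event loop: `cancel` is written to be registered as a signal handler (per its docstring), and `shutdown` is the teardown step. The workload below is a placeholder:

    import asyncio
    from black.concurrency import shutdown

    loop = asyncio.new_event_loop()
    try:
        # In a CLI, cancel(tasks) would typically be hooked up via
        # loop.add_signal_handler(...) before running the real work.
        loop.run_until_complete(asyncio.sleep(0))  # placeholder workload
    finally:
        shutdown(loop)  # cancels leftover tasks, waits for them, closes the loop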
+ cf_logger = logging.getLogger("concurrent.futures") + cf_logger.setLevel(logging.CRITICAL) + loop.close() diff --git a/myenv/lib/python3.9/site-packages/black/const.py b/myenv/lib/python3.9/site-packages/black/const.py new file mode 100644 index 0000000..dbb4826 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/const.py @@ -0,0 +1,4 @@ +DEFAULT_LINE_LENGTH = 88 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950 +DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" +STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/myenv/lib/python3.9/site-packages/black/debug.py b/myenv/lib/python3.9/site-packages/black/debug.py new file mode 100644 index 0000000..5143076 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/debug.py @@ -0,0 +1,48 @@ +from dataclasses import dataclass +from typing import Iterator, TypeVar, Union + +from blib2to3.pytree import Node, Leaf, type_repr +from blib2to3.pgen2 import token + +from black.nodes import Visitor +from black.output import out +from black.parsing import lib2to3_parse + +LN = Union[Leaf, Node] +T = TypeVar("T") + + +@dataclass +class DebugVisitor(Visitor[T]): + tree_depth: int = 0 + + def visit_default(self, node: LN) -> Iterator[T]: + indent = " " * (2 * self.tree_depth) + if isinstance(node, Node): + _type = type_repr(node.type) + out(f"{indent}{_type}", fg="yellow") + self.tree_depth += 1 + for child in node.children: + yield from self.visit(child) + + self.tree_depth -= 1 + out(f"{indent}/{_type}", fg="yellow", bold=False) + else: + _type = token.tok_name.get(node.type, str(node.type)) + out(f"{indent}{_type}", fg="blue", nl=False) + if node.prefix: + # We don't have to handle prefixes for `Node` objects since + # that delegates to the first child anyway. + out(f" {node.prefix!r}", fg="green", bold=False, nl=False) + out(f" {node.value!r}", fg="blue", bold=False) + + @classmethod + def show(cls, code: Union[str, Leaf, Node]) -> None: + """Pretty-print the lib2to3 AST of a given string of `code`. + + Convenience method for debugging. + """ + v: DebugVisitor[None] = DebugVisitor() + if isinstance(code, str): + code = lib2to3_parse(code) + list(v.visit(code)) diff --git a/myenv/lib/python3.9/site-packages/black/files.py b/myenv/lib/python3.9/site-packages/black/files.py new file mode 100644 index 0000000..560aa05 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/files.py @@ -0,0 +1,261 @@ +from functools import lru_cache +import io +import os +from pathlib import Path +import sys +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Sequence, + Tuple, + Union, + TYPE_CHECKING, +) + +from mypy_extensions import mypyc_attr +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError +import tomli + +from black.output import err +from black.report import Report +from black.handle_ipynb_magics import jupyter_dependencies_are_installed + +if TYPE_CHECKING: + import colorama # noqa: F401 + + +@lru_cache() +def find_project_root(srcs: Sequence[str]) -> Path: + """Return a directory containing .git, .hg, or pyproject.toml. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. 
+ """ + if not srcs: + srcs = [str(Path.cwd().resolve())] + + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory + + if (directory / ".hg").is_dir(): + return directory + + if (directory / "pyproject.toml").is_file(): + return directory + + return directory + + +def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: + """Find the absolute filepath to a pyproject.toml if it exists""" + path_project_root = find_project_root(path_search_start) + path_pyproject_toml = path_project_root / "pyproject.toml" + if path_pyproject_toml.is_file(): + return str(path_pyproject_toml) + + try: + path_user_pyproject_toml = find_user_pyproject_toml() + return ( + str(path_user_pyproject_toml) + if path_user_pyproject_toml.is_file() + else None + ) + except PermissionError as e: + # We do not have access to the user-level config directory, so ignore it. + err(f"Ignoring user configuration directory due to {e!r}") + return None + + +@mypyc_attr(patchable=True) +def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: + """Parse a pyproject toml file, pulling out relevant parts for Black + + If parsing fails, will raise a tomli.TOMLDecodeError + """ + with open(path_config, encoding="utf8") as f: + pyproject_toml = tomli.loads(f.read()) + config = pyproject_toml.get("tool", {}).get("black", {}) + return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} + + +@lru_cache() +def find_user_pyproject_toml() -> Path: + r"""Return the path to the top-level user configuration for black. + + This looks for ~\.black on Windows and ~/.config/black on Linux and other + Unix systems. + """ + if sys.platform == "win32": + # Windows + user_config_path = Path.home() / ".black" + else: + config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") + user_config_path = Path(config_root).expanduser() / "black" + return user_config_path.resolve() + + +@lru_cache() +def get_gitignore(root: Path) -> PathSpec: + """Return a PathSpec matching gitignore content if present.""" + gitignore = root / ".gitignore" + lines: List[str] = [] + if gitignore.is_file(): + with gitignore.open(encoding="utf-8") as gf: + lines = gf.readlines() + try: + return PathSpec.from_lines("gitwildmatch", lines) + except GitWildMatchPatternError as e: + err(f"Could not parse {gitignore}: {e}") + raise + + +def normalize_path_maybe_ignore( + path: Path, root: Path, report: Report +) -> Optional[str]: + """Normalize `path`. May return `None` if `path` was ignored. + + `report` is where "path ignored" output goes. 
+ """ + try: + abspath = path if path.is_absolute() else Path.cwd() / path + normalized_path = abspath.resolve().relative_to(root).as_posix() + except OSError as e: + report.path_ignored(path, f"cannot be read because {e}") + return None + + except ValueError: + if path.is_symlink(): + report.path_ignored(path, f"is a symbolic link that points outside {root}") + return None + + raise + + return normalized_path + + +def path_is_excluded( + normalized_path: str, + pattern: Optional[Pattern[str]], +) -> bool: + match = pattern.search(normalized_path) if pattern else None + return bool(match and match.group(0)) + + +def gen_python_files( + paths: Iterable[Path], + root: Path, + include: Pattern[str], + exclude: Pattern[str], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: Report, + gitignore: Optional[PathSpec], + *, + verbose: bool, + quiet: bool, +) -> Iterator[Path]: + """Generate all files under `path` whose paths are not excluded by the + `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, + but are included by the `include` regex. + + Symbolic links pointing outside of the `root` directory are ignored. + + `report` is where output about exclusions goes. + """ + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + for child in paths: + normalized_path = normalize_path_maybe_ignore(child, root, report) + if normalized_path is None: + continue + + # First ignore files matching .gitignore, if passed + if gitignore is not None and gitignore.match_file(normalized_path): + report.path_ignored(child, "matches the .gitignore file content") + continue + + # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. + normalized_path = "/" + normalized_path + if child.is_dir(): + normalized_path += "/" + + if path_is_excluded(normalized_path, exclude): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + if path_is_excluded(normalized_path, extend_exclude): + report.path_ignored( + child, "matches the --extend-exclude regular expression" + ) + continue + + if path_is_excluded(normalized_path, force_exclude): + report.path_ignored(child, "matches the --force-exclude regular expression") + continue + + if child.is_dir(): + # If gitignore is None, gitignore usage is disabled, while a Falsey + # gitignore is when the directory doesn't have a .gitignore file. + yield from gen_python_files( + child.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore + get_gitignore(child) if gitignore is not None else None, + verbose=verbose, + quiet=quiet, + ) + + elif child.is_file(): + if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + include_match = include.search(normalized_path) if include else True + if include_match: + yield child + + +def wrap_stream_for_windows( + f: io.TextIOWrapper, +) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]: + """ + Wrap stream with colorama's wrap_stream so colors are shown on Windows. + + If `colorama` is unavailable, the original stream is returned unmodified. + Otherwise, the `wrap_stream()` function determines whether the stream needs + to be wrapped for a Windows environment and will accordingly either return + an `AnsiToWin32` wrapper or the original stream. 
+ """ + try: + from colorama.initialise import wrap_stream + except ImportError: + return f + else: + # Set `strip=False` to avoid needing to modify test_express_diff_with_color. + return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) diff --git a/myenv/lib/python3.9/site-packages/black/handle_ipynb_magics.py b/myenv/lib/python3.9/site-packages/black/handle_ipynb_magics.py new file mode 100644 index 0000000..a0ed56b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/handle_ipynb_magics.py @@ -0,0 +1,465 @@ +"""Functions to process IPython magics with.""" + +from functools import lru_cache +import dataclasses +import ast +from typing import Dict, List, Tuple, Optional + +import secrets +import sys +import collections + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from black.report import NothingChanged +from black.output import out + + +TRANSFORMED_MAGICS = frozenset( + ( + "get_ipython().run_cell_magic", + "get_ipython().system", + "get_ipython().getoutput", + "get_ipython().run_line_magic", + ) +) +TOKENS_TO_IGNORE = frozenset( + ( + "ENDMARKER", + "NL", + "NEWLINE", + "COMMENT", + "DEDENT", + "UNIMPORTANT_WS", + "ESCAPED_NL", + ) +) +PYTHON_CELL_MAGICS = frozenset( + ( + "capture", + "prun", + "pypy", + "python", + "python3", + "time", + "timeit", + ) +) +TOKEN_HEX = secrets.token_hex + + +@dataclasses.dataclass(frozen=True) +class Replacement: + mask: str + src: str + + +@lru_cache() +def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool: + try: + import IPython # noqa:F401 + import tokenize_rt # noqa:F401 + except ModuleNotFoundError: + if verbose or not quiet: + msg = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``" + ) + out(msg) + return False + else: + return True + + +def remove_trailing_semicolon(src: str) -> Tuple[str, bool]: + """Remove trailing semicolon from Jupyter notebook cell. + + For example, + + fig, ax = plt.subplots() + ax.plot(x_data, y_data); # plot data + + would become + + fig, ax = plt.subplots() + ax.plot(x_data, y_data) # plot data + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + from tokenize_rt import ( + src_to_tokens, + tokens_to_src, + reversed_enumerate, + ) + + tokens = src_to_tokens(src) + trailing_semicolon = False + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + if token.name == "OP" and token.src == ";": + del tokens[idx] + trailing_semicolon = True + break + if not trailing_semicolon: + return src, False + return tokens_to_src(tokens), True + + +def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str: + """Put trailing semicolon back if cell originally had it. + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + if not has_trailing_semicolon: + return src + from tokenize_rt import src_to_tokens, tokens_to_src, reversed_enumerate + + tokens = src_to_tokens(src) + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + tokens[idx] = token._replace(src=token.src + ";") + break + else: # pragma: nocover + raise AssertionError( + "INTERNAL ERROR: Was not able to reinstate trailing semicolon. " + "Please report a bug on https://github.com/psf/black/issues. 
" + ) from None + return str(tokens_to_src(tokens)) + + +def mask_cell(src: str) -> Tuple[str, List[Replacement]]: + """Mask IPython magics so content becomes parseable Python code. + + For example, + + %matplotlib inline + 'foo' + + becomes + + "25716f358c32750e" + 'foo' + + The replacements are returned, along with the transformed code. + """ + replacements: List[Replacement] = [] + try: + ast.parse(src) + except SyntaxError: + # Might have IPython magics, will process below. + pass + else: + # Syntax is fine, nothing to mask, early return. + return src, replacements + + from IPython.core.inputtransformer2 import TransformerManager + + transformer_manager = TransformerManager() + transformed = transformer_manager.transform_cell(src) + transformed, cell_magic_replacements = replace_cell_magics(transformed) + replacements += cell_magic_replacements + transformed = transformer_manager.transform_cell(transformed) + transformed, magic_replacements = replace_magics(transformed) + if len(transformed.splitlines()) != len(src.splitlines()): + # Multi-line magic, not supported. + raise NothingChanged + replacements += magic_replacements + return transformed, replacements + + +def get_token(src: str, magic: str) -> str: + """Return randomly generated token to mask IPython magic with. + + For example, if 'magic' was `%matplotlib inline`, then a possible + token to mask it with would be `"43fdd17f7e5ddc83"`. The token + will be the same length as the magic, and we make sure that it was + not already present anywhere else in the cell. + """ + assert magic + nbytes = max(len(magic) // 2 - 1, 1) + token = TOKEN_HEX(nbytes) + counter = 0 + while token in src: + token = TOKEN_HEX(nbytes) + counter += 1 + if counter > 100: + raise AssertionError( + "INTERNAL ERROR: Black was not able to replace IPython magic. " + "Please report a bug on https://github.com/psf/black/issues. " + f"The magic might be helpful: {magic}" + ) from None + if len(token) + 2 < len(magic): + token = f"{token}." + return f'"{token}"' + + +def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace cell magic with token. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, + + get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n') + + becomes + + "a794." + ls =!ls + + The replacement, along with the transformed code, is returned. + """ + replacements: List[Replacement] = [] + + tree = ast.parse(src) + + cell_magic_finder = CellMagicFinder() + cell_magic_finder.visit(tree) + if cell_magic_finder.cell_magic is None: + return src, replacements + header = cell_magic_finder.cell_magic.header + mask = get_token(src, header) + replacements.append(Replacement(mask=mask, src=header)) + return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements + + +def replace_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace magics within body of cell. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, this + + get_ipython().run_line_magic('matplotlib', 'inline') + 'foo' + + becomes + + "5e67db56d490fd39" + 'foo' + + The replacement, along with the transformed code, are returned. 
+ """ + replacements = [] + magic_finder = MagicFinder() + magic_finder.visit(ast.parse(src)) + new_srcs = [] + for i, line in enumerate(src.splitlines(), start=1): + if i in magic_finder.magics: + offsets_and_magics = magic_finder.magics[i] + if len(offsets_and_magics) != 1: # pragma: nocover + raise AssertionError( + f"Expecting one magic per line, got: {offsets_and_magics}\n" + "Please report a bug on https://github.com/psf/black/issues." + ) + col_offset, magic = ( + offsets_and_magics[0].col_offset, + offsets_and_magics[0].magic, + ) + mask = get_token(src, magic) + replacements.append(Replacement(mask=mask, src=magic)) + line = line[:col_offset] + mask + new_srcs.append(line) + return "\n".join(new_srcs), replacements + + +def unmask_cell(src: str, replacements: List[Replacement]) -> str: + """Remove replacements from cell. + + For example + + "9b20" + foo = bar + + becomes + + %%time + foo = bar + """ + for replacement in replacements: + src = src.replace(replacement.mask, replacement.src) + return src + + +def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]: + """Check if attribute is IPython magic. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + """ + return ( + isinstance(node, ast.Attribute) + and isinstance(node.value, ast.Call) + and isinstance(node.value.func, ast.Name) + and node.value.func.id == "get_ipython" + ) + + +def _get_str_args(args: List[ast.expr]) -> List[str]: + str_args = [] + for arg in args: + assert isinstance(arg, ast.Str) + str_args.append(arg.s) + return str_args + + +@dataclasses.dataclass(frozen=True) +class CellMagic: + name: str + params: Optional[str] + body: str + + @property + def header(self) -> str: + if self.params: + return f"%%{self.name} {self.params}" + return f"%%{self.name}" + + +# ast.NodeVisitor + dataclass = breakage under mypyc. +class CellMagicFinder(ast.NodeVisitor): + """Find cell magics. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %%time\nfoo() + + would have been transformed to + + get_ipython().run_cell_magic('time', '', 'foo()\\n') + + and we look for instances of the latter. + """ + + def __init__(self, cell_magic: Optional[CellMagic] = None) -> None: + self.cell_magic = cell_magic + + def visit_Expr(self, node: ast.Expr) -> None: + """Find cell magic, extract header and body.""" + if ( + isinstance(node.value, ast.Call) + and _is_ipython_magic(node.value.func) + and node.value.func.attr == "run_cell_magic" + ): + args = _get_str_args(node.value.args) + self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2]) + self.generic_visit(node) + + +@dataclasses.dataclass(frozen=True) +class OffsetAndMagic: + col_offset: int + magic: str + + +# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here +# as mypyc will generate broken code. +class MagicFinder(ast.NodeVisitor): + """Visit cell to look for get_ipython calls. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %matplotlib inline + + would have been transformed to + + get_ipython().run_line_magic('matplotlib', 'inline') + + and we look for instances of the latter (and likewise for other + types of magics). 
+ """ + + def __init__(self) -> None: + self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list) + + def visit_Assign(self, node: ast.Assign) -> None: + """Look for system assign magics. + + For example, + + black_version = !black --version + env = %env var + + would have been (respectively) transformed to + + black_version = get_ipython().getoutput('black --version') + env = get_ipython().run_line_magic('env', 'var') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "getoutput": + src = f"!{args[0]}" + elif node.value.func.attr == "run_line_magic": + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + else: + raise AssertionError( + f"Unexpected IPython magic {node.value.func.attr!r} found. " + "Please report a bug on https://github.com/psf/black/issues." + ) from None + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) + + def visit_Expr(self, node: ast.Expr) -> None: + """Look for magics in body of cell. + + For examples, + + !ls + !!ls + ?ls + ??ls + + would (respectively) get transformed to + + get_ipython().system('ls') + get_ipython().getoutput('ls') + get_ipython().run_line_magic('pinfo', 'ls') + get_ipython().run_line_magic('pinfo2', 'ls') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "run_line_magic": + if args[0] == "pinfo": + src = f"?{args[1]}" + elif args[0] == "pinfo2": + src = f"??{args[1]}" + else: + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + elif node.value.func.attr == "system": + src = f"!{args[0]}" + elif node.value.func.attr == "getoutput": + src = f"!!{args[0]}" + else: + raise NothingChanged # unsupported magic. + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) diff --git a/myenv/lib/python3.9/site-packages/black/linegen.py b/myenv/lib/python3.9/site-packages/black/linegen.py new file mode 100644 index 0000000..f234913 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/linegen.py @@ -0,0 +1,1025 @@ +""" +Generating lines of code. 
+""" +from functools import partial, wraps +import sys +from typing import Collection, Iterator, List, Optional, Set, Union + +from black.nodes import WHITESPACE, RARROW, STATEMENT, STANDALONE_COMMENT +from black.nodes import ASSIGNMENTS, OPENING_BRACKETS, CLOSING_BRACKETS +from black.nodes import Visitor, syms, first_child_is_arith, ensure_visible +from black.nodes import is_docstring, is_empty_tuple, is_one_tuple, is_one_tuple_between +from black.nodes import is_walrus_assignment, is_yield, is_vararg, is_multiline_string +from black.nodes import is_stub_suite, is_stub_body, is_atom_with_invisible_parens +from black.nodes import wrap_in_parentheses +from black.brackets import max_delimiter_priority_in_atom +from black.brackets import DOT_PRIORITY, COMMA_PRIORITY +from black.lines import Line, line_to_string, is_line_short_enough +from black.lines import can_omit_invisible_parens, can_be_split, append_leaves +from black.comments import generate_comments, list_comments, FMT_OFF +from black.numerics import normalize_numeric_literal +from black.strings import get_string_prefix, fix_docstring +from black.strings import normalize_string_prefix, normalize_string_quotes +from black.trans import Transformer, CannotTransform, StringMerger +from black.trans import StringSplitter, StringParenWrapper, StringParenStripper +from black.mode import Mode +from black.mode import Feature + +from blib2to3.pytree import Node, Leaf +from blib2to3.pgen2 import token + + +# types +LeafID = int +LN = Union[Leaf, Node] + + +class CannotSplit(CannotTransform): + """A readable split that fits the allotted line length is impossible.""" + + +# This isn't a dataclass because @dataclass + Generic breaks mypyc. +# See also https://github.com/mypyc/mypyc/issues/827. +class LineGenerator(Visitor[Line]): + """Generates reformatted Line objects. Empty lines are not emitted. + + Note: destroys the tree it's visiting by mutating prefixes of its leaves + in ways that will no longer stringify to valid Python code on the tree. + """ + + def __init__(self, mode: Mode, remove_u_prefix: bool = False) -> None: + self.mode = mode + self.remove_u_prefix = remove_u_prefix + self.current_line: Line + self.__post_init__() + + def line(self, indent: int = 0) -> Iterator[Line]: + """Generate a line. + + If the line is empty, only emit if it makes sense. + If the line is too long, split it first and then generate. + + If any lines were generated, set up a new current_line. + """ + if not self.current_line: + self.current_line.depth += indent + return # Line is empty, don't emit. Creating a new one unnecessary. + + complete_line = self.current_line + self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent) + yield complete_line + + def visit_default(self, node: LN) -> Iterator[Line]: + """Default `visit_*()` implementation. 
Recurses to children of `node`.""" + if isinstance(node, Leaf): + any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() + for comment in generate_comments(node): + if any_open_brackets: + # any comment within brackets is subject to splitting + self.current_line.append(comment) + elif comment.type == token.COMMENT: + # regular trailing comment + self.current_line.append(comment) + yield from self.line() + + else: + # regular standalone comment + yield from self.line() + + self.current_line.append(comment) + yield from self.line() + + normalize_prefix(node, inside_brackets=any_open_brackets) + if self.mode.string_normalization and node.type == token.STRING: + node.value = normalize_string_prefix( + node.value, remove_u_prefix=self.remove_u_prefix + ) + node.value = normalize_string_quotes(node.value) + if node.type == token.NUMBER: + normalize_numeric_literal(node) + if node.type not in WHITESPACE: + self.current_line.append(node) + yield from super().visit_default(node) + + def visit_INDENT(self, node: Leaf) -> Iterator[Line]: + """Increase indentation level, maybe yield a line.""" + # In blib2to3 INDENT never holds comments. + yield from self.line(+1) + yield from self.visit_default(node) + + def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: + """Decrease indentation level, maybe yield a line.""" + # The current line might still wait for trailing comments. At DEDENT time + # there won't be any (they would be prefixes on the preceding NEWLINE). + # Emit the line then. + yield from self.line() + + # While DEDENT has no value, its prefix may contain standalone comments + # that belong to the current indentation level. Get 'em. + yield from self.visit_default(node) + + # Finally, emit the dedent. + yield from self.line(-1) + + def visit_stmt( + self, node: Node, keywords: Set[str], parens: Set[str] + ) -> Iterator[Line]: + """Visit a statement. + + This implementation is shared for `if`, `while`, `for`, `try`, `except`, + `def`, `with`, `class`, `assert`, and assignments. + + The relevant Python language `keywords` for a given statement will be + NAME leaves within it. This methods puts those on a separate line. + + `parens` holds a set of string leaf values immediately after which + invisible parens should be put. 
+ """ + normalize_invisible_parens(node, parens_after=parens) + for child in node.children: + if child.type == token.NAME and child.value in keywords: # type: ignore + yield from self.line() + + yield from self.visit(child) + + def visit_match_case(self, node: Node) -> Iterator[Line]: + """Visit either a match or case statement.""" + normalize_invisible_parens(node, parens_after=set()) + + yield from self.line() + for child in node.children: + yield from self.visit(child) + + def visit_suite(self, node: Node) -> Iterator[Line]: + """Visit a suite.""" + if self.mode.is_pyi and is_stub_suite(node): + yield from self.visit(node.children[2]) + else: + yield from self.visit_default(node) + + def visit_simple_stmt(self, node: Node) -> Iterator[Line]: + """Visit a statement without nested statements.""" + if first_child_is_arith(node): + wrap_in_parentheses(node, node.children[0], visible=False) + is_suite_like = node.parent and node.parent.type in STATEMENT + if is_suite_like: + if self.mode.is_pyi and is_stub_body(node): + yield from self.visit_default(node) + else: + yield from self.line(+1) + yield from self.visit_default(node) + yield from self.line(-1) + + else: + if ( + not self.mode.is_pyi + or not node.parent + or not is_stub_suite(node.parent) + ): + yield from self.line() + yield from self.visit_default(node) + + def visit_async_stmt(self, node: Node) -> Iterator[Line]: + """Visit `async def`, `async for`, `async with`.""" + yield from self.line() + + children = iter(node.children) + for child in children: + yield from self.visit(child) + + if child.type == token.ASYNC: + break + + internal_stmt = next(children) + for child in internal_stmt.children: + yield from self.visit(child) + + def visit_decorators(self, node: Node) -> Iterator[Line]: + """Visit decorators.""" + for child in node.children: + yield from self.line() + yield from self.visit(child) + + def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: + """Remove a semicolon and put the other statement on a separate line.""" + yield from self.line() + + def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: + """End of file. Process outstanding comments and end with a newline.""" + yield from self.visit_default(leaf) + yield from self.line() + + def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: + if not self.current_line.bracket_tracker.any_open_brackets(): + yield from self.line() + yield from self.visit_default(leaf) + + def visit_factor(self, node: Node) -> Iterator[Line]: + """Force parentheses between a unary op and a binary power: + + -2 ** 8 -> -(2 ** 8) + """ + _operator, operand = node.children + if ( + operand.type == syms.power + and len(operand.children) == 3 + and operand.children[1].type == token.DOUBLESTAR + ): + lpar = Leaf(token.LPAR, "(") + rpar = Leaf(token.RPAR, ")") + index = operand.remove() or 0 + node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) + yield from self.visit_default(node) + + def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if is_docstring(leaf) and "\\\n" not in leaf.value: + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. 
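+            # Worked example: for a docstring leaf '"""  pad  """' the prefix
+            # is empty, quote_char is '"', quote_len is 3 (the second character
+            # matches the first), and the inner "  pad  " is stripped to "pad"
+            # further down.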
+ docstring = normalize_string_prefix(leaf.value, self.remove_u_prefix) + prefix = get_string_prefix(docstring) + docstring = docstring[len(prefix) :] # Remove the prefix + quote_char = docstring[0] + # A natural way to remove the outer quotes is to do: + # docstring = docstring.strip(quote_char) + # but that breaks on """""x""" (which is '""x'). + # So we actually need to remove the first character and the next two + # characters but only if they are the same as the first. + quote_len = 1 if docstring[1] != quote_char else 3 + docstring = docstring[quote_len:-quote_len] + docstring_started_empty = not docstring + + if is_multiline_string(leaf): + indent = " " * 4 * self.current_line.depth + docstring = fix_docstring(docstring, indent) + else: + docstring = docstring.strip() + + if docstring: + # Add some padding if the docstring starts / ends with a quote mark. + if docstring[0] == quote_char: + docstring = " " + docstring + if docstring[-1] == quote_char: + docstring += " " + if docstring[-1] == "\\": + backslash_count = len(docstring) - len(docstring.rstrip("\\")) + if backslash_count % 2: + # Odd number of tailing backslashes, add some padding to + # avoid escaping the closing string quote. + docstring += " " + elif not docstring_started_empty: + docstring = " " + + # We could enforce triple quotes at this point. + quote = quote_char * quote_len + leaf.value = prefix + quote + docstring + quote + + yield from self.visit_default(leaf) + + def __post_init__(self) -> None: + """You are in a twisty little maze of passages.""" + self.current_line = Line(mode=self.mode) + + v = self.visit_stmt + Ø: Set[str] = set() + self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) + self.visit_if_stmt = partial( + v, keywords={"if", "else", "elif"}, parens={"if", "elif"} + ) + self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) + self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) + self.visit_try_stmt = partial( + v, keywords={"try", "except", "else", "finally"}, parens=Ø + ) + self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) + self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) + self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) + self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) + self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) + self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) + self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) + self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) + self.visit_async_funcdef = self.visit_async_stmt + self.visit_decorated = self.visit_decorators + + # PEP 634 + self.visit_match_stmt = self.visit_match_case + self.visit_case_block = self.visit_match_case + + +def transform_line( + line: Line, mode: Mode, features: Collection[Feature] = () +) -> Iterator[Line]: + """Transform a `line`, potentially splitting it into many lines. + + They should fit in the allotted `line_length` but might not be able to. + + `features` are syntactical features that may be used in the output. 
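+
+    For example, a line that is only a comment is yielded unchanged below,
+    an overlong `def` goes through `left_hand_split`, and everything else
+    falls through to the right-hand-split transformer chain assembled here.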
+ """ + if line.is_comment: + yield line + return + + line_str = line_to_string(line) + + ll = mode.line_length + sn = mode.string_normalization + string_merge = StringMerger(ll, sn) + string_paren_strip = StringParenStripper(ll, sn) + string_split = StringSplitter(ll, sn) + string_paren_wrap = StringParenWrapper(ll, sn) + + transformers: List[Transformer] + if ( + not line.contains_uncollapsable_type_comments() + and not line.should_split_rhs + and not line.magic_trailing_comma + and ( + is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) + or line.contains_unsplittable_type_ignore() + ) + and not (line.inside_brackets and line.contains_standalone_comments()) + ): + # Only apply basic string preprocessing, since lines shouldn't be split here. + if mode.experimental_string_processing: + transformers = [string_merge, string_paren_strip] + else: + transformers = [] + elif line.is_def: + transformers = [left_hand_split] + else: + + def _rhs( + self: object, line: Line, features: Collection[Feature] + ) -> Iterator[Line]: + """Wraps calls to `right_hand_split`. + + The calls increasingly `omit` right-hand trailers (bracket pairs with + content), meaning the trailers get glued together to split on another + bracket pair instead. + """ + for omit in generate_trailers_to_omit(line, mode.line_length): + lines = list( + right_hand_split(line, mode.line_length, features, omit=omit) + ) + # Note: this check is only able to figure out if the first line of the + # *current* transformation fits in the line length. This is true only + # for simple cases. All others require running more transforms via + # `transform_line()`. This check doesn't know if those would succeed. + if is_line_short_enough(lines[0], line_length=mode.line_length): + yield from lines + return + + # All splits failed, best effort split with no omits. + # This mostly happens to multiline strings that are by definition + # reported as not fitting a single line, as well as lines that contain + # trailing commas (those have to be exploded). + yield from right_hand_split( + line, line_length=mode.line_length, features=features + ) + + # HACK: nested functions (like _rhs) compiled by mypyc don't retain their + # __name__ attribute which is needed in `run_transformer` further down. + # Unfortunately a nested class breaks mypyc too. So a class must be created + # via type ... https://github.com/mypyc/mypyc/issues/884 + rhs = type("rhs", (), {"__call__": _rhs})() + + if mode.experimental_string_processing: + if line.inside_brackets: + transformers = [ + string_merge, + string_paren_strip, + string_split, + delimiter_split, + standalone_comment_split, + string_paren_wrap, + rhs, + ] + else: + transformers = [ + string_merge, + string_paren_strip, + string_split, + string_paren_wrap, + rhs, + ] + else: + if line.inside_brackets: + transformers = [delimiter_split, standalone_comment_split, rhs] + else: + transformers = [rhs] + + for transform in transformers: + # We are accumulating lines in `result` because we might want to abort + # mission and return the original line in the end, or attempt a different + # split altogether. + try: + result = run_transformer(line, transform, mode, features, line_str=line_str) + except CannotTransform: + continue + else: + yield from result + break + + else: + yield line + + +def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: + """Split line into many lines, starting with the first matching bracket pair. 
+ + Note: this usually looks weird, only use this for function definitions. + Prefer RHS otherwise. This is why this function is not symmetrical with + :func:`right_hand_split` which also handles optional parentheses. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = head_leaves + matching_bracket: Optional[Leaf] = None + for leaf in line.leaves: + if ( + current_leaves is body_leaves + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket is matching_bracket + ): + current_leaves = tail_leaves if body_leaves else head_leaves + current_leaves.append(leaf) + if current_leaves is head_leaves: + if leaf.type in OPENING_BRACKETS: + matching_bracket = leaf + current_leaves = body_leaves + if not matching_bracket: + raise CannotSplit("No brackets found") + + head = bracket_split_build_line(head_leaves, line, matching_bracket) + body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, matching_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + for result in (head, body, tail): + if result: + yield result + + +def right_hand_split( + line: Line, + line_length: int, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + """Split line into many lines, starting with the last matching bracket pair. + + If the split was by optional parentheses, attempt splitting without them, too. + `omit` is a collection of closing bracket IDs that shouldn't be considered for + this split. + + Note: running this function modifies `bracket_depth` on the leaves of `line`. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = tail_leaves + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + for leaf in reversed(line.leaves): + if current_leaves is body_leaves: + if leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves + current_leaves.append(leaf) + if current_leaves is tail_leaves: + if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves + if not (opening_bracket and closing_bracket and head_leaves): + # If there is no opening or closing_bracket that means the split failed and + # all content is in the tail. Otherwise, if `head_leaves` are empty, it means + # the matching `opening_bracket` wasn't available on `line` anymore. 
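+        # CannotSplit is a CannotTransform, so when this is raised
+        # transform_line() simply moves on to its next transformer.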
+ raise CannotSplit("No brackets found") + + tail_leaves.reverse() + body_leaves.reverse() + head_leaves.reverse() + head = bracket_split_build_line(head_leaves, line, opening_bracket) + body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, opening_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + if ( + Feature.FORCE_OPTIONAL_PARENTHESES not in features + # the opening bracket is an optional paren + and opening_bracket.type == token.LPAR + and not opening_bracket.value + # the closing bracket is an optional paren + and closing_bracket.type == token.RPAR + and not closing_bracket.value + # it's not an import (optional parens are the only thing we can split on + # in this case; attempting a split without them is a waste of time) + and not line.is_import + # there are no standalone comments in the body + and not body.contains_standalone_comments(0) + # and we can actually remove the parens + and can_omit_invisible_parens(body, line_length, omit_on_explode=omit) + ): + omit = {id(closing_bracket), *omit} + try: + yield from right_hand_split(line, line_length, features=features, omit=omit) + return + + except CannotSplit as e: + if not ( + can_be_split(body) + or is_line_short_enough(body, line_length=line_length) + ): + raise CannotSplit( + "Splitting failed, body is still too long and can't be split." + ) from e + + elif head.contains_multiline_strings() or tail.contains_multiline_strings(): + raise CannotSplit( + "The current optional pair of parentheses is bound to fail to" + " satisfy the splitting algorithm because the head or the tail" + " contains multiline strings which by definition never fit one" + " line." + ) from e + + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + for result in (head, body, tail): + if result: + yield result + + +def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: + """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. + + Do nothing otherwise. + + A left- or right-hand split is based on a pair of brackets. Content before + (and including) the opening bracket is left on one line, content inside the + brackets is put on a separate line, and finally content starting with and + following the closing bracket is put on a separate line. + + Those are called `head`, `body`, and `tail`, respectively. If the split + produced the same line (all content in `head`) or ended up with an empty `body` + and the `tail` is just the closing bracket, then it's considered failed. + """ + tail_len = len(str(tail).strip()) + if not body: + if tail_len == 0: + raise CannotSplit("Splitting brackets produced the same line") + + elif tail_len < 3: + raise CannotSplit( + f"Splitting brackets on an empty body to save {tail_len} characters is" + " not worth it" + ) + + +def bracket_split_build_line( + leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False +) -> Line: + """Return a new line with given `leaves` and respective comments from `original`. + + If `is_body` is True, the result line is one-indented inside brackets and as such + has its first leaf's prefix normalized and a trailing comma added when expected. + """ + result = Line(mode=original.mode, depth=original.depth) + if is_body: + result.inside_brackets = True + result.depth += 1 + if leaves: + # Since body is a new indent level, remove spurious leading whitespace. 
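+            # (the first leaf's prefix may still carry the newline and
+            # indentation of its original physical line, which no longer
+            # apply at the new depth)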
+ normalize_prefix(leaves[0], inside_brackets=True) + # Ensure a trailing comma for imports and standalone function arguments, but + # be careful not to add one after any comments or within type annotations. + no_commas = ( + original.is_def + and opening_bracket.value == "(" + and not any(leaf.type == token.COMMA for leaf in leaves) + # In particular, don't add one within a parenthesized return annotation. + # Unfortunately the indicator we're in a return annotation (RARROW) may + # be defined directly in the parent node, the parent of the parent ... + # and so on depending on how complex the return annotation is. + # This isn't perfect and there's some false negatives but they are in + # contexts were a comma is actually fine. + and not any( + node.prev_sibling.type == RARROW + for node in ( + leaves[0].parent, + getattr(leaves[0].parent, "parent", None), + ) + if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf) + ) + ) + + if original.is_import or no_commas: + for i in range(len(leaves) - 1, -1, -1): + if leaves[i].type == STANDALONE_COMMENT: + continue + + if leaves[i].type != token.COMMA: + new_comma = Leaf(token.COMMA, ",") + leaves.insert(i + 1, new_comma) + break + + # Populate the line + for leaf in leaves: + result.append(leaf, preformatted=True) + for comment_after in original.comments_after(leaf): + result.append(comment_after, preformatted=True) + if is_body and should_split_line(result, opening_bracket): + result.should_split_rhs = True + return result + + +def dont_increase_indentation(split_func: Transformer) -> Transformer: + """Normalize prefix of the first leaf in every line returned by `split_func`. + + This is a decorator over relevant split functions. + """ + + @wraps(split_func) + def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: + for line in split_func(line, features): + normalize_prefix(line.leaves[0], inside_brackets=True) + yield line + + return split_wrapper + + +@dont_increase_indentation +def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: + """Split according to delimiters of the highest priority. + + If the appropriate Features are given, the split will add trailing commas + also in function signatures and calls that contain `*` and `**`. 
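+
+    For example, the body `a, b` of an exploded call is split at its commas,
+    one argument per line, and a trailing comma is appended afterwards when
+    that is safe.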
+ """ + try: + last_leaf = line.leaves[-1] + except IndexError: + raise CannotSplit("Line empty") from None + + bt = line.bracket_tracker + try: + delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) + except ValueError: + raise CannotSplit("No delimiters found") from None + + if delimiter_priority == DOT_PRIORITY: + if bt.delimiter_count_with_priority(delimiter_priority) == 1: + raise CannotSplit("Splitting a single attribute from its owner looks wrong") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + lowest_depth = sys.maxsize + trailing_comma_safe = True + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + lowest_depth = min(lowest_depth, leaf.bracket_depth) + if leaf.bracket_depth == lowest_depth: + if is_vararg(leaf, within={syms.typedargslist}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features + ) + elif is_vararg(leaf, within={syms.arglist, syms.argument}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features + ) + + leaf_priority = bt.delimiters.get(id(leaf)) + if leaf_priority == delimiter_priority: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + if current_line: + if ( + trailing_comma_safe + and delimiter_priority == COMMA_PRIORITY + and current_line.leaves[-1].type != token.COMMA + and current_line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + current_line.append(new_comma) + yield current_line + + +@dont_increase_indentation +def standalone_comment_split( + line: Line, features: Collection[Feature] = () +) -> Iterator[Line]: + """Split standalone comments from the rest of the line.""" + if not line.contains_standalone_comments(0): + raise CannotSplit("Line does not have any standalone comments") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + if current_line: + yield current_line + + +def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: + """Leave existing extra newlines if not `inside_brackets`. Remove everything + else. + + Note: don't use backslashes for formatting or you'll lose your voting rights. 
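+
+    For example, outside brackets a prefix of "\n\n    " keeps its two
+    newlines but drops the trailing indentation; inside brackets the prefix
+    is cleared entirely.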
+ """ + if not inside_brackets: + spl = leaf.prefix.split("#") + if "\\" not in spl[0]: + nl_count = spl[-1].count("\n") + if len(spl) > 1: + nl_count -= 1 + leaf.prefix = "\n" * nl_count + return + + leaf.prefix = "" + + +def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: + """Make existing optional parentheses invisible or create new ones. + + `parens_after` is a set of string leaf values immediately after which parens + should be put. + + Standardizes on visible parentheses for single-element tuples, and keeps + existing visible parentheses for other tuples and generator expressions. + """ + for pc in list_comments(node.prefix, is_endmarker=False): + if pc.value in FMT_OFF: + # This `node` has a prefix with `# fmt: off`, don't mess with parens. + return + check_lpar = False + for index, child in enumerate(list(node.children)): + # Fixes a bug where invisible parens are not properly stripped from + # assignment statements that contain type annotations. + if isinstance(child, Node) and child.type == syms.annassign: + normalize_invisible_parens(child, parens_after=parens_after) + + # Add parentheses around long tuple unpacking in assignments. + if ( + index == 0 + and isinstance(child, Node) + and child.type == syms.testlist_star_expr + ): + check_lpar = True + + if check_lpar: + if child.type == syms.atom: + if maybe_make_parens_invisible_in_atom(child, parent=node): + wrap_in_parentheses(node, child, visible=False) + elif is_one_tuple(child): + wrap_in_parentheses(node, child, visible=True) + elif node.type == syms.import_from: + # "import from" nodes store parentheses directly as part of + # the statement + if child.type == token.LPAR: + # make parentheses invisible + child.value = "" # type: ignore + node.children[-1].value = "" # type: ignore + elif child.type != token.STAR: + # insert invisible parentheses + node.insert_child(index, Leaf(token.LPAR, "")) + node.append_child(Leaf(token.RPAR, "")) + break + + elif not (isinstance(child, Leaf) and is_multiline_string(child)): + wrap_in_parentheses(node, child, visible=False) + + check_lpar = isinstance(child, Leaf) and child.value in parens_after + + +def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: + """If it's safe, make the parens in the atom `node` invisible, recursively. + Additionally, remove repeated, adjacent invisible parens from the atom `node` + as they are redundant. + + Returns whether the node should itself be wrapped in invisible parentheses. 
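+
+    For example, the parentheses in `return (1 + 2)` become invisible, while
+    `(1,)` is left untouched because one-tuples keep visible parentheses.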
+ + """ + + if ( + node.type != syms.atom + or is_empty_tuple(node) + or is_one_tuple(node) + or (is_yield(node) and parent.type != syms.expr_stmt) + or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY + ): + return False + + if is_walrus_assignment(node): + if parent.type in [ + syms.annassign, + syms.expr_stmt, + syms.assert_stmt, + syms.return_stmt, + # these ones aren't useful to end users, but they do please fuzzers + syms.for_stmt, + syms.del_stmt, + ]: + return False + + first = node.children[0] + last = node.children[-1] + if first.type == token.LPAR and last.type == token.RPAR: + middle = node.children[1] + # make parentheses invisible + first.value = "" # type: ignore + last.value = "" # type: ignore + maybe_make_parens_invisible_in_atom(middle, parent=parent) + + if is_atom_with_invisible_parens(middle): + # Strip the invisible parens from `middle` by replacing + # it with the child in-between the invisible parens + middle.replace(middle.children[1]) + + return False + + return True + + +def should_split_line(line: Line, opening_bracket: Leaf) -> bool: + """Should `line` be immediately split with `delimiter_split()` after RHS?""" + + if not (opening_bracket.parent and opening_bracket.value in "[{("): + return False + + # We're essentially checking if the body is delimited by commas and there's more + # than one of them (we're excluding the trailing comma and if the delimiter priority + # is still commas, that means there's more). + exclude = set() + trailing_comma = False + try: + last_leaf = line.leaves[-1] + if last_leaf.type == token.COMMA: + trailing_comma = True + exclude.add(id(last_leaf)) + max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) + except (IndexError, ValueError): + return False + + return max_priority == COMMA_PRIORITY and ( + (line.mode.magic_trailing_comma and trailing_comma) + # always explode imports + or opening_bracket.parent.type in {syms.atom, syms.import_from} + ) + + +def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: + """Generate sets of closing bracket IDs that should be omitted in a RHS. + + Brackets can be omitted if the entire trailer up to and including + a preceding closing bracket fits in one line. + + Yielded sets are cumulative (contain results of previous yields, too). First + set is empty, unless the line should explode, in which case bracket pairs until + the one that needs to explode are omitted. + """ + + omit: Set[LeafID] = set() + if not line.magic_trailing_comma: + yield omit + + length = 4 * line.depth + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + inner_brackets: Set[LeafID] = set() + for index, leaf, leaf_length in line.enumerate_with_length(reversed=True): + length += leaf_length + if length > line_length: + break + + has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) + if leaf.type == STANDALONE_COMMENT or has_inline_comment: + break + + if opening_bracket: + if leaf is opening_bracket: + opening_bracket = None + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if ( + prev + and prev.type == token.COMMA + and not is_one_tuple_between( + leaf.opening_bracket, leaf, line.leaves + ) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. 
+ break + + inner_brackets.add(id(leaf)) + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if prev and prev.type in OPENING_BRACKETS: + # Empty brackets would fail a split so treat them as "inner" + # brackets (e.g. only add them to the `omit` set if another + # pair of brackets was good enough. + inner_brackets.add(id(leaf)) + continue + + if closing_bracket: + omit.add(id(closing_bracket)) + omit.update(inner_brackets) + inner_brackets.clear() + yield omit + + if ( + prev + and prev.type == token.COMMA + and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + if leaf.value: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> List[Line]: + if not line_str: + line_str = line_to_string(line) + result: List[Line] = [] + for transformed_line in transform(line, features): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + if ( + transform.__class__.__name__ != "rhs" + or not line.bracket_tracker.invisible + or any(bracket.value for bracket in line.bracket_tracker.invisible) + or line.contains_multiline_strings() + or result[0].contains_uncollapsable_type_comments() + or result[0].contains_unsplittable_type_ignore() + or is_line_short_enough(result[0], line_length=mode.line_length) + # If any leaves have no parents (which _can_ occur since + # `transform(line)` potentially destroys the line's underlying node + # structure), then we can't proceed. Doing so would cause the below + # call to `append_leaves()` to fail. + or any(leaf.parent is None for leaf in line.leaves) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all( + is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion + ): + result = second_opinion + return result diff --git a/myenv/lib/python3.9/site-packages/black/lines.py b/myenv/lib/python3.9/site-packages/black/lines.py new file mode 100644 index 0000000..f2bdada --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/lines.py @@ -0,0 +1,754 @@ +from dataclasses import dataclass, field +import itertools +import sys +from typing import ( + Callable, + Collection, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + cast, +) + +from blib2to3.pytree import Node, Leaf +from blib2to3.pgen2 import token + +from black.brackets import BracketTracker, DOT_PRIORITY +from black.mode import Mode +from black.nodes import STANDALONE_COMMENT, TEST_DESCENDANTS +from black.nodes import BRACKETS, OPENING_BRACKETS, CLOSING_BRACKETS +from black.nodes import syms, whitespace, replace_child, child_towards +from black.nodes import is_multiline_string, is_import, is_type_comment, last_two_except +from black.nodes import is_one_tuple_between + +# types +T = TypeVar("T") +Index = int +LeafID = int + + +@dataclass +class Line: + """Holds leaves and comments. 
Can be printed with `str(line)`.""" + + mode: Mode + depth: int = 0 + leaves: List[Leaf] = field(default_factory=list) + # keys ordered like `leaves` + comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) + bracket_tracker: BracketTracker = field(default_factory=BracketTracker) + inside_brackets: bool = False + should_split_rhs: bool = False + magic_trailing_comma: Optional[Leaf] = None + + def append(self, leaf: Leaf, preformatted: bool = False) -> None: + """Add a new `leaf` to the end of the line. + + Unless `preformatted` is True, the `leaf` will receive a new consistent + whitespace prefix and metadata applied by :class:`BracketTracker`. + Trailing commas are maybe removed, unpacked for loop variables are + demoted from being delimiters. + + Inline comments are put aside. + """ + has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) + if not has_value: + return + + if token.COLON == leaf.type and self.is_class_paren_empty: + del self.leaves[-2:] + if self.leaves and not preformatted: + # Note: at this point leaf.prefix should be empty except for + # imports, for which we only preserve newlines. + leaf.prefix += whitespace( + leaf, complex_subscript=self.is_complex_subscript(leaf) + ) + if self.inside_brackets or not preformatted: + self.bracket_tracker.mark(leaf) + if self.mode.magic_trailing_comma: + if self.has_magic_trailing_comma(leaf): + self.magic_trailing_comma = leaf + elif self.has_magic_trailing_comma(leaf, ensure_removable=True): + self.remove_trailing_comma() + if not self.append_comment(leaf): + self.leaves.append(leaf) + + def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: + """Like :func:`append()` but disallow invalid standalone comment structure. + + Raises ValueError when any `leaf` is appended after a standalone comment + or when a standalone comment is not the first leaf on the line. + """ + if self.bracket_tracker.depth == 0: + if self.is_comment: + raise ValueError("cannot append to standalone comments") + + if self.leaves and leaf.type == STANDALONE_COMMENT: + raise ValueError( + "cannot append standalone comments to a populated line" + ) + + self.append(leaf, preformatted=preformatted) + + @property + def is_comment(self) -> bool: + """Is this line a standalone comment?""" + return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT + + @property + def is_decorator(self) -> bool: + """Is this line a decorator?""" + return bool(self) and self.leaves[0].type == token.AT + + @property + def is_import(self) -> bool: + """Is this an import line?""" + return bool(self) and is_import(self.leaves[0]) + + @property + def is_class(self) -> bool: + """Is this line a class definition?""" + return ( + bool(self) + and self.leaves[0].type == token.NAME + and self.leaves[0].value == "class" + ) + + @property + def is_stub_class(self) -> bool: + """Is this line a class definition with a body consisting only of "..."?""" + return self.is_class and self.leaves[-3:] == [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_def(self) -> bool: + """Is this a function definition? 
(Also returns True for async defs.)""" + try: + first_leaf = self.leaves[0] + except IndexError: + return False + + try: + second_leaf: Optional[Leaf] = self.leaves[1] + except IndexError: + second_leaf = None + return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( + first_leaf.type == token.ASYNC + and second_leaf is not None + and second_leaf.type == token.NAME + and second_leaf.value == "def" + ) + + @property + def is_class_paren_empty(self) -> bool: + """Is this a class with no base classes but using parentheses? + + Those are unnecessary and should be removed. + """ + return ( + bool(self) + and len(self.leaves) == 4 + and self.is_class + and self.leaves[2].type == token.LPAR + and self.leaves[2].value == "(" + and self.leaves[3].type == token.RPAR + and self.leaves[3].value == ")" + ) + + @property + def is_triple_quoted_string(self) -> bool: + """Is the line a triple quoted string?""" + return ( + bool(self) + and self.leaves[0].type == token.STRING + and self.leaves[0].value.startswith(('"""', "'''")) + ) + + def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: + """If so, needs to be split before emitting.""" + for leaf in self.leaves: + if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: + return True + + return False + + def contains_uncollapsable_type_comments(self) -> bool: + ignored_ids = set() + try: + last_leaf = self.leaves[-1] + ignored_ids.add(id(last_leaf)) + if last_leaf.type == token.COMMA or ( + last_leaf.type == token.RPAR and not last_leaf.value + ): + # When trailing commas or optional parens are inserted by Black for + # consistency, comments after the previous last element are not moved + # (they don't have to, rendering will still be correct). So we ignore + # trailing commas and invisible. + last_leaf = self.leaves[-2] + ignored_ids.add(id(last_leaf)) + except IndexError: + return False + + # A type comment is uncollapsable if it is attached to a leaf + # that isn't at the end of the line (since that could cause it + # to get associated to a different argument) or if there are + # comments before it (since that could cause it to get hidden + # behind a comment. + comment_seen = False + for leaf_id, comments in self.comments.items(): + for comment in comments: + if is_type_comment(comment): + if comment_seen or ( + not is_type_comment(comment, " ignore") + and leaf_id not in ignored_ids + ): + return True + + comment_seen = True + + return False + + def contains_unsplittable_type_ignore(self) -> bool: + if not self.leaves: + return False + + # If a 'type: ignore' is attached to the end of a line, we + # can't split the line, because we can't know which of the + # subexpressions the ignore was meant to apply to. + # + # We only want this to apply to actual physical lines from the + # original source, though: we don't want the presence of a + # 'type: ignore' at the end of a multiline expression to + # justify pushing it all onto one line. Thus we + # (unfortunately) need to check the actual source lines and + # only report an unsplittable 'type: ignore' if this line was + # one line in the original code. 
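+        #
+        # For example, `f(a, b)  # type: ignore` written as one physical line
+        # must stay on one line, while the same comment at the end of an
+        # already multiline expression does not pin everything together.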
+ + # Grab the first and last line numbers, skipping generated leaves + first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) + last_line = next( + (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 + ) + + if first_line == last_line: + # We look at the last two leaves since a comma or an + # invisible paren could have been added at the end of the + # line. + for node in self.leaves[-2:]: + for comment in self.comments.get(id(node), []): + if is_type_comment(comment, " ignore"): + return True + + return False + + def contains_multiline_strings(self) -> bool: + return any(is_multiline_string(leaf) for leaf in self.leaves) + + def has_magic_trailing_comma( + self, closing: Leaf, ensure_removable: bool = False + ) -> bool: + """Return True if we have a magic trailing comma, that is when: + - there's a trailing comma here + - it's not a one-tuple + Additionally, if ensure_removable: + - it's not from square bracket indexing + """ + if not ( + closing.type in CLOSING_BRACKETS + and self.leaves + and self.leaves[-1].type == token.COMMA + ): + return False + + if closing.type == token.RBRACE: + return True + + if closing.type == token.RSQB: + if not ensure_removable: + return True + comma = self.leaves[-1] + return bool(comma.parent and comma.parent.type == syms.listmaker) + + if self.is_import: + return True + + if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves): + return True + + return False + + def append_comment(self, comment: Leaf) -> bool: + """Add an inline or standalone comment to the line.""" + if ( + comment.type == STANDALONE_COMMENT + and self.bracket_tracker.any_open_brackets() + ): + comment.prefix = "" + return False + + if comment.type != token.COMMENT: + return False + + if not self.leaves: + comment.type = STANDALONE_COMMENT + comment.prefix = "" + return False + + last_leaf = self.leaves[-1] + if ( + last_leaf.type == token.RPAR + and not last_leaf.value + and last_leaf.parent + and len(list(last_leaf.parent.leaves())) <= 3 + and not is_type_comment(comment) + ): + # Comments on an optional parens wrapping a single leaf should belong to + # the wrapped node except if it's a type comment. Pinning the comment like + # this avoids unstable formatting caused by comment migration. 
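+            # (with fewer than two leaves there is no earlier leaf to pin the
+            # comment to, so it becomes a standalone comment instead)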
+ if len(self.leaves) < 2: + comment.type = STANDALONE_COMMENT + comment.prefix = "" + return False + + last_leaf = self.leaves[-2] + self.comments.setdefault(id(last_leaf), []).append(comment) + return True + + def comments_after(self, leaf: Leaf) -> List[Leaf]: + """Generate comments that should appear directly after `leaf`.""" + return self.comments.get(id(leaf), []) + + def remove_trailing_comma(self) -> None: + """Remove the trailing comma and moves the comments attached to it.""" + trailing_comma = self.leaves.pop() + trailing_comma_comments = self.comments.pop(id(trailing_comma), []) + self.comments.setdefault(id(self.leaves[-1]), []).extend( + trailing_comma_comments + ) + + def is_complex_subscript(self, leaf: Leaf) -> bool: + """Return True iff `leaf` is part of a slice with non-trivial exprs.""" + open_lsqb = self.bracket_tracker.get_open_lsqb() + if open_lsqb is None: + return False + + subscript_start = open_lsqb.next_sibling + + if isinstance(subscript_start, Node): + if subscript_start.type == syms.listmaker: + return False + + if subscript_start.type == syms.subscriptlist: + subscript_start = child_towards(subscript_start, leaf) + return subscript_start is not None and any( + n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() + ) + + def enumerate_with_length( + self, reversed: bool = False + ) -> Iterator[Tuple[Index, Leaf, int]]: + """Return an enumeration of leaves with their length. + + Stops prematurely on multiline strings and standalone comments. + """ + op = cast( + Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], + enumerate_reversed if reversed else enumerate, + ) + for index, leaf in op(self.leaves): + length = len(leaf.prefix) + len(leaf.value) + if "\n" in leaf.value: + return # Multiline strings, we can't continue. + + for comment in self.comments_after(leaf): + length += len(comment.value) + + yield index, leaf, length + + def clone(self) -> "Line": + return Line( + mode=self.mode, + depth=self.depth, + inside_brackets=self.inside_brackets, + should_split_rhs=self.should_split_rhs, + magic_trailing_comma=self.magic_trailing_comma, + ) + + def __str__(self) -> str: + """Render the line.""" + if not self: + return "\n" + + indent = " " * self.depth + leaves = iter(self.leaves) + first = next(leaves) + res = f"{first.prefix}{indent}{first.value}" + for leaf in leaves: + res += str(leaf) + for comment in itertools.chain.from_iterable(self.comments.values()): + res += str(comment) + + return res + "\n" + + def __bool__(self) -> bool: + """Return True if the line has leaves or comments.""" + return bool(self.leaves or self.comments) + + +@dataclass +class EmptyLineTracker: + """Provides a stateful method that returns the number of potential extra + empty lines needed before and after the currently processed line. + + Note: this tracker works on lines that haven't been split yet. It assumes + the prefix of the first leaf consists of optional newlines. Those newlines + are consumed by `maybe_empty_lines()` and included in the computation. + """ + + is_pyi: bool = False + previous_line: Optional[Line] = None + previous_after: int = 0 + previous_defs: List[int] = field(default_factory=list) + + def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + """Return the number of extra empty lines before and after the `current_line`. + + This is for separating `def`, `async def` and `class` with extra empty + lines (two on module-level). 
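+
+        For example, two blank lines are enforced before a top-level `def`,
+        but only one before a `def` nested inside another block.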
+ """ + before, after = self._maybe_empty_lines(current_line) + before = ( + # Black should not insert empty lines at the beginning + # of the file + 0 + if self.previous_line is None + else before - self.previous_after + ) + self.previous_after = after + self.previous_line = current_line + return before, after + + def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + max_allowed = 1 + if current_line.depth == 0: + max_allowed = 1 if self.is_pyi else 2 + if current_line.leaves: + # Consume the first leaf's extra newlines. + first_leaf = current_line.leaves[0] + before = first_leaf.prefix.count("\n") + before = min(before, max_allowed) + first_leaf.prefix = "" + else: + before = 0 + depth = current_line.depth + while self.previous_defs and self.previous_defs[-1] >= depth: + if self.is_pyi: + before = 0 if depth else 1 + else: + if depth: + before = 1 + elif ( + not depth + and self.previous_defs[-1] + and current_line.leaves[-1].type == token.COLON + and ( + current_line.leaves[0].value + not in ("with", "try", "for", "while", "if", "match") + ) + ): + # We shouldn't add two newlines between an indented function and + # a dependent non-indented clause. This is to avoid issues with + # conditional function definitions that are technically top-level + # and therefore get two trailing newlines, but look weird and + # inconsistent when they're followed by elif, else, etc. This is + # worse because these functions only get *one* preceding newline + # already. + before = 1 + else: + before = 2 + self.previous_defs.pop() + if current_line.is_decorator or current_line.is_def or current_line.is_class: + return self._maybe_empty_lines_for_class_or_def(current_line, before) + + if ( + self.previous_line + and self.previous_line.is_import + and not current_line.is_import + and depth == self.previous_line.depth + ): + return (before or 1), 0 + + if ( + self.previous_line + and self.previous_line.is_class + and current_line.is_triple_quoted_string + ): + return before, 1 + + return before, 0 + + def _maybe_empty_lines_for_class_or_def( + self, current_line: Line, before: int + ) -> Tuple[int, int]: + if not current_line.is_decorator: + self.previous_defs.append(current_line.depth) + if self.previous_line is None: + # Don't insert empty lines before the first line in the file. 
+ return 0, 0 + + if self.previous_line.is_decorator: + if self.is_pyi and current_line.is_stub_class: + # Insert an empty line after a decorated stub class + return 0, 1 + + return 0, 0 + + if self.previous_line.depth < current_line.depth and ( + self.previous_line.is_class or self.previous_line.is_def + ): + return 0, 0 + + if ( + self.previous_line.is_comment + and self.previous_line.depth == current_line.depth + and before == 0 + ): + return 0, 0 + + if self.is_pyi: + if self.previous_line.depth > current_line.depth: + newlines = 1 + elif current_line.is_class or self.previous_line.is_class: + if current_line.is_stub_class and self.previous_line.is_stub_class: + # No blank line between classes with an empty body + newlines = 0 + else: + newlines = 1 + elif ( + current_line.is_def or current_line.is_decorator + ) and not self.previous_line.is_def: + # Blank line between a block of functions (maybe with preceding + # decorators) and a block of non-functions + newlines = 1 + else: + newlines = 0 + else: + newlines = 2 + if current_line.depth and newlines: + newlines -= 1 + return newlines, 0 + + +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: + """Like `reversed(enumerate(sequence))` if that were possible.""" + index = len(sequence) - 1 + for element in reversed(sequence): + yield (index, element) + index -= 1 + + +def append_leaves( + new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False +) -> None: + """ + Append leaves (taken from @old_line) to @new_line, making sure to fix the + underlying Node structure where appropriate. + + All of the leaves in @leaves are duplicated. The duplicates are then + appended to @new_line and used to replace their originals in the underlying + Node structure. Any comments attached to the old leaves are reattached to + the new leaves. + + Pre-conditions: + set(@leaves) is a subset of set(@old_line.leaves). + """ + for old_leaf in leaves: + new_leaf = Leaf(old_leaf.type, old_leaf.value) + replace_child(old_leaf, new_leaf) + new_line.append(new_leaf, preformatted=preformatted) + + for comment_leaf in old_line.comments_after(old_leaf): + new_line.append(comment_leaf, preformatted=True) + + +def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: + """Return True if `line` is no longer than `line_length`. + + Uses the provided `line_str` rendering, if any, otherwise computes a new one. + """ + if not line_str: + line_str = line_to_string(line) + return ( + len(line_str) <= line_length + and "\n" not in line_str # multiline strings + and not line.contains_standalone_comments() + ) + + +def can_be_split(line: Line) -> bool: + """Return False if the line cannot be split *for sure*. + + This is not an exhaustive search but a cheap heuristic that we can use to + avoid some unfortunate formattings (mostly around wrapping unsplittable code + in unnecessary parentheses). 
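+
+    For example, a line with fewer than two leaves (such as a bare name) is
+    reported as unsplittable right away.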
+ """ + leaves = line.leaves + if len(leaves) < 2: + return False + + if leaves[0].type == token.STRING and leaves[1].type == token.DOT: + call_count = 0 + dot_count = 0 + next = leaves[-1] + for leaf in leaves[-2::-1]: + if leaf.type in OPENING_BRACKETS: + if next.type not in CLOSING_BRACKETS: + return False + + call_count += 1 + elif leaf.type == token.DOT: + dot_count += 1 + elif leaf.type == token.NAME: + if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + return False + + elif leaf.type not in CLOSING_BRACKETS: + return False + + if dot_count > 1 and call_count > 1: + return False + + return True + + +def can_omit_invisible_parens( + line: Line, + line_length: int, + omit_on_explode: Collection[LeafID] = (), +) -> bool: + """Does `line` have a shape safe to reformat without optional parens around it? + + Returns True for only a subset of potentially nice looking formattings but + the point is to not return false positives that end up producing lines that + are too long. + """ + bt = line.bracket_tracker + if not bt.delimiters: + # Without delimiters the optional parentheses are useless. + return True + + max_priority = bt.max_delimiter_priority() + if bt.delimiter_count_with_priority(max_priority) > 1: + # With more than one delimiter of a kind the optional parentheses read better. + return False + + if max_priority == DOT_PRIORITY: + # A single stranded method call doesn't require optional parentheses. + return True + + assert len(line.leaves) >= 2, "Stranded delimiter" + + # With a single delimiter, omit if the expression starts or ends with + # a bracket. + first = line.leaves[0] + second = line.leaves[1] + if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True + + # Note: we are not returning False here because a line might have *both* + # a leading opening bracket and a trailing closing bracket. If the + # opening bracket doesn't match our rule, maybe the closing will. + + penultimate = line.leaves[-2] + last = line.leaves[-1] + if line.magic_trailing_comma: + try: + penultimate, last = last_two_except(line.leaves, omit=omit_on_explode) + except LookupError: + # Turns out we'd omit everything. We cannot skip the optional parentheses. + return False + + if ( + last.type == token.RPAR + or last.type == token.RBRACE + or ( + # don't use indexing for omitting optional parentheses; + # it looks weird + last.type == token.RSQB + and last.parent + and last.parent.type != syms.trailer + ) + ): + if penultimate.type in OPENING_BRACKETS: + # Empty brackets don't help. + return False + + if is_multiline_string(first): + # Additional wrapping of a multiline string in this situation is + # unnecessary. + return True + + if line.magic_trailing_comma and penultimate.type == token.COMMA: + # The rightmost non-omitted bracket pair is the one we want to explode on. + return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in line.enumerate_with_length(): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: + length += leaf_length + if length > line_length: + break + + if leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. 
+ remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in line.enumerate_with_length(): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + seen_other_brackets = True + + return False + + +def line_to_string(line: Line) -> str: + """Returns the string representation of @line. + + WARNING: This is known to be computationally expensive. + """ + return str(line).strip("\n") diff --git a/myenv/lib/python3.9/site-packages/black/mode.py b/myenv/lib/python3.9/site-packages/black/mode.py new file mode 100644 index 0000000..e241753 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/mode.py @@ -0,0 +1,160 @@ +"""Data structures configuring Black behavior. + +Mostly around Python language feature support per version and Black configuration +chosen by the user. +""" + +from dataclasses import dataclass, field +from enum import Enum +from operator import attrgetter +from typing import Dict, Set + +from black.const import DEFAULT_LINE_LENGTH + + +class TargetVersion(Enum): + PY27 = 2 + PY33 = 3 + PY34 = 4 + PY35 = 5 + PY36 = 6 + PY37 = 7 + PY38 = 8 + PY39 = 9 + PY310 = 10 + + def is_python2(self) -> bool: + return self is TargetVersion.PY27 + + +class Feature(Enum): + # All string literals are unicode + UNICODE_LITERALS = 1 + F_STRINGS = 2 + NUMERIC_UNDERSCORES = 3 + TRAILING_COMMA_IN_CALL = 4 + TRAILING_COMMA_IN_DEF = 5 + # The following two feature-flags are mutually exclusive, and exactly one should be + # set for every version of python. 
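+    # ASYNC_IDENTIFIERS: `async` / `await` are usable as ordinary names (the
+    # 3.5/3.6 grammar); ASYNC_KEYWORDS: they are reserved keywords (3.7+).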
+ ASYNC_IDENTIFIERS = 6 + ASYNC_KEYWORDS = 7 + ASSIGNMENT_EXPRESSIONS = 8 + POS_ONLY_ARGUMENTS = 9 + RELAXED_DECORATORS = 10 + PATTERN_MATCHING = 11 + FORCE_OPTIONAL_PARENTHESES = 50 + + # temporary for Python 2 deprecation + PRINT_STMT = 200 + EXEC_STMT = 201 + AUTOMATIC_PARAMETER_UNPACKING = 202 + COMMA_STYLE_EXCEPT = 203 + COMMA_STYLE_RAISE = 204 + LONG_INT_LITERAL = 205 + OCTAL_INT_LITERAL = 206 + BACKQUOTE_REPR = 207 + + +VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { + TargetVersion.PY27: { + Feature.ASYNC_IDENTIFIERS, + Feature.PRINT_STMT, + Feature.EXEC_STMT, + Feature.AUTOMATIC_PARAMETER_UNPACKING, + Feature.COMMA_STYLE_EXCEPT, + Feature.COMMA_STYLE_RAISE, + Feature.LONG_INT_LITERAL, + Feature.OCTAL_INT_LITERAL, + Feature.BACKQUOTE_REPR, + }, + TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY35: { + Feature.UNICODE_LITERALS, + Feature.TRAILING_COMMA_IN_CALL, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY36: { + Feature.UNICODE_LITERALS, + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY37: { + Feature.UNICODE_LITERALS, + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + }, + TargetVersion.PY38: { + Feature.UNICODE_LITERALS, + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.POS_ONLY_ARGUMENTS, + }, + TargetVersion.PY39: { + Feature.UNICODE_LITERALS, + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + }, + TargetVersion.PY310: { + Feature.UNICODE_LITERALS, + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.PATTERN_MATCHING, + }, +} + + +def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: + return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) + + +@dataclass +class Mode: + target_versions: Set[TargetVersion] = field(default_factory=set) + line_length: int = DEFAULT_LINE_LENGTH + string_normalization: bool = True + is_pyi: bool = False + is_ipynb: bool = False + magic_trailing_comma: bool = True + experimental_string_processing: bool = False + + def get_cache_key(self) -> str: + if self.target_versions: + version_str = ",".join( + str(version.value) + for version in sorted(self.target_versions, key=attrgetter("value")) + ) + else: + version_str = "-" + parts = [ + version_str, + str(self.line_length), + str(int(self.string_normalization)), + str(int(self.is_pyi)), + str(int(self.is_ipynb)), + str(int(self.magic_trailing_comma)), + str(int(self.experimental_string_processing)), + ] + return ".".join(parts) diff --git a/myenv/lib/python3.9/site-packages/black/nodes.py b/myenv/lib/python3.9/site-packages/black/nodes.py new file mode 100644 index 0000000..8bf1934 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/nodes.py @@ -0,0 +1,856 @@ +""" 
+blib2to3 Node/Leaf transformation-related utility functions. +""" + +import sys +from typing import ( + Collection, + Generic, + Iterator, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, +) + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from mypy_extensions import mypyc_attr + +# lib2to3 fork +from blib2to3.pytree import Node, Leaf, type_repr +from blib2to3 import pygram +from blib2to3.pgen2 import token + +from black.cache import CACHE_DIR +from black.strings import has_triple_quotes + + +pygram.initialize(CACHE_DIR) +syms: Final = pygram.python_symbols + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +LeafID = int +NodeType = int + + +WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} +STATEMENT: Final = { + syms.if_stmt, + syms.while_stmt, + syms.for_stmt, + syms.try_stmt, + syms.except_clause, + syms.with_stmt, + syms.funcdef, + syms.classdef, + syms.match_stmt, + syms.case_block, +} +STANDALONE_COMMENT: Final = 153 +token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" +LOGIC_OPERATORS: Final = {"and", "or"} +COMPARATORS: Final = { + token.LESS, + token.GREATER, + token.EQEQUAL, + token.NOTEQUAL, + token.LESSEQUAL, + token.GREATEREQUAL, +} +MATH_OPERATORS: Final = { + token.VBAR, + token.CIRCUMFLEX, + token.AMPER, + token.LEFTSHIFT, + token.RIGHTSHIFT, + token.PLUS, + token.MINUS, + token.STAR, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.AT, + token.TILDE, + token.DOUBLESTAR, +} +STARS: Final = {token.STAR, token.DOUBLESTAR} +VARARGS_SPECIALS: Final = STARS | {token.SLASH} +VARARGS_PARENTS: Final = { + syms.arglist, + syms.argument, # double star in arglist + syms.trailer, # single argument to call + syms.typedargslist, + syms.varargslist, # lambdas +} +UNPACKING_PARENTS: Final = { + syms.atom, # single element of a list or set literal + syms.dictsetmaker, + syms.listmaker, + syms.testlist_gexp, + syms.testlist_star_expr, + syms.subject_expr, + syms.pattern, +} +TEST_DESCENDANTS: Final = { + syms.test, + syms.lambdef, + syms.or_test, + syms.and_test, + syms.not_test, + syms.comparison, + syms.star_expr, + syms.expr, + syms.xor_expr, + syms.and_expr, + syms.shift_expr, + syms.arith_expr, + syms.trailer, + syms.term, + syms.power, +} +ASSIGNMENTS: Final = { + "=", + "+=", + "-=", + "*=", + "@=", + "/=", + "%=", + "&=", + "|=", + "^=", + "<<=", + ">>=", + "**=", + "//=", +} + +IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist} +BRACKET: Final = { + token.LPAR: token.RPAR, + token.LSQB: token.RSQB, + token.LBRACE: token.RBRACE, +} +OPENING_BRACKETS: Final = set(BRACKET.keys()) +CLOSING_BRACKETS: Final = set(BRACKET.values()) +BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS +ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} + +RARROW = 55 + + +@mypyc_attr(allow_interpreted_subclasses=True) +class Visitor(Generic[T]): + """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" + + def visit(self, node: LN) -> Iterator[T]: + """Main method to visit `node` and its children. + + It tries to find a `visit_*()` method for the given `node.type`, like + `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. + If no dedicated `visit_*()` method is found, chooses `visit_default()` + instead. + + Then yields objects of type `T` from the selected visitor. 
+ """ + if node.type < 256: + name = token.tok_name[node.type] + else: + name = str(type_repr(node.type)) + # We explicitly branch on whether a visitor exists (instead of + # using self.visit_default as the default arg to getattr) in order + # to save needing to create a bound method object and so mypyc can + # generate a native call to visit_default. + visitf = getattr(self, f"visit_{name}", None) + if visitf: + yield from visitf(node) + else: + yield from self.visit_default(node) + + def visit_default(self, node: LN) -> Iterator[T]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Node): + for child in node.children: + yield from self.visit(child) + + +def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 + """Return whitespace prefix if needed for the given `leaf`. + + `complex_subscript` signals whether the given leaf is part of a subscription + which has non-trivial arguments, like arithmetic expressions or function calls. + """ + NO: Final = "" + SPACE: Final = " " + DOUBLESPACE: Final = " " + t = leaf.type + p = leaf.parent + v = leaf.value + if t in ALWAYS_NO_SPACE: + return NO + + if t == token.COMMENT: + return DOUBLESPACE + + assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" + if t == token.COLON and p.type not in { + syms.subscript, + syms.subscriptlist, + syms.sliceop, + }: + return NO + + prev = leaf.prev_sibling + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + if t == token.COLON: + if prevp.type == token.COLON: + return NO + + elif prevp.type != token.COMMA and not complex_subscript: + return NO + + return SPACE + + if prevp.type == token.EQUAL: + if prevp.parent: + if prevp.parent.type in { + syms.arglist, + syms.argument, + syms.parameters, + syms.varargslist, + }: + return NO + + elif prevp.parent.type == syms.typedargslist: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using + # that, too. + return prevp.prefix + + elif prevp.type in VARARGS_SPECIALS: + if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): + return NO + + elif prevp.type == token.COLON: + if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: + return SPACE if complex_subscript else NO + + elif ( + prevp.parent + and prevp.parent.type == syms.factor + and prevp.type in MATH_OPERATORS + ): + return NO + + elif ( + prevp.type == token.RIGHTSHIFT + and prevp.parent + and prevp.parent.type == syms.shift_expr + and prevp.prev_sibling + and prevp.prev_sibling.type == token.NAME + and prevp.prev_sibling.value == "print" # type: ignore + ): + # Python 2 print chevron + return NO + elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: + # no space in decorators + return NO + + elif prev.type in OPENING_BRACKETS: + return NO + + if p.type in {syms.parameters, syms.arglist}: + # untyped function signatures or calls + if not prev or prev.type != token.COMMA: + return NO + + elif p.type == syms.varargslist: + # lambdas + if prev and prev.type != token.COMMA: + return NO + + elif p.type == syms.typedargslist: + # typed function signatures + if not prev: + return NO + + if t == token.EQUAL: + if prev.type != syms.tname: + return NO + + elif prev.type == token.EQUAL: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using that, too. 
+ return prev.prefix + + elif prev.type != token.COMMA: + return NO + + elif p.type == syms.tname: + # type names + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type != token.COMMA: + return NO + + elif p.type == syms.trailer: + # attributes and calls + if t == token.LPAR or t == token.RPAR: + return NO + + if not prev: + if t == token.DOT: + prevp = preceding_leaf(p) + if not prevp or prevp.type != token.NUMBER: + return NO + + elif t == token.LSQB: + return NO + + elif prev.type != token.COMMA: + return NO + + elif p.type == syms.argument: + # single argument + if t == token.EQUAL: + return NO + + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.LPAR: + return NO + + elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: + return NO + + elif p.type == syms.decorator: + # decorators + return NO + + elif p.type == syms.dotted_name: + if prev: + return NO + + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.AT or prevp.type == token.DOT: + return NO + + elif p.type == syms.classdef: + if t == token.LPAR: + return NO + + if prev and prev.type == token.LPAR: + return NO + + elif p.type in {syms.subscript, syms.sliceop}: + # indexing + if not prev: + assert p.parent is not None, "subscripts are always parented" + if p.parent.type == syms.subscriptlist: + return SPACE + + return NO + + elif not complex_subscript: + return NO + + elif p.type == syms.atom: + if prev and t == token.DOT: + # dots, but not the first one. + return NO + + elif p.type == syms.dictsetmaker: + # dict unpacking + if prev and prev.type == token.DOUBLESTAR: + return NO + + elif p.type in {syms.factor, syms.star_expr}: + # unary ops + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + prevp_parent = prevp.parent + assert prevp_parent is not None + if prevp.type == token.COLON and prevp_parent.type in { + syms.subscript, + syms.sliceop, + }: + return NO + + elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument: + return NO + + elif t in {token.NAME, token.NUMBER, token.STRING}: + return NO + + elif p.type == syms.import_from: + if t == token.DOT: + if prev and prev.type == token.DOT: + return NO + + elif t == token.NAME: + if v == "import": + return SPACE + + if prev and prev.type == token.DOT: + return NO + + elif p.type == syms.sliceop: + return NO + + return SPACE + + +def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: + """Return the first leaf that precedes `node`, if any.""" + while node: + res = node.prev_sibling + if res: + if isinstance(res, Leaf): + return res + + try: + return list(res.leaves())[-1] + + except IndexError: + return None + + node = node.parent + return None + + +def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool: + """Return if the `node` and its previous siblings match types against the provided + list of tokens; the provided `node`has its type matched against the last element in + the list. 
`None` can be used as the first element to declare that the start of the + list is anchored at the start of its parent's children.""" + if not tokens: + return True + if tokens[-1] is None: + return node is None + if not node: + return False + if node.type != tokens[-1]: + return False + return prev_siblings_are(node.prev_sibling, tokens[:-1]) + + +def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]: + """Return (penultimate, last) leaves skipping brackets in `omit` and contents.""" + stop_after: Optional[Leaf] = None + last: Optional[Leaf] = None + for leaf in reversed(leaves): + if stop_after: + if leaf is stop_after: + stop_after = None + continue + + if last: + return leaf, last + + if id(leaf) in omit: + stop_after = leaf.opening_bracket + else: + last = leaf + else: + raise LookupError("Last two leaves were also skipped") + + +def parent_type(node: Optional[LN]) -> Optional[NodeType]: + """ + Returns: + @node.parent.type, if @node is not None and has a parent. + OR + None, otherwise. + """ + if node is None or node.parent is None: + return None + + return node.parent.type + + +def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: + """Return the child of `ancestor` that contains `descendant`.""" + node: Optional[LN] = descendant + while node and node.parent != ancestor: + node = node.parent + return node + + +def replace_child(old_child: LN, new_child: LN) -> None: + """ + Side Effects: + * If @old_child.parent is set, replace @old_child with @new_child in + @old_child's underlying Node structure. + OR + * Otherwise, this function does nothing. + """ + parent = old_child.parent + if not parent: + return + + child_idx = old_child.remove() + if child_idx is not None: + parent.insert_child(child_idx, new_child) + + +def container_of(leaf: Leaf) -> LN: + """Return `leaf` or one of its ancestors that is the topmost container of it. + + By "container" we mean a node where `leaf` is the very first child. + """ + same_prefix = leaf.prefix + container: LN = leaf + while container: + parent = container.parent + if parent is None: + break + + if parent.children[0].prefix != same_prefix: + break + + if parent.type == syms.file_input: + break + + if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: + break + + container = parent + return container + + +def first_leaf_column(node: Node) -> Optional[int]: + """Returns the column of the first leaf child of a node.""" + for child in node.children: + if isinstance(child, Leaf): + return child.column + return None + + +def first_child_is_arith(node: Node) -> bool: + """Whether first child is an arithmetic or a binary arithmetic expression""" + expr_types = { + syms.arith_expr, + syms.shift_expr, + syms.xor_expr, + syms.and_expr, + } + return bool(node.children and node.children[0].type in expr_types) + + +def is_docstring(leaf: Leaf) -> bool: + if prev_siblings_are( + leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. 
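prev_siblings_are() walks the pattern right-to-left against a node and its preceding siblings, with a leading None anchoring the match at the first sibling; is_docstring() relies on exactly that to ask whether a simple_stmt is preceded only by INDENT and NEWLINE. The same matching rule, sketched on plain lists of type codes (trailing_types_match and the numeric codes below are illustrative only, not part of black):

    from typing import List, Optional


    def trailing_types_match(
        sibling_types: List[int], pattern: List[Optional[int]]
    ) -> bool:
        """Match `pattern` right-to-left against the last entries of `sibling_types`."""
        if pattern and pattern[0] is None:
            # Anchored: the pattern (minus the None) must cover *all* siblings.
            return sibling_types == pattern[1:]
        if len(pattern) > len(sibling_types):
            return False
        return sibling_types[len(sibling_types) - len(pattern):] == pattern


    # is_docstring() effectively asks: is this a simple_stmt preceded only by
    # NEWLINE and INDENT, i.e. the very first statement of its suite?
    NEWLINE, INDENT, SIMPLE_STMT = 4, 5, 300  # stand-in type codes
    assert trailing_types_match(
        [NEWLINE, INDENT, SIMPLE_STMT], [None, NEWLINE, INDENT, SIMPLE_STMT]
    )
    assert not trailing_types_match(
        [INDENT, SIMPLE_STMT], [None, NEWLINE, INDENT, SIMPLE_STMT]
    )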
+ return True + + return False + + +def is_empty_tuple(node: LN) -> bool: + """Return True if `node` holds an empty tuple.""" + return ( + node.type == syms.atom + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + + +def is_one_tuple(node: LN) -> bool: + """Return True if `node` holds a tuple with one element, with or without parens.""" + if node.type == syms.atom: + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA + + return ( + node.type in IMPLICIT_TUPLE + and len(node.children) == 2 + and node.children[1].type == token.COMMA + ) + + +def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool: + """Return True if content between `opening` and `closing` looks like a one-tuple.""" + if opening.type != token.LPAR and closing.type != token.RPAR: + return False + + depth = closing.bracket_depth + 1 + for _opening_index, leaf in enumerate(leaves): + if leaf is opening: + break + + else: + raise LookupError("Opening paren not found in `leaves`") + + commas = 0 + _opening_index += 1 + for leaf in leaves[_opening_index:]: + if leaf is closing: + break + + bracket_depth = leaf.bracket_depth + if bracket_depth == depth and leaf.type == token.COMMA: + commas += 1 + if leaf.parent and leaf.parent.type in { + syms.arglist, + syms.typedargslist, + }: + commas += 1 + break + + return commas < 2 + + +def is_walrus_assignment(node: LN) -> bool: + """Return True iff `node` is of the shape ( test := test )""" + inner = unwrap_singleton_parenthesis(node) + return inner is not None and inner.type == syms.namedexpr_test + + +def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool: + """Return True iff `node` is a trailer valid in a simple decorator""" + return node.type == syms.trailer and ( + ( + len(node.children) == 2 + and node.children[0].type == token.DOT + and node.children[1].type == token.NAME + ) + # last trailer can be an argument-less parentheses pair + or ( + last + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + # last trailer can be arguments + or ( + last + and len(node.children) == 3 + and node.children[0].type == token.LPAR + # and node.children[1].type == syms.argument + and node.children[2].type == token.RPAR + ) + ) + + +def is_simple_decorator_expression(node: LN) -> bool: + """Return True iff `node` could be a 'dotted name' decorator + + This function takes the node of the 'namedexpr_test' of the new decorator + grammar and test if it would be valid under the old decorator grammar. 
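is_one_tuple() and is_one_tuple_between() above both reduce to counting commas at the right bracket depth: a parenthesized one-tuple has exactly one top-level comma, and that comma trails the single element. A simplified sketch of the idea on a flat token list (looks_like_one_tuple is illustrative and not part of black):

    from typing import List


    def looks_like_one_tuple(tokens: List[str]) -> bool:
        """Return True for token streams like ['(', 'x', ',', ')'], i.e. (x,)."""
        assert tokens and tokens[0] == "(" and tokens[-1] == ")"
        depth = 0
        comma_positions = []
        for index, tok in enumerate(tokens[1:-1], start=1):
            if tok in "([{":
                depth += 1
            elif tok in ")]}":
                depth -= 1
            elif tok == "," and depth == 0:
                comma_positions.append(index)
        # One top-level comma, and it trails the single element.
        return len(comma_positions) == 1 and comma_positions[0] == len(tokens) - 2


    assert looks_like_one_tuple(["(", "x", ",", ")"])                          # (x,)
    assert not looks_like_one_tuple(["(", "x", ",", "y", ")"])                 # (x, y)
    assert not looks_like_one_tuple(["(", "f", "(", "a", ",", "b", ")", ")"])  # (f(a, b))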
+ + The old grammar was: decorator: @ dotted_name [arguments] NEWLINE + The new grammar is : decorator: @ namedexpr_test NEWLINE + """ + if node.type == token.NAME: + return True + if node.type == syms.power: + if node.children: + return ( + node.children[0].type == token.NAME + and all(map(is_simple_decorator_trailer, node.children[1:-1])) + and ( + len(node.children) < 2 + or is_simple_decorator_trailer(node.children[-1], last=True) + ) + ) + return False + + +def is_yield(node: LN) -> bool: + """Return True if `node` holds a `yield` or `yield from` expression.""" + if node.type == syms.yield_expr: + return True + + if node.type == token.NAME and node.value == "yield": # type: ignore + return True + + if node.type != syms.atom: + return False + + if len(node.children) != 3: + return False + + lpar, expr, rpar = node.children + if lpar.type == token.LPAR and rpar.type == token.RPAR: + return is_yield(expr) + + return False + + +def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: + """Return True if `leaf` is a star or double star in a vararg or kwarg. + + If `within` includes VARARGS_PARENTS, this applies to function signatures. + If `within` includes UNPACKING_PARENTS, it applies to right hand-side + extended iterable unpacking (PEP 3132) and additional unpacking + generalizations (PEP 448). + """ + if leaf.type not in VARARGS_SPECIALS or not leaf.parent: + return False + + p = leaf.parent + if p.type == syms.star_expr: + # Star expressions are also used as assignment targets in extended + # iterable unpacking (PEP 3132). See what its parent is instead. + if not p.parent: + return False + + p = p.parent + + return p.type in within + + +def is_multiline_string(leaf: Leaf) -> bool: + """Return True if `leaf` is a multiline string that actually spans many lines.""" + return has_triple_quotes(leaf.value) and "\n" in leaf.value + + +def is_stub_suite(node: Node) -> bool: + """Return True if `node` is a suite with a stub body.""" + if ( + len(node.children) != 4 + or node.children[0].type != token.NEWLINE + or node.children[1].type != token.INDENT + or node.children[3].type != token.DEDENT + ): + return False + + return is_stub_body(node.children[2]) + + +def is_stub_body(node: LN) -> bool: + """Return True if `node` is a simple statement containing an ellipsis.""" + if not isinstance(node, Node) or node.type != syms.simple_stmt: + return False + + if len(node.children) != 2: + return False + + child = node.children[0] + return ( + child.type == syms.atom + and len(child.children) == 3 + and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) + ) + + +def is_atom_with_invisible_parens(node: LN) -> bool: + """Given a `LN`, determines whether it's an atom `node` with invisible + parens. Useful in dedupe-ing and normalizing parens. 
+ """ + if isinstance(node, Leaf) or node.type != syms.atom: + return False + + first, last = node.children[0], node.children[-1] + return ( + isinstance(first, Leaf) + and first.type == token.LPAR + and first.value == "" + and isinstance(last, Leaf) + and last.type == token.RPAR + and last.value == "" + ) + + +def is_empty_par(leaf: Leaf) -> bool: + return is_empty_lpar(leaf) or is_empty_rpar(leaf) + + +def is_empty_lpar(leaf: Leaf) -> bool: + return leaf.type == token.LPAR and leaf.value == "" + + +def is_empty_rpar(leaf: Leaf) -> bool: + return leaf.type == token.RPAR and leaf.value == "" + + +def is_import(leaf: Leaf) -> bool: + """Return True if the given leaf starts an import statement.""" + p = leaf.parent + t = leaf.type + v = leaf.value + return bool( + t == token.NAME + and ( + (v == "import" and p and p.type == syms.import_name) + or (v == "from" and p and p.type == syms.import_from) + ) + ) + + +def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: + """Return True if the given leaf is a special comment. + Only returns true for type comments for now.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) + + +def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: + """Wrap `child` in parentheses. + + This replaces `child` with an atom holding the parentheses and the old + child. That requires moving the prefix. + + If `visible` is False, the leaves will be valueless (and thus invisible). + """ + lpar = Leaf(token.LPAR, "(" if visible else "") + rpar = Leaf(token.RPAR, ")" if visible else "") + prefix = child.prefix + child.prefix = "" + index = child.remove() or 0 + new_child = Node(syms.atom, [lpar, child, rpar]) + new_child.prefix = prefix + parent.insert_child(index, new_child) + + +def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: + """Returns `wrapped` if `node` is of the shape ( wrapped ). + + Parenthesis can be optional. Returns None otherwise""" + if len(node.children) != 3: + return None + + lpar, wrapped, rpar = node.children + if not (lpar.type == token.LPAR and rpar.type == token.RPAR): + return None + + return wrapped + + +def ensure_visible(leaf: Leaf) -> None: + """Make sure parentheses are visible. + + They could be invisible as part of some statements (see + :func:`normalize_invisible_parens` and :func:`visit_import_from`). + """ + if leaf.type == token.LPAR: + leaf.value = "(" + elif leaf.type == token.RPAR: + leaf.value = ")" diff --git a/myenv/lib/python3.9/site-packages/black/numerics.py b/myenv/lib/python3.9/site-packages/black/numerics.py new file mode 100644 index 0000000..cb1c83e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/numerics.py @@ -0,0 +1,65 @@ +""" +Formatting numeric literals. 
+""" +from blib2to3.pytree import Leaf + + +def format_hex(text: str) -> str: + """ + Formats a hexadecimal string like "0x12B3" + """ + before, after = text[:2], text[2:] + return f"{before}{after.upper()}" + + +def format_scientific_notation(text: str) -> str: + """Formats a numeric string utilizing scentific notation""" + before, after = text.split("e") + sign = "" + if after.startswith("-"): + after = after[1:] + sign = "-" + elif after.startswith("+"): + after = after[1:] + before = format_float_or_int_string(before) + return f"{before}e{sign}{after}" + + +def format_long_or_complex_number(text: str) -> str: + """Formats a long or complex string like `10L` or `10j`""" + number = text[:-1] + suffix = text[-1] + # Capitalize in "2L" because "l" looks too similar to "1". + if suffix == "l": + suffix = "L" + return f"{format_float_or_int_string(number)}{suffix}" + + +def format_float_or_int_string(text: str) -> str: + """Formats a float string like "1.0".""" + if "." not in text: + return text + + before, after = text.split(".") + return f"{before or 0}.{after or 0}" + + +def normalize_numeric_literal(leaf: Leaf) -> None: + """Normalizes numeric (float, int, and complex) literals. + + All letters used in the representation are normalized to lowercase (except + in Python 2 long literals). + """ + text = leaf.value.lower() + if text.startswith(("0o", "0b")): + # Leave octal and binary literals alone. + pass + elif text.startswith("0x"): + text = format_hex(text) + elif "e" in text: + text = format_scientific_notation(text) + elif text.endswith(("j", "l")): + text = format_long_or_complex_number(text) + else: + text = format_float_or_int_string(text) + leaf.value = text diff --git a/myenv/lib/python3.9/site-packages/black/output.py b/myenv/lib/python3.9/site-packages/black/output.py new file mode 100644 index 0000000..f030d0a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/output.py @@ -0,0 +1,105 @@ +"""Nice output for Black. + +The double calls are for patching purposes in tests. 
+""" + +import json +from typing import Any, Optional +from mypy_extensions import mypyc_attr +import tempfile + +from click import echo, style + + +@mypyc_attr(patchable=True) +def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "bold" not in styles: + styles["bold"] = True + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "fg" not in styles: + styles["fg"] = "red" + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _out(message, nl=nl, **styles) + + +def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _err(message, nl=nl, **styles) + + +def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between each cell in notebooks `a` and `b`.""" + a_nb = json.loads(a) + b_nb = json.loads(b) + diff_lines = [ + diff( + "".join(a_nb["cells"][cell_number]["source"]) + "\n", + "".join(b_nb["cells"][cell_number]["source"]) + "\n", + f"{a_name}:cell_{cell_number}", + f"{b_name}:cell_{cell_number}", + ) + for cell_number, cell in enumerate(a_nb["cells"]) + if cell["cell_type"] == "code" + ] + return "".join(diff_lines) + + +def diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between strings `a` and `b`.""" + import difflib + + a_lines = a.splitlines(keepends=True) + b_lines = b.splitlines(keepends=True) + diff_lines = [] + for line in difflib.unified_diff( + a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5 + ): + # Work around https://bugs.python.org/issue2142 + # See: + # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html + if line[-1] == "\n": + diff_lines.append(line) + else: + diff_lines.append(line + "\n") + diff_lines.append("\\ No newline at end of file\n") + return "".join(diff_lines) + + +def color_diff(contents: str) -> str: + """Inject the ANSI color codes to the diff.""" + lines = contents.split("\n") + for i, line in enumerate(lines): + if line.startswith("+++") or line.startswith("---"): + line = "\033[1;37m" + line + "\033[0m" # bold white, reset + elif line.startswith("@@"): + line = "\033[36m" + line + "\033[0m" # cyan, reset + elif line.startswith("+"): + line = "\033[32m" + line + "\033[0m" # green, reset + elif line.startswith("-"): + line = "\033[31m" + line + "\033[0m" # red, reset + lines[i] = line + return "\n".join(lines) + + +@mypyc_attr(patchable=True) +def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: + """Dump `output` to a temporary file. Return path to the file.""" + with tempfile.NamedTemporaryFile( + mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" + ) as f: + for lines in output: + f.write(lines) + if ensure_final_newline and lines and lines[-1] != "\n": + f.write("\n") + return f.name diff --git a/myenv/lib/python3.9/site-packages/black/parsing.py b/myenv/lib/python3.9/site-packages/black/parsing.py new file mode 100644 index 0000000..c101643 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/parsing.py @@ -0,0 +1,285 @@ +""" +Parse Python code and perform AST validation. 
+""" +import ast +import platform +import sys +from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +# lib2to3 fork +from blib2to3.pytree import Node, Leaf +from blib2to3 import pygram +from blib2to3.pgen2 import driver +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.parse import ParseError +from blib2to3.pgen2.tokenize import TokenError + +from black.mode import TargetVersion, Feature, supports_feature +from black.nodes import syms + +ast3: Any +ast27: Any + +_IS_PYPY = platform.python_implementation() == "PyPy" + +try: + from typed_ast import ast3, ast27 +except ImportError: + # Either our python version is too low, or we're on pypy + if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY): + print( + "The typed_ast package is required but not installed.\n" + "You can upgrade to Python 3.8+ or install typed_ast with\n" + "`python3 -m pip install typed-ast`.", + file=sys.stderr, + ) + sys.exit(1) + else: + ast3 = ast27 = ast + + +PY310_HINT: Final[ + str +] = "Consider using --target-version py310 to parse Python 3.10 code." + + +class InvalidInput(ValueError): + """Raised when input source code fails all parse attempts.""" + + +def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. + return [ + # Python 3.7+ + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, + # Python 3.0-3.6 + pygram.python_grammar_no_print_statement_no_exec_statement, + # Python 2.7 with future print_function import + pygram.python_grammar_no_print_statement, + # Python 2.7 + pygram.python_grammar, + ] + + if all(version.is_python2() for version in target_versions): + # Python 2-only code, so try Python 2 grammars. + return [ + # Python 2.7 with future print_function import + pygram.python_grammar_no_print_statement, + # Python 2.7 + pygram.python_grammar, + ] + + # Python 3-compatible code, so only try Python 3 grammar. 
+ grammars = [] + if supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.10+ + grammars.append(pygram.python_grammar_soft_keywords) + # If we have to parse both, try to parse async as a keyword first + if not supports_feature( + target_versions, Feature.ASYNC_IDENTIFIERS + ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.7-3.9 + grammars.append( + pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords + ) + if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): + # Python 3.0-3.6 + grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) + # At least one of the above branches must have been taken, because every Python + # version has exactly one of the two 'ASYNC_*' flags + return grammars + + +def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: + """Given a string with source, return the lib2to3 Node.""" + if not src_txt.endswith("\n"): + src_txt += "\n" + + grammars = get_grammars(set(target_versions)) + for grammar in grammars: + drv = driver.Driver(grammar) + try: + result = drv.parse_string(src_txt, True) + break + + except ParseError as pe: + lineno, column = pe.context[1] + lines = src_txt.splitlines() + try: + faulty_line = lines[lineno - 1] + except IndexError: + faulty_line = "" + exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}") + + except TokenError as te: + # In edge cases these are raised; and typically don't have a "faulty_line". + lineno, column = te.args[1] + exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {te.args[0]}") + + else: + if pygram.python_grammar_soft_keywords not in grammars and matches_grammar( + src_txt, pygram.python_grammar_soft_keywords + ): + original_msg = exc.args[0] + msg = f"{original_msg}\n{PY310_HINT}" + raise InvalidInput(msg) from None + raise exc from None + + if isinstance(result, Leaf): + result = Node(syms.file_input, [result]) + return result + + +def matches_grammar(src_txt: str, grammar: Grammar) -> bool: + drv = driver.Driver(grammar) + try: + drv.parse_string(src_txt, True) + except (ParseError, TokenError, IndentationError): + return False + else: + return True + + +def lib2to3_unparse(node: Node) -> str: + """Given a lib2to3 node, return its string representation.""" + code = str(node) + return code + + +def parse_single_version( + src: str, version: Tuple[int, int] +) -> Union[ast.AST, ast3.AST, ast27.AST]: + filename = "" + # typed_ast is needed because of feature version limitations in the builtin ast + if sys.version_info >= (3, 8) and version >= (3,): + return ast.parse(src, filename, feature_version=version) + elif version >= (3,): + if _IS_PYPY: + return ast3.parse(src, filename) + else: + return ast3.parse(src, filename, feature_version=version[1]) + elif version == (2, 7): + return ast27.parse(src) + raise AssertionError("INTERNAL ERROR: Tried parsing unsupported Python version!") + + +def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: + # TODO: support Python 4+ ;) + versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)] + + if ast27.__name__ != "ast": + versions.append((2, 7)) + + first_error = "" + for version in sorted(versions, reverse=True): + try: + return parse_single_version(src, version) + except SyntaxError as e: + if not first_error: + first_error = str(e) + + raise SyntaxError(first_error) + + +ast3_AST: Final[Type[ast3.AST]] = ast3.AST +ast27_AST: Final[Type[ast27.AST]] = ast27.AST + + +def stringify_ast( + node: 
Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0 +) -> Iterator[str]: + """Simple visitor generating strings to compare ASTs by content.""" + + node = fixup_ast_constants(node) + + yield f"{' ' * depth}{node.__class__.__name__}(" + + type_ignore_classes: Tuple[Type[Any], ...] + for field in sorted(node._fields): # noqa: F402 + # TypeIgnore will not be present using pypy < 3.8, so need for this + if not (_IS_PYPY and sys.version_info < (3, 8)): + # TypeIgnore has only one field 'lineno' which breaks this comparison + type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) + if sys.version_info >= (3, 8): + type_ignore_classes += (ast.TypeIgnore,) + if isinstance(node, type_ignore_classes): + break + + try: + value = getattr(node, field) + except AttributeError: + continue + + yield f"{' ' * (depth+1)}{field}=" + + if isinstance(value, list): + for item in value: + # Ignore nested tuples within del statements, because we may insert + # parentheses and they change the AST. + if ( + field == "targets" + and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) + and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) + ): + for item in item.elts: + yield from stringify_ast(item, depth + 2) + + elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): + yield from stringify_ast(item, depth + 2) + + # Note that we are referencing the typed-ast ASTs via global variables and not + # direct module attribute accesses because that breaks mypyc. It's probably + # something to do with the ast3 / ast27 variables being marked as Any leading + # mypy to think this branch is always taken, leaving the rest of the code + # unanalyzed. Tighting up the types for the typed-ast AST types avoids the + # mypyc crash. + elif isinstance(value, (ast.AST, ast3_AST, ast27_AST)): + yield from stringify_ast(value, depth + 2) + + else: + # Constant strings may be indented across newlines, if they are + # docstrings; fold spaces after newlines when comparing. Similarly, + # trailing and leading space may be removed. + # Note that when formatting Python 2 code, at least with Windows + # line-endings, docstrings can end up here as bytes instead of + # str so make sure that we handle both cases. + if ( + isinstance(node, ast.Constant) + and field == "value" + and isinstance(value, (str, bytes)) + ): + lineend = "\n" if isinstance(value, str) else b"\n" + # To normalize, we strip any leading and trailing space from + # each line... 
+ stripped = [line.strip() for line in value.splitlines()] + normalized = lineend.join(stripped) # type: ignore[attr-defined] + # ...and remove any blank lines at the beginning and end of + # the whole string + normalized = normalized.strip() + else: + normalized = value + yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" + + yield f"{' ' * depth}) # /{node.__class__.__name__}" + + +def fixup_ast_constants( + node: Union[ast.AST, ast3.AST, ast27.AST] +) -> Union[ast.AST, ast3.AST, ast27.AST]: + """Map ast nodes deprecated in 3.8 to Constant.""" + if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): + return ast.Constant(value=node.s) + + if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): + return ast.Constant(value=node.n) + + if isinstance(node, (ast.NameConstant, ast3.NameConstant)): + return ast.Constant(value=node.value) + + return node diff --git a/myenv/lib/python3.9/site-packages/black/py.typed b/myenv/lib/python3.9/site-packages/black/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/black/report.py b/myenv/lib/python3.9/site-packages/black/report.py new file mode 100644 index 0000000..7e1c8b4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/report.py @@ -0,0 +1,104 @@ +""" +Summarize Black runs to users. +""" +from dataclasses import dataclass +from enum import Enum +from pathlib import Path + +from click import style + +from black.output import out, err + + +class Changed(Enum): + NO = 0 + CACHED = 1 + YES = 2 + + +class NothingChanged(UserWarning): + """Raised when reformatted code is the same as source.""" + + +@dataclass +class Report: + """Provides a reformatting counter. Can be rendered with `str(report)`.""" + + check: bool = False + diff: bool = False + quiet: bool = False + verbose: bool = False + change_count: int = 0 + same_count: int = 0 + failure_count: int = 0 + + def done(self, src: Path, changed: Changed) -> None: + """Increment the counter for successful reformatting. Write out a message.""" + if changed is Changed.YES: + reformatted = "would reformat" if self.check or self.diff else "reformatted" + if self.verbose or not self.quiet: + out(f"{reformatted} {src}") + self.change_count += 1 + else: + if self.verbose: + if changed is Changed.NO: + msg = f"{src} already well formatted, good job." + else: + msg = f"{src} wasn't modified on disk since last run." + out(msg, bold=False) + self.same_count += 1 + + def failed(self, src: Path, message: str) -> None: + """Increment the counter for failed reformatting. Write out a message.""" + err(f"error: cannot format {src}: {message}") + self.failure_count += 1 + + def path_ignored(self, path: Path, message: str) -> None: + if self.verbose: + out(f"{path} ignored: {message}", bold=False) + + @property + def return_code(self) -> int: + """Return the exit code that the app should use. + + This considers the current state of changed files and failures: + - if there were any failures, return 123; + - if any files were changed and --check is being used, return 1; + - otherwise return 0. + """ + # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with + # 126 we have special return codes reserved by the shell. + if self.failure_count: + return 123 + + elif self.change_count and self.check: + return 1 + + return 0 + + def __str__(self) -> str: + """Render a color report of the current state. + + Use `click.unstyle` to remove colors. 
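Report accumulates per-file outcomes and turns them into an exit code and a human-readable summary. A usage sketch, assuming the vendored package is importable (the file names and the parse-error message here are made up):

    from pathlib import Path

    from black.report import Changed, Report

    report = Report(check=True, quiet=True)
    report.done(Path("app.py"), Changed.YES)       # would be reformatted
    report.done(Path("tests.py"), Changed.NO)      # already well formatted
    report.failed(Path("broken.py"), "Cannot parse: 3:0: def f(:")

    print(report.return_code)  # -> 123: any failure wins over --check
    print(str(report))
    # -> "1 file would be reformatted, 1 file would be left unchanged,
    #     1 file would fail to reformat." (the counts are click-styled)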
+ """ + if self.check or self.diff: + reformatted = "would be reformatted" + unchanged = "would be left unchanged" + failed = "would fail to reformat" + else: + reformatted = "reformatted" + unchanged = "left unchanged" + failed = "failed to reformat" + report = [] + if self.change_count: + s = "s" if self.change_count > 1 else "" + report.append( + style(f"{self.change_count} file{s} {reformatted}", bold=True) + ) + if self.same_count: + s = "s" if self.same_count > 1 else "" + report.append(f"{self.same_count} file{s} {unchanged}") + if self.failure_count: + s = "s" if self.failure_count > 1 else "" + report.append(style(f"{self.failure_count} file{s} {failed}", fg="red")) + return ", ".join(report) + "." diff --git a/myenv/lib/python3.9/site-packages/black/rusty.py b/myenv/lib/python3.9/site-packages/black/rusty.py new file mode 100644 index 0000000..822e3d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/rusty.py @@ -0,0 +1,28 @@ +"""An error-handling model influenced by that used by the Rust programming language + +See https://doc.rust-lang.org/book/ch09-00-error-handling.html. +""" +from typing import Generic, TypeVar, Union + + +T = TypeVar("T") +E = TypeVar("E", bound=Exception) + + +class Ok(Generic[T]): + def __init__(self, value: T) -> None: + self._value = value + + def ok(self) -> T: + return self._value + + +class Err(Generic[E]): + def __init__(self, e: E) -> None: + self._e = e + + def err(self) -> E: + return self._e + + +Result = Union[Ok[T], Err[E]] diff --git a/myenv/lib/python3.9/site-packages/black/strings.py b/myenv/lib/python3.9/site-packages/black/strings.py new file mode 100644 index 0000000..06a5da0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/strings.py @@ -0,0 +1,234 @@ +""" +Simple formatting on strings. Further string formatting code is in trans.py. +""" + +import re +import sys +from functools import lru_cache +from typing import List, Pattern + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + + +STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. +STRING_PREFIX_RE: Final = re.compile( + r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL +) +FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)") + + +def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: + """Replace `regex` with `replacement` twice on `original`. + + This is used by string normalization to perform replaces on + overlapping matches. + """ + return regex.sub(replacement, regex.sub(replacement, original)) + + +def has_triple_quotes(string: str) -> bool: + """ + Returns: + True iff @string starts with three quotation characters. 
+ """ + raw_string = string.lstrip(STRING_PREFIX_CHARS) + return raw_string[:3] in {'"""', "'''"} + + +def lines_with_leading_tabs_expanded(s: str) -> List[str]: + """ + Splits string into lines and expands only leading tabs (following the normal + Python rules) + """ + lines = [] + for line in s.splitlines(): + # Find the index of the first non-whitespace character after a string of + # whitespace that includes at least one tab + match = FIRST_NON_WHITESPACE_RE.match(line) + if match: + first_non_whitespace_idx = match.start(1) + + lines.append( + line[:first_non_whitespace_idx].expandtabs() + + line[first_non_whitespace_idx:] + ) + else: + lines.append(line) + return lines + + +def fix_docstring(docstring: str, prefix: str) -> str: + # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation + if not docstring: + return "" + lines = lines_with_leading_tabs_expanded(docstring) + # Determine minimum indentation (first line doesn't count): + indent = sys.maxsize + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < sys.maxsize: + last_line_idx = len(lines) - 2 + for i, line in enumerate(lines[1:]): + stripped_line = line[indent:].rstrip() + if stripped_line or i == last_line_idx: + trimmed.append(prefix + stripped_line) + else: + trimmed.append("") + return "\n".join(trimmed) + + +def get_string_prefix(string: str) -> str: + """ + Pre-conditions: + * assert_is_leaf_string(@string) + + Returns: + @string's prefix (e.g. '', 'r', 'f', or 'rf'). + """ + assert_is_leaf_string(string) + + prefix = "" + prefix_idx = 0 + while string[prefix_idx] in STRING_PREFIX_CHARS: + prefix += string[prefix_idx] + prefix_idx += 1 + + return prefix + + +def assert_is_leaf_string(string: str) -> None: + """ + Checks the pre-condition that @string has the format that you would expect + of `leaf.value` where `leaf` is some Leaf such that `leaf.type == + token.STRING`. A more precise description of the pre-conditions that are + checked are listed below. + + Pre-conditions: + * @string starts with either ', ", ', or " where + `set()` is some subset of `set(STRING_PREFIX_CHARS)`. + * @string ends with a quote character (' or "). + + Raises: + AssertionError(...) if the pre-conditions listed above are not + satisfied. + """ + dquote_idx = string.find('"') + squote_idx = string.find("'") + if -1 in [dquote_idx, squote_idx]: + quote_idx = max(dquote_idx, squote_idx) + else: + quote_idx = min(squote_idx, dquote_idx) + + assert ( + 0 <= quote_idx < len(string) - 1 + ), f"{string!r} is missing a starting quote character (' or \")." + assert string[-1] in ( + "'", + '"', + ), f"{string!r} is missing an ending quote character (' or \")." + assert set(string[:quote_idx]).issubset( + set(STRING_PREFIX_CHARS) + ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." + + +def normalize_string_prefix(s: str, remove_u_prefix: bool = False) -> str: + """Make all string prefixes lowercase. + + If remove_u_prefix is given, also removes any u prefix from the string. 
+ """ + match = STRING_PREFIX_RE.match(s) + assert match is not None, f"failed to match string {s!r}" + orig_prefix = match.group(1) + new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u") + if remove_u_prefix: + new_prefix = new_prefix.replace("u", "") + return f"{new_prefix}{match.group(2)}" + + +# Re(gex) does actually cache patterns internally but this still improves +# performance on a long list literal of strings by 5-9% since lru_cache's +# caching overhead is much lower. +@lru_cache(maxsize=64) +def _cached_compile(pattern: str) -> Pattern[str]: + return re.compile(pattern) + + +def normalize_string_quotes(s: str) -> str: + """Prefer double quotes but only if it doesn't cause more escaping. + + Adds or removes backslashes as appropriate. Doesn't parse and fix + strings nested in f-strings. + """ + value = s.lstrip(STRING_PREFIX_CHARS) + if value[:3] == '"""': + return s + + elif value[:3] == "'''": + orig_quote = "'''" + new_quote = '"""' + elif value[0] == '"': + orig_quote = '"' + new_quote = "'" + else: + orig_quote = "'" + new_quote = '"' + first_quote_pos = s.find(orig_quote) + if first_quote_pos == -1: + return s # There's an internal error + + prefix = s[:first_quote_pos] + unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}") + escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") + escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") + body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] + if "r" in prefix.casefold(): + if unescaped_new_quote.search(body): + # There's at least one unescaped new_quote in this raw string + # so converting is impossible + return s + + # Do not introduce or remove backslashes in raw strings + new_body = body + else: + # remove unnecessary escapes + new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) + if body != new_body: + # Consider the string without unnecessary escapes as the original + body = new_body + s = f"{prefix}{orig_quote}{body}{orig_quote}" + new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) + new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) + if "f" in prefix.casefold(): + matches = re.findall( + r""" + (?:(? orig_escape_count: + return s # Do not introduce more escaping + + if new_escape_count == orig_escape_count and orig_quote == '"': + return s # Prefer double quotes + + return f"{prefix}{new_quote}{new_body}{new_quote}" diff --git a/myenv/lib/python3.9/site-packages/black/trans.py b/myenv/lib/python3.9/site-packages/black/trans.py new file mode 100644 index 0000000..cb41c1b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black/trans.py @@ -0,0 +1,2064 @@ +""" +String transformers that can split and merge strings. 
+""" +from abc import ABC, abstractmethod +from collections import defaultdict +from dataclasses import dataclass +import re +from typing import ( + Any, + Callable, + ClassVar, + Collection, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + TypeVar, + Union, +) +import sys + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from mypy_extensions import trait + +from black.rusty import Result, Ok, Err + +from black.mode import Feature +from black.nodes import syms, replace_child, parent_type +from black.nodes import is_empty_par, is_empty_lpar, is_empty_rpar +from black.nodes import OPENING_BRACKETS, CLOSING_BRACKETS, STANDALONE_COMMENT +from black.lines import Line, append_leaves +from black.brackets import BracketMatchError +from black.comments import contains_pragma_comment +from black.strings import has_triple_quotes, get_string_prefix, assert_is_leaf_string +from black.strings import normalize_string_quotes + +from blib2to3.pytree import Leaf, Node +from blib2to3.pgen2 import token + + +class CannotTransform(Exception): + """Base class for errors raised by Transformers.""" + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]] +Index = int +NodeType = int +ParserState = int +StringID = int +TResult = Result[T, CannotTransform] # (T)ransform Result +TMatchResult = TResult[Index] + + +def TErr(err_msg: str) -> Err[CannotTransform]: + """(T)ransform Err + + Convenience function used when working with the TResult type. + """ + cant_transform = CannotTransform(err_msg) + return Err(cant_transform) + + +class StringTransformer(ABC): + """ + An implementation of the Transformer protocol that relies on its + subclasses overriding the template methods `do_match(...)` and + `do_transform(...)`. + + This Transformer works exclusively on strings (for example, by merging + or splitting them). + + The following sections can be found among the docstrings of each concrete + StringTransformer subclass. + + Requirements: + Which requirements must be met of the given Line for this + StringTransformer to be applied? + + Transformations: + If the given Line meets all of the above requirements, which string + transformations can you expect to be applied to it by this + StringTransformer? + + Collaborations: + What contractual agreements does this StringTransformer have with other + StringTransfomers? Such collaborations should be eliminated/minimized + as much as possible. + """ + + __name__: Final = "StringTransformer" + + # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with + # `abc.ABC`. + def __init__(self, line_length: int, normalize_strings: bool) -> None: + self.line_length = line_length + self.normalize_strings = normalize_strings + + @abstractmethod + def do_match(self, line: Line) -> TMatchResult: + """ + Returns: + * Ok(string_idx) such that `line.leaves[string_idx]` is our target + string, if a match was able to be made. + OR + * Err(CannotTransform), if a match was not able to be made. + """ + + @abstractmethod + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + """ + Yields: + * Ok(new_line) where new_line is the new transformed line. + OR + * Err(CannotTransform) if the transformation failed for some reason. 
The + `do_match(...)` template method should usually be used to reject + the form of the given Line, but in some cases it is difficult to + know whether or not a Line meets the StringTransformer's + requirements until the transformation is already midway. + + Side Effects: + This method should NOT mutate @line directly, but it MAY mutate the + Line's underlying Node structure. (WARNING: If the underlying Node + structure IS altered, then this method should NOT be allowed to + yield an CannotTransform after that point.) + """ + + def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]: + """ + StringTransformer instances have a call signature that mirrors that of + the Transformer type. + + Raises: + CannotTransform(...) if the concrete StringTransformer class is unable + to transform @line. + """ + # Optimization to avoid calling `self.do_match(...)` when the line does + # not contain any string. + if not any(leaf.type == token.STRING for leaf in line.leaves): + raise CannotTransform("There are no strings in this line.") + + match_result = self.do_match(line) + + if isinstance(match_result, Err): + cant_transform = match_result.err() + raise CannotTransform( + f"The string transformer {self.__class__.__name__} does not recognize" + " this line as one that it can transform." + ) from cant_transform + + string_idx = match_result.ok() + + for line_result in self.do_transform(line, string_idx): + if isinstance(line_result, Err): + cant_transform = line_result.err() + raise CannotTransform( + "StringTransformer failed while attempting to transform string." + ) from cant_transform + line = line_result.ok() + yield line + + +@dataclass +class CustomSplit: + """A custom (i.e. manual) string split. + + A single CustomSplit instance represents a single substring. + + Examples: + Consider the following string: + ``` + "Hi there friend." + " This is a custom" + f" string {split}." + ``` + + This string will correspond to the following three CustomSplit instances: + ``` + CustomSplit(False, 16) + CustomSplit(False, 17) + CustomSplit(True, 16) + ``` + """ + + has_prefix: bool + break_idx: int + + +@trait +class CustomSplitMapMixin: + """ + This mixin class is used to map merged strings to a sequence of + CustomSplits, which will then be used to re-split the strings iff none of + the resultant substrings go over the configured max line length. + """ + + _Key: ClassVar = Tuple[StringID, str] + _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict( + tuple + ) + + @staticmethod + def _get_key(string: str) -> "CustomSplitMapMixin._Key": + """ + Returns: + A unique identifier that is used internally to map @string to a + group of custom splits. + """ + return (id(string), string) + + def add_custom_splits( + self, string: str, custom_splits: Iterable[CustomSplit] + ) -> None: + """Custom Split Map Setter Method + + Side Effects: + Adds a mapping from @string to the custom splits @custom_splits. + """ + key = self._get_key(string) + self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) + + def pop_custom_splits(self, string: str) -> List[CustomSplit]: + """Custom Split Map Getter Method + + Returns: + * A list of the custom splits that are mapped to @string, if any + exist. + OR + * [], otherwise. + + Side Effects: + Deletes the mapping between @string and its associated custom + splits (which are returned to the caller). 
+ """ + key = self._get_key(string) + + custom_splits = self._CUSTOM_SPLIT_MAP[key] + del self._CUSTOM_SPLIT_MAP[key] + + return list(custom_splits) + + def has_custom_splits(self, string: str) -> bool: + """ + Returns: + True iff @string is associated with a set of custom splits. + """ + key = self._get_key(string) + return key in self._CUSTOM_SPLIT_MAP + + +class StringMerger(StringTransformer, CustomSplitMapMixin): + """StringTransformer that merges strings together. + + Requirements: + (A) The line contains adjacent strings such that ALL of the validation checks + listed in StringMerger.__validate_msg(...)'s docstring pass. + OR + (B) The line contains a string which uses line continuation backslashes. + + Transformations: + Depending on which of the two requirements above where met, either: + + (A) The string group associated with the target string is merged. + OR + (B) All line-continuation backslashes are removed from the target string. + + Collaborations: + StringMerger provides custom split information to StringSplitter. + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for (i, leaf) in enumerate(LL): + if ( + leaf.type == token.STRING + and is_valid_index(i + 1) + and LL[i + 1].type == token.STRING + ): + return Ok(i) + + if leaf.type == token.STRING and "\\\n" in leaf.value: + return Ok(i) + + return TErr("This line has no strings that need merging.") + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + new_line = line + rblc_result = self._remove_backslash_line_continuation_chars( + new_line, string_idx + ) + if isinstance(rblc_result, Ok): + new_line = rblc_result.ok() + + msg_result = self._merge_string_group(new_line, string_idx) + if isinstance(msg_result, Ok): + new_line = msg_result.ok() + + if isinstance(rblc_result, Err) and isinstance(msg_result, Err): + msg_cant_transform = msg_result.err() + rblc_cant_transform = rblc_result.err() + cant_transform = CannotTransform( + "StringMerger failed to merge any strings in this line." + ) + + # Chain the errors together using `__cause__`. + msg_cant_transform.__cause__ = rblc_cant_transform + cant_transform.__cause__ = msg_cant_transform + + yield Err(cant_transform) + else: + yield Ok(new_line) + + @staticmethod + def _remove_backslash_line_continuation_chars( + line: Line, string_idx: int + ) -> TResult[Line]: + """ + Merge strings that were split across multiple lines using + line-continuation backslashes. + + Returns: + Ok(new_line), if @line contains backslash line-continuation + characters. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + if not ( + string_leaf.type == token.STRING + and "\\\n" in string_leaf.value + and not has_triple_quotes(string_leaf.value) + ): + return TErr( + f"String leaf {string_leaf} does not contain any backslash line" + " continuation characters." + ) + + new_line = line.clone() + new_line.comments = line.comments.copy() + append_leaves(new_line, line, LL) + + new_string_leaf = new_line.leaves[string_idx] + new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") + + return Ok(new_line) + + def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]: + """ + Merges string group (i.e. set of adjacent strings) where the first + string in the group is `line.leaves[string_idx]`. + + Returns: + Ok(new_line), if ALL of the validation checks found in + __validate_msg(...) pass. 
+ OR
+ Err(CannotTransform), otherwise.
+ """
+ LL = line.leaves
+
+ is_valid_index = is_valid_index_factory(LL)
+
+ vresult = self._validate_msg(line, string_idx)
+ if isinstance(vresult, Err):
+ return vresult
+
+ # If the string group is wrapped inside an Atom node, we must make sure
+ # to later replace that Atom with our new (merged) string leaf.
+ atom_node = LL[string_idx].parent
+
+ # We will place BREAK_MARK in between every two substrings that we
+ # merge. We will then later go through our final result and use the
+ # various instances of BREAK_MARK we find to add the right values to
+ # the custom split map.
+ BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
+
+ QUOTE = LL[string_idx].value[-1]
+
+ def make_naked(string: str, string_prefix: str) -> str:
+ """Strip @string (i.e. make it a "naked" string)
+
+ Pre-conditions:
+ * assert_is_leaf_string(@string)
+
+ Returns:
+ A string that is identical to @string except that
+ @string_prefix has been stripped, the surrounding QUOTE
+ characters have been removed, and any remaining QUOTE
+ characters have been escaped.
+ """
+ assert_is_leaf_string(string)
+
+ RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
+ naked_string = string[len(string_prefix) + 1 : -1]
+ naked_string = re.sub(
+ "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
+ )
+ return naked_string
+
+ # Holds the CustomSplit objects that will later be added to the custom
+ # split map.
+ custom_splits = []
+
+ # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
+ prefix_tracker = []
+
+ # Sets the 'prefix' variable. This is the prefix that the final merged
+ # string will have.
+ next_str_idx = string_idx
+ prefix = ""
+ while (
+ not prefix
+ and is_valid_index(next_str_idx)
+ and LL[next_str_idx].type == token.STRING
+ ):
+ prefix = get_string_prefix(LL[next_str_idx].value).lower()
+ next_str_idx += 1
+
+ # The next loop merges the string group. The final string will be
+ # contained in 'S'.
+ #
+ # The following convenience variables are used:
+ #
+ # S: string
+ # NS: naked string
+ # SS: next string
+ # NSS: naked next string
+ S = ""
+ NS = ""
+ num_of_strings = 0
+ next_str_idx = string_idx
+ while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
+ num_of_strings += 1
+
+ SS = LL[next_str_idx].value
+ next_prefix = get_string_prefix(SS).lower()
+
+ # If this is an f-string group but this substring is not prefixed
+ # with 'f'...
+ if "f" in prefix and "f" not in next_prefix:
+ # Then we must escape any braces contained in this substring.
+ SS = re.sub(r"(\{|\})", r"\1\1", SS)
+
+ NSS = make_naked(SS, next_prefix)
+
+ has_prefix = bool(next_prefix)
+ prefix_tracker.append(has_prefix)
+
+ S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
+ NS = make_naked(S, prefix)
+
+ next_str_idx += 1
+
+ S_leaf = Leaf(token.STRING, S)
+ if self.normalize_strings:
+ S_leaf.value = normalize_string_quotes(S_leaf.value)
+
+ # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
+ temp_string = S_leaf.value[len(prefix) + 1 : -1]
+ for has_prefix in prefix_tracker:
+ mark_idx = temp_string.find(BREAK_MARK)
+ assert (
+ mark_idx >= 0
+ ), "Logic error while filling the custom string breakpoint cache."
+
+ temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
+ breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
+ custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
+
+ string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
+
+ if atom_node is not None:
+ replace_child(atom_node, string_leaf)
+
+ # Build the final line ('new_line') that this method will later return.
+ new_line = line.clone()
+ for (i, leaf) in enumerate(LL):
+ if i == string_idx:
+ new_line.append(string_leaf)
+
+ if string_idx <= i < string_idx + num_of_strings:
+ for comment_leaf in line.comments_after(LL[i]):
+ new_line.append(comment_leaf, preformatted=True)
+ continue
+
+ append_leaves(new_line, line, [leaf])
+
+ self.add_custom_splits(string_leaf.value, custom_splits)
+ return Ok(new_line)
+
+ @staticmethod
+ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
+ """Validate (M)erge (S)tring (G)roup
+
+ Transform-time string validation logic for __merge_string_group(...).
+
+ Returns:
+ * Ok(None), if ALL validation checks (listed below) pass.
+ OR
+ * Err(CannotTransform), if any of the following are true:
+ - The target string group does not contain ANY stand-alone comments.
+ - The target string is not in a string group (i.e. it has no
+ adjacent strings).
+ - The string group has more than one inline comment.
+ - The string group has an inline comment that appears to be a pragma.
+ - The set of all string prefixes in the string group is of
+ length greater than one and is not equal to {"", "f"}.
+ - The string group consists of raw strings.
+ """
+ # We first check for "inner" stand-alone comments (i.e. stand-alone
+ # comments that have a string leaf before them AND after them).
+ for inc in [1, -1]:
+ i = string_idx
+ found_sa_comment = False
+ is_valid_index = is_valid_index_factory(line.leaves)
+ while is_valid_index(i) and line.leaves[i].type in [
+ token.STRING,
+ STANDALONE_COMMENT,
+ ]:
+ if line.leaves[i].type == STANDALONE_COMMENT:
+ found_sa_comment = True
+ elif found_sa_comment:
+ return TErr(
+ "StringMerger does NOT merge string groups which contain "
+ "stand-alone comments."
+ ) + + i += inc + + num_of_inline_string_comments = 0 + set_of_prefixes = set() + num_of_strings = 0 + for leaf in line.leaves[string_idx:]: + if leaf.type != token.STRING: + # If the string group is trailed by a comma, we count the + # comments trailing the comma to be one of the string group's + # comments. + if leaf.type == token.COMMA and id(leaf) in line.comments: + num_of_inline_string_comments += 1 + break + + if has_triple_quotes(leaf.value): + return TErr("StringMerger does NOT merge multiline strings.") + + num_of_strings += 1 + prefix = get_string_prefix(leaf.value).lower() + if "r" in prefix: + return TErr("StringMerger does NOT merge raw strings.") + + set_of_prefixes.add(prefix) + + if id(leaf) in line.comments: + num_of_inline_string_comments += 1 + if contains_pragma_comment(line.comments[id(leaf)]): + return TErr("Cannot merge strings which have pragma comments.") + + if num_of_strings < 2: + return TErr( + f"Not enough strings to merge (num_of_strings={num_of_strings})." + ) + + if num_of_inline_string_comments > 1: + return TErr( + f"Too many inline string comments ({num_of_inline_string_comments})." + ) + + if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: + return TErr(f"Too many different prefixes ({set_of_prefixes}).") + + return Ok(None) + + +class StringParenStripper(StringTransformer): + """StringTransformer that strips surrounding parentheses from strings. + + Requirements: + The line contains a string which is surrounded by parentheses and: + - The target string is NOT the only argument to a function call. + - The target string is NOT a "pointless" string. + - If the target string contains a PERCENT, the brackets are not + preceded or followed by an operator with higher precedence than + PERCENT. + + Transformations: + The parentheses mentioned in the 'Requirements' section are stripped. + + Collaborations: + StringParenStripper has its own inherent usefulness, but it is also + relied on to clean up the parentheses created by StringParenWrapper (in + the event that they are no longer needed). + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for (idx, leaf) in enumerate(LL): + # Should be a string... + if leaf.type != token.STRING: + continue + + # If this is a "pointless" string... + if ( + leaf.parent + and leaf.parent.parent + and leaf.parent.parent.type == syms.simple_stmt + ): + continue + + # Should be preceded by a non-empty LPAR... + if ( + not is_valid_index(idx - 1) + or LL[idx - 1].type != token.LPAR + or is_empty_lpar(LL[idx - 1]) + ): + continue + + # That LPAR should NOT be preceded by a function name or a closing + # bracket (which could be a function which returns a function or a + # list/dictionary that contains a function)... + if is_valid_index(idx - 2) and ( + LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS + ): + continue + + string_idx = idx + + # Skip the string trailer, if one exists. 
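+ # (A string "trailer" is e.g. the `.format(...)` call in
+ # `"{}!".format(name)` or the `% args` part of `"%s" % args`; see the
+ # StringParser class further below.)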
+ string_parser = StringParser() + next_idx = string_parser.parse(LL, string_idx) + + # if the leaves in the parsed string include a PERCENT, we need to + # make sure the initial LPAR is NOT preceded by an operator with + # higher or equal precedence to PERCENT + if is_valid_index(idx - 2): + # mypy can't quite follow unless we name this + before_lpar = LL[idx - 2] + if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( + ( + before_lpar.type + in { + token.STAR, + token.AT, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.TILDE, + token.DOUBLESTAR, + token.AWAIT, + token.LSQB, + token.LPAR, + } + ) + or ( + # only unary PLUS/MINUS + before_lpar.parent + and before_lpar.parent.type == syms.factor + and (before_lpar.type in {token.PLUS, token.MINUS}) + ) + ): + continue + + # Should be followed by a non-empty RPAR... + if ( + is_valid_index(next_idx) + and LL[next_idx].type == token.RPAR + and not is_empty_rpar(LL[next_idx]) + ): + # That RPAR should NOT be followed by anything with higher + # precedence than PERCENT + if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { + token.DOUBLESTAR, + token.LSQB, + token.LPAR, + token.DOT, + }: + continue + + return Ok(string_idx) + + return TErr("This line has no strings wrapped in parens.") + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + string_parser = StringParser() + rpar_idx = string_parser.parse(LL, string_idx) + + for leaf in (LL[string_idx - 1], LL[rpar_idx]): + if line.comments_after(leaf): + yield TErr( + "Will not strip parentheses which have comments attached to them." + ) + return + + new_line = line.clone() + new_line.comments = line.comments.copy() + try: + append_leaves(new_line, line, LL[: string_idx - 1]) + except BracketMatchError: + # HACK: I believe there is currently a bug somewhere in + # right_hand_split() that is causing brackets to not be tracked + # properly by a shared BracketTracker. + append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True) + + string_leaf = Leaf(token.STRING, LL[string_idx].value) + LL[string_idx - 1].remove() + replace_child(LL[string_idx], string_leaf) + new_line.append(string_leaf) + + append_leaves( + new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :] + ) + + LL[rpar_idx].remove() + + yield Ok(new_line) + + +class BaseStringSplitter(StringTransformer): + """ + Abstract class for StringTransformers which transform a Line's strings by splitting + them or placing them on their own lines where necessary to avoid going over + the configured line length. + + Requirements: + * The target string value is responsible for the line going over the + line length limit. It follows that after all of black's other line + split methods have been exhausted, this line (or one of the resulting + lines after all line splits are performed) would still be over the + line_length limit unless we split this string. + AND + * The target string is NOT a "pointless" string (i.e. a string that has + no parent or siblings). + AND + * The target string is not followed by an inline comment that appears + to be a pragma. + AND + * The target string is not a multiline (i.e. triple-quote) string. 
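+
+ Example:
+ An illustrative sketch (assuming the default line length of 88):
+ the following line qualifies because the string itself is what
+ overflows the limit...
+ ```
+ x = "This is a very long string that by itself pushes this assignment well over the limit."
+ ```
+ ...whereas a line that is too long only because of its other
+ leaves does not.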
+ """ + + STRING_OPERATORS: Final = [ + token.EQEQUAL, + token.GREATER, + token.GREATEREQUAL, + token.LESS, + token.LESSEQUAL, + token.NOTEQUAL, + token.PERCENT, + token.PLUS, + token.STAR, + ] + + @abstractmethod + def do_splitter_match(self, line: Line) -> TMatchResult: + """ + BaseStringSplitter asks its clients to override this method instead of + `StringTransformer.do_match(...)`. + + Follows the same protocol as `StringTransformer.do_match(...)`. + + Refer to `help(StringTransformer.do_match)` for more information. + """ + + def do_match(self, line: Line) -> TMatchResult: + match_result = self.do_splitter_match(line) + if isinstance(match_result, Err): + return match_result + + string_idx = match_result.ok() + vresult = self._validate(line, string_idx) + if isinstance(vresult, Err): + return vresult + + return match_result + + def _validate(self, line: Line, string_idx: int) -> TResult[None]: + """ + Checks that @line meets all of the requirements listed in this classes' + docstring. Refer to `help(BaseStringSplitter)` for a detailed + description of those requirements. + + Returns: + * Ok(None), if ALL of the requirements are met. + OR + * Err(CannotTransform), if ANY of the requirements are NOT met. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + + max_string_length = self._get_max_string_length(line, string_idx) + if len(string_leaf.value) <= max_string_length: + return TErr( + "The string itself is not what is causing this line to be too long." + ) + + if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ + token.STRING, + token.NEWLINE, + ]: + return TErr( + f"This string ({string_leaf.value}) appears to be pointless (i.e. has" + " no parent)." + ) + + if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( + line.comments[id(line.leaves[string_idx])] + ): + return TErr( + "Line appears to end with an inline pragma comment. Splitting the line" + " could modify the pragma's behavior." + ) + + if has_triple_quotes(string_leaf.value): + return TErr("We cannot split multiline strings.") + + return Ok(None) + + def _get_max_string_length(self, line: Line, string_idx: int) -> int: + """ + Calculates the max string length used when attempting to determine + whether or not the target string is responsible for causing the line to + go over the line length limit. + + WARNING: This method is tightly coupled to both StringSplitter and + (especially) StringParenWrapper. There is probably a better way to + accomplish what is being done here. + + Returns: + max_string_length: such that `line.leaves[string_idx].value > + max_string_length` implies that the target string IS responsible + for causing this line to exceed the line length limit. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # We use the shorthand "WMA4" in comments to abbreviate "We must + # account for". When giving examples, we use STRING to mean some/any + # valid string. + # + # Finally, we use the following convenience variables: + # + # P: The leaf that is before the target string leaf. + # N: The leaf that is after the target string leaf. + # NN: The leaf that is after N. + + # WMA4 the whitespace at the beginning of the line. + offset = line.depth * 4 + + if is_valid_index(string_idx - 1): + p_idx = string_idx - 1 + if ( + LL[string_idx - 1].type == token.LPAR + and LL[string_idx - 1].value == "" + and string_idx >= 2 + ): + # If the previous leaf is an empty LPAR placeholder, we should skip it. 
+ p_idx -= 1 + + P = LL[p_idx] + if P.type in self.STRING_OPERATORS: + # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`). + offset += len(str(P)) + 1 + + if P.type == token.COMMA: + # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. + offset += 3 + + if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]: + # This conditional branch is meant to handle dictionary keys, + # variable assignments, 'return STRING' statement lines, and + # 'else STRING' ternary expression lines. + + # WMA4 a single space. + offset += 1 + + # WMA4 the lengths of any leaves that came before that space, + # but after any closing bracket before that space. + for leaf in reversed(LL[: p_idx + 1]): + offset += len(str(leaf)) + if leaf.type in CLOSING_BRACKETS: + break + + if is_valid_index(string_idx + 1): + N = LL[string_idx + 1] + if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2: + # If the next leaf is an empty RPAR placeholder, we should skip it. + N = LL[string_idx + 2] + + if N.type == token.COMMA: + # WMA4 a single comma at the end of the string (e.g `STRING,`). + offset += 1 + + if is_valid_index(string_idx + 2): + NN = LL[string_idx + 2] + + if N.type == token.DOT and NN.type == token.NAME: + # This conditional branch is meant to handle method calls invoked + # off of a string literal up to and including the LPAR character. + + # WMA4 the '.' character. + offset += 1 + + if ( + is_valid_index(string_idx + 3) + and LL[string_idx + 3].type == token.LPAR + ): + # WMA4 the left parenthesis character. + offset += 1 + + # WMA4 the length of the method's name. + offset += len(NN.value) + + has_comments = False + for comment_leaf in line.comments_after(LL[string_idx]): + if not has_comments: + has_comments = True + # WMA4 two spaces before the '#' character. + offset += 2 + + # WMA4 the length of the inline comment. + offset += len(comment_leaf.value) + + max_string_length = self.line_length - offset + return max_string_length + + +def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]: + """ + Yields spans corresponding to expressions in a given f-string. + Spans are half-open ranges (left inclusive, right exclusive). + Assumes the input string is a valid f-string, but will not crash if the input + string is invalid. + """ + stack: List[int] = [] # our curly paren stack + i = 0 + while i < len(s): + if s[i] == "{": + # if we're in a string part of the f-string, ignore escaped curly braces + if not stack and i + 1 < len(s) and s[i + 1] == "{": + i += 2 + continue + stack.append(i) + i += 1 + continue + + if s[i] == "}": + if not stack: + i += 1 + continue + j = stack.pop() + # we've made it back out of the expression! yield the span + if not stack: + yield (j, i + 1) + i += 1 + continue + + # if we're in an expression part of the f-string, fast forward through strings + # note that backslashes are not legal in the expression portion of f-strings + if stack: + delim = None + if s[i : i + 3] in ("'''", '"""'): + delim = s[i : i + 3] + elif s[i] in ("'", '"'): + delim = s[i] + if delim: + i += len(delim) + while i < len(s) and s[i : i + len(delim)] != delim: + i += 1 + i += len(delim) + continue + i += 1 + + +def fstring_contains_expr(s: str) -> bool: + return any(iter_fexpr_spans(s)) + + +class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that splits "atom" strings (i.e. strings which exist on + lines by themselves). 
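+
+ Example:
+ An illustrative sketch of an "atom" string, i.e. a string leaf that
+ is alone on its line:
+ ```
+ some_variable = (
+ "This string is the only leaf on its line, so it is an atom."
+ )
+ ```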
+ + Requirements: + * The line consists ONLY of a single string (possibly prefixed by a + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND + * All of the requirements listed in BaseStringSplitter's docstring. + + Transformations: + The string mentioned in the 'Requirements' section is split into as + many substrings as necessary to adhere to the configured line length. + + In the final set of substrings, no substring should be smaller than + MIN_SUBSTR_SIZE characters. + + The string will ONLY be split on spaces (i.e. each new substring should + start with a space). Note that the string will NOT be split on a space + which is escaped with a backslash. + + If the string is an f-string, it will NOT be split in the middle of an + f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x + else bar()} is an f-expression). + + If the string that is being split has an associated set of custom split + records and those custom splits will NOT result in any line going over + the configured line length, those custom splits are used. Otherwise the + string is split as late as possible (from left-to-right) while still + adhering to the transformation rules listed above. + + Collaborations: + StringSplitter relies on StringMerger to construct the appropriate + CustomSplit objects and add them to the custom split map. + """ + + MIN_SUBSTR_SIZE: Final = 6 + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + idx = 0 + + # The first two leaves MAY be the 'not in' keywords... + if ( + is_valid_index(idx) + and is_valid_index(idx + 1) + and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME] + and str(LL[idx]) + str(LL[idx + 1]) == "not in" + ): + idx += 2 + # Else the first leaf MAY be a string operator symbol or the 'in' keyword... + elif is_valid_index(idx) and ( + LL[idx].type in self.STRING_OPERATORS + or LL[idx].type == token.NAME + and str(LL[idx]) == "in" + ): + idx += 1 + + # The next/first leaf MAY be an empty LPAR... + if is_valid_index(idx) and is_empty_lpar(LL[idx]): + idx += 1 + + # The next/first leaf MUST be a string... + if not is_valid_index(idx) or LL[idx].type != token.STRING: + return TErr("Line does not start with a string.") + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by an empty RPAR... + if is_valid_index(idx) and is_empty_rpar(LL[idx]): + idx += 1 + + # That string / empty RPAR leaf MAY be followed by a comma... + if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... + if is_valid_index(idx): + return TErr("This line does not end with a string.") + + return Ok(string_idx) + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + QUOTE = LL[string_idx].value[-1] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + prefix = get_string_prefix(LL[string_idx].value).lower() + + # We MAY choose to drop the 'f' prefix from substrings that don't + # contain any f-expressions, but ONLY if the original f-string + # contains at least one f-expression. Otherwise, we will alter the AST + # of the program. 
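+ # (Illustration: when f"foo {x} " f"bar" is merged and re-split, the
+ # piece without an f-expression may be emitted as plain "bar"; but a
+ # lone f"foo bar" with no f-expressions anywhere is left untouched,
+ # since dropping its 'f' would change the AST.)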
+ drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr( + LL[string_idx].value + ) + + first_string_line = True + + string_op_leaves = self._get_string_operator_leaves(LL) + string_op_leaves_length = ( + sum([len(str(prefix_leaf)) for prefix_leaf in string_op_leaves]) + 1 + if string_op_leaves + else 0 + ) + + def maybe_append_string_operators(new_line: Line) -> None: + """ + Side Effects: + If @line starts with a string operator and this is the first + line we are constructing, this function appends the string + operator to @new_line and replaces the old string operator leaf + in the node structure. Otherwise this function does nothing. + """ + maybe_prefix_leaves = string_op_leaves if first_string_line else [] + for i, prefix_leaf in enumerate(maybe_prefix_leaves): + replace_child(LL[i], prefix_leaf) + new_line.append(prefix_leaf) + + ends_with_comma = ( + is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA + ) + + def max_last_string() -> int: + """ + Returns: + The max allowed length of the string value used for the last + line we will construct. + """ + result = self.line_length + result -= line.depth * 4 + result -= 1 if ends_with_comma else 0 + result -= string_op_leaves_length + return result + + # --- Calculate Max Break Index (for string value) + # We start with the line length limit + max_break_idx = self.line_length + # The last index of a string of length N is N-1. + max_break_idx -= 1 + # Leading whitespace is not present in the string value (e.g. Leaf.value). + max_break_idx -= line.depth * 4 + if max_break_idx < 0: + yield TErr( + f"Unable to split {LL[string_idx].value} at such high of a line depth:" + f" {line.depth}" + ) + return + + # Check if StringMerger registered any custom splits. + custom_splits = self.pop_custom_splits(LL[string_idx].value) + # We use them ONLY if none of them would produce lines that exceed the + # line limit. + use_custom_breakpoints = bool( + custom_splits + and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) + ) + + # Temporary storage for the remaining chunk of the string line that + # can't fit onto the line currently being constructed. + rest_value = LL[string_idx].value + + def more_splits_should_be_made() -> bool: + """ + Returns: + True iff `rest_value` (the remaining string value from the last + split), should be split again. + """ + if use_custom_breakpoints: + return len(custom_splits) > 1 + else: + return len(rest_value) > max_last_string() + + string_line_results: List[Ok[Line]] = [] + while more_splits_should_be_made(): + if use_custom_breakpoints: + # Custom User Split (manual) + csplit = custom_splits.pop(0) + break_idx = csplit.break_idx + else: + # Algorithmic Split (automatic) + max_bidx = max_break_idx - string_op_leaves_length + maybe_break_idx = self._get_break_idx(rest_value, max_bidx) + if maybe_break_idx is None: + # If we are unable to algorithmically determine a good split + # and this string has custom splits registered to it, we + # fall back to using them--which means we have to start + # over from the beginning. + if custom_splits: + rest_value = LL[string_idx].value + string_line_results = [] + first_string_line = True + use_custom_breakpoints = True + continue + + # Otherwise, we stop splitting here. 
+ break + + break_idx = maybe_break_idx + + # --- Construct `next_value` + next_value = rest_value[:break_idx] + QUOTE + + # HACK: The following 'if' statement is a hack to fix the custom + # breakpoint index in the case of either: (a) substrings that were + # f-strings but will have the 'f' prefix removed OR (b) substrings + # that were not f-strings but will now become f-strings because of + # redundant use of the 'f' prefix (i.e. none of the substrings + # contain f-expressions but one or more of them had the 'f' prefix + # anyway; in which case, we will prepend 'f' to _all_ substrings). + # + # There is probably a better way to accomplish what is being done + # here... + # + # If this substring is an f-string, we _could_ remove the 'f' + # prefix, and the current custom split did NOT originally use a + # prefix... + if ( + next_value != self._normalize_f_string(next_value, prefix) + and use_custom_breakpoints + and not csplit.has_prefix + ): + # Then `csplit.break_idx` will be off by one after removing + # the 'f' prefix. + break_idx += 1 + next_value = rest_value[:break_idx] + QUOTE + + if drop_pointless_f_prefix: + next_value = self._normalize_f_string(next_value, prefix) + + # --- Construct `next_leaf` + next_leaf = Leaf(token.STRING, next_value) + insert_str_child(next_leaf) + self._maybe_normalize_string_quotes(next_leaf) + + # --- Construct `next_line` + next_line = line.clone() + maybe_append_string_operators(next_line) + next_line.append(next_leaf) + string_line_results.append(Ok(next_line)) + + rest_value = prefix + QUOTE + rest_value[break_idx:] + first_string_line = False + + yield from string_line_results + + if drop_pointless_f_prefix: + rest_value = self._normalize_f_string(rest_value, prefix) + + rest_leaf = Leaf(token.STRING, rest_value) + insert_str_child(rest_leaf) + + # NOTE: I could not find a test case that verifies that the following + # line is actually necessary, but it seems to be. Otherwise we risk + # not normalizing the last substring, right? + self._maybe_normalize_string_quotes(rest_leaf) + + last_line = line.clone() + maybe_append_string_operators(last_line) + + # If there are any leaves to the right of the target string... + if is_valid_index(string_idx + 1): + # We use `temp_value` here to determine how long the last line + # would be if we were to append all the leaves to the right of the + # target string to the last string line. + temp_value = rest_value + for leaf in LL[string_idx + 1 :]: + temp_value += str(leaf) + if leaf.type == token.LPAR: + break + + # Try to fit them all on the same line with the last substring... + if ( + len(temp_value) <= max_last_string() + or LL[string_idx + 1].type == token.COMMA + ): + last_line.append(rest_leaf) + append_leaves(last_line, line, LL[string_idx + 1 :]) + yield Ok(last_line) + # Otherwise, place the last substring on one line and everything + # else on a line below that... + else: + last_line.append(rest_leaf) + yield Ok(last_line) + + non_string_line = line.clone() + append_leaves(non_string_line, line, LL[string_idx + 1 :]) + yield Ok(non_string_line) + # Else the target string was the last leaf... + else: + last_line.append(rest_leaf) + last_line.comments = line.comments.copy() + yield Ok(last_line) + + def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an \\N{...} expression (which is NOT + allowed). 
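+
+ For example, in "\\N{GREEK SMALL LETTER ALPHA}" every index from the
+ backslash through the closing '}' falls inside the yielded range, so
+ the escape sequence can never be split in two.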
+ """ + # True - the previous backslash was unescaped + # False - the previous backslash was escaped *or* there was no backslash + previous_was_unescaped_backslash = False + it = iter(enumerate(string)) + for idx, c in it: + if c == "\\": + previous_was_unescaped_backslash = not previous_was_unescaped_backslash + continue + if not previous_was_unescaped_backslash or c != "N": + previous_was_unescaped_backslash = False + continue + previous_was_unescaped_backslash = False + + begin = idx - 1 # the position of backslash before \N{...} + for idx, c in it: + if c == "}": + end = idx + break + else: + # malformed nameescape expression? + # should have been detected by AST parsing earlier... + raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + yield begin, end + + def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an f-expression (which is NOT + allowed). + """ + if "f" not in get_string_prefix(string).lower(): + return + yield from iter_fexpr_spans(string) + + def _get_illegal_split_indices(self, string: str) -> Set[Index]: + illegal_indices: Set[Index] = set() + iterators = [ + self._iter_fexpr_slices(string), + self._iter_nameescape_slices(string), + ] + for it in iterators: + for begin, end in it: + illegal_indices.update(range(begin, end + 1)) + return illegal_indices + + def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: + """ + This method contains the algorithm that StringSplitter uses to + determine which character to split each string at. + + Args: + @string: The substring that we are attempting to split. + @max_break_idx: The ideal break index. We will return this value if it + meets all the necessary conditions. In the likely event that it + doesn't we will try to find the closest index BELOW @max_break_idx + that does. If that fails, we will expand our search by also + considering all valid indices ABOVE @max_break_idx. + + Pre-Conditions: + * assert_is_leaf_string(@string) + * 0 <= @max_break_idx < len(@string) + + Returns: + break_idx, if an index is able to be found that meets all of the + conditions listed in the 'Transformations' section of this classes' + docstring. + OR + None, otherwise. + """ + is_valid_index = is_valid_index_factory(string) + + assert is_valid_index(max_break_idx) + assert_is_leaf_string(string) + + _illegal_split_indices = self._get_illegal_split_indices(string) + + def breaks_unsplittable_expression(i: Index) -> bool: + """ + Returns: + True iff returning @i would result in the splitting of an + unsplittable expression (which is NOT allowed). + """ + return i in _illegal_split_indices + + def passes_all_checks(i: Index) -> bool: + """ + Returns: + True iff ALL of the conditions listed in the 'Transformations' + section of this classes' docstring would be be met by returning @i. + """ + is_space = string[i] == " " + + is_not_escaped = True + j = i - 1 + while is_valid_index(j) and string[j] == "\\": + is_not_escaped = not is_not_escaped + j -= 1 + + is_big_enough = ( + len(string[i:]) >= self.MIN_SUBSTR_SIZE + and len(string[:i]) >= self.MIN_SUBSTR_SIZE + ) + return ( + is_space + and is_not_escaped + and is_big_enough + and not breaks_unsplittable_expression(i) + ) + + # First, we check all indices BELOW @max_break_idx. 
+ break_idx = max_break_idx
+ while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
+ break_idx -= 1
+
+ if not passes_all_checks(break_idx):
+ # If that fails, we check all indices ABOVE @max_break_idx.
+ #
+ # If we are able to find a valid index here, the next line is going
+ # to be longer than the specified line length, but it's probably
+ # better than doing nothing at all.
+ break_idx = max_break_idx + 1
+ while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
+ break_idx += 1
+
+ if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
+ return None
+
+ return break_idx
+
+ def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
+ if self.normalize_strings:
+ leaf.value = normalize_string_quotes(leaf.value)
+
+ def _normalize_f_string(self, string: str, prefix: str) -> str:
+ """
+ Pre-Conditions:
+ * assert_is_leaf_string(@string)
+
+ Returns:
+ * If @string is an f-string that contains no f-expressions, we
+ return a string identical to @string except that the 'f' prefix
+ has been stripped and all double braces (i.e. '{{' or '}}') have
+ been normalized (i.e. turned into '{' or '}').
+ OR
+ * Otherwise, we return @string.
+ """
+ assert_is_leaf_string(string)
+
+ if "f" in prefix and not fstring_contains_expr(string):
+ new_prefix = prefix.replace("f", "")
+
+ temp = string[len(prefix) :]
+ temp = re.sub(r"\{\{", "{", temp)
+ temp = re.sub(r"\}\}", "}", temp)
+ new_string = temp
+
+ return f"{new_prefix}{new_string}"
+ else:
+ return string
+
+ def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
+ LL = list(leaves)
+
+ string_op_leaves = []
+ i = 0
+ while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
+ prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
+ string_op_leaves.append(prefix_leaf)
+ i += 1
+ return string_op_leaves
+
+
+class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
+ """
+ StringTransformer that splits non-"atom" strings (i.e. strings that do not
+ exist on lines by themselves).
+
+ Requirements:
+ All of the requirements listed in BaseStringSplitter's docstring in
+ addition to the requirements listed below:
+
+ * The line is a return/yield statement, which returns/yields a string.
+ OR
+ * The line is part of a ternary expression (e.g. `x = y if cond else
+ z`) such that the line starts with `else <string>`, where <string> is
+ some string.
+ OR
+ * The line is an assert statement, which ends with a string.
+ OR
+ * The line is an assignment statement (e.g. `x = <string>` or `x +=
+ <string>`) such that the variable is being assigned the value of some
+ string.
+ OR
+ * The line is a dictionary key assignment where some valid key is being
+ assigned the value of some string.
+
+ Transformations:
+ The chosen string is wrapped in parentheses and then split at the LPAR.
+
+ We then have one line which ends with an LPAR and another line that
+ starts with the chosen string. The latter line is then split again at
+ the RPAR. This results in the RPAR (and possibly a trailing comma)
+ being placed on its own line.
+
+ NOTE: If any leaves exist to the right of the chosen string (except
+ for a trailing comma, which would be placed after the RPAR), those
+ leaves are placed inside the parentheses. In effect, the chosen
+ string is not necessarily being "wrapped" by parentheses. We can,
+ however, count on the LPAR being placed directly before the chosen
+ string.
+
+ In other words, StringParenWrapper creates "atom" strings. These
+ can then be split again by StringSplitter, if necessary.
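+
+ Example:
+ An illustrative sketch: a line like...
+ ```
+ return "some string that is much too long to fit on the current line"
+ ```
+ ...is rewrapped, roughly, as:
+ ```
+ return (
+ "some string that is much too long to fit on the current line"
+ )
+ ```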
+
+ Collaborations:
+ In the event that a string line split by StringParenWrapper is
+ changed such that it no longer needs to be given its own line,
+ StringParenWrapper relies on StringParenStripper to clean up the
+ parentheses it created.
+ """
+
+ def do_splitter_match(self, line: Line) -> TMatchResult:
+ LL = line.leaves
+
+ if line.leaves[-1].type in OPENING_BRACKETS:
+ return TErr(
+ "Cannot wrap parens around a line that ends in an opening bracket."
+ )
+
+ string_idx = (
+ self._return_match(LL)
+ or self._else_match(LL)
+ or self._assert_match(LL)
+ or self._assign_match(LL)
+ or self._dict_match(LL)
+ )
+
+ if string_idx is not None:
+ string_value = line.leaves[string_idx].value
+ # If the string has no spaces...
+ if " " not in string_value:
+ # And will still violate the line length limit when split...
+ max_string_length = self.line_length - ((line.depth + 1) * 4)
+ if len(string_value) > max_string_length:
+ # And has no associated custom splits...
+ if not self.has_custom_splits(string_value):
+ # Then we should NOT put this string on its own line.
+ return TErr(
+ "We do not wrap long strings in parentheses when the"
+ " resultant line would still be over the specified line"
+ " length and can't be split further by StringSplitter."
+ )
+ return Ok(string_idx)
+
+ return TErr("This line does not contain any non-atomic strings.")
+
+ @staticmethod
+ def _return_match(LL: List[Leaf]) -> Optional[int]:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the return/yield statement
+ requirements listed in the 'Requirements' section of this class's
+ docstring.
+ OR
+ None, otherwise.
+ """
+ # If this line is part of a return/yield statement and the first leaf
+ # contains either the "return" or "yield" keywords...
+ if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
+ 0
+ ].value in ["return", "yield"]:
+ is_valid_index = is_valid_index_factory(LL)
+
+ idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+ # The next visible leaf MUST contain a string...
+ if is_valid_index(idx) and LL[idx].type == token.STRING:
+ return idx
+
+ return None
+
+ @staticmethod
+ def _else_match(LL: List[Leaf]) -> Optional[int]:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the ternary expression
+ requirements listed in the 'Requirements' section of this class's
+ docstring.
+ OR
+ None, otherwise.
+ """
+ # If this line is part of a ternary expression and the first leaf
+ # contains the "else" keyword...
+ if (
+ parent_type(LL[0]) == syms.test
+ and LL[0].type == token.NAME
+ and LL[0].value == "else"
+ ):
+ is_valid_index = is_valid_index_factory(LL)
+
+ idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+ # The next visible leaf MUST contain a string...
+ if is_valid_index(idx) and LL[idx].type == token.STRING:
+ return idx
+
+ return None
+
+ @staticmethod
+ def _assert_match(LL: List[Leaf]) -> Optional[int]:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the assert statement
+ requirements listed in the 'Requirements' section of this class's
+ docstring.
+ OR
+ None, otherwise.
+ """
+ # If this line is part of an assert statement and the first leaf
+ # contains the "assert" keyword...
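+ # (e.g. the message string in `assert cond, "long failure message"`)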
+ if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
+ is_valid_index = is_valid_index_factory(LL)
+
+ for (i, leaf) in enumerate(LL):
+ # We MUST find a comma...
+ if leaf.type == token.COMMA:
+ idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+ # That comma MUST be followed by a string...
+ if is_valid_index(idx) and LL[idx].type == token.STRING:
+ string_idx = idx
+
+ # Skip the string trailer, if one exists.
+ string_parser = StringParser()
+ idx = string_parser.parse(LL, string_idx)
+
+ # But no more leaves are allowed...
+ if not is_valid_index(idx):
+ return string_idx
+
+ return None
+
+ @staticmethod
+ def _assign_match(LL: List[Leaf]) -> Optional[int]:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the assignment statement
+ requirements listed in the 'Requirements' section of this class's
+ docstring.
+ OR
+ None, otherwise.
+ """
+ # If this line is part of an expression statement or is a function
+ # argument AND the first leaf contains a variable name...
+ if (
+ parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
+ and LL[0].type == token.NAME
+ ):
+ is_valid_index = is_valid_index_factory(LL)
+
+ for (i, leaf) in enumerate(LL):
+ # We MUST find either an '=' or '+=' symbol...
+ if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
+ idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+ # That symbol MUST be followed by a string...
+ if is_valid_index(idx) and LL[idx].type == token.STRING:
+ string_idx = idx
+
+ # Skip the string trailer, if one exists.
+ string_parser = StringParser()
+ idx = string_parser.parse(LL, string_idx)
+
+ # The next leaf MAY be a comma iff this line is part
+ # of a function argument...
+ if (
+ parent_type(LL[0]) == syms.argument
+ and is_valid_index(idx)
+ and LL[idx].type == token.COMMA
+ ):
+ idx += 1
+
+ # But no more leaves are allowed...
+ if not is_valid_index(idx):
+ return string_idx
+
+ return None
+
+ @staticmethod
+ def _dict_match(LL: List[Leaf]) -> Optional[int]:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the dictionary key assignment
+ statement requirements listed in the 'Requirements' section of this
+ class's docstring.
+ OR
+ None, otherwise.
+ """
+ # If this line is part of a dictionary key assignment...
+ if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
+ is_valid_index = is_valid_index_factory(LL)
+
+ for (i, leaf) in enumerate(LL):
+ # We MUST find a colon...
+ if leaf.type == token.COLON:
+ idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+ # That colon MUST be followed by a string...
+ if is_valid_index(idx) and LL[idx].type == token.STRING:
+ string_idx = idx
+
+ # Skip the string trailer, if one exists.
+ string_parser = StringParser()
+ idx = string_parser.parse(LL, string_idx)
+
+ # That string MAY be followed by a comma...
+ if is_valid_index(idx) and LL[idx].type == token.COMMA:
+ idx += 1
+
+ # But no more leaves are allowed...
+ if not is_valid_index(idx): + return string_idx + + return None + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + comma_idx = -1 + ends_with_comma = False + if LL[comma_idx].type == token.COMMA: + ends_with_comma = True + + leaves_to_steal_comments_from = [LL[string_idx]] + if ends_with_comma: + leaves_to_steal_comments_from.append(LL[comma_idx]) + + # --- First Line + first_line = line.clone() + left_leaves = LL[:string_idx] + + # We have to remember to account for (possibly invisible) LPAR and RPAR + # leaves that already wrapped the target string. If these leaves do + # exist, we will replace them with our own LPAR and RPAR leaves. + old_parens_exist = False + if left_leaves and left_leaves[-1].type == token.LPAR: + old_parens_exist = True + leaves_to_steal_comments_from.append(left_leaves[-1]) + left_leaves.pop() + + append_leaves(first_line, line, left_leaves) + + lpar_leaf = Leaf(token.LPAR, "(") + if old_parens_exist: + replace_child(LL[string_idx - 1], lpar_leaf) + else: + insert_str_child(lpar_leaf) + first_line.append(lpar_leaf) + + # We throw inline comments that were originally to the right of the + # target string to the top line. They will now be shown to the right of + # the LPAR. + for leaf in leaves_to_steal_comments_from: + for comment_leaf in line.comments_after(leaf): + first_line.append(comment_leaf, preformatted=True) + + yield Ok(first_line) + + # --- Middle (String) Line + # We only need to yield one (possibly too long) string line, since the + # `StringSplitter` will break it down further if necessary. + string_value = LL[string_idx].value + string_line = Line( + mode=line.mode, + depth=line.depth + 1, + inside_brackets=True, + should_split_rhs=line.should_split_rhs, + magic_trailing_comma=line.magic_trailing_comma, + ) + string_leaf = Leaf(token.STRING, string_value) + insert_str_child(string_leaf) + string_line.append(string_leaf) + + old_rpar_leaf = None + if is_valid_index(string_idx + 1): + right_leaves = LL[string_idx + 1 :] + if ends_with_comma: + right_leaves.pop() + + if old_parens_exist: + assert right_leaves and right_leaves[-1].type == token.RPAR, ( + "Apparently, old parentheses do NOT exist?!" + f" (left_leaves={left_leaves}, right_leaves={right_leaves})" + ) + old_rpar_leaf = right_leaves.pop() + + append_leaves(string_line, line, right_leaves) + + yield Ok(string_line) + + # --- Last Line + last_line = line.clone() + last_line.bracket_tracker = first_line.bracket_tracker + + new_rpar_leaf = Leaf(token.RPAR, ")") + if old_rpar_leaf is not None: + replace_child(old_rpar_leaf, new_rpar_leaf) + else: + insert_str_child(new_rpar_leaf) + last_line.append(new_rpar_leaf) + + # If the target string ended with a comma, we place this comma to the + # right of the RPAR on the last line. + if ends_with_comma: + comma_leaf = Leaf(token.COMMA, ",") + replace_child(LL[comma_idx], comma_leaf) + last_line.append(comma_leaf) + + yield Ok(last_line) + + +class StringParser: + """ + A state machine that aids in parsing a string's "trailer", which can be + either non-existent, an old-style formatting sequence (e.g. `% varX` or `% + (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, + varY)`). + + NOTE: A new StringParser object MUST be instantiated for each string + trailer we need to parse. 
+
+ Examples:
+ We shall assume that `line` equals the `Line` object that corresponds
+ to the following line of Python code:
+ ```
+ x = "Some {}.".format("String") + some_other_string
+ ```
+
+ Furthermore, we will assume that `string_idx` is some index such that:
+ ```
+ assert line.leaves[string_idx].value == "Some {}."
+ ```
+
+ The following code snippet then holds:
+ ```
+ string_parser = StringParser()
+ idx = string_parser.parse(line.leaves, string_idx)
+ assert line.leaves[idx].type == token.PLUS
+ ```
+ """
+
+ DEFAULT_TOKEN: Final = 20210605
+
+ # String Parser States
+ START: Final = 1
+ DOT: Final = 2
+ NAME: Final = 3
+ PERCENT: Final = 4
+ SINGLE_FMT_ARG: Final = 5
+ LPAR: Final = 6
+ RPAR: Final = 7
+ DONE: Final = 8
+
+ # Lookup Table for Next State
+ _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
+ # A string trailer may start with '.' OR '%'.
+ (START, token.DOT): DOT,
+ (START, token.PERCENT): PERCENT,
+ (START, DEFAULT_TOKEN): DONE,
+ # A '.' MUST be followed by an attribute or method name.
+ (DOT, token.NAME): NAME,
+ # A method name MUST be followed by an '(', whereas an attribute name
+ # is the last symbol in the string trailer.
+ (NAME, token.LPAR): LPAR,
+ (NAME, DEFAULT_TOKEN): DONE,
+ # A '%' symbol can be followed by an '(' or a single argument (e.g. a
+ # string or variable name).
+ (PERCENT, token.LPAR): LPAR,
+ (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
+ # If a '%' symbol is followed by a single argument, that argument is
+ # the last leaf in the string trailer.
+ (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
+ # If present, a ')' symbol is the last symbol in a string trailer.
+ # (NOTE: LPARS and nested RPARS are not included in this lookup table,
+ # since they are treated as a special case by the parsing logic in this
+ # class's implementation.)
+ (RPAR, DEFAULT_TOKEN): DONE,
+ }
+
+ def __init__(self) -> None:
+ self._state = self.START
+ self._unmatched_lpars = 0
+
+ def parse(self, leaves: List[Leaf], string_idx: int) -> int:
+ """
+ Pre-conditions:
+ * @leaves[@string_idx].type == token.STRING
+
+ Returns:
+ The index directly after the last leaf which is part of the string
+ trailer, if a "trailer" exists.
+ OR
+ @string_idx + 1, if no string "trailer" exists.
+ """
+ assert leaves[string_idx].type == token.STRING
+
+ idx = string_idx + 1
+ while idx < len(leaves) and self._next_state(leaves[idx]):
+ idx += 1
+ return idx
+
+ def _next_state(self, leaf: Leaf) -> bool:
+ """
+ Pre-conditions:
+ * On the first call to this function, @leaf MUST be the leaf that
+ was directly after the string leaf in question (e.g. if our target
+ string is `line.leaves[i]` then the first call to this method must
+ be `line.leaves[i + 1]`).
+ * On the next call to this function, the leaf parameter passed in
+ MUST be the leaf directly following @leaf.
+
+ Returns:
+ True iff @leaf is part of the string's trailer.
+ """
+ # We ignore empty LPAR or RPAR leaves.
+ if is_empty_par(leaf):
+ return True
+
+ next_token = leaf.type
+ if next_token == token.LPAR:
+ self._unmatched_lpars += 1
+
+ current_state = self._state
+
+ # The LPAR parser state is a special case. We will return True until we
+ # find the matching RPAR token.
+ if current_state == self.LPAR:
+ if next_token == token.RPAR:
+ self._unmatched_lpars -= 1
+ if self._unmatched_lpars == 0:
+ self._state = self.RPAR
+ # Otherwise, we use a lookup table to determine the next state.
+ else:
+ # If the lookup table matches the current state to the next
+ # token, we use the lookup table.
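+ # (e.g. for a trailer like `.format(x)` we step START -> DOT -> NAME
+ # -> LPAR, and then to RPAR once the parentheses balance.)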
+ if (current_state, next_token) in self._goto:
+ self._state = self._goto[current_state, next_token]
+ else:
+ # Otherwise, we check if the current state was assigned a
+ # default.
+ if (current_state, self.DEFAULT_TOKEN) in self._goto:
+ self._state = self._goto[current_state, self.DEFAULT_TOKEN]
+ # If no default has been assigned, then this parser has a logic
+ # error.
+ else:
+ raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
+
+ if self._state == self.DONE:
+ return False
+
+ return True
+
+
+def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
+ """
+ Factory for a convenience function that is used to orphan @string_leaf
+ and then insert multiple new leaves into the same part of the node
+ structure that @string_leaf had originally occupied.
+
+ Examples:
+ Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
+ string_leaf.parent`. Assume the node `N` has the following
+ original structure:
+
+ Node(
+ expr_stmt, [
+ Leaf(NAME, 'x'),
+ Leaf(EQUAL, '='),
+ Leaf(STRING, '"foo"'),
+ ]
+ )
+
+ We then run the code snippet shown below.
+ ```
+ insert_str_child = insert_str_child_factory(string_leaf)
+
+ lpar = Leaf(token.LPAR, '(')
+ insert_str_child(lpar)
+
+ bar = Leaf(token.STRING, '"bar"')
+ insert_str_child(bar)
+
+ rpar = Leaf(token.RPAR, ')')
+ insert_str_child(rpar)
+ ```
+
+ After which point, it follows that `string_leaf.parent is None` and
+ the node `N` now has the following structure:
+
+ Node(
+ expr_stmt, [
+ Leaf(NAME, 'x'),
+ Leaf(EQUAL, '='),
+ Leaf(LPAR, '('),
+ Leaf(STRING, '"bar"'),
+ Leaf(RPAR, ')'),
+ ]
+ )
+ """
+ string_parent = string_leaf.parent
+ string_child_idx = string_leaf.remove()
+
+ def insert_str_child(child: LN) -> None:
+ nonlocal string_child_idx
+
+ assert string_parent is not None
+ assert string_child_idx is not None
+
+ string_parent.insert_child(string_child_idx, child)
+ string_child_idx += 1
+
+ return insert_str_child
+
+
+def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
+ """
+ Examples:
+ ```
+ my_list = [1, 2, 3]
+
+ is_valid_index = is_valid_index_factory(my_list)
+
+ assert is_valid_index(0)
+ assert is_valid_index(2)
+
+ assert not is_valid_index(3)
+ assert not is_valid_index(-1)
+ ```
+ """
+
+ def is_valid_index(idx: int) -> bool:
+ """
+ Returns:
+ True iff @idx is non-negative AND seq[@idx] does NOT raise an
+ IndexError.
+ """ + return 0 <= idx < len(seq) + + return is_valid_index diff --git a/myenv/lib/python3.9/site-packages/black_primer/__init__.py b/myenv/lib/python3.9/site-packages/black_primer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/black_primer/cli.py b/myenv/lib/python3.9/site-packages/black_primer/cli.py new file mode 100644 index 0000000..8524b59 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black_primer/cli.py @@ -0,0 +1,195 @@ +# coding=utf8 + +import asyncio +import json +import logging +import sys +from datetime import datetime +from pathlib import Path +from shutil import rmtree, which +from tempfile import gettempdir +from typing import Any, List, Optional, Union + +import click + +from black_primer import lib + +# If our environment has uvloop installed lets use it +try: + import uvloop + + uvloop.install() +except ImportError: + pass + + +DEFAULT_CONFIG = Path(__file__).parent / "primer.json" +_timestamp = datetime.now().strftime("%Y%m%d%H%M%S") +DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}" +LOG = logging.getLogger(__name__) + + +def _handle_debug( + ctx: Optional[click.core.Context], + param: Optional[Union[click.core.Option, click.core.Parameter]], + debug: Union[bool, int, str], +) -> Union[bool, int, str]: + """Turn on debugging if asked otherwise INFO default""" + log_level = logging.DEBUG if debug else logging.INFO + logging.basicConfig( + format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)", + level=log_level, + ) + return debug + + +def load_projects(config_path: Path) -> List[str]: + with open(config_path) as config: + return sorted(json.load(config)["projects"].keys()) + + +# Unfortunately does import time file IO - but appears to be the only +# way to get `black-primer --help` to show projects list +DEFAULT_PROJECTS = load_projects(DEFAULT_CONFIG) + + +def _projects_callback( + ctx: click.core.Context, + param: Optional[Union[click.core.Option, click.core.Parameter]], + projects: str, +) -> List[str]: + requested_projects = set(projects.split(",")) + available_projects = set( + DEFAULT_PROJECTS + if str(DEFAULT_CONFIG) == ctx.params["config"] + else load_projects(ctx.params["config"]) + ) + + unavailable = requested_projects - available_projects + if unavailable: + LOG.error(f"Projects not found: {unavailable}. Available: {available_projects}") + + return sorted(requested_projects & available_projects) + + +async def async_main( + config: str, + debug: bool, + keep: bool, + long_checkouts: bool, + no_diff: bool, + projects: List[str], + rebase: bool, + workdir: str, + workers: int, +) -> int: + work_path = Path(workdir) + if not work_path.exists(): + LOG.debug(f"Creating {work_path}") + work_path.mkdir() + + if not which("black"): + LOG.error("Can not find 'black' executable in PATH. 
No point in running") + return -1 + + try: + ret_val = await lib.process_queue( + config, + work_path, + workers, + projects, + keep, + long_checkouts, + rebase, + no_diff, + ) + return int(ret_val) + + finally: + if not keep and work_path.exists(): + LOG.debug(f"Removing {work_path}") + rmtree(work_path, onerror=lib.handle_PermissionError) + + +@click.command(context_settings={"help_option_names": ["-h", "--help"]}) +@click.option( + "-c", + "--config", + default=str(DEFAULT_CONFIG), + type=click.Path(exists=True), + show_default=True, + help="JSON config file path", + # Eager - because config path is used by other callback options + is_eager=True, +) +@click.option( + "--debug", + is_flag=True, + callback=_handle_debug, + show_default=True, + help="Turn on debug logging", +) +@click.option( + "-k", + "--keep", + is_flag=True, + show_default=True, + help="Keep workdir + repos post run", +) +@click.option( + "-L", + "--long-checkouts", + is_flag=True, + show_default=True, + help="Pull big projects to test", +) +@click.option( + "--no-diff", + is_flag=True, + show_default=True, + help="Disable showing source file changes in black output", +) +@click.option( + "--projects", + default=",".join(DEFAULT_PROJECTS), + callback=_projects_callback, + show_default=True, + help="Comma separated list of projects to run", +) +@click.option( + "-R", + "--rebase", + is_flag=True, + show_default=True, + help="Rebase project if already checked out", +) +@click.option( + "-w", + "--workdir", + default=str(DEFAULT_WORKDIR), + type=click.Path(exists=False), + show_default=True, + help="Directory path for repo checkouts", +) +@click.option( + "-W", + "--workers", + default=2, + type=int, + show_default=True, + help="Number of parallel worker coroutines", +) +@click.pass_context +def main(ctx: click.core.Context, **kwargs: Any) -> None: + """primer - prime projects for blackening... 
🏴""" + LOG.debug(f"Starting {sys.argv[0]}") + # TODO: Change to asyncio.run when Black >= 3.7 only + loop = asyncio.get_event_loop() + try: + ctx.exit(loop.run_until_complete(async_main(**kwargs))) + finally: + loop.close() + + +if __name__ == "__main__": # pragma: nocover + main() diff --git a/myenv/lib/python3.9/site-packages/black_primer/lib.py b/myenv/lib/python3.9/site-packages/black_primer/lib.py new file mode 100644 index 0000000..13724f4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black_primer/lib.py @@ -0,0 +1,423 @@ +import asyncio +import errno +import json +import logging +import os +import stat +import sys +from functools import partial +from pathlib import Path +from platform import system +from shutil import rmtree, which +from subprocess import CalledProcessError +from sys import version_info +from tempfile import TemporaryDirectory +from typing import ( + Any, + Callable, + Dict, + List, + NamedTuple, + Optional, + Sequence, + Tuple, + Union, +) +from urllib.parse import urlparse + +import click + + +TEN_MINUTES_SECONDS = 600 +WINDOWS = system() == "Windows" +BLACK_BINARY = "black.exe" if WINDOWS else "black" +GIT_BINARY = "git.exe" if WINDOWS else "git" +LOG = logging.getLogger(__name__) + + +# Windows needs a ProactorEventLoop if you want to exec subprocesses +# Starting with 3.8 this is the default - can remove when Black >= 3.8 +# mypy only respects sys.platform if directly in the evaluation +# https://mypy.readthedocs.io/en/latest/common_issues.html#python-version-and-system-platform-checks # noqa: B950 +if sys.platform == "win32": + asyncio.set_event_loop(asyncio.ProactorEventLoop()) + + +class Results(NamedTuple): + stats: Dict[str, int] = {} + failed_projects: Dict[str, CalledProcessError] = {} + + +async def _gen_check_output( + cmd: Sequence[str], + timeout: float = TEN_MINUTES_SECONDS, + env: Optional[Dict[str, str]] = None, + cwd: Optional[Path] = None, + stdin: Optional[bytes] = None, +) -> Tuple[bytes, bytes]: + process = await asyncio.create_subprocess_exec( + *cmd, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + env=env, + cwd=cwd, + ) + try: + (stdout, stderr) = await asyncio.wait_for(process.communicate(stdin), timeout) + except asyncio.TimeoutError: + process.kill() + await process.wait() + raise + + # A non-optional timeout was supplied to asyncio.wait_for, guaranteeing + # a timeout or completed process. A terminated Python process will have a + # non-empty returncode value. 
+ assert process.returncode is not None + + if process.returncode != 0: + cmd_str = " ".join(cmd) + raise CalledProcessError( + process.returncode, cmd_str, output=stdout, stderr=stderr + ) + + return (stdout, stderr) + + +def analyze_results(project_count: int, results: Results) -> int: + failed_pct = round(((results.stats["failed"] / project_count) * 100), 2) + success_pct = round(((results.stats["success"] / project_count) * 100), 2) + + if results.failed_projects: + click.secho("\nFailed projects:\n", bold=True) + + for project_name, project_cpe in results.failed_projects.items(): + print(f"## {project_name}:") + print(f" - Returned {project_cpe.returncode}") + if project_cpe.stderr: + print(f" - stderr:\n{project_cpe.stderr.decode('utf8')}") + if project_cpe.stdout: + print(f" - stdout:\n{project_cpe.stdout.decode('utf8')}") + print("") + + click.secho("-- primer results 📊 --\n", bold=True) + click.secho( + f"{results.stats['success']} / {project_count} succeeded ({success_pct}%) ✅", + bold=True, + fg="green", + ) + click.secho( + f"{results.stats['failed']} / {project_count} FAILED ({failed_pct}%) 💩", + bold=bool(results.stats["failed"]), + fg="red", + ) + s = "" if results.stats["disabled"] == 1 else "s" + click.echo(f" - {results.stats['disabled']} project{s} disabled by config") + s = "" if results.stats["wrong_py_ver"] == 1 else "s" + click.echo( + f" - {results.stats['wrong_py_ver']} project{s} skipped due to Python version" + ) + click.echo( + f" - {results.stats['skipped_long_checkout']} skipped due to long checkout" + ) + + if results.failed_projects: + failed = ", ".join(results.failed_projects.keys()) + click.secho(f"\nFailed projects: {failed}\n", bold=True) + + return results.stats["failed"] + + +def _flatten_cli_args(cli_args: List[Union[Sequence[str], str]]) -> List[str]: + """Allow a user to put long arguments into a list of strs + to make the JSON human readable""" + flat_args = [] + for arg in cli_args: + if isinstance(arg, str): + flat_args.append(arg) + continue + + args_as_str = "".join(arg) + flat_args.append(args_as_str) + + return flat_args + + +async def black_run( + project_name: str, + repo_path: Optional[Path], + project_config: Dict[str, Any], + results: Results, + no_diff: bool = False, +) -> None: + """Run Black and record failures""" + if not repo_path: + results.stats["failed"] += 1 + results.failed_projects[project_name] = CalledProcessError( + 69, [], f"{project_name} has no repo_path: {repo_path}".encode(), b"" + ) + return + + stdin_test = project_name.upper() == "STDIN" + cmd = [str(which(BLACK_BINARY))] + if "cli_arguments" in project_config and project_config["cli_arguments"]: + cmd.extend(_flatten_cli_args(project_config["cli_arguments"])) + cmd.append("--check") + if not no_diff: + cmd.append("--diff") + + # Workout if we should read in a python file or search from cwd + stdin = None + if stdin_test: + cmd.append("-") + stdin = repo_path.read_bytes() + elif "base_path" in project_config: + cmd.append(project_config["base_path"]) + else: + cmd.append(".") + + timeout = ( + project_config["timeout_seconds"] + if "timeout_seconds" in project_config + else TEN_MINUTES_SECONDS + ) + with TemporaryDirectory() as tmp_path: + # Prevent reading top-level user configs by manipulating environment variables + env = { + **os.environ, + "XDG_CONFIG_HOME": tmp_path, # Unix-like + "USERPROFILE": tmp_path, # Windows (changes `Path.home()` output) + } + + cwd_path = repo_path.parent if stdin_test else repo_path + try: + LOG.debug(f"Running black for 
{project_name}: {' '.join(cmd)}") + _stdout, _stderr = await _gen_check_output( + cmd, cwd=cwd_path, env=env, stdin=stdin, timeout=timeout + ) + except asyncio.TimeoutError: + results.stats["failed"] += 1 + LOG.error(f"Running black for {repo_path} timed out ({cmd})") + except CalledProcessError as cpe: + # TODO: Tune for smarter for higher signal + # If any other return value than 1 we raise - can disable project in config + if cpe.returncode == 1: + if not project_config["expect_formatting_changes"]: + results.stats["failed"] += 1 + results.failed_projects[repo_path.name] = cpe + else: + results.stats["success"] += 1 + return + elif cpe.returncode > 1: + results.stats["failed"] += 1 + results.failed_projects[repo_path.name] = cpe + return + + LOG.error(f"Unknown error with {repo_path}") + raise + + # If we get here and expect formatting changes something is up + if project_config["expect_formatting_changes"]: + results.stats["failed"] += 1 + results.failed_projects[repo_path.name] = CalledProcessError( + 0, cmd, b"Expected formatting changes but didn't get any!", b"" + ) + return + + results.stats["success"] += 1 + + +async def git_checkout_or_rebase( + work_path: Path, + project_config: Dict[str, Any], + rebase: bool = False, + *, + depth: int = 1, +) -> Optional[Path]: + """git Clone project or rebase""" + git_bin = str(which(GIT_BINARY)) + if not git_bin: + LOG.error("No git binary found") + return None + + repo_url_parts = urlparse(project_config["git_clone_url"]) + path_parts = repo_url_parts.path[1:].split("/", maxsplit=1) + + repo_path: Path = work_path / path_parts[1].replace(".git", "") + cmd = [git_bin, "clone", "--depth", str(depth), project_config["git_clone_url"]] + cwd = work_path + if repo_path.exists() and rebase: + cmd = [git_bin, "pull", "--rebase"] + cwd = repo_path + elif repo_path.exists(): + return repo_path + + try: + _stdout, _stderr = await _gen_check_output(cmd, cwd=cwd) + except (asyncio.TimeoutError, CalledProcessError) as e: + LOG.error(f"Unable to git clone / pull {project_config['git_clone_url']}: {e}") + return None + + return repo_path + + +def handle_PermissionError( + func: Callable[..., None], path: Path, exc: Tuple[Any, Any, Any] +) -> None: + """ + Handle PermissionError during shutil.rmtree. + + This checks if the erroring function is either 'os.rmdir' or 'os.unlink', and that + the error was EACCES (i.e. Permission denied). If true, the path is set writable, + readable, and executable by everyone. Finally, it tries the error causing delete + operation again. + + If the check is false, then the original error will be reraised as this function + can't handle it. + """ + excvalue = exc[1] + LOG.debug(f"Handling {excvalue} from {func.__name__}... ") + if func in (os.rmdir, os.unlink) and excvalue.errno == errno.EACCES: + LOG.debug(f"Setting {path} writable, readable, and executable by everyone... ") + os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # chmod 0777 + func(path) # Try the error causing delete operation again + else: + raise + + +async def load_projects_queue( + config_path: Path, + projects_to_run: List[str], +) -> Tuple[Dict[str, Any], asyncio.Queue]: + """Load project config and fill queue with all the project names""" + with config_path.open("r") as cfp: + config = json.load(cfp) + + # TODO: Offer more options here + # e.g. Run on X random packages etc. 
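+ # (One possible shape for that TODO, sketched but not implemented here: a
+ # hypothetical --random N option whose handler feeds
+ # random.sample(list(config["projects"]), N) into the queue below.)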
+ queue: asyncio.Queue = asyncio.Queue(maxsize=len(projects_to_run)) + for project in projects_to_run: + await queue.put(project) + + return config, queue + + +async def project_runner( + idx: int, + config: Dict[str, Any], + queue: asyncio.Queue, + work_path: Path, + results: Results, + long_checkouts: bool = False, + rebase: bool = False, + keep: bool = False, + no_diff: bool = False, +) -> None: + """Check out project and run Black on it + record result""" + loop = asyncio.get_event_loop() + py_version = f"{version_info[0]}.{version_info[1]}" + while True: + try: + project_name = queue.get_nowait() + except asyncio.QueueEmpty: + LOG.debug(f"project_runner {idx} exiting") + return + LOG.debug(f"worker {idx} working on {project_name}") + + project_config = config["projects"][project_name] + + # Check if disabled by config + if "disabled" in project_config and project_config["disabled"]: + results.stats["disabled"] += 1 + LOG.info(f"Skipping {project_name} as it's disabled via config") + continue + + # Check if we should run on this version of Python + if ( + "all" not in project_config["py_versions"] + and py_version not in project_config["py_versions"] + ): + results.stats["wrong_py_ver"] += 1 + LOG.debug(f"Skipping {project_name} as it's not enabled for {py_version}") + continue + + # Check if we're doing big projects / long checkouts + if not long_checkouts and project_config["long_checkout"]: + results.stats["skipped_long_checkout"] += 1 + LOG.debug(f"Skipping {project_name} as it's configured as a long checkout") + continue + + repo_path: Optional[Path] = Path(__file__) + stdin_project = project_name.upper() == "STDIN" + if not stdin_project: + repo_path = await git_checkout_or_rebase(work_path, project_config, rebase) + if not repo_path: + continue + await black_run(project_name, repo_path, project_config, results, no_diff) + + if not keep and not stdin_project: + LOG.debug(f"Removing {repo_path}") + rmtree_partial = partial( + rmtree, path=repo_path, onerror=handle_PermissionError + ) + await loop.run_in_executor(None, rmtree_partial) + + LOG.info(f"Finished {project_name}") + + +async def process_queue( + config_file: str, + work_path: Path, + workers: int, + projects_to_run: List[str], + keep: bool = False, + long_checkouts: bool = False, + rebase: bool = False, + no_diff: bool = False, +) -> int: + """ + Process the queue with X workers and evaluate results + - Success is guaged via the config "expect_formatting_changes" + + Integer return equals the number of failed projects + """ + results = Results() + results.stats["disabled"] = 0 + results.stats["failed"] = 0 + results.stats["skipped_long_checkout"] = 0 + results.stats["success"] = 0 + results.stats["wrong_py_ver"] = 0 + + config, queue = await load_projects_queue(Path(config_file), projects_to_run) + project_count = queue.qsize() + s = "" if project_count == 1 else "s" + LOG.info(f"{project_count} project{s} to run Black over") + if project_count < 1: + return -1 + + s = "" if workers == 1 else "s" + LOG.debug(f"Using {workers} parallel worker{s} to run Black") + # Wait until we finish running all the projects before analyzing + await asyncio.gather( + *[ + project_runner( + i, + config, + queue, + work_path, + results, + long_checkouts, + rebase, + keep, + no_diff, + ) + for i in range(workers) + ] + ) + + LOG.info("Analyzing results") + return analyze_results(project_count, results) + + +if __name__ == "__main__": # pragma: nocover + raise NotImplementedError("lib is a library, funnily enough.") diff --git 
a/myenv/lib/python3.9/site-packages/black_primer/primer.json b/myenv/lib/python3.9/site-packages/black_primer/primer.json new file mode 100644 index 0000000..8fe61e8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/black_primer/primer.json @@ -0,0 +1,188 @@ +{ + "configuration_format_version": 20210815, + "projects": { + "STDIN": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": false, + "git_clone_url": "", + "long_checkout": false, + "py_versions": ["all"] + }, + "aioexabgp": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": false, + "git_clone_url": "https://github.com/cooperlees/aioexabgp.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "attrs": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/python-attrs/attrs.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "bandersnatch": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/pypa/bandersnatch.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "channels": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/django/channels.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "cpython": { + "disabled": true, + "disabled_reason": "To big / slow for GitHub Actions but handy to keep config to use manually or in some other CI in the future", + "base_path": "Lib", + "cli_arguments": [ + "--experimental-string-processing", + "--extend-exclude", + [ + "Lib/lib2to3/tests/data/different_encoding.py", + "|Lib/lib2to3/tests/data/false_encoding.py", + "|Lib/lib2to3/tests/data/py2_test_grammar.py", + "|Lib/test/bad_coding.py", + "|Lib/test/bad_coding2.py", + "|Lib/test/badsyntax_3131.py", + "|Lib/test/badsyntax_pep3120.py", + "|Lib/test/test_base64.py", + "|Lib/test/test_exceptions.py", + "|Lib/test/test_grammar.py", + "|Lib/test/test_named_expressions.py", + "|Lib/test/test_patma.py", + "|Lib/test/test_tokenize.py", + "|Lib/test/test_xml_etree.py", + "|Lib/traceback.py" + ] + ], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/python/cpython.git", + "long_checkout": false, + "py_versions": ["3.9", "3.10"], + "timeout_seconds": 900 + }, + "django": { + "cli_arguments": [ + "--experimental-string-processing", + "--skip-string-normalization", + "--extend-exclude", + "/((docs|scripts)/|django/forms/models.py|tests/gis_tests/test_spatialrefsys.py|tests/test_runner_apps/tagged/tests_syntax_error.py)" + ], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/django/django.git", + "long_checkout": false, + "py_versions": ["3.8", "3.9", "3.10"] + }, + "flake8-bugbear": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": false, + "git_clone_url": "https://github.com/PyCQA/flake8-bugbear.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "hypothesis": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/HypothesisWorks/hypothesis.git", + "long_checkout": false, + "py_versions": ["3.8", "3.9", "3.10"] + }, + "pandas": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/pandas-dev/pandas.git", + "long_checkout": 
false, + "py_versions": ["all"] + }, + "pillow": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/python-pillow/Pillow.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "poetry": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/python-poetry/poetry.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "pyanalyze": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/quora/pyanalyze.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "pyramid": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/Pylons/pyramid.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "ptr": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": false, + "git_clone_url": "https://github.com/facebookincubator/ptr.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "pytest": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/pytest-dev/pytest.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "scikit-lego": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/koaning/scikit-lego", + "long_checkout": false, + "py_versions": ["all"] + }, + "sqlalchemy": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/sqlalchemy/sqlalchemy.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "tox": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/tox-dev/tox.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "typeshed": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/python/typeshed.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "virtualenv": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/pypa/virtualenv.git", + "long_checkout": false, + "py_versions": ["all"] + }, + "warehouse": { + "cli_arguments": ["--experimental-string-processing"], + "expect_formatting_changes": true, + "git_clone_url": "https://github.com/pypa/warehouse.git", + "long_checkout": false, + "py_versions": ["all"] + } + } +} diff --git a/myenv/lib/python3.9/site-packages/blackd/__init__.py b/myenv/lib/python3.9/site-packages/blackd/__init__.py new file mode 100644 index 0000000..cc96640 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blackd/__init__.py @@ -0,0 +1,201 @@ +import asyncio +import logging +from concurrent.futures import Executor, ProcessPoolExecutor +from datetime import datetime +from functools import partial +from multiprocessing import freeze_support +from typing import Set, Tuple + +try: + from aiohttp import web + from .middlewares import cors +except ImportError as ie: + raise ImportError( + f"aiohttp dependency is not installed: {ie}. 
" + + "Please re-install black with the '[d]' extra install " + + "to obtain aiohttp_cors: `pip install black[d]`" + ) from None + +import black +from black.concurrency import maybe_install_uvloop +import click + +from _black_version import version as __version__ + +# This is used internally by tests to shut down the server prematurely +_stop_signal = asyncio.Event() + +# Request headers +PROTOCOL_VERSION_HEADER = "X-Protocol-Version" +LINE_LENGTH_HEADER = "X-Line-Length" +PYTHON_VARIANT_HEADER = "X-Python-Variant" +SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization" +SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma" +FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe" +DIFF_HEADER = "X-Diff" + +BLACK_HEADERS = [ + PROTOCOL_VERSION_HEADER, + LINE_LENGTH_HEADER, + PYTHON_VARIANT_HEADER, + SKIP_STRING_NORMALIZATION_HEADER, + SKIP_MAGIC_TRAILING_COMMA, + FAST_OR_SAFE_HEADER, + DIFF_HEADER, +] + +# Response headers +BLACK_VERSION_HEADER = "X-Black-Version" + + +class InvalidVariantHeader(Exception): + pass + + +@click.command(context_settings={"help_option_names": ["-h", "--help"]}) +@click.option( + "--bind-host", type=str, help="Address to bind the server to.", default="localhost" +) +@click.option("--bind-port", type=int, help="Port to listen on", default=45484) +@click.version_option(version=black.__version__) +def main(bind_host: str, bind_port: int) -> None: + logging.basicConfig(level=logging.INFO) + app = make_app() + ver = black.__version__ + black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}") + web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None) + + +def make_app() -> web.Application: + app = web.Application( + middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))] + ) + executor = ProcessPoolExecutor() + app.add_routes([web.post("/", partial(handle, executor=executor))]) + return app + + +async def handle(request: web.Request, executor: Executor) -> web.Response: + headers = {BLACK_VERSION_HEADER: __version__} + try: + if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1": + return web.Response( + status=501, text="This server only supports protocol version 1" + ) + try: + line_length = int( + request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH) + ) + except ValueError: + return web.Response(status=400, text="Invalid line length header value") + + if PYTHON_VARIANT_HEADER in request.headers: + value = request.headers[PYTHON_VARIANT_HEADER] + try: + pyi, versions = parse_python_variant_header(value) + except InvalidVariantHeader as e: + return web.Response( + status=400, + text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}", + ) + else: + pyi = False + versions = set() + + skip_string_normalization = bool( + request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False) + ) + skip_magic_trailing_comma = bool( + request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False) + ) + fast = False + if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast": + fast = True + mode = black.FileMode( + target_versions=versions, + is_pyi=pyi, + line_length=line_length, + string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + ) + req_bytes = await request.content.read() + charset = request.charset if request.charset is not None else "utf8" + req_str = req_bytes.decode(charset) + then = datetime.utcnow() + + loop = asyncio.get_event_loop() + formatted_str = await loop.run_in_executor( + executor, partial(black.format_file_contents, req_str, 
fast=fast, mode=mode) + ) + + # Only output the diff in the HTTP response + only_diff = bool(request.headers.get(DIFF_HEADER, False)) + if only_diff: + now = datetime.utcnow() + src_name = f"In\t{then} +0000" + dst_name = f"Out\t{now} +0000" + loop = asyncio.get_event_loop() + formatted_str = await loop.run_in_executor( + executor, + partial(black.diff, req_str, formatted_str, src_name, dst_name), + ) + + return web.Response( + content_type=request.content_type, + charset=charset, + headers=headers, + text=formatted_str, + ) + except black.NothingChanged: + return web.Response(status=204, headers=headers) + except black.InvalidInput as e: + return web.Response(status=400, headers=headers, text=str(e)) + except Exception as e: + logging.exception("Exception during handling a request") + return web.Response(status=500, headers=headers, text=str(e)) + + +def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]: + if value == "pyi": + return True, set() + else: + versions = set() + for version in value.split(","): + if version.startswith("py"): + version = version[len("py") :] + if "." in version: + major_str, *rest = version.split(".") + else: + major_str = version[0] + rest = [version[1:]] if len(version) > 1 else [] + try: + major = int(major_str) + if major not in (2, 3): + raise InvalidVariantHeader("major version must be 2 or 3") + if len(rest) > 0: + minor = int(rest[0]) + if major == 2 and minor != 7: + raise InvalidVariantHeader( + "minor version must be 7 for Python 2" + ) + else: + # Default to lowest supported minor version. + minor = 7 if major == 2 else 3 + version_str = f"PY{major}{minor}" + if major == 3 and not hasattr(black.TargetVersion, version_str): + raise InvalidVariantHeader(f"3.{minor} is not supported") + versions.add(black.TargetVersion[version_str]) + except (KeyError, ValueError): + raise InvalidVariantHeader("expected e.g. 
'3.7', 'py3.5'") from None + return False, versions + + +def patched_main() -> None: + maybe_install_uvloop() + freeze_support() + black.patch_click() + main() + + +if __name__ == "__main__": + patched_main() diff --git a/myenv/lib/python3.9/site-packages/blackd/middlewares.py b/myenv/lib/python3.9/site-packages/blackd/middlewares.py new file mode 100644 index 0000000..97994ec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blackd/middlewares.py @@ -0,0 +1,34 @@ +from typing import Iterable, Awaitable, Callable +from aiohttp.web_response import StreamResponse +from aiohttp.web_request import Request +from aiohttp.web_middlewares import middleware + +Handler = Callable[[Request], Awaitable[StreamResponse]] +Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] + + +def cors(allow_headers: Iterable[str]) -> Middleware: + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + is_options = request.method == "OPTIONS" + is_preflight = is_options and "Access-Control-Request-Method" in request.headers + if is_preflight: + resp = StreamResponse() + else: + resp = await handler(request) + + origin = request.headers.get("Origin") + if not origin: + return resp + + resp.headers["Access-Control-Allow-Origin"] = "*" + resp.headers["Access-Control-Expose-Headers"] = "*" + if is_options: + resp.headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers) + resp.headers["Access-Control-Allow-Methods"] = ", ".join( + ("OPTIONS", "POST") + ) + + return resp + + return impl # type: ignore diff --git a/myenv/lib/python3.9/site-packages/blib2to3/Grammar.txt b/myenv/lib/python3.9/site-packages/blib2to3/Grammar.txt new file mode 100644 index 0000000..c3001e8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/Grammar.txt @@ -0,0 +1,251 @@ +# Grammar for 2to3. This grammar supports Python 2.x and 3.x. + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# file_input is a module or sequence of commands read from an input file; +# single_input is a single interactive statement; +# eval_input is the input for the eval() and input() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +file_input: (NEWLINE | stmt)* ENDMARKER +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' namedexpr_test NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) +async_funcdef: ASYNC funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' + +# The following definition for typedarglist is equivalent to this set of rules: +# +# arguments = argument (',' argument)* +# argument = tfpdef ['=' test] +# kwargs = '**' tname [','] +# args = '*' [tname] +# kwonly_kwargs = (',' argument)* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# typedarglist = arguments ',' '/' [',' [typedargslist_no_posonly]])|(typedargslist_no_posonly)" +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. 
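+# For reference, the expanded rule below has to accept full signatures such as
+#   def f(a, b, /, c, d=1, *args, e, f=2, **kwargs): ...
+# (an illustrative Python example, not part of this grammar; the '/' marker
+# for positional-only parameters is why ',' '/' appears explicitly below).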
+ +typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [ + ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])] + ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + +tname: NAME [':' test] +tfpdef: tname | '(' tfplist ')' +tfplist: tfpdef (',' tfpdef)* [','] + +# The following definition for varargslist is equivalent to this set of rules: +# +# arguments = argument (',' argument )* +# argument = vfpdef ['=' test] +# kwargs = '**' vname [','] +# args = '*' [vname] +# kwonly_kwargs = (',' argument )* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly) +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. + +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ + ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* + [',' ['**' vname [',']]] | '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + ]] | ((vfpdef ['=' test] ',')* + ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]| '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + +vname: NAME +vfpdef: vname | '(' vfplist ')' +vfplist: vfpdef (',' vfpdef)* [','] + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | exec_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +import_from: ('from' ('.'* dotted_name | '.'+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' 
NAME)* +global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt | match_stmt +async_stmt: ASYNC (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' asexpr_test (',' asexpr_test)* ':' suite + +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [(',' | 'as') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +# Backward compatibility cruft to support: +# [ x for x in lambda: True, lambda: False if x() ] +# even while also allowing: +# lambda x: 5 if x else 2 +# (But not a mix of the two) +testlist_safe: old_test [(',' old_test)+ [',']] +old_test: or_test | old_lambdef +old_lambdef: 'lambda' [varargslist] ':' old_test + +namedexpr_test: asexpr_test [':=' asexpr_test] + +# This is not actually a real rule; since the parser's strategy for +# match/case rules is very limited, we insert a virtual +# case (<expr> as <name>) as a valid expression. Until a better approach +# is devised, the only side effect of this seems to be allowing more stuff +# to be parsed (which then fails at the ast.parse() step of safe mode). +asexpr_test: test ['as' test] + +test: or_test ['if' or_test 'else' test] | lambdef +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: [AWAIT] atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_gexp] ')' | + '[' [listmaker] ']' | + '{' [dictsetmaker] '}' | + '`' testlist1 '`' | + NAME | NUMBER | STRING+ | '.' '.' '.') +listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] ) +testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] ) +lambdef: 'lambda' [varargslist] ':' test +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test [':=' test] | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser.
Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test ':=' test | + test 'as' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +comp_for: [ASYNC] 'for' exprlist 'in' or_test [comp_iter] +comp_if: 'if' old_test [comp_iter] + +# As noted above, testlist_safe extends the syntax allowed in list +# comprehensions and generators. We can't use it indiscriminately in all +# derivations using a comp_for-like pattern because the testlist_safe derivation +# contains comma which clashes with trailing comma in arglist. +# +# This was an issue because the parser would not follow the correct derivation +# when parsing syntactically valid Python code. Since testlist_safe was created +# specifically to handle list comprehensions and generator expressions enclosed +# with parentheses, it's safe to only use it in those. That avoids the issue; we +# can parse code like set(x for x in [],). +# +# The syntax supported by this set of rules is not a valid Python 3 syntax, +# hence the prefix "old". +# +# See https://bugs.python.org/issue27494 +old_comp_iter: old_comp_for | old_comp_if +old_comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [old_comp_iter] +old_comp_if: 'if' old_test [old_comp_iter] + +testlist1: test (',' test)* + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + + +# 3.10 match statement definition + +# PS: normally the grammar is much much more restricted, but +# at this moment for not trying to bother much with encoding the +# exact same DSL in a LL(1) parser, we will just accept an expression +# and let the ast.parse() step of the safe mode to reject invalid +# grammar. + +# The reason why it is more restricted is that, patterns are some +# sort of a DSL (more advanced than our LHS on assignments, but +# still in a very limited python subset). They are not really +# expressions, but who cares. If we can parse them, that is enough +# to reformat them. + +match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT + +# This is more permissive than the actual version. For example it +# accepts `match *something:`, even though single-item starred expressions +# are forbidden. +subject_expr: (namedexpr_test|star_expr) (',' (namedexpr_test|star_expr))* [','] + +# cases +case_block: "case" patterns [guard] ':' suite +guard: 'if' namedexpr_test +patterns: pattern ['as' pattern] +pattern: (expr|star_expr) (',' (expr|star_expr))* [','] diff --git a/myenv/lib/python3.9/site-packages/blib2to3/PatternGrammar.txt b/myenv/lib/python3.9/site-packages/blib2to3/PatternGrammar.txt new file mode 100644 index 0000000..36bf814 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/PatternGrammar.txt @@ -0,0 +1,28 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# A grammar to describe tree matching patterns. +# Not shown here: +# - 'TOKEN' stands for any token (leaf node) +# - 'any' stands for any node (leaf or interior) +# With 'any' we can still specify the sub-structure. + +# The start symbol is 'Matcher'. 
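+# An illustrative pattern in this DSL (of the kind lib2to3-style fixers use):
+#   power< 'print' trailer< '(' args=any ')' > >
+# matches a call to print and binds the argument node to the name 'args'.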
+ +Matcher: Alternatives ENDMARKER + +Alternatives: Alternative ('|' Alternative)* + +Alternative: (Unit | NegatedUnit)+ + +Unit: [NAME '='] ( STRING [Repeater] + | NAME [Details] [Repeater] + | '(' Alternatives ')' [Repeater] + | '[' Alternatives ']' + ) + +NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') + +Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' + +Details: '<' Alternatives '>' diff --git a/myenv/lib/python3.9/site-packages/blib2to3/__init__.py b/myenv/lib/python3.9/site-packages/blib2to3/__init__.py new file mode 100644 index 0000000..1bb8bf6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/__init__.py @@ -0,0 +1 @@ +# empty diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/__init__.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/__init__.py new file mode 100644 index 0000000..af39048 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""The pgen2 package.""" diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/conv.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/conv.py new file mode 100644 index 0000000..fa9825e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/conv.py @@ -0,0 +1,256 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# mypy: ignore-errors + +"""Convert graminit.[ch] spit out by pgen to Python code. + +Pgen is the Python parser generator. It is useful to quickly create a +parser from a grammar file in Python's grammar notation. But I don't +want my parsers to be written in C (yet), so I'm translating the +parsing tables to Python data structures and writing a Python parse +engine. + +Note that the token numbers are constants determined by the standard +Python tokenizer. The standard token module defines these numbers and +their names (the names are not used much). The token numbers are +hardcoded into the Python tokenizer and into pgen. A Python +implementation of the Python tokenizer is also available, in the +standard tokenize module. + +On the other hand, symbol numbers (representing the grammar's +non-terminals) are assigned by pgen based on the actual grammar +input. + +Note: this module is pretty much obsolete; the pgen module generates +equivalent grammar tables directly from the Grammar.txt input file +without having to invoke the Python pgen C program. + +""" + +# Python imports +import re + +# Local imports +from pgen2 import grammar, token + + +class Converter(grammar.Grammar): + """Grammar subclass that reads classic pgen output files. + + The run() method reads the tables as produced by the pgen parser + generator, typically contained in two C files, graminit.h and + graminit.c. The other methods are for internal use only. + + See the base class for more documentation. + + """ + + def run(self, graminit_h, graminit_c): + """Load the grammar tables from the text files written by pgen.""" + self.parse_graminit_h(graminit_h) + self.parse_graminit_c(graminit_c) + self.finish_off() + + def parse_graminit_h(self, filename): + """Parse the .h file written by pgen. (Internal) + + This file is a sequence of #define statements defining the + nonterminals of the grammar as numbers. We build two tables + mapping the numbers to names and back. 
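+        For example, a graminit.h line like "#define single_input 256"
+        results in symbol2number["single_input"] == 256; symbol numbers
+        start at 256 so they never collide with token numbers.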
+ + """ + try: + f = open(filename) + except OSError as err: + print("Can't open %s: %s" % (filename, err)) + return False + self.symbol2number = {} + self.number2symbol = {} + lineno = 0 + for line in f: + lineno += 1 + mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line) + if not mo and line.strip(): + print("%s(%s): can't parse %s" % (filename, lineno, line.strip())) + else: + symbol, number = mo.groups() + number = int(number) + assert symbol not in self.symbol2number + assert number not in self.number2symbol + self.symbol2number[symbol] = number + self.number2symbol[number] = symbol + return True + + def parse_graminit_c(self, filename): + """Parse the .c file written by pgen. (Internal) + + The file looks as follows. The first two lines are always this: + + #include "pgenheaders.h" + #include "grammar.h" + + After that come four blocks: + + 1) one or more state definitions + 2) a table defining dfas + 3) a table defining labels + 4) a struct defining the grammar + + A state definition has the following form: + - one or more arc arrays, each of the form: + static arc arcs__[] = { + {, }, + ... + }; + - followed by a state array, of the form: + static state states_[] = { + {, arcs__}, + ... + }; + + """ + try: + f = open(filename) + except OSError as err: + print("Can't open %s: %s" % (filename, err)) + return False + # The code below essentially uses f's iterator-ness! + lineno = 0 + + # Expect the two #include lines + lineno, line = lineno + 1, next(f) + assert line == '#include "pgenheaders.h"\n', (lineno, line) + lineno, line = lineno + 1, next(f) + assert line == '#include "grammar.h"\n', (lineno, line) + + # Parse the state definitions + lineno, line = lineno + 1, next(f) + allarcs = {} + states = [] + while line.startswith("static arc "): + while line.startswith("static arc "): + mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line) + assert mo, (lineno, line) + n, m, k = list(map(int, mo.groups())) + arcs = [] + for _ in range(k): + lineno, line = lineno + 1, next(f) + mo = re.match(r"\s+{(\d+), (\d+)},$", line) + assert mo, (lineno, line) + i, j = list(map(int, mo.groups())) + arcs.append((i, j)) + lineno, line = lineno + 1, next(f) + assert line == "};\n", (lineno, line) + allarcs[(n, m)] = arcs + lineno, line = lineno + 1, next(f) + mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line) + assert mo, (lineno, line) + s, t = list(map(int, mo.groups())) + assert s == len(states), (lineno, line) + state = [] + for _ in range(t): + lineno, line = lineno + 1, next(f) + mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line) + assert mo, (lineno, line) + k, n, m = list(map(int, mo.groups())) + arcs = allarcs[n, m] + assert k == len(arcs), (lineno, line) + state.append(arcs) + states.append(state) + lineno, line = lineno + 1, next(f) + assert line == "};\n", (lineno, line) + lineno, line = lineno + 1, next(f) + self.states = states + + # Parse the dfas + dfas = {} + mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line) + assert mo, (lineno, line) + ndfas = int(mo.group(1)) + for i in range(ndfas): + lineno, line = lineno + 1, next(f) + mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line) + assert mo, (lineno, line) + symbol = mo.group(2) + number, x, y, z = list(map(int, mo.group(1, 3, 4, 5))) + assert self.symbol2number[symbol] == number, (lineno, line) + assert self.number2symbol[number] == symbol, (lineno, line) + assert x == 0, (lineno, line) + state = states[z] + assert y == len(state), (lineno, line) + lineno, line = lineno + 1, next(f) + 
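+                # The quoted C string matched below encodes the rule's first
+                # set as a bitmap: bit j of byte i marks label number i*8 + j.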
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line) + assert mo, (lineno, line) + first = {} + rawbitset = eval(mo.group(1)) + for i, c in enumerate(rawbitset): + byte = ord(c) + for j in range(8): + if byte & (1 << j): + first[i * 8 + j] = 1 + dfas[number] = (state, first) + lineno, line = lineno + 1, next(f) + assert line == "};\n", (lineno, line) + self.dfas = dfas + + # Parse the labels + labels = [] + lineno, line = lineno + 1, next(f) + mo = re.match(r"static label labels\[(\d+)\] = {$", line) + assert mo, (lineno, line) + nlabels = int(mo.group(1)) + for i in range(nlabels): + lineno, line = lineno + 1, next(f) + mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line) + assert mo, (lineno, line) + x, y = mo.groups() + x = int(x) + if y == "0": + y = None + else: + y = eval(y) + labels.append((x, y)) + lineno, line = lineno + 1, next(f) + assert line == "};\n", (lineno, line) + self.labels = labels + + # Parse the grammar struct + lineno, line = lineno + 1, next(f) + assert line == "grammar _PyParser_Grammar = {\n", (lineno, line) + lineno, line = lineno + 1, next(f) + mo = re.match(r"\s+(\d+),$", line) + assert mo, (lineno, line) + ndfas = int(mo.group(1)) + assert ndfas == len(self.dfas) + lineno, line = lineno + 1, next(f) + assert line == "\tdfas,\n", (lineno, line) + lineno, line = lineno + 1, next(f) + mo = re.match(r"\s+{(\d+), labels},$", line) + assert mo, (lineno, line) + nlabels = int(mo.group(1)) + assert nlabels == len(self.labels), (lineno, line) + lineno, line = lineno + 1, next(f) + mo = re.match(r"\s+(\d+)$", line) + assert mo, (lineno, line) + start = int(mo.group(1)) + assert start in self.number2symbol, (lineno, line) + self.start = start + lineno, line = lineno + 1, next(f) + assert line == "};\n", (lineno, line) + try: + lineno, line = lineno + 1, next(f) + except StopIteration: + pass + else: + assert 0, (lineno, line) + + def finish_off(self): + """Create additional useful structures. (Internal).""" + self.keywords = {} # map from keyword strings to arc labels + self.tokens = {} # map from numeric token values to arc labels + for ilabel, (type, value) in enumerate(self.labels): + if type == token.NAME and value is not None: + self.keywords[value] = ilabel + elif value is None: + self.tokens[type] = ilabel diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/driver.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/driver.py new file mode 100644 index 0000000..8fe8206 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/driver.py @@ -0,0 +1,327 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. + +""" + +__author__ = "Guido van Rossum " + +__all__ = ["Driver", "load_grammar"] + +# Python imports +import io +import os +import logging +import pkgutil +import sys +from typing import ( + Any, + cast, + IO, + Iterable, + List, + Optional, + Text, + Iterator, + Tuple, + TypeVar, + Generic, + Union, +) +from contextlib import contextmanager +from dataclasses import dataclass, field + +# Pgen imports +from . 
import grammar, parse, token, tokenize, pgen +from logging import Logger +from blib2to3.pytree import NL +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.tokenize import GoodTokenInfo + +Path = Union[str, "os.PathLike[str]"] + + +@dataclass +class ReleaseRange: + start: int + end: Optional[int] = None + tokens: List[Any] = field(default_factory=list) + + def lock(self) -> None: + total_eaten = len(self.tokens) + self.end = self.start + total_eaten + + +class TokenProxy: + def __init__(self, generator: Any) -> None: + self._tokens = generator + self._counter = 0 + self._release_ranges: List[ReleaseRange] = [] + + @contextmanager + def release(self) -> Iterator["TokenProxy"]: + release_range = ReleaseRange(self._counter) + self._release_ranges.append(release_range) + try: + yield self + finally: + # Lock the last release range to the final position that + # has been eaten. + release_range.lock() + + def eat(self, point: int) -> Any: + eaten_tokens = self._release_ranges[-1].tokens + if point < len(eaten_tokens): + return eaten_tokens[point] + else: + while point >= len(eaten_tokens): + token = next(self._tokens) + eaten_tokens.append(token) + return token + + def __iter__(self) -> "TokenProxy": + return self + + def __next__(self) -> Any: + # If the current position is already compromised (looked up) + # return the eaten token, if not just go further on the given + # token producer. + for release_range in self._release_ranges: + assert release_range.end is not None + + start, end = release_range.start, release_range.end + if start <= self._counter < end: + token = release_range.tokens[self._counter - start] + break + else: + token = next(self._tokens) + self._counter += 1 + return token + + def can_advance(self, to: int) -> bool: + # Try to eat, fail if it can't. The eat operation is cached + # so there wont be any additional cost of eating here + try: + self.eat(to) + except StopIteration: + return False + else: + return True + + +class Driver(object): + def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None: + self.grammar = grammar + if logger is None: + logger = logging.getLogger(__name__) + self.logger = logger + + def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL: + """Parse a series of tokens and return the syntax tree.""" + # XXX Move the prefix computation into a wrapper around tokenize. 
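+        # ("Prefix" is the whitespace and comments preceding a token; the
+        # loop below accumulates it and attaches it to the next node so the
+        # tree can be rendered back to source text losslessly.)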
+ proxy = TokenProxy(tokens) + + p = parse.Parser(self.grammar) + p.setup(proxy=proxy) + + lineno = 1 + column = 0 + indent_columns: List[int] = [] + type = value = start = end = line_text = None + prefix = "" + + for quintuple in proxy: + type, value, start, end, line_text = quintuple + if start != (lineno, column): + assert (lineno, column) <= start, ((lineno, column), start) + s_lineno, s_column = start + if lineno < s_lineno: + prefix += "\n" * (s_lineno - lineno) + lineno = s_lineno + column = 0 + if column < s_column: + prefix += line_text[column:s_column] + column = s_column + if type in (tokenize.COMMENT, tokenize.NL): + prefix += value + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + continue + if type == token.OP: + type = grammar.opmap[value] + if debug: + assert type is not None + self.logger.debug( + "%s %r (prefix=%r)", token.tok_name[type], value, prefix + ) + if type == token.INDENT: + indent_columns.append(len(value)) + _prefix = prefix + value + prefix = "" + value = "" + elif type == token.DEDENT: + _indent_col = indent_columns.pop() + prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col) + if p.addtoken(cast(int, type), value, (prefix, start)): + if debug: + self.logger.debug("Stop.") + break + prefix = "" + if type in {token.INDENT, token.DEDENT}: + prefix = _prefix + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + else: + # We never broke out -- EOF is too soon (how can this happen???) + assert start is not None + raise parse.ParseError("incomplete input", type, value, (prefix, start)) + assert p.rootnode is not None + return p.rootnode + + def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL: + """Parse a stream and return the syntax tree.""" + tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar) + return self.parse_tokens(tokens, debug) + + def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL: + """Parse a stream and return the syntax tree.""" + return self.parse_stream_raw(stream, debug) + + def parse_file( + self, filename: Path, encoding: Optional[Text] = None, debug: bool = False + ) -> NL: + """Parse a file and return the syntax tree.""" + with io.open(filename, "r", encoding=encoding) as stream: + return self.parse_stream(stream, debug) + + def parse_string(self, text: Text, debug: bool = False) -> NL: + """Parse a string and return the syntax tree.""" + tokens = tokenize.generate_tokens( + io.StringIO(text).readline, grammar=self.grammar + ) + return self.parse_tokens(tokens, debug) + + def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]: + lines: List[str] = [] + current_line = "" + current_column = 0 + wait_for_nl = False + for char in prefix: + current_line += char + if wait_for_nl: + if char == "\n": + if current_line.strip() and current_column < column: + res = "".join(lines) + return res, prefix[len(res) :] + + lines.append(current_line) + current_line = "" + current_column = 0 + wait_for_nl = False + elif char in " \t": + current_column += 1 + elif char == "\n": + # unexpected empty line + current_column = 0 + else: + # indent is finished + wait_for_nl = True + return "".join(lines), current_line + + +def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text: + head, tail = os.path.splitext(gt) + if tail == ".txt": + tail = "" + name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle" + if cache_dir: + return os.path.join(cache_dir, os.path.basename(name)) + 
else: + return name + + +def load_grammar( + gt: Text = "Grammar.txt", + gp: Optional[Text] = None, + save: bool = True, + force: bool = False, + logger: Optional[Logger] = None, +) -> Grammar: + """Load the grammar (maybe from a pickle).""" + if logger is None: + logger = logging.getLogger(__name__) + gp = _generate_pickle_name(gt) if gp is None else gp + if force or not _newer(gp, gt): + logger.info("Generating grammar tables from %s", gt) + g: grammar.Grammar = pgen.generate_grammar(gt) + if save: + logger.info("Writing grammar tables to %s", gp) + try: + g.dump(gp) + except OSError as e: + logger.info("Writing failed: %s", e) + else: + g = grammar.Grammar() + g.load(gp) + return g + + +def _newer(a: Text, b: Text) -> bool: + """Inquire whether file a was written since file b.""" + if not os.path.exists(a): + return False + if not os.path.exists(b): + return True + return os.path.getmtime(a) >= os.path.getmtime(b) + + +def load_packaged_grammar( + package: str, grammar_source: Text, cache_dir: Optional[Path] = None +) -> grammar.Grammar: + """Normally, loads a pickled grammar by doing + pkgutil.get_data(package, pickled_grammar) + where *pickled_grammar* is computed from *grammar_source* by adding the + Python version and using a ``.pickle`` extension. + + However, if *grammar_source* is an extant file, load_grammar(grammar_source) + is called instead. This facilitates using a packaged grammar file when needed + but preserves load_grammar's automatic regeneration behavior when possible. + + """ + if os.path.isfile(grammar_source): + gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None + return load_grammar(grammar_source, gp=gp) + pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir) + data = pkgutil.get_data(package, pickled_name) + assert data is not None + g = grammar.Grammar() + g.loads(data) + return g + + +def main(*args: Text) -> bool: + """Main program, when run as a script: produce grammar pickle files. + + Calls load_grammar for each argument, a path to a grammar text file. + """ + if not args: + args = tuple(sys.argv[1:]) + logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s") + for gt in args: + load_grammar(gt, save=True, force=True) + return True + + +if __name__ == "__main__": + sys.exit(int(not main())) diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/grammar.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/grammar.py new file mode 100644 index 0000000..5685107 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/grammar.py @@ -0,0 +1,225 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +""" + +# Python imports +import os +import pickle +import tempfile +from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union + +# Local imports +from . 
import token + +_P = TypeVar("_P", bound="Grammar") +Label = Tuple[int, Optional[Text]] +DFA = List[List[Tuple[int, int]]] +DFAS = Tuple[DFA, Dict[int, int]] +Path = Union[str, "os.PathLike[str]"] + + +class Grammar(object): + """Pgen parsing tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). + + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. + + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. (This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. 
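+      soft_keywords -- a dict mapping soft keyword strings (e.g. "match")
+                       to arc labels; these act as keywords only in
+                       certain positions.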
+ + """ + + def __init__(self) -> None: + self.symbol2number: Dict[str, int] = {} + self.number2symbol: Dict[int, str] = {} + self.states: List[DFA] = [] + self.dfas: Dict[int, DFAS] = {} + self.labels: List[Label] = [(0, "EMPTY")] + self.keywords: Dict[str, int] = {} + self.soft_keywords: Dict[str, int] = {} + self.tokens: Dict[int, int] = {} + self.symbol2label: Dict[str, int] = {} + self.start = 256 + # Python 3.7+ parses async as a keyword, not an identifier + self.async_keywords = False + + def dump(self, filename: Path) -> None: + """Dump the grammar tables to a pickle file.""" + + # mypyc generates objects that don't have a __dict__, but they + # do have __getstate__ methods that will return an equivalent + # dictionary + if hasattr(self, "__dict__"): + d = self.__dict__ + else: + d = self.__getstate__() # type: ignore + + with tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False + ) as f: + pickle.dump(d, f, pickle.HIGHEST_PROTOCOL) + os.replace(f.name, filename) + + def _update(self, attrs: Dict[str, Any]) -> None: + for k, v in attrs.items(): + setattr(self, k, v) + + def load(self, filename: Path) -> None: + """Load the grammar tables from a pickle file.""" + with open(filename, "rb") as f: + d = pickle.load(f) + self._update(d) + + def loads(self, pkl: bytes) -> None: + """Load the grammar tables from a pickle bytes object.""" + self._update(pickle.loads(pkl)) + + def copy(self: _P) -> _P: + """ + Copy the grammar. + """ + new = self.__class__() + for dict_attr in ( + "symbol2number", + "number2symbol", + "dfas", + "keywords", + "soft_keywords", + "tokens", + "symbol2label", + ): + setattr(new, dict_attr, getattr(self, dict_attr).copy()) + new.labels = self.labels[:] + new.states = self.states[:] + new.start = self.start + new.async_keywords = self.async_keywords + return new + + def report(self) -> None: + """Dump the grammar tables to standard output, for debugging.""" + from pprint import pprint + + print("s2n") + pprint(self.symbol2number) + print("n2s") + pprint(self.number2symbol) + print("states") + pprint(self.states) + print("dfas") + pprint(self.dfas) + print("labels") + pprint(self.labels) + print("start", self.start) + + +# Map from operator to number (since tokenize doesn't do this) + +opmap_raw = """ +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +@= ATEQUAL +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +:= COLONEQUAL +""" + +opmap = {} +for line in opmap_raw.splitlines(): + if line: + op, name = line.split() + opmap[op] = getattr(token, name) diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/literals.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/literals.py new file mode 100644 index 0000000..b5fe428 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/literals.py @@ -0,0 +1,68 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +"""Safely evaluate Python string literals without using eval().""" + +import re + +from typing import Dict, Match, Text + + +simple_escapes: Dict[Text, Text] = { + "a": "\a", + "b": "\b", + "f": "\f", + "n": "\n", + "r": "\r", + "t": "\t", + "v": "\v", + "'": "'", + '"': '"', + "\\": "\\", +} + + +def escape(m: Match[Text]) -> Text: + all, tail = m.group(0, 1) + assert all.startswith("\\") + esc = simple_escapes.get(tail) + if esc is not None: + return esc + if tail.startswith("x"): + hexes = tail[1:] + if len(hexes) < 2: + raise ValueError("invalid hex string escape ('\\%s')" % tail) + try: + i = int(hexes, 16) + except ValueError: + raise ValueError("invalid hex string escape ('\\%s')" % tail) from None + else: + try: + i = int(tail, 8) + except ValueError: + raise ValueError("invalid octal string escape ('\\%s')" % tail) from None + return chr(i) + + +def evalString(s: Text) -> Text: + assert s.startswith("'") or s.startswith('"'), repr(s[:1]) + q = s[0] + if s[:3] == q * 3: + q = q * 3 + assert s.endswith(q), repr(s[-len(q) :]) + assert len(s) >= 2 * len(q) + s = s[len(q) : -len(q)] + return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) + + +def test() -> None: + for i in range(256): + c = chr(i) + s = repr(c) + e = evalString(s) + if e != c: + print(i, c, s, e) + + +if __name__ == "__main__": + test() diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/parse.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/parse.py new file mode 100644 index 0000000..e5dad3a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/parse.py @@ -0,0 +1,346 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +""" +import copy +from contextlib import contextmanager + +# Local imports +from . 
import grammar, token, tokenize +from typing import ( + cast, + Any, + Optional, + Text, + Union, + Tuple, + Dict, + List, + Iterator, + Callable, + Set, + TYPE_CHECKING, +) +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node + +if TYPE_CHECKING: + from blib2to3.driver import TokenProxy + + +Results = Dict[Text, NL] +Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]] +DFA = List[List[Tuple[int, int]]] +DFAS = Tuple[DFA, Dict[int, int]] + + +def lam_sub(grammar: Grammar, node: RawNode) -> NL: + assert node[3] is not None + return Node(type=node[0], children=node[3], context=node[2]) + + +class Recorder: + def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None: + self.parser = parser + self._ilabels = ilabels + self.context = context # not really matter + + self._dead_ilabels: Set[int] = set() + self._start_point = self.parser.stack + self._points = {ilabel: copy.deepcopy(self._start_point) for ilabel in ilabels} + + @property + def ilabels(self) -> Set[int]: + return self._dead_ilabels.symmetric_difference(self._ilabels) + + @contextmanager + def switch_to(self, ilabel: int) -> Iterator[None]: + self.parser.stack = self._points[ilabel] + try: + yield + except ParseError: + self._dead_ilabels.add(ilabel) + finally: + self.parser.stack = self._start_point + + def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None: + func: Callable[..., Any] + if raw: + func = self.parser._addtoken + else: + func = self.parser.addtoken + + for ilabel in self.ilabels: + with self.switch_to(ilabel): + args = [tok_type, tok_val, self.context] + if raw: + args.insert(0, ilabel) + func(*args) + + def determine_route(self, value: Text = None, force: bool = False) -> Optional[int]: + alive_ilabels = self.ilabels + if len(alive_ilabels) == 0: + *_, most_successful_ilabel = self._dead_ilabels + raise ParseError("bad input", most_successful_ilabel, value, self.context) + + ilabel, *rest = alive_ilabels + if force or not rest: + return ilabel + else: + return None + + +class ParseError(Exception): + """Exception to signal the parser is stuck.""" + + def __init__( + self, msg: Text, type: Optional[int], value: Optional[Text], context: Context + ) -> None: + Exception.__init__( + self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context) + ) + self.msg = msg + self.type = type + self.value = value + self.context = context + + +class Parser(object): + """Parser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). 
+ + """ + + def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None: + """Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. + + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + **post-note: the convert argument is ignored since for Black's + usage, convert will always be blib2to3.pytree.convert. Allowing + this to be dynamic hurts mypyc's ability to use early binding. + These docs are left for historical and informational value. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + """ + self.grammar = grammar + # See note in docstring above. TL;DR this is ignored. + self.convert = convert or lam_sub + + def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None: + """Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + """ + if start is None: + start = self.grammar.start + # Each stack entry is a tuple: (dfa, state, node). + # A node is a tuple: (type, value, context, children), + # where children is a list of nodes or None, and context may be None. + newnode: RawNode = (start, None, None, []) + stackentry = (self.grammar.dfas[start], 0, newnode) + self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry] + self.rootnode: Optional[NL] = None + self.used_names: Set[str] = set() + self.proxy = proxy + + def addtoken(self, type: int, value: Text, context: Context) -> bool: + """Add a token; return True iff this is the end of the program.""" + # Map from token to label + ilabels = self.classify(type, value, context) + assert len(ilabels) >= 1 + + # If we have only one state to advance, we'll directly + # take it as is. + if len(ilabels) == 1: + [ilabel] = ilabels + return self._addtoken(ilabel, type, value, context) + + # If there are multiple states which we can advance (only + # happen under soft-keywords), then we will try all of them + # in parallel and as soon as one state can reach further than + # the rest, we'll choose that one. This is a pretty hacky + # and hopefully temporary algorithm. 
+ # + # For a more detailed explanation, check out this post: + # https://tree.science/what-the-backtracking.html + + with self.proxy.release() as proxy: + counter, force = 0, False + recorder = Recorder(self, ilabels, context) + recorder.add_token(type, value, raw=True) + + next_token_value = value + while recorder.determine_route(next_token_value) is None: + if not proxy.can_advance(counter): + force = True + break + + next_token_type, next_token_value, *_ = proxy.eat(counter) + if next_token_type == tokenize.OP: + next_token_type = grammar.opmap[next_token_value] + + recorder.add_token(next_token_type, next_token_value) + counter += 1 + + ilabel = cast(int, recorder.determine_route(next_token_value, force=force)) + assert ilabel is not None + + return self._addtoken(ilabel, type, value, context) + + def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool: + # Loop until the token is shifted; may raise exceptions + while True: + dfa, state, node = self.stack[-1] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + t = self.grammar.labels[i][0] + if t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self.grammar.dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, itsdfa, newstate, context) + break # To continue the outer while loop + + elif ilabel == i: + # Look it up in the list of labels + # Shift a token; we're done with it + self.shift(type, value, newstate, context) + # Pop while we are in an accept-only state + state = newstate + while states[state] == [(0, state)]: + self.pop() + if not self.stack: + # Done parsing! + return True + dfa, state, node = self.stack[-1] + states, first = dfa + # Done with this token + return False + + else: + if (0, state) in arcs: + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", type, value, context) + else: + # No success finding a transition + raise ParseError("bad input", type, value, context) + + def classify(self, type: int, value: Text, context: Context) -> List[int]: + """Turn a token into a label. (Internal) + + Depending on whether the value is a soft-keyword or not, + this function may return multiple labels to choose from.""" + if type == token.NAME: + # Keep a listing of all used names + self.used_names.add(value) + # Check for reserved words + if value in self.grammar.keywords: + return [self.grammar.keywords[value]] + elif value in self.grammar.soft_keywords: + assert type in self.grammar.tokens + return [ + self.grammar.soft_keywords[value], + self.grammar.tokens[type], + ] + + ilabel = self.grammar.tokens.get(type) + if ilabel is None: + raise ParseError("bad token", type, value, context) + return [ilabel] + + def shift(self, type: int, value: Text, newstate: int, context: Context) -> None: + """Shift a token. (Internal)""" + dfa, state, node = self.stack[-1] + rawnode: RawNode = (type, value, context, None) + newnode = convert(self.grammar, rawnode) + assert node[-1] is not None + node[-1].append(newnode) + self.stack[-1] = (dfa, newstate, node) + + def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None: + """Push a nonterminal. 
(Internal)""" + dfa, state, node = self.stack[-1] + newnode: RawNode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) + + def pop(self) -> None: + """Pop a nonterminal. (Internal)""" + popdfa, popstate, popnode = self.stack.pop() + newnode = convert(self.grammar, popnode) + if self.stack: + dfa, state, node = self.stack[-1] + assert node[-1] is not None + node[-1].append(newnode) + else: + self.rootnode = newnode + self.rootnode.used_names = self.used_names diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/pgen.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/pgen.py new file mode 100644 index 0000000..631682a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/pgen.py @@ -0,0 +1,433 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Pgen imports +from . import grammar, token, tokenize + +from typing import ( + Any, + Dict, + IO, + Iterator, + List, + Optional, + Text, + Tuple, + Union, + Sequence, + NoReturn, +) +from blib2to3.pgen2 import grammar +from blib2to3.pgen2.tokenize import GoodTokenInfo +import os + + +Path = Union[str, "os.PathLike[str]"] + + +class PgenGrammar(grammar.Grammar): + pass + + +class ParserGenerator(object): + + filename: Path + stream: IO[Text] + generator: Iterator[GoodTokenInfo] + first: Dict[Text, Optional[Dict[Text, int]]] + + def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None: + close_stream = None + if stream is None: + stream = open(filename) + close_stream = stream.close + self.filename = filename + self.stream = stream + self.generator = tokenize.generate_tokens(stream.readline) + self.gettoken() # Initialize lookahead + self.dfas, self.startsymbol = self.parse() + if close_stream is not None: + close_stream() + self.first = {} # map from symbol name to set of tokens + self.addfirstsets() + + def make_grammar(self) -> PgenGrammar: + c = PgenGrammar() + names = list(self.dfas.keys()) + names.sort() + names.remove(self.startsymbol) + names.insert(0, self.startsymbol) + for name in names: + i = 256 + len(c.symbol2number) + c.symbol2number[name] = i + c.number2symbol[i] = name + for name in names: + dfa = self.dfas[name] + states = [] + for state in dfa: + arcs = [] + for label, next in sorted(state.arcs.items()): + arcs.append((self.make_label(c, label), dfa.index(next))) + if state.isfinal: + arcs.append((0, dfa.index(state))) + states.append(arcs) + c.states.append(states) + c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) + c.start = c.symbol2number[self.startsymbol] + return c + + def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: + rawfirst = self.first[name] + assert rawfirst is not None + first = {} + for label in sorted(rawfirst): + ilabel = self.make_label(c, label) + ##assert ilabel not in first # XXX failed on <> ... != + first[ilabel] = 1 + return first + + def make_label(self, c: PgenGrammar, label: Text) -> int: + # XXX Maybe this should be a method on a subclass of converter? 
+ ilabel = len(c.labels) + if label[0].isalpha(): + # Either a symbol name or a named token + if label in c.symbol2number: + # A symbol name (a non-terminal) + if label in c.symbol2label: + return c.symbol2label[label] + else: + c.labels.append((c.symbol2number[label], None)) + c.symbol2label[label] = ilabel + return ilabel + else: + # A named token (NAME, NUMBER, STRING) + itoken = getattr(token, label, None) + assert isinstance(itoken, int), label + assert itoken in token.tok_name, label + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + else: + # Either a keyword or an operator + assert label[0] in ('"', "'"), label + value = eval(label) + if value[0].isalpha(): + if label[0] == '"': + keywords = c.soft_keywords + else: + keywords = c.keywords + + # A keyword + if value in keywords: + return keywords[value] + else: + c.labels.append((token.NAME, value)) + keywords[value] = ilabel + return ilabel + else: + # An operator (any non-numeric token) + itoken = grammar.opmap[value] # Fails if unknown token + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + + def addfirstsets(self) -> None: + names = list(self.dfas.keys()) + names.sort() + for name in names: + if name not in self.first: + self.calcfirst(name) + # print name, self.first[name].keys() + + def calcfirst(self, name: Text) -> None: + dfa = self.dfas[name] + self.first[name] = None # dummy to detect left recursion + state = dfa[0] + totalset: Dict[str, int] = {} + overlapcheck = {} + for label, next in state.arcs.items(): + if label in self.dfas: + if label in self.first: + fset = self.first[label] + if fset is None: + raise ValueError("recursion for rule %r" % name) + else: + self.calcfirst(label) + fset = self.first[label] + assert fset is not None + totalset.update(fset) + overlapcheck[label] = fset + else: + totalset[label] = 1 + overlapcheck[label] = {label: 1} + inverse: Dict[str, str] = {} + for label, itsfirst in overlapcheck.items(): + for symbol in itsfirst: + if symbol in inverse: + raise ValueError( + "rule %s is ambiguous; %s is in the first sets of %s as well" + " as %s" % (name, symbol, label, inverse[symbol]) + ) + inverse[symbol] = label + self.first[name] = totalset + + def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]: + dfas = {} + startsymbol: Optional[str] = None + # MSTART: (NEWLINE | RULE)* ENDMARKER + while self.type != token.ENDMARKER: + while self.type == token.NEWLINE: + self.gettoken() + # RULE: NAME ':' RHS NEWLINE + name = self.expect(token.NAME) + self.expect(token.OP, ":") + a, z = self.parse_rhs() + self.expect(token.NEWLINE) + # self.dump_nfa(name, a, z) + dfa = self.make_dfa(a, z) + # self.dump_dfa(name, dfa) + oldlen = len(dfa) + self.simplify_dfa(dfa) + newlen = len(dfa) + dfas[name] = dfa + # print name, oldlen, newlen + if startsymbol is None: + startsymbol = name + assert startsymbol is not None + return dfas, startsymbol + + def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]: + # To turn an NFA into a DFA, we define the states of the DFA + # to correspond to *sets* of states of the NFA. Then do some + # state reduction. Let's represent sets as dicts with 1 for + # values. 
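The comment above describes the classic subset construction; a minimal sketch with the NFAState/DFAState classes defined later in this module (a hypothetical three-state NFA, not taken from any real grammar) shows how an epsilon closure collapses into a single DFA state:

    from blib2to3.pgen2.pgen import NFAState, DFAState

    a, b, c = NFAState(), NFAState(), NFAState()
    a.addarc(b)               # label None is an epsilon arc, absorbed by the closure
    b.addarc(c, "x")          # labelled arc, becomes a DFA transition on "x"

    start = DFAState({a: 1, b: 1}, c)    # closure({a}) == {a, b}; c is the finish state
    target = DFAState({c: 1}, c)
    start.addarc(target, "x")
    assert target.isfinal and not start.isfinal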
+ assert isinstance(start, NFAState) + assert isinstance(finish, NFAState) + + def closure(state: NFAState) -> Dict[NFAState, int]: + base: Dict[NFAState, int] = {} + addclosure(state, base) + return base + + def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None: + assert isinstance(state, NFAState) + if state in base: + return + base[state] = 1 + for label, next in state.arcs: + if label is None: + addclosure(next, base) + + states = [DFAState(closure(start), finish)] + for state in states: # NB states grows while we're iterating + arcs: Dict[str, Dict[NFAState, int]] = {} + for nfastate in state.nfaset: + for label, next in nfastate.arcs: + if label is not None: + addclosure(next, arcs.setdefault(label, {})) + for label, nfaset in sorted(arcs.items()): + for st in states: + if st.nfaset == nfaset: + break + else: + st = DFAState(nfaset, finish) + states.append(st) + state.addarc(st, label) + return states # List of DFAState instances; first one is start + + def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None: + print("Dump of NFA for", name) + todo = [start] + for i, state in enumerate(todo): + print(" State", i, state is finish and "(final)" or "") + for label, next in state.arcs: + if next in todo: + j = todo.index(next) + else: + j = len(todo) + todo.append(next) + if label is None: + print(" -> %d" % j) + else: + print(" %s -> %d" % (label, j)) + + def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None: + print("Dump of DFA for", name) + for i, state in enumerate(dfa): + print(" State", i, state.isfinal and "(final)" or "") + for label, next in sorted(state.arcs.items()): + print(" %s -> %d" % (label, dfa.index(next))) + + def simplify_dfa(self, dfa: List["DFAState"]) -> None: + # This is not theoretically optimal, but works well enough. + # Algorithm: repeatedly look for two states that have the same + # set of arcs (same labels pointing to the same nodes) and + # unify them, until things stop changing. 
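Concretely, two DFA states are merged when __eq__ (defined further down; it deliberately ignores the nfaset) reports them identical, i.e. same finality and the same labelled arcs to the same targets. A small hypothetical illustration:

    from blib2to3.pgen2.pgen import NFAState, DFAState

    final = NFAState()
    accept = DFAState({final: 1}, final)
    s1 = DFAState({NFAState(): 1}, final)
    s2 = DFAState({NFAState(): 1}, final)
    s1.addarc(accept, "NAME")
    s2.addarc(accept, "NAME")
    assert s1 == s2   # same arcs, same finality: simplify_dfa keeps one and rewires the other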
+ + # dfa is a list of DFAState instances + changes = True + while changes: + changes = False + for i, state_i in enumerate(dfa): + for j in range(i + 1, len(dfa)): + state_j = dfa[j] + if state_i == state_j: + # print " unify", i, j + del dfa[j] + for state in dfa: + state.unifystate(state_j, state_i) + changes = True + break + + def parse_rhs(self) -> Tuple["NFAState", "NFAState"]: + # RHS: ALT ('|' ALT)* + a, z = self.parse_alt() + if self.value != "|": + return a, z + else: + aa = NFAState() + zz = NFAState() + aa.addarc(a) + z.addarc(zz) + while self.value == "|": + self.gettoken() + a, z = self.parse_alt() + aa.addarc(a) + z.addarc(zz) + return aa, zz + + def parse_alt(self) -> Tuple["NFAState", "NFAState"]: + # ALT: ITEM+ + a, b = self.parse_item() + while self.value in ("(", "[") or self.type in (token.NAME, token.STRING): + c, d = self.parse_item() + b.addarc(c) + b = d + return a, b + + def parse_item(self) -> Tuple["NFAState", "NFAState"]: + # ITEM: '[' RHS ']' | ATOM ['+' | '*'] + if self.value == "[": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, "]") + a.addarc(z) + return a, z + else: + a, z = self.parse_atom() + value = self.value + if value not in ("+", "*"): + return a, z + self.gettoken() + z.addarc(a) + if value == "+": + return a, z + else: + return a, a + + def parse_atom(self) -> Tuple["NFAState", "NFAState"]: + # ATOM: '(' RHS ')' | NAME | STRING + if self.value == "(": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, ")") + return a, z + elif self.type in (token.NAME, token.STRING): + a = NFAState() + z = NFAState() + a.addarc(z, self.value) + self.gettoken() + return a, z + else: + self.raise_error( + "expected (...) or NAME or STRING, got %s/%s", self.type, self.value + ) + assert False + + def expect(self, type: int, value: Optional[Any] = None) -> Text: + if self.type != type or (value is not None and self.value != value): + self.raise_error( + "expected %s/%s, got %s/%s", type, value, self.type, self.value + ) + value = self.value + self.gettoken() + return value + + def gettoken(self) -> None: + tup = next(self.generator) + while tup[0] in (tokenize.COMMENT, tokenize.NL): + tup = next(self.generator) + self.type, self.value, self.begin, self.end, self.line = tup + # print token.tok_name[self.type], repr(self.value) + + def raise_error(self, msg: str, *args: Any) -> NoReturn: + if args: + try: + msg = msg % args + except: + msg = " ".join([msg] + list(map(str, args))) + raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line)) + + +class NFAState(object): + arcs: List[Tuple[Optional[Text], "NFAState"]] + + def __init__(self) -> None: + self.arcs = [] # list of (label, NFAState) pairs + + def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None: + assert label is None or isinstance(label, str) + assert isinstance(next, NFAState) + self.arcs.append((label, next)) + + +class DFAState(object): + nfaset: Dict[NFAState, Any] + isfinal: bool + arcs: Dict[Text, "DFAState"] + + def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: + assert isinstance(nfaset, dict) + assert isinstance(next(iter(nfaset)), NFAState) + assert isinstance(final, NFAState) + self.nfaset = nfaset + self.isfinal = final in nfaset + self.arcs = {} # map from label to DFAState + + def addarc(self, next: "DFAState", label: Text) -> None: + assert isinstance(label, str) + assert label not in self.arcs + assert isinstance(next, DFAState) + self.arcs[label] = next + + def unifystate(self, old: "DFAState", 
new: "DFAState") -> None: + for label, next in self.arcs.items(): + if next is old: + self.arcs[label] = new + + def __eq__(self, other: Any) -> bool: + # Equality test -- ignore the nfaset instance variable + assert isinstance(other, DFAState) + if self.isfinal != other.isfinal: + return False + # Can't just return self.arcs == other.arcs, because that + # would invoke this method recursively, with cycles... + if len(self.arcs) != len(other.arcs): + return False + for label, next in self.arcs.items(): + if next is not other.arcs.get(label): + return False + return True + + __hash__: Any = None # For Py3 compatibility. + + +def generate_grammar(filename: Path = "Grammar.txt") -> PgenGrammar: + p = ParserGenerator(filename) + return p.make_grammar() diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/token.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/token.py new file mode 100644 index 0000000..1e0dec9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/token.py @@ -0,0 +1,94 @@ +"""Token constants (from "token.h").""" + +import sys +from typing import Dict + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +# Taken from Python (r53757) and modified to include some tokens +# originally monkeypatched in by pgen2.tokenize + +# --start constants-- +ENDMARKER: Final = 0 +NAME: Final = 1 +NUMBER: Final = 2 +STRING: Final = 3 +NEWLINE: Final = 4 +INDENT: Final = 5 +DEDENT: Final = 6 +LPAR: Final = 7 +RPAR: Final = 8 +LSQB: Final = 9 +RSQB: Final = 10 +COLON: Final = 11 +COMMA: Final = 12 +SEMI: Final = 13 +PLUS: Final = 14 +MINUS: Final = 15 +STAR: Final = 16 +SLASH: Final = 17 +VBAR: Final = 18 +AMPER: Final = 19 +LESS: Final = 20 +GREATER: Final = 21 +EQUAL: Final = 22 +DOT: Final = 23 +PERCENT: Final = 24 +BACKQUOTE: Final = 25 +LBRACE: Final = 26 +RBRACE: Final = 27 +EQEQUAL: Final = 28 +NOTEQUAL: Final = 29 +LESSEQUAL: Final = 30 +GREATEREQUAL: Final = 31 +TILDE: Final = 32 +CIRCUMFLEX: Final = 33 +LEFTSHIFT: Final = 34 +RIGHTSHIFT: Final = 35 +DOUBLESTAR: Final = 36 +PLUSEQUAL: Final = 37 +MINEQUAL: Final = 38 +STAREQUAL: Final = 39 +SLASHEQUAL: Final = 40 +PERCENTEQUAL: Final = 41 +AMPEREQUAL: Final = 42 +VBAREQUAL: Final = 43 +CIRCUMFLEXEQUAL: Final = 44 +LEFTSHIFTEQUAL: Final = 45 +RIGHTSHIFTEQUAL: Final = 46 +DOUBLESTAREQUAL: Final = 47 +DOUBLESLASH: Final = 48 +DOUBLESLASHEQUAL: Final = 49 +AT: Final = 50 +ATEQUAL: Final = 51 +OP: Final = 52 +COMMENT: Final = 53 +NL: Final = 54 +RARROW: Final = 55 +AWAIT: Final = 56 +ASYNC: Final = 57 +ERRORTOKEN: Final = 58 +COLONEQUAL: Final = 59 +N_TOKENS: Final = 60 +NT_OFFSET: Final = 256 +# --end constants-- + +tok_name: Final[Dict[int, str]] = {} +for _name, _value in list(globals().items()): + if type(_value) is type(0): + tok_name[_value] = _name + + +def ISTERMINAL(x: int) -> bool: + return x < NT_OFFSET + + +def ISNONTERMINAL(x: int) -> bool: + return x >= NT_OFFSET + + +def ISEOF(x: int) -> bool: + return x == ENDMARKER diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pgen2/tokenize.py b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/tokenize.py new file mode 100644 index 0000000..a7e17df --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pgen2/tokenize.py @@ -0,0 +1,688 @@ +# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. +# All rights reserved. + +# mypy: allow-untyped-defs, allow-untyped-calls + +"""Tokenization help for Python programs. 
+ +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.""" + +import sys +from typing import ( + Callable, + Iterable, + Iterator, + List, + Optional, + Text, + Tuple, + Pattern, + Union, + cast, +) + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from blib2to3.pgen2.token import * +from blib2to3.pgen2.grammar import Grammar + +__author__ = "Ka-Ping Yee " +__credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro" + +import re +from codecs import BOM_UTF8, lookup +from blib2to3.pgen2.token import * + +from . import token + +__all__ = [x for x in dir(token) if x[0] != "_"] + [ + "tokenize", + "generate_tokens", + "untokenize", +] +del token + + +def group(*choices): + return "(" + "|".join(choices) + ")" + + +def any(*choices): + return group(*choices) + "*" + + +def maybe(*choices): + return group(*choices) + "?" + + +def _combinations(*l): + return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()) + + +Whitespace = r"[ \f\t]*" +Comment = r"#[^\r\n]*" +Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment) +Name = ( # this is invalid but it's fine because Name comes after Number in all groups + r"[^\s#\(\)\[\]\{\}+\-*/!@$%^&=|;:'\",\.<>/?`~\\]+" +) + +Binnumber = r"0[bB]_?[01]+(?:_[01]+)*" +Hexnumber = r"0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?" +Octnumber = r"0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?" +Decnumber = group(r"[1-9]\d*(?:_\d+)*[lL]?", "0[lL]?") +Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) +Exponent = r"[eE][-+]?\d+(?:_\d+)*" +Pointfloat = group(r"\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?", r"\.\d+(?:_\d+)*") + maybe( + Exponent +) +Expfloat = r"\d+(?:_\d+)*" + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r"\d+(?:_\d+)*[jJ]", Floatnumber + r"[jJ]") +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?" +Triple = group(_litprefix + "'''", _litprefix + '"""') +# Single-line ' or " string. +String = group( + _litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"', +) + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). 
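The ordering caveat above is easy to demonstrate: Python's re alternation picks the leftmost matching alternative, not the longest one, so a shortest-first pattern would split "==" into two tokens:

    import re

    assert re.match(r"(==|=)", "==").group(0) == "=="   # longest-first: the whole operator matches
    assert re.match(r"(=|==)", "==").group(0) == "="     # shortest-first: only "=" is consumed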
+Operator = group( + r"\*\*=?", + r">>=?", + r"<<=?", + r"<>", + r"!=", + r"//=?", + r"->", + r"[+\-*/%&@|^=<>:]=?", + r"~", +) + +Bracket = "[][(){}]" +Special = group(r"\r?\n", r"[:;.,`@]") +Funny = group(Operator, Bracket, Special) + +# First (or only) line of ' or " string. +ContStr = group( + _litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r"\\\r?\n"), + _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r"\\\r?\n"), +) +PseudoExtras = group(r"\\\r?\n", Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +pseudoprog: Final = re.compile(PseudoToken, re.UNICODE) +single3prog = re.compile(Single3) +double3prog = re.compile(Double3) + +_strprefixes = ( + _combinations("r", "R", "f", "F") + | _combinations("r", "R", "b", "B") + | {"u", "U", "ur", "uR", "Ur", "UR"} +) + +endprogs: Final = { + "'": re.compile(Single), + '"': re.compile(Double), + "'''": single3prog, + '"""': double3prog, + **{f"{prefix}'''": single3prog for prefix in _strprefixes}, + **{f'{prefix}"""': double3prog for prefix in _strprefixes}, + **{prefix: None for prefix in _strprefixes}, +} + +triple_quoted: Final = ( + {"'''", '"""'} + | {f"{prefix}'''" for prefix in _strprefixes} + | {f'{prefix}"""' for prefix in _strprefixes} +) +single_quoted: Final = ( + {"'", '"'} + | {f"{prefix}'" for prefix in _strprefixes} + | {f'{prefix}"' for prefix in _strprefixes} +) + +tabsize = 8 + + +class TokenError(Exception): + pass + + +class StopTokenizing(Exception): + pass + + +def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing + (srow, scol) = xxx_todo_changeme + (erow, ecol) = xxx_todo_changeme1 + print( + "%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token)) + ) + + +Coord = Tuple[int, int] +TokenEater = Callable[[int, Text, Coord, Coord, Text], None] + + +def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None: + """ + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). 
+ """ + try: + tokenize_loop(readline, tokeneater) + except StopTokenizing: + pass + + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): + for token_info in generate_tokens(readline): + tokeneater(*token_info) + + +GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text] +TokenInfo = Union[Tuple[int, str], GoodTokenInfo] + + +class Untokenizer: + + tokens: List[Text] + prev_row: int + prev_col: int + + def __init__(self) -> None: + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + + def add_whitespace(self, start: Coord) -> None: + row, col = start + assert row <= self.prev_row + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable: Iterable[TokenInfo]) -> Text: + for t in iterable: + if len(t) == 2: + self.compat(cast(Tuple[int, str], t), iterable) + break + tok_type, token, start, end, line = cast( + Tuple[int, Text, Coord, Coord, Text], t + ) + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None: + startline = False + indents = [] + toks_append = self.tokens.append + toknum, tokval = token + if toknum in (NAME, NUMBER): + tokval += " " + if toknum in (NEWLINE, NL): + startline = True + for tok in iterable: + toknum, tokval = tok[:2] + + if toknum in (NAME, NUMBER, ASYNC, AWAIT): + tokval += " " + + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + + +cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII) +blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) + + +def _get_normal_name(orig_enc: str) -> str: + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith( + ("latin-1-", "iso-8859-1-", "iso-latin-1-") + ): + return "iso-8859-1" + return orig_enc + + +def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]: + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read + in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, but + disagree, a SyntaxError will be raised. If the encoding cookie is an invalid + charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. 
+ """ + bom_found = False + encoding = None + default = "utf-8" + + def read_or_stop() -> bytes: + try: + return readline() + except StopIteration: + return bytes() + + def find_cookie(line: bytes) -> Optional[str]: + try: + line_string = line.decode("ascii") + except UnicodeDecodeError: + return None + match = cookie_re.match(line_string) + if not match: + return None + encoding = _get_normal_name(match.group(1)) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + raise SyntaxError("unknown encoding: " + encoding) + + if bom_found: + if codec.name != "utf-8": + # This behaviour mimics the Python interpreter + raise SyntaxError("encoding problem: utf-8") + encoding += "-sig" + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = "utf-8-sig" + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + if not blank_re.match(first): + return default, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + + +def untokenize(iterable: Iterable[TokenInfo]) -> Text: + """Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + return ut.untokenize(iterable) + + +def generate_tokens( + readline: Callable[[], Text], grammar: Optional[Grammar] = None +) -> Iterator[GoodTokenInfo]: + """ + The generate_tokens() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. + """ + lnum = parenlev = continued = 0 + numchars: Final = "0123456789" + contstr, needcont = "", 0 + contline: Optional[str] = None + indents = [0] + + # If we know we're parsing 3.7+, we can unconditionally parse `async` and + # `await` as keywords. 
+ async_keywords = False if grammar is None else grammar.async_keywords + # 'stashed' and 'async_*' are used for async/await parsing + stashed: Optional[GoodTokenInfo] = None + async_def = False + async_def_indent = 0 + async_def_nl = False + + strstart: Tuple[int, int] + endprog: Pattern[str] + + while 1: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = "" + lnum += 1 + pos, max = 0, len(line) + + if contstr: # continued string + assert contline is not None + if not line: + raise TokenError("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield ( + STRING, + contstr + line[:end], + strstart, + (lnum, end), + contline + line, + ) + contstr, needcont = "", 0 + contline = None + elif needcont and line[-2:] != "\\\n" and line[-3:] != "\\\r\n": + yield ( + ERRORTOKEN, + contstr + line, + strstart, + (lnum, len(line)), + contline, + ) + contstr = "" + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: + break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == " ": + column += 1 + elif line[pos] == "\t": + column = (column // tabsize + 1) * tabsize + elif line[pos] == "\f": + column = 0 + else: + break + pos += 1 + if pos == max: + break + + if stashed: + yield stashed + stashed = None + + if line[pos] in "\r\n": # skip blank lines + yield (NL, line[pos:], (lnum, pos), (lnum, len(line)), line) + continue + + if line[pos] == "#": # skip comments + comment_token = line[pos:].rstrip("\r\n") + nl_pos = pos + len(comment_token) + yield ( + COMMENT, + comment_token, + (lnum, pos), + (lnum, nl_pos), + line, + ) + yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents + indents.append(column) + yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + + while column < indents[-1]: # count dedents + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line), + ) + indents = indents[:-1] + + if async_def and async_def_indent >= indents[-1]: + async_def = False + async_def_nl = False + async_def_indent = 0 + + yield (DEDENT, "", (lnum, pos), (lnum, pos), line) + + if async_def and async_def_nl and async_def_indent >= indents[-1]: + async_def = False + async_def_nl = False + async_def_indent = 0 + + else: # continued statement + if not line: + raise TokenError("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + token, initial = line[start:end], line[start] + + if initial in numchars or ( + initial == "." and token != "." 
+ ): # ordinary number + yield (NUMBER, token, spos, epos, line) + elif initial in "\r\n": + newline = NEWLINE + if parenlev > 0: + newline = NL + elif async_def: + async_def_nl = True + if stashed: + yield stashed + stashed = None + yield (newline, token, spos, epos, line) + + elif initial == "#": + assert not token.endswith("\n") + if stashed: + yield stashed + stashed = None + yield (COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + if stashed: + yield stashed + stashed = None + yield (STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif ( + initial in single_quoted + or token[:2] in single_quoted + or token[:3] in single_quoted + ): + if token[-1] == "\n": # continued string + strstart = (lnum, start) + endprog = ( + endprogs[initial] + or endprogs[token[1]] + or endprogs[token[2]] + ) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + if stashed: + yield stashed + stashed = None + yield (STRING, token, spos, epos, line) + elif initial.isidentifier(): # ordinary name + if token in ("async", "await"): + if async_keywords or async_def: + yield ( + ASYNC if token == "async" else AWAIT, + token, + spos, + epos, + line, + ) + continue + + tok = (NAME, token, spos, epos, line) + if token == "async" and not stashed: + stashed = tok + continue + + if token in ("def", "for"): + if stashed and stashed[0] == NAME and stashed[1] == "async": + + if token == "def": + async_def = True + async_def_indent = indents[-1] + + yield ( + ASYNC, + stashed[1], + stashed[2], + stashed[3], + stashed[4], + ) + stashed = None + + if stashed: + yield stashed + stashed = None + + yield tok + elif initial == "\\": # continued stmt + # This yield is new; needed for better idempotency: + if stashed: + yield stashed + stashed = None + yield (NL, token, spos, (lnum, pos), line) + continued = 1 + else: + if initial in "([{": + parenlev += 1 + elif initial in ")]}": + parenlev -= 1 + if stashed: + yield stashed + stashed = None + yield (OP, token, spos, epos, line) + else: + yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) + pos += 1 + + if stashed: + yield stashed + stashed = None + + for indent in indents[1:]: # pop remaining indent levels + yield (DEDENT, "", (lnum, 0), (lnum, 0), "") + yield (ENDMARKER, "", (lnum, 0), (lnum, 0), "") + + +if __name__ == "__main__": # testing + import sys + + if len(sys.argv) > 1: + tokenize(open(sys.argv[1]).readline) + else: + tokenize(sys.stdin.readline) diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pygram.py b/myenv/lib/python3.9/site-packages/blib2to3/pygram.py new file mode 100644 index 0000000..aa20b81 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pygram.py @@ -0,0 +1,212 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +"""Export the Python grammar and symbols.""" + +# Python imports +import os + +from typing import Union + +# Local imports +from .pgen2 import token +from .pgen2 import driver + +from .pgen2.grammar import Grammar + +# Moved into initialize because mypyc can't handle __file__ (XXX bug) +# # The grammar file +# _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") +# _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), +# "PatternGrammar.txt") + + +class Symbols(object): + def __init__(self, grammar: Grammar) -> None: + """Initializer. + + Creates an attribute for each grammar symbol (nonterminal), + whose value is the symbol's type (an int >= 256). + """ + for name, symbol in grammar.symbol2number.items(): + setattr(self, name, symbol) + + +class _python_symbols(Symbols): + and_expr: int + and_test: int + annassign: int + arglist: int + argument: int + arith_expr: int + asexpr_test: int + assert_stmt: int + async_funcdef: int + async_stmt: int + atom: int + augassign: int + break_stmt: int + case_block: int + classdef: int + comp_for: int + comp_if: int + comp_iter: int + comp_op: int + comparison: int + compound_stmt: int + continue_stmt: int + decorated: int + decorator: int + decorators: int + del_stmt: int + dictsetmaker: int + dotted_as_name: int + dotted_as_names: int + dotted_name: int + encoding_decl: int + eval_input: int + except_clause: int + exec_stmt: int + expr: int + expr_stmt: int + exprlist: int + factor: int + file_input: int + flow_stmt: int + for_stmt: int + funcdef: int + global_stmt: int + guard: int + if_stmt: int + import_as_name: int + import_as_names: int + import_from: int + import_name: int + import_stmt: int + lambdef: int + listmaker: int + match_stmt: int + namedexpr_test: int + not_test: int + old_comp_for: int + old_comp_if: int + old_comp_iter: int + old_lambdef: int + old_test: int + or_test: int + parameters: int + pass_stmt: int + pattern: int + patterns: int + power: int + print_stmt: int + raise_stmt: int + return_stmt: int + shift_expr: int + simple_stmt: int + single_input: int + sliceop: int + small_stmt: int + subject_expr: int + star_expr: int + stmt: int + subscript: int + subscriptlist: int + suite: int + term: int + test: int + testlist: int + testlist1: int + testlist_gexp: int + testlist_safe: int + testlist_star_expr: int + tfpdef: int + tfplist: int + tname: int + trailer: int + try_stmt: int + typedargslist: int + varargslist: int + vfpdef: int + vfplist: int + vname: int + while_stmt: int + with_stmt: int + xor_expr: int + yield_arg: int + yield_expr: int + yield_stmt: int + + +class _pattern_symbols(Symbols): + Alternative: int + Alternatives: int + Details: int + Matcher: int + NegatedUnit: int + Repeater: int + Unit: int + + +python_grammar: Grammar +python_grammar_no_print_statement: Grammar +python_grammar_no_print_statement_no_exec_statement: Grammar +python_grammar_no_print_statement_no_exec_statement_async_keywords: Grammar +python_grammar_no_exec_statement: Grammar +pattern_grammar: Grammar +python_grammar_soft_keywords: Grammar + +python_symbols: _python_symbols +pattern_symbols: _pattern_symbols + + +def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: + global python_grammar + global python_grammar_no_print_statement + global python_grammar_no_print_statement_no_exec_statement + global python_grammar_no_print_statement_no_exec_statement_async_keywords + global python_grammar_soft_keywords + global python_symbols + global pattern_grammar + global pattern_symbols + + # 
The grammar file + _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") + _PATTERN_GRAMMAR_FILE = os.path.join( + os.path.dirname(__file__), "PatternGrammar.txt" + ) + + # Python 2 + python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE, cache_dir) + soft_keywords = python_grammar.soft_keywords.copy() + python_grammar.soft_keywords.clear() + + python_symbols = _python_symbols(python_grammar) + + # Python 2 + from __future__ import print_function + python_grammar_no_print_statement = python_grammar.copy() + del python_grammar_no_print_statement.keywords["print"] + + # Python 3.0-3.6 + python_grammar_no_print_statement_no_exec_statement = python_grammar.copy() + del python_grammar_no_print_statement_no_exec_statement.keywords["print"] + del python_grammar_no_print_statement_no_exec_statement.keywords["exec"] + + # Python 3.7+ + python_grammar_no_print_statement_no_exec_statement_async_keywords = ( + python_grammar_no_print_statement_no_exec_statement.copy() + ) + python_grammar_no_print_statement_no_exec_statement_async_keywords.async_keywords = ( + True + ) + + # Python 3.10+ + python_grammar_soft_keywords = ( + python_grammar_no_print_statement_no_exec_statement_async_keywords.copy() + ) + python_grammar_soft_keywords.soft_keywords = soft_keywords + + pattern_grammar = driver.load_packaged_grammar( + "blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir + ) + pattern_symbols = _pattern_symbols(pattern_grammar) diff --git a/myenv/lib/python3.9/site-packages/blib2to3/pytree.py b/myenv/lib/python3.9/site-packages/blib2to3/pytree.py new file mode 100644 index 0000000..bd86270 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/blib2to3/pytree.py @@ -0,0 +1,980 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +""" +Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +There's also a pattern matching implementation here. +""" + +# mypy: allow-untyped-defs + +from typing import ( + Any, + Dict, + Iterator, + List, + Optional, + Text, + Tuple, + TypeVar, + Union, + Set, + Iterable, +) +from blib2to3.pgen2.grammar import Grammar + +__author__ = "Guido van Rossum " + +import sys +from io import StringIO + +HUGE: int = 0x7FFFFFFF # maximum repeat count, default max + +_type_reprs: Dict[int, Union[Text, int]] = {} + + +def type_repr(type_num: int) -> Union[Text, int]: + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name in dir(python_symbols): + val = getattr(python_symbols, name) + if type(val) == int: + _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) + + +_P = TypeVar("_P", bound="Base") + +NL = Union["Node", "Leaf"] +Context = Tuple[Text, Tuple[int, int]] +RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]] + + +class Base(object): + + """ + Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. 
+ """ + + # Default values for instance variables + type: int # int: token number (< 256) or symbol number (>= 256) + parent: Optional["Node"] = None # Parent node pointer, or None + children: List[NL] # List of subnodes + was_changed: bool = False + was_checked: bool = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other: Any) -> bool: + """ + Compare two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + @property + def prefix(self) -> Text: + raise NotImplementedError + + def _eq(self: _P, other: _P) -> bool: + """ + Compare two nodes for equality. + + This is called by __eq__ and __ne__. It is only called if the two nodes + have the same type. This must be implemented by the concrete subclass. + Nodes should be considered equal if they have the same structure, + ignoring the prefix string and other context information. + """ + raise NotImplementedError + + def __deepcopy__(self: _P, memo: Any) -> _P: + return self.clone() + + def clone(self: _P) -> _P: + """ + Return a cloned (deep) copy of self. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def post_order(self) -> Iterator[NL]: + """ + Return a post-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def pre_order(self) -> Iterator[NL]: + """ + Return a pre-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def replace(self, new: Union[NL, List[NL]]) -> None: + """Replace this node with a new one in the parent.""" + assert self.parent is not None, str(self) + assert new is not None + if not isinstance(new, list): + new = [new] + l_children = [] + found = False + for ch in self.parent.children: + if ch is self: + assert not found, (self.parent.children, self, new) + if new is not None: + l_children.extend(new) + found = True + else: + l_children.append(ch) + assert found, (self.children, self, new) + self.parent.children = l_children + self.parent.changed() + self.parent.invalidate_sibling_maps() + for x in new: + x.parent = self.parent + self.parent = None + + def get_lineno(self) -> Optional[int]: + """Return the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return None + node = node.children[0] + return node.lineno + + def changed(self) -> None: + if self.was_changed: + return + if self.parent: + self.parent.changed() + self.was_changed = True + + def remove(self) -> Optional[int]: + """ + Remove the node from the tree. Returns the position of the node in its + parent's children before it was removed. + """ + if self.parent: + for i, node in enumerate(self.parent.children): + if node is self: + del self.parent.children[i] + self.parent.changed() + self.parent.invalidate_sibling_maps() + self.parent = None + return i + return None + + @property + def next_sibling(self) -> Optional[NL]: + """ + The node immediately following the invocant in their parent's children + list. 
If the invocant does not have a next sibling, it is None + """ + if self.parent is None: + return None + + if self.parent.next_sibling_map is None: + self.parent.update_sibling_maps() + assert self.parent.next_sibling_map is not None + return self.parent.next_sibling_map[id(self)] + + @property + def prev_sibling(self) -> Optional[NL]: + """ + The node immediately preceding the invocant in their parent's children + list. If the invocant does not have a previous sibling, it is None. + """ + if self.parent is None: + return None + + if self.parent.prev_sibling_map is None: + self.parent.update_sibling_maps() + assert self.parent.prev_sibling_map is not None + return self.parent.prev_sibling_map[id(self)] + + def leaves(self) -> Iterator["Leaf"]: + for child in self.children: + yield from child.leaves() + + def depth(self) -> int: + if self.parent is None: + return 0 + return 1 + self.parent.depth() + + def get_suffix(self) -> Text: + """ + Return the string immediately following the invocant node. This is + effectively equivalent to node.next_sibling.prefix + """ + next_sib = self.next_sibling + if next_sib is None: + return "" + prefix = next_sib.prefix + return prefix + + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + fixers_applied: Optional[List[Any]] + used_names: Optional[Set[Text]] + + def __init__( + self, + type: int, + children: List[NL], + context: Optional[Any] = None, + prefix: Optional[Text] = None, + fixers_applied: Optional[List[Any]] = None, + ) -> None: + """ + Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. + """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + self.invalidate_sibling_maps() + if prefix is not None: + self.prefix = prefix + if fixers_applied: + self.fixers_applied = fixers_applied[:] + else: + self.fixers_applied = None + + def __repr__(self) -> Text: + """Return a canonical string representation.""" + assert self.type is not None + return "%s(%s, %r)" % ( + self.__class__.__name__, + type_repr(self.type), + self.children, + ) + + def __str__(self) -> Text: + """ + Return a pretty string representation. + + This reproduces the input source exactly. + """ + return "".join(map(str, self.children)) + + def _eq(self, other) -> bool: + """Compare two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def clone(self) -> "Node": + assert self.type is not None + """Return a cloned (deep) copy of self.""" + return Node( + self.type, + [ch.clone() for ch in self.children], + fixers_applied=self.fixers_applied, + ) + + def post_order(self) -> Iterator[NL]: + """Return a post-order iterator for the tree.""" + for child in self.children: + yield from child.post_order() + yield self + + def pre_order(self) -> Iterator[NL]: + """Return a pre-order iterator for the tree.""" + yield self + for child in self.children: + yield from child.pre_order() + + @property + def prefix(self) -> Text: + """ + The whitespace and comments preceding this node in the input. 
+ """ + if not self.children: + return "" + return self.children[0].prefix + + @prefix.setter + def prefix(self, prefix) -> None: + if self.children: + self.children[0].prefix = prefix + + def set_child(self, i: int, child: NL) -> None: + """ + Equivalent to 'node.children[i] = child'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children[i].parent = None + self.children[i] = child + self.changed() + self.invalidate_sibling_maps() + + def insert_child(self, i: int, child: NL) -> None: + """ + Equivalent to 'node.children.insert(i, child)'. This method also sets + the child's parent attribute appropriately. + """ + child.parent = self + self.children.insert(i, child) + self.changed() + self.invalidate_sibling_maps() + + def append_child(self, child: NL) -> None: + """ + Equivalent to 'node.children.append(child)'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children.append(child) + self.changed() + self.invalidate_sibling_maps() + + def invalidate_sibling_maps(self) -> None: + self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None + self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None + + def update_sibling_maps(self) -> None: + _prev: Dict[int, Optional[NL]] = {} + _next: Dict[int, Optional[NL]] = {} + self.prev_sibling_map = _prev + self.next_sibling_map = _next + previous: Optional[NL] = None + for current in self.children: + _prev[id(current)] = previous + _next[id(previous)] = current + previous = current + _next[id(current)] = None + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + value: Text + fixers_applied: List[Any] + bracket_depth: int + opening_bracket: "Leaf" + used_names: Optional[Set[Text]] + _prefix = "" # Whitespace and comments preceding this token in the input + lineno: int = 0 # Line where this token starts in the input + column: int = 0 # Column where this token starts in the input + + def __init__( + self, + type: int, + value: Text, + context: Optional[Context] = None, + prefix: Optional[Text] = None, + fixers_applied: List[Any] = [], + ) -> None: + """ + Initializer. + + Takes a type constant (a token number < 256), a string value, and an + optional context keyword argument. + """ + + assert 0 <= type < 256, type + if context is not None: + self._prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self._prefix = prefix + self.fixers_applied: Optional[List[Any]] = fixers_applied[:] + self.children = [] + + def __repr__(self) -> str: + """Return a canonical string representation.""" + from .pgen2.token import tok_name + + assert self.type is not None + return "%s(%s, %r)" % ( + self.__class__.__name__, + tok_name.get(self.type, self.type), + self.value, + ) + + def __str__(self) -> Text: + """ + Return a pretty string representation. + + This reproduces the input source exactly. 
+ """ + return self._prefix + str(self.value) + + def _eq(self, other) -> bool: + """Compare two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def clone(self) -> "Leaf": + assert self.type is not None + """Return a cloned (deep) copy of self.""" + return Leaf( + self.type, + self.value, + (self.prefix, (self.lineno, self.column)), + fixers_applied=self.fixers_applied, + ) + + def leaves(self) -> Iterator["Leaf"]: + yield self + + def post_order(self) -> Iterator["Leaf"]: + """Return a post-order iterator for the tree.""" + yield self + + def pre_order(self) -> Iterator["Leaf"]: + """Return a pre-order iterator for the tree.""" + yield self + + @property + def prefix(self) -> Text: + """ + The whitespace and comments preceding this token in the input. + """ + return self._prefix + + @prefix.setter + def prefix(self, prefix) -> None: + self.changed() + self._prefix = prefix + + +def convert(gr: Grammar, raw_node: RawNode) -> NL: + """ + Convert raw node information to a Node or Leaf instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + type, value, context, children = raw_node + if children or type in gr.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. + assert children is not None + if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value or "", context=context) + + +_Results = Dict[Text, NL] + + +class BasePattern(object): + + """ + A pattern is a tree matching pattern. + + It looks for a specific node type (token or symbol), and + optionally for a specific content. + + This is an abstract base class. There are three concrete + subclasses: + + - LeafPattern matches a single leaf node; + - NodePattern matches a single node (usually non-leaf); + - WildcardPattern matches a sequence of nodes of variable length. + """ + + # Defaults for instance variables + type: Optional[int] + type = None # Node type (token if < 256, symbol if >= 256) + content: Any = None # Optional content matching pattern + name: Optional[Text] = None # Optional name used to store match in results dict + + def __new__(cls, *args, **kwds): + """Constructor that prevents BasePattern from being instantiated.""" + assert cls is not BasePattern, "Cannot instantiate BasePattern" + return object.__new__(cls) + + def __repr__(self) -> Text: + assert self.type is not None + args = [type_repr(self.type), self.content, self.name] + while args and args[-1] is None: + del args[-1] + return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + + def _submatch(self, node, results=None) -> bool: + raise NotImplementedError + + def optimize(self) -> "BasePattern": + """ + A subclass can define this as a hook for optimizations. + + Returns either self or another node with the same effect. + """ + return self + + def match(self, node: NL, results: Optional[_Results] = None) -> bool: + """ + Does this pattern exactly match a node? + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + Default implementation for non-wildcard patterns. 
+ """ + if self.type is not None and node.type != self.type: + return False + if self.content is not None: + r: Optional[_Results] = None + if results is not None: + r = {} + if not self._submatch(node, r): + return False + if r: + assert results is not None + results.update(r) + if results is not None and self.name: + results[self.name] = node + return True + + def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool: + """ + Does this pattern exactly match a sequence of nodes? + + Default implementation for non-wildcard patterns. + """ + if len(nodes) != 1: + return False + return self.match(nodes[0], results) + + def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]: + """ + Generator yielding all matches for this pattern. + + Default implementation for non-wildcard patterns. + """ + r: _Results = {} + if nodes and self.match(nodes[0], r): + yield 1, r + + +class LeafPattern(BasePattern): + def __init__( + self, + type: Optional[int] = None, + content: Optional[Text] = None, + name: Optional[Text] = None, + ) -> None: + """ + Initializer. Takes optional type, content, and name. + + The type, if given must be a token type (< 256). If not given, + this matches any *leaf* node; the content may still be required. + + The content, if given, must be a string. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert 0 <= type < 256, type + if content is not None: + assert isinstance(content, str), repr(content) + self.type = type + self.content = content + self.name = name + + def match(self, node: NL, results=None): + """Override match() to insist on a leaf node.""" + if not isinstance(node, Leaf): + return False + return BasePattern.match(self, node, results) + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + return self.content == node.value + + +class NodePattern(BasePattern): + + wildcards: bool = False + + def __init__( + self, + type: Optional[int] = None, + content: Optional[Iterable[Text]] = None, + name: Optional[Text] = None, + ) -> None: + """ + Initializer. Takes optional type, content, and name. + + The type, if given, must be a symbol type (>= 256). If the + type is None this matches *any* single node (leaf or not), + except if content is not None, in which it only matches + non-leaf nodes that also match the content pattern. + + The content, if not None, must be a sequence of Patterns that + must match the node's children exactly. If the content is + given, the type must not be None. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert type >= 256, type + if content is not None: + assert not isinstance(content, str), repr(content) + newcontent = list(content) + for i, item in enumerate(newcontent): + assert isinstance(item, BasePattern), (i, item) + # I don't even think this code is used anywhere, but it does cause + # unreachable errors from mypy. This function's signature does look + # odd though *shrug*. 
+ if isinstance(item, WildcardPattern): # type: ignore[unreachable] + self.wildcards = True # type: ignore[unreachable] + self.type = type + self.content = newcontent + self.name = name + + def _submatch(self, node, results=None) -> bool: + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + if self.wildcards: + for c, r in generate_matches(self.content, node.children): + if c == len(node.children): + if results is not None: + results.update(r) + return True + return False + if len(self.content) != len(node.children): + return False + for subpattern, child in zip(self.content, node.children): + if not subpattern.match(child, results): + return False + return True + + +class WildcardPattern(BasePattern): + + """ + A wildcard pattern can match zero or more nodes. + + This has all the flexibility needed to implement patterns like: + + .* .+ .? .{m,n} + (a b c | d e | f) + (...)* (...)+ (...)? (...){m,n} + + except it always uses non-greedy matching. + """ + + min: int + max: int + + def __init__( + self, + content: Optional[Text] = None, + min: int = 0, + max: int = HUGE, + name: Optional[Text] = None, + ) -> None: + """ + Initializer. + + Args: + content: optional sequence of subsequences of patterns; + if absent, matches one node; + if present, each subsequence is an alternative [*] + min: optional minimum number of times to match, default 0 + max: optional maximum number of times to match, default HUGE + name: optional name assigned to this match + + [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is + equivalent to (a b c | d e | f g h); if content is None, + this is equivalent to '.' in regular expression terms. + The min and max parameters work as follows: + min=0, max=maxint: .* + min=1, max=maxint: .+ + min=0, max=1: .? + min=1, max=1: . + If content is not None, replace the dot with the parenthesized + list of alternatives, e.g. 
(a b c | d e | f g h)* + """ + assert 0 <= min <= max <= HUGE, (min, max) + if content is not None: + f = lambda s: tuple(s) + wrapped_content = tuple(map(f, content)) # Protect against alterations + # Check sanity of alternatives + assert len(wrapped_content), repr( + wrapped_content + ) # Can't have zero alternatives + for alt in wrapped_content: + assert len(alt), repr(alt) # Can have empty alternatives + self.content = wrapped_content + self.min = min + self.max = max + self.name = name + + def optimize(self) -> Any: + """Optimize certain stacked wildcard patterns.""" + subpattern = None + if ( + self.content is not None + and len(self.content) == 1 + and len(self.content[0]) == 1 + ): + subpattern = self.content[0][0] + if self.min == 1 and self.max == 1: + if self.content is None: + return NodePattern(name=self.name) + if subpattern is not None and self.name == subpattern.name: + return subpattern.optimize() + if ( + self.min <= 1 + and isinstance(subpattern, WildcardPattern) + and subpattern.min <= 1 + and self.name == subpattern.name + ): + return WildcardPattern( + subpattern.content, + self.min * subpattern.min, + self.max * subpattern.max, + subpattern.name, + ) + return self + + def match(self, node, results=None) -> bool: + """Does this pattern exactly match a node?""" + return self.match_seq([node], results) + + def match_seq(self, nodes, results=None) -> bool: + """Does this pattern exactly match a sequence of nodes?""" + for c, r in self.generate_matches(nodes): + if c == len(nodes): + if results is not None: + results.update(r) + if self.name: + results[self.name] = list(nodes) + return True + return False + + def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]: + """ + Generator yielding matches for a sequence of nodes. + + Args: + nodes: sequence of nodes + + Yields: + (count, results) tuples where: + count: the match comprises nodes[:count]; + results: dict containing named submatches. + """ + if self.content is None: + # Shortcut for special case (see __init__.__doc__) + for count in range(self.min, 1 + min(len(nodes), self.max)): + r = {} + if self.name: + r[self.name] = nodes[:count] + yield count, r + elif self.name == "bare_name": + yield self._bare_name_matches(nodes) + else: + # The reason for this is that hitting the recursion limit usually + # results in some ugly messages about how RuntimeErrors are being + # ignored. We only have to do this on CPython, though, because other + # implementations don't have this nasty bug in the first place. + if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() + try: + for count, r in self._recursive_matches(nodes, 0): + if self.name: + r[self.name] = nodes[:count] + yield count, r + except RuntimeError: + # We fall back to the iterative pattern matching scheme if the recursive + # scheme hits the recursion limit. 
+ for count, r in self._iterative_matches(nodes): + if self.name: + r[self.name] = nodes[:count] + yield count, r + finally: + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr + + def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]: + """Helper to iteratively yield the matches.""" + nodelen = len(nodes) + if 0 >= self.min: + yield 0, {} + + results = [] + # generate matches that use just one alt from self.content + for alt in self.content: + for c, r in generate_matches(alt, nodes): + yield c, r + results.append((c, r)) + + # for each match, iterate down the nodes + while results: + new_results = [] + for c0, r0 in results: + # stop if the entire set of nodes has been matched + if c0 < nodelen and c0 <= self.max: + for alt in self.content: + for c1, r1 in generate_matches(alt, nodes[c0:]): + if c1 > 0: + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + new_results.append((c0 + c1, r)) + results = new_results + + def _bare_name_matches(self, nodes) -> Tuple[int, _Results]: + """Special optimized matcher for bare_name.""" + count = 0 + r = {} # type: _Results + done = False + max = len(nodes) + while not done and count < max: + done = True + for leaf in self.content: + if leaf[0].match(nodes[count], r): + count += 1 + done = False + break + assert self.name is not None + r[self.name] = nodes[:count] + return count, r + + def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]: + """Helper to recursively yield the matches.""" + assert self.content is not None + if count >= self.min: + yield 0, {} + if count < self.max: + for alt in self.content: + for c0, r0 in generate_matches(alt, nodes): + for c1, r1 in self._recursive_matches(nodes[c0:], count + 1): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + + +class NegatedPattern(BasePattern): + def __init__(self, content: Optional[Any] = None) -> None: + """ + Initializer. + + The argument is either a pattern or None. If it is None, this + only matches an empty sequence (effectively '$' in regex + lingo). If it is not None, this matches whenever the argument + pattern doesn't have any matches. + """ + if content is not None: + assert isinstance(content, BasePattern), repr(content) + self.content = content + + def match(self, node, results=None) -> bool: + # We never match a node in its entirety + return False + + def match_seq(self, nodes, results=None) -> bool: + # We only match an empty sequence of nodes in its entirety + return len(nodes) == 0 + + def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]: + if self.content is None: + # Return a match if there is an empty sequence + if len(nodes) == 0: + yield 0, {} + else: + # Return a match if the argument pattern has no matches + for c, r in self.content.generate_matches(nodes): + return + yield 0, {} + + +def generate_matches( + patterns: List[BasePattern], nodes: List[NL] +) -> Iterator[Tuple[int, _Results]]: + """ + Generator yielding matches for a sequence of patterns and nodes. + + Args: + patterns: a sequence of patterns + nodes: a sequence of nodes + + Yields: + (count, results) tuples where: + count: the entire sequence of patterns matches nodes[:count]; + results: dict containing named submatches. 
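To see how these pattern classes compose in practice, here is an illustrative sketch (not taken from the vendored file; it assumes the standard token names from blib2to3.pgen2.token): a LeafPattern picks out a single named leaf, while a bare WildcardPattern matches a run of nodes non-greedily.

    # Sketch: matching leaves with LeafPattern and WildcardPattern.
    from blib2to3.pgen2 import token
    from blib2to3.pytree import Leaf, LeafPattern, WildcardPattern

    leaf = Leaf(token.NAME, "certifi")
    ident = LeafPattern(type=token.NAME, name="id")   # any NAME token, stored as "id"
    results = {}
    assert ident.match(leaf, results)
    assert results["id"].value == "certifi"

    run = WildcardPattern(name="run", min=0, max=2)   # roughly ".{0,2}" over nodes
    leaves = [Leaf(token.NAME, "a"), Leaf(token.NAME, "b")]
    seq_results = {}
    assert run.match_seq(leaves, seq_results)         # consumes the whole sequence
    assert seq_results["run"] == leaves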
+ """ + if not patterns: + yield 0, {} + else: + p, rest = patterns[0], patterns[1:] + for c0, r0 in p.generate_matches(nodes): + if not rest: + yield c0, r0 + else: + for c1, r1 in generate_matches(rest, nodes[c0:]): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/LICENSE new file mode 100644 index 0000000..c2fda9a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/LICENSE @@ -0,0 +1,21 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011# +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1# +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. +Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/METADATA b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/METADATA new file mode 100644 index 0000000..6f8de5d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/METADATA @@ -0,0 +1,81 @@ +Metadata-Version: 2.1 +Name: certifi +Version: 2022.6.15 +Summary: Python package for providing Mozilla's CA Bundle. +Home-page: https://github.com/certifi/python-certifi +Author: Kenneth Reitz +Author-email: me@kennethreitz.com +License: MPL-2.0 +Project-URL: Source, https://github.com/certifi/python-certifi +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.6 +License-File: LICENSE + +Certifi: Python SSL Certificates +================================ + +Certifi provides Mozilla's carefully curated collection of Root Certificates for +validating the trustworthiness of SSL certificates while verifying the identity +of TLS hosts. 
It has been extracted from the `Requests`_ project. + +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed certificate authority (CA) bundle, you can use the +built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem' + +Or from the command line:: + + $ python -m certifi + /usr/local/lib/python3.7/site-packages/certifi/cacert.pem + +Enjoy! + +1024-bit Root Certificates +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Browsers and certificate authorities have concluded that 1024-bit keys are +unacceptably weak for certificates, particularly root certificates. For this +reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its +bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key) +certificate from the same CA. Because Mozilla removed these certificates from +its bundle, ``certifi`` removed them as well. + +In previous versions, ``certifi`` provided the ``certifi.old_where()`` function +to intentionally re-add the 1024-bit roots back into your bundle. This was not +recommended in production and therefore was removed at the end of 2018. + +.. _`Requests`: https://requests.readthedocs.io/en/master/ + +Addition/Removal of Certificates +-------------------------------- + +Certifi does not support any addition/removal or other modification of the +CA trust store content. This project is intended to provide a reliable and +highly portable root of trust to python deployments. Look to upstream projects +for methods to use alternate trust. + + diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/RECORD b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/RECORD new file mode 100644 index 0000000..df832c3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/RECORD @@ -0,0 +1,11 @@ +certifi/__init__.py,sha256=SuZ3iYmzdRyUv-PiaZkquUgXtWZ16ICUKgymlEBspx0,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/cacert.pem,sha256=pZ_eiDoO-ddKudrQCWieABc9KFlbV0FsmLLugygMbkw,285222 +certifi/core.py,sha256=G5LqCBr4o8bozzzlYBE8nsd_ziB6XcxJiuMV4llFeYY,2515 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +certifi-2022.6.15.dist-info/LICENSE,sha256=vp2C82ES-Hp_HXTs1Ih-FGe7roh4qEAEoAEXseR1o-I,1049 +certifi-2022.6.15.dist-info/METADATA,sha256=1sLjV7SjXkcGhJr631JUqCLCDnqgTyFoFe-tRLxakTE,2804 +certifi-2022.6.15.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +certifi-2022.6.15.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi-2022.6.15.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +certifi-2022.6.15.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/top_level.txt new file mode 100644 index 0000000..963eac5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi-2022.6.15.dist-info/top_level.txt @@ 
-0,0 +1 @@ +certifi diff --git a/myenv/lib/python3.9/site-packages/certifi/__init__.py b/myenv/lib/python3.9/site-packages/certifi/__init__.py new file mode 100644 index 0000000..bdeb06b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi/__init__.py @@ -0,0 +1,4 @@ +from .core import contents, where + +__all__ = ["contents", "where"] +__version__ = "2022.06.15" diff --git a/myenv/lib/python3.9/site-packages/certifi/__main__.py b/myenv/lib/python3.9/site-packages/certifi/__main__.py new file mode 100644 index 0000000..8945b5d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi/__main__.py @@ -0,0 +1,12 @@ +import argparse + +from certifi import contents, where + +parser = argparse.ArgumentParser() +parser.add_argument("-c", "--contents", action="store_true") +args = parser.parse_args() + +if args.contents: + print(contents()) +else: + print(where()) diff --git a/myenv/lib/python3.9/site-packages/certifi/cacert.pem b/myenv/lib/python3.9/site-packages/certifi/cacert.pem new file mode 100644 index 0000000..ee9be4c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi/cacert.pem @@ -0,0 +1,4685 @@ + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz 
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 
31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. 
OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm 
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF 
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- 
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ 
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy 
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. 
OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv 
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. 
+# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Label: "EC-ACC" +# Serial: -23701579247955709139626555126524820479 +# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09 +# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8 +# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99 +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB +8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy +dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 +YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 +dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh +IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD +LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG +EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g +KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD +ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu +bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg +ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R +85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm +4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV +HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd +QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t 
+lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB +o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 +opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo +dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW +ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN +AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y +/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k +SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy +Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS +Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl +nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 
Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS 
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF 
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. 
+# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH 
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE 
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. 
OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy 
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM 
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga 
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- 
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO 
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ 
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 
4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l 
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG 
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy 
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr 
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 
2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# 
Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy +YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS +sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi 
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO +ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB 
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby +RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 +WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq 
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: "Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 
58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa +LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G4" +# Serial: 289383649854506086828220374796556676440 +# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88 +# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01 +# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88 +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw +gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL +Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg +MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw +BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0 +MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1 +c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ +bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ +2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E +T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j +5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM +C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T +DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX +wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A +2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm +nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl +N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj +c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS +5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS +Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr +hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/ +B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI +AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw +H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+ +b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk +2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol +IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk +5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY +n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV 
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC +gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB +RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. 
+# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ +d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk +PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + 
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- 
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: 
CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 +MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA RSA v3" +# Serial: 75951268308633135324246244059508261641472512052 +# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4 +# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9 +# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2 +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL +BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt +VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw +JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw +OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG +QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD +QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7 +7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx +uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8 +7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/ +rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL +l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG +wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4 +znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO +M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK +5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH +nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo +DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy +tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL +BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ +6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18 +Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ +3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk +vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9 +9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ +mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA +VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF +9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM +moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8 +bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. 
OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA ECC v3" +# Serial: 218504919822255052842371958738296604628416471745 +# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64 +# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84 +# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13 +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw +gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn +cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD +VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2 +NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r +YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh +IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF +Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ +KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK +fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB +Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C +MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp +ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6 +7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx +vmjkI6TZraE3 +-----END CERTIFICATE----- diff --git a/myenv/lib/python3.9/site-packages/certifi/core.py b/myenv/lib/python3.9/site-packages/certifi/core.py new file mode 100644 index 0000000..497d938 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/certifi/core.py @@ -0,0 +1,68 @@ +""" +certifi.py +~~~~~~~~~~ + +This module returns the installation location of cacert.pem or its contents. +""" +import os +import types +from typing import Union + +try: + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the file + # in cases where we're inside of a zipimport situation until someone + # actually calls where(), but we don't want to re-extract the file + # on every call of where(), so we'll do it once then store it in a + # global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you to + # manage the cleanup of this file, so it doesn't actually return a + # path, it returns a context manager that will give you the path + # when you enter it and will do any cleanup when you leave it. In + # the common case of not needing a temporary file, it will just + # return the file system location and the __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. + _CACERT_CTX = get_path("certifi", "cacert.pem") + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + + return _CACERT_PATH + + +except ImportError: + Package = Union[types.ModuleType, str] + Resource = Union[str, "os.PathLike"] + + # This fallback will work for Python versions prior to 3.7 that lack the + # importlib.resources module but relies on the existing `where` function + # so won't address issues with environments like PyOxidizer that don't set + # __file__ on modules. 
+ def read_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict' + ) -> str: + with open(where(), encoding=encoding) as data: + return data.read() + + # If we don't have importlib.resources, then we will just do the old logic + # of assuming we're on the filesystem and munge the path directly. + def where() -> str: + f = os.path.dirname(__file__) + + return os.path.join(f, "cacert.pem") + + +def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/myenv/lib/python3.9/site-packages/certifi/py.typed b/myenv/lib/python3.9/site-packages/certifi/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/LICENSE new file mode 100644 index 0000000..29225ee --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/LICENSE @@ -0,0 +1,26 @@ + +Except when otherwise stated (look for LICENSE files in directories or +information at the beginning of each file) all software and +documentation is licensed as follows: + + The MIT License + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/METADATA new file mode 100644 index 0000000..538e679 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/METADATA @@ -0,0 +1,34 @@ +Metadata-Version: 2.1 +Name: cffi +Version: 1.15.1 +Summary: Foreign Function Interface for Python calling C code. 
+Home-page: http://cffi.readthedocs.org +Author: Armin Rigo, Maciej Fijalkowski +Author-email: python-cffi@googlegroups.com +License: MIT +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: License :: OSI Approved :: MIT License +License-File: LICENSE +Requires-Dist: pycparser + + +CFFI +==== + +Foreign Function Interface for Python calling C code. +Please see the `Documentation `_. + +Contact +------- + +`Mailing list `_ diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/RECORD new file mode 100644 index 0000000..e19bbf9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/RECORD @@ -0,0 +1,28 @@ +_cffi_backend.cpython-39-darwin.so,sha256=g3b3EkJ_bteITLFqq1CQ04r_H2TV6SWbw3OcmnViTU0,202424 +cffi/_cffi_include.h,sha256=tKnA1rdSoPHp23FnDL1mDGwFo-Uj6fXfA6vA6kcoEUc,14800 +cffi/backend_ctypes.py,sha256=h5ZIzLc6BFVXnGyc9xPqZWUS7qGy7yFSDqXe68Sa8z4,42454 +cffi/error.py,sha256=v6xTiS4U0kvDcy4h_BDRo5v39ZQuj-IMRYLv5ETddZs,877 +cffi/setuptools_ext.py,sha256=RUR17N5f8gpiQBBlXL34P9FtOu1mhHIaAf3WJlg5S4I,8931 +cffi/_cffi_errors.h,sha256=zQXt7uR_m8gUW-fI2hJg0KoSkJFwXv8RGUkEDZ177dQ,3908 +cffi/__init__.py,sha256=6xB_tafGvhhM5Xvj0Ova3oPC2SEhVlLTEObVLnazeiM,513 +cffi/cffi_opcode.py,sha256=v9RdD_ovA8rCtqsC95Ivki5V667rAOhGgs3fb2q9xpM,5724 +cffi/vengine_gen.py,sha256=5dX7s1DU6pTBOMI6oTVn_8Bnmru_lj932B6b4v29Hlg,26684 +cffi/pkgconfig.py,sha256=LP1w7vmWvmKwyqLaU1Z243FOWGNQMrgMUZrvgFuOlco,4374 +cffi/model.py,sha256=_GH_UF1Rn9vC4AvmgJm6qj7RUXXG3eqKPc8bPxxyBKE,21768 +cffi/ffiplatform.py,sha256=HMXqR8ks2wtdsNxGaWpQ_PyqIvtiuos_vf1qKCy-cwg,4046 +cffi/api.py,sha256=yxJalIePbr1mz_WxAHokSwyP5CVYde44m-nolHnbJNo,42064 +cffi/vengine_cpy.py,sha256=YglN8YS-UaHEv2k2cxgotNWE87dHX20-68EyKoiKUYA,43320 +cffi/_embedding.h,sha256=9tnjF44QRobR8z0FGqAmAZY-wMSBOae1SUPqHccowqc,17680 +cffi/commontypes.py,sha256=QS4uxCDI7JhtTyjh1hlnCA-gynmaszWxJaRRLGkJa1A,2689 +cffi/lock.py,sha256=l9TTdwMIMpi6jDkJGnQgE9cvTIR7CAntIJr8EGHt3pY,747 +cffi/recompiler.py,sha256=YgVYTh2CrXIobo-vMk7_K9mwAXdd_LqB4-IbYABQ488,64598 +cffi/cparser.py,sha256=rO_1pELRw1gI1DE1m4gi2ik5JMfpxouAACLXpRPlVEA,44231 +cffi/verifier.py,sha256=ESwuXWXtXrKEagCKveLRDjFzLNCyaKdqAgAlKREcyhY,11253 +cffi/parse_c_type.h,sha256=OdwQfwM9ktq6vlCB43exFQmxDBtj2MBNdK8LYl15tjw,5976 +cffi-1.15.1.dist-info/LICENSE,sha256=BLgPWwd7vtaICM_rreteNSPyqMmpZJXFh72W3x6sKjM,1294 +cffi-1.15.1.dist-info/WHEEL,sha256=pfjXB0CCNW4PSSqQc2t4Up6p3o0jxBnHy_2o38FQkyE,109 +cffi-1.15.1.dist-info/entry_points.txt,sha256=y6jTxnyeuLnL-XJcDv8uML3n6wyYiGRg8MTp_QGJ9Ho,75 +cffi-1.15.1.dist-info/top_level.txt,sha256=rE7WR3rZfNKxWI9-jn6hsHCAl7MDkB-FmuQbxWjFehQ,19 +cffi-1.15.1.dist-info/METADATA,sha256=KP4G3WmavRgDGwD2b8Y_eDsM1YeV6ckcG6Alz3-D8VY,1144 +cffi-1.15.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +cffi-1.15.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/WHEEL 
b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/WHEEL new file mode 100644 index 0000000..ef5e11a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_10_9_x86_64 + diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/entry_points.txt new file mode 100644 index 0000000..4b0274f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[distutils.setup_keywords] +cffi_modules = cffi.setuptools_ext:cffi_modules diff --git a/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/top_level.txt new file mode 100644 index 0000000..f645779 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi-1.15.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_cffi_backend +cffi diff --git a/myenv/lib/python3.9/site-packages/cffi/__init__.py b/myenv/lib/python3.9/site-packages/cffi/__init__.py new file mode 100644 index 0000000..90e2e65 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/__init__.py @@ -0,0 +1,14 @@ +__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError', + 'FFIError'] + +from .api import FFI +from .error import CDefError, FFIError, VerificationError, VerificationMissing +from .error import PkgConfigError + +__version__ = "1.15.1" +__version_info__ = (1, 15, 1) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/myenv/lib/python3.9/site-packages/cffi/_cffi_errors.h b/myenv/lib/python3.9/site-packages/cffi/_cffi_errors.h new file mode 100644 index 0000000..158e059 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/_cffi_errors.h @@ -0,0 +1,149 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " try:\n" + " of.write(x)\n" + " except: pass\n" + " self.buf += x\n" + " def flush(self):\n" + " pass\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. */ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/myenv/lib/python3.9/site-packages/cffi/_cffi_include.h b/myenv/lib/python3.9/site-packages/cffi/_cffi_include.h new file mode 100644 index 0000000..e4c0a67 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/_cffi_include.h @@ -0,0 +1,385 @@ +#define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+ + The implementation is messy (issue #350): on Windows, with _MSC_VER, + we have to define Py_LIMITED_API even before including pyconfig.h. + In that case, we guess what pyconfig.h will do to the macros above, + and check our guess after the #include. + + Note that on Windows, with CPython 3.x, you need >= 3.5 and virtualenv + version >= 16.0.0. With older versions of either, you don't get a + copy of PYTHON3.DLL in the virtualenv. We can't check the version of + CPython *before* we even include pyconfig.h. ffi.set_source() puts + a ``#define _CFFI_NO_LIMITED_API'' at the start of this file if it is + running on Windows < 3.5, as an attempt at fixing it, but that's + arguably wrong because it may not be the target version of Python. + Still better than nothing I guess. As another workaround, you can + remove the definition of Py_LIMITED_API here. + + See also 'py_limited_api' in cffi/setuptools_ext.py. +*/ +#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API) +# ifdef _MSC_VER +# if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API) +# define Py_LIMITED_API +# endif +# include + /* sanity-check: Py_LIMITED_API will cause crashes if any of these + are also defined. Normally, the Python file PC/pyconfig.h does not + cause any of these to be defined, with the exception that _DEBUG + causes Py_DEBUG. Double-check that. */ +# ifdef Py_LIMITED_API +# if defined(Py_DEBUG) +# error "pyconfig.h unexpectedly defines Py_DEBUG, but Py_LIMITED_API is set" +# endif +# if defined(Py_TRACE_REFS) +# error "pyconfig.h unexpectedly defines Py_TRACE_REFS, but Py_LIMITED_API is set" +# endif +# if defined(Py_REF_DEBUG) +# error "pyconfig.h unexpectedly defines Py_REF_DEBUG, but Py_LIMITED_API is set" +# endif +# endif +# else +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API) +# define Py_LIMITED_API +# endif +# endif +#endif + +#include +#ifdef __cplusplus +extern "C" { +#endif +#include +#include "parse_c_type.h" + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include +# endif +#endif + +#ifdef __GNUC__ +# define 
_CFFI_UNUSED_FN __attribute__((unused)) +#else +# define _CFFI_UNUSED_FN /* nothing */ +#endif + +#ifdef __cplusplus +# ifndef _Bool + typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */ +# endif +#endif + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + + +#if PY_MAJOR_VERSION >= 3 +# define PyInt_FromLong PyLong_FromLong +#endif + +#define _cffi_from_c_double PyFloat_FromDouble +#define _cffi_from_c_float PyFloat_FromDouble +#define _cffi_from_c_long PyInt_FromLong +#define _cffi_from_c_ulong PyLong_FromUnsignedLong +#define _cffi_from_c_longlong PyLong_FromLongLong +#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong + +#define _cffi_to_c_double PyFloat_AsDouble +#define _cffi_to_c_float PyFloat_AsDouble + +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) + +#define _cffi_to_c_int(o, type) \ + ((type)( \ + sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + not used any more +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(struct _cffi_ctypedescr *, \ + PyObject *, char 
**))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 + +struct _cffi_ctypedescr; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; + +#define _cffi_type(index) ( \ + assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \ + (struct _cffi_ctypedescr *)_cffi_types[index]) + +static PyObject *_cffi_init(const char *module_name, Py_ssize_t version, + const struct _cffi_type_context_s *ctx) +{ + PyObject *module, *o_arg, *new_module; + void *raw[] = { + (void *)module_name, + (void *)version, + (void *)_cffi_exports, + (void *)ctx, + }; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + o_arg = PyLong_FromVoidPtr((void *)raw); + if (o_arg == NULL) + goto failure; + + new_module = PyObject_CallMethod( + module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg); + + Py_DECREF(o_arg); + Py_DECREF(module); + return new_module; + + failure: + Py_XDECREF(module); + return NULL; +} + + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) + return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t((int)x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t((int)x); +} + +union _cffi_union_alignment_u { + unsigned char m_char; + unsigned short m_short; + unsigned int m_int; + unsigned long m_long; + unsigned long long m_longlong; + float m_float; + double m_double; + long double m_longdouble; +}; + +struct _cffi_freeme_s { + struct _cffi_freeme_s *next; + union _cffi_union_alignment_u alignment; +}; + +_CFFI_UNUSED_FN static int +_cffi_convert_array_argument(struct _cffi_ctypedescr *ctptr, PyObject *arg, + char **output_data, Py_ssize_t datasize, + struct _cffi_freeme_s **freeme) +{ + char *p; + if (datasize < 0) + return -1; + + p = *output_data; + if (p == NULL) { + struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc( + offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize); + if (fp == NULL) + return -1; + fp->next = *freeme; + *freeme = fp; + p = *output_data = (char *)&fp->alignment; + } + memset((void *)p, 0, (size_t)datasize); + return _cffi_convert_array_from_object(p, ctptr, arg); +} + +_CFFI_UNUSED_FN static void +_cffi_free_array_arguments(struct _cffi_freeme_s *freeme) +{ + do { + void *p = (void *)freeme; + freeme = freeme->next; + PyObject_Free(p); + } while (freeme != NULL); +} + +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN 
+static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org +#endif + + +#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) + +#define _cffi_prim_int(size, sign) \ + ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ + (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ + (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ + (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ + _CFFI__UNKNOWN_PRIM) + +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \ + _CFFI__UNKNOWN_FLOAT_PRIM) + +#define _cffi_check_int(got, got_nonpos, expected) \ + ((got_nonpos) == (expected <= 0) && \ + (got) == (unsigned long long)expected) + +#ifdef MS_WIN32 +# define _cffi_stdcall __stdcall +#else +# define _cffi_stdcall /* nothing */ +#endif + +#ifdef __cplusplus +} +#endif diff --git a/myenv/lib/python3.9/site-packages/cffi/_embedding.h b/myenv/lib/python3.9/site-packages/cffi/_embedding.h new file mode 100644 index 0000000..8e8df88 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/_embedding.h @@ -0,0 +1,528 @@ + +/***** Support code for embedding *****/ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. If _cffi_start_python() fails, then this is set + to NULL; otherwise, it should never be NULL. + + After initialization is complete, both are equal. However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. 
+*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. */ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + +#include "_cffi_errors.h" + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + PyObject *builtins; + + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. 
It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + builtins = PyEval_GetBuiltins(); + if (builtins == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. + Debugging load-time failures with embedding is not fun + */ + PyObject *ecap; + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.15.1" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + _cffi_stop_error_capture(ecap); + } + result = -1; + goto done; +} + +#if PY_VERSION_HEX < 0x03080000 +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ +#endif + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + (What it really does used to be completely different in Python 2 + and Python 3, with the Python 2 solution avoiding the spin-lock + around the Py_InitializeEx() call. However, after recent changes + to CPython 2.7 (issue #358) it no longer works. So we use the + Python 3 solution everywhere.) + + This initializes Python by calling Py_InitializeEx(). + Important: this must not be called concurrently at all. + So we use a global variable as a simple spin lock. 
This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. + + In Python < 3.8, we choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + In Python >= 3.8, this string array is no longer writable, so + instead we pick PyCapsuleType.tp_version_tag. We can't change + Python < 3.8 because someone might use a mixture of cffi + embedded modules, some of which were compiled before this file + changed. + */ + +#ifdef WITH_THREAD +# if PY_VERSION_HEX < 0x03080000 + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value, *locked_value; + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = old_value + 1; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# else + int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag; + int old_value, locked_value; + assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG)); + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = -42; + if (old_value == 0) { + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value == locked_value); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# endif +#endif + + /* call Py_InitializeEx() */ + if (!Py_IsInitialized()) { + _cffi_py_initialize(); +#if PY_VERSION_HEX < 0x03070000 + PyEval_InitThreads(); +#endif + PyEval_SaveThread(); /* release the GIL */ + /* the returned tstate must be the one that has been stored into the + autoTLSkey by _PyGILState_Init() called from Py_Initialize(). 
*/ + } + else { +#if PY_VERSION_HEX < 0x03070000 + /* PyEval_InitThreads() is always a no-op from CPython 3.7 */ + PyGILState_STATE state = PyGILState_Ensure(); + PyEval_InitThreads(); + PyGILState_Release(state); +#endif + } + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, locked_value, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void *func; /* function pointer */ + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. 
So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. */ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/myenv/lib/python3.9/site-packages/cffi/api.py b/myenv/lib/python3.9/site-packages/cffi/api.py new file mode 100644 index 0000000..999a8ae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/api.py @@ -0,0 +1,965 @@ +import sys, types +from .lock import allocate_lock +from .error import CDefError +from . import model + +try: + callable +except NameError: + # Python 3.1 + from collections import Callable + callable = lambda x: isinstance(x, Callable) + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +_unspecified = object() + + + +class FFI(object): + r''' + The main top-level class that you instantiate once, or once per module. + + Example usage: + + ffi = FFI() + ffi.cdef(""" + int printf(const char *, ...); + """) + + C = ffi.dlopen(None) # standard library + -or- + C = ffi.verify() # use a C compiler: verify the decl above is right + + C.printf("hello, %s!\n", ffi.new("char[]", "world")) + ''' + + def __init__(self, backend=None): + """Create an FFI instance. The 'backend' argument is used to + select a non-default backend, mostly for tests. + """ + if backend is None: + # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with + # _cffi_backend.so compiled. + import _cffi_backend as backend + from . import __version__ + if backend.__version__ != __version__: + # bad version! Try to be as explicit as possible. + if hasattr(backend, '__file__'): + # CPython + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. 
The two versions should be equal; check your installation." % ( + __version__, __file__, + backend.__version__, backend.__file__)) + else: + # PyPy + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % ( + __version__, __file__, backend.__version__)) + # (If you insist you can also try to pass the option + # 'backend=backend_ctypes.CTypesBackend()', but don't + # rely on it! It's probably not going to work well.) + + from . import cparser + self._backend = backend + self._lock = allocate_lock() + self._parser = cparser.Parser() + self._cached_btypes = {} + self._parsed_types = types.ModuleType('parsed_types').__dict__ + self._new_types = types.ModuleType('new_types').__dict__ + self._function_caches = [] + self._libraries = [] + self._cdefsources = [] + self._included_ffis = [] + self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None + self._embedding = None + self._typecache = model.get_typecache(backend) + if hasattr(backend, 'set_ffi'): + backend.set_ffi(self) + for name in list(backend.__dict__): + if name.startswith('RTLD_'): + setattr(self, name, getattr(backend, name)) + # + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) + if isinstance(backend, types.ModuleType): + # _cffi_backend: attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() + else: + # ctypes backend: attach these constants to the instance + self.NULL = self.cast(self.BVoidP, 0) + self.CData, self.CType = backend._get_types() + self.buffer = backend.buffer + + def cdef(self, csource, override=False, packed=False, pack=None): + """Parse the given C source. This registers all declared functions, + types, and global variables. The functions and global variables can + then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. + The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. + Alternatively, 'pack' can be a small integer, and requests for + alignment greater than that are ignored (pack=1 is equivalent to + packed=True). + """ + self._cdef(csource, override=override, packed=packed, pack=pack) + + def embedding_api(self, csource, packed=False, pack=None): + self._cdef(csource, packed=packed, pack=pack, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): + if not isinstance(csource, str): # unicode, on Python 2 + if not isinstance(csource, basestring): + raise TypeError("cdef() argument must be a string") + csource = csource.encode('ascii') + with self._lock: + self._cdef_version = object() + self._parser.parse(csource, override=override, **options) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() + finishlist = self._parser._recomplete + if finishlist: + self._parser._recomplete = [] + for tp in finishlist: + tp.finish_backend_type(self, finishlist) + + def dlopen(self, name, flags=0): + """Load and return a dynamic library identified by 'name'. + The standard C library can be loaded by passing None. 
+ Note that functions and types declared by 'ffi.cdef()' are not + linked to a particular library, just like C headers; in the + library we only look for the actual (untyped) symbols. + """ + if not (isinstance(name, basestring) or + name is None or + isinstance(name, self.CData)): + raise TypeError("dlopen(name): name must be a file name, None, " + "or an already-opened 'void *' handle") + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) + return lib + + def dlclose(self, lib): + """Close a library obtained with ffi.dlopen(). After this call, + access to functions or variables from the library will fail + (possibly with a segmentation fault). + """ + type(lib).__cffi_close__(lib) + + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + + def _typeof(self, cdecl, consider_function_as_funcptr=False): + # string -> ctype object + try: + result = self._parsed_types[cdecl] + except KeyError: + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) + return btype + + def typeof(self, cdecl): + """Parse the C type given as a string and return the + corresponding object. + It can also be used on 'cdata' instance to get its C type. + """ + if isinstance(cdecl, basestring): + return self._typeof(cdecl) + if isinstance(cdecl, self.CData): + return self._backend.typeof(cdecl) + if isinstance(cdecl, types.BuiltinFunctionType): + res = _builtin_function_type(cdecl) + if res is not None: + return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) + raise TypeError(type(cdecl)) + + def sizeof(self, cdecl): + """Return the size in bytes of the argument. It can be a + string naming a C type, or a 'cdata' instance. + """ + if isinstance(cdecl, basestring): + BType = self._typeof(cdecl) + return self._backend.sizeof(BType) + else: + return self._backend.sizeof(cdecl) + + def alignof(self, cdecl): + """Return the natural alignment size in bytes of the C type + given as a string. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.alignof(cdecl) + + def offsetof(self, cdecl, *fields_or_indexes): + """Return the offset of the named field inside the given + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] + + def new(self, cdecl, init=None): + """Allocate an instance according to the specified C type and + return a pointer to it. 
The specified C type must be either a + pointer or an array: ``new('X *')`` allocates an X and returns + a pointer to it, whereas ``new('X[n]')`` allocates an array of + n X'es and returns an array referencing it (which works + mostly like a pointer, like in C). You can also use + ``new('X[]', n)`` to allocate an array of a non-constant + length n. + + The memory is initialized following the rules of declaring a + global variable in C: by default it is zero-initialized, but + an explicit initializer can be given which can be used to + fill all or part of the memory. + + When the returned object goes out of scope, the memory + is freed. In other words the returned object has + ownership of the value of type 'cdecl' that it points to. This + means that the raw data can be used as long as this object is + kept alive, but must not be used for a longer time. Be careful + about that when copying the pointer to the memory somewhere + else, e.g. into another structure. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.newp(cdecl, init) + + def new_allocator(self, alloc=None, free=None, + should_clear_after_alloc=True): + """Return a new allocator, i.e. a function that behaves like ffi.new() + but uses the provided low-level 'alloc' and 'free' functions. + + 'alloc' is called with the size as argument. If it returns NULL, a + MemoryError is raised. 'free' is called with the result of 'alloc' + as argument. Both can be either Python function or directly C + functions. If 'free' is None, then no free function is called. + If both 'alloc' and 'free' are None, the default is used. + + If 'should_clear_after_alloc' is set to False, then the memory + returned by 'alloc' is assumed to be already cleared (or you are + fine with garbage); otherwise CFFI will clear it. + """ + compiled_ffi = self._backend.FFI() + allocator = compiled_ffi.new_allocator(alloc, free, + should_clear_after_alloc) + def allocate(cdecl, init=None): + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return allocator(cdecl, init) + return allocate + + def cast(self, cdecl, source): + """Similar to a C cast: returns an instance of the named C + type initialized with the given 'source'. The source is + casted between integers or pointers of any type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.cast(cdecl, source) + + def string(self, cdata, maxlen=-1): + """Return a Python string (or unicode string) from the 'cdata'. + If 'cdata' is a pointer or array of characters or bytes, returns + the null-terminated string. The returned string extends until + the first null character, or at most 'maxlen' characters. If + 'cdata' is an array then 'maxlen' defaults to its length. + + If 'cdata' is a pointer or array of wchar_t, returns a unicode + string following the same rules. + + If 'cdata' is a single character or byte or a wchar_t, returns + it as a string or unicode string. + + If 'cdata' is an enum, returns the value of the enumerator as a + string, or 'NUMBER' if the value is out of range. + """ + return self._backend.string(cdata, maxlen) + + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. 
+ 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + + #def buffer(self, cdata, size=-1): + # """Return a read-write buffer object that references the raw C data + # pointed to by the given 'cdata'. The 'cdata' must be a pointer or + # an array. Can be passed to functions expecting a buffer, or directly + # manipulated with: + # + # buf[:] get a copy of it in a regular string, or + # buf[idx] as a single character + # buf[:] = ... + # buf[idx] = ... change the content + # """ + # note that 'buffer' is a type, set on this instance by __init__ + + def from_buffer(self, cdecl, python_buffer=_unspecified, + require_writable=False): + """Return a cdata of the given type pointing to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types + str or unicode (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + + The first argument is optional and default to 'char[]'. + """ + if python_buffer is _unspecified: + cdecl, python_buffer = self.BCharA, cdecl + elif isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.from_buffer(cdecl, python_buffer, + require_writable) + + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + + def callback(self, cdecl, python_callable=None, error=None, onerror=None): + """Return a callback object or a decorator making such a + callback object. 'cdecl' must name a C function pointer type. + The callback invokes the specified 'python_callable' (which may + be provided either directly or via a decorator). Important: the + callback object must be manually kept alive for as long as the + callback may be invoked from the C level. + """ + def callback_decorator_wrap(python_callable): + if not callable(python_callable): + raise TypeError("the 'python_callable' argument " + "is not callable") + return self._backend.callback(cdecl, python_callable, + error, onerror) + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl, consider_function_as_funcptr=True) + if python_callable is None: + return callback_decorator_wrap # decorator mode + else: + return callback_decorator_wrap(python_callable) # direct mode + + def getctype(self, cdecl, replace_with=''): + """Return a string giving the C type 'cdecl', which may be itself + a string or a object. If 'replace_with' is given, it gives + extra text to append (or insert for more complicated C types), like + a variable name, or '*' to get actually the C type 'pointer-to-cdecl'. 
+ """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + replace_with = replace_with.strip() + if (replace_with.startswith('*') + and '&[' in self._backend.getcname(cdecl, '&')): + replace_with = '(%s)' % replace_with + elif replace_with and not replace_with[0] in '[(': + replace_with = ' ' + replace_with + return self._backend.getcname(cdecl, replace_with) + + def gc(self, cdata, destructor, size=0): + """Return a new cdata object that points to the same + data. Later, when this new cdata object is garbage-collected, + 'destructor(old_cdata_object)' will be called. + + The optional 'size' gives an estimate of the size, used to + trigger the garbage collection more eagerly. So far only used + on PyPy. It tells the GC that the returned object keeps alive + roughly 'size' bytes of external memory. + """ + return self._backend.gcp(cdata, destructor, size) + + def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! + try: + BType = self._cached_btypes[type] + except KeyError: + finishlist = [] + BType = type.get_cached_btype(self, finishlist) + for type in finishlist: + type.finish_backend_type(self, finishlist) + return BType + + def verify(self, source='', tmpdir=None, **kwargs): + """Verify that the current ffi signatures compile on this + machine, and return a dynamic library object. The dynamic + library can be used to call functions and access global + variables declared in this 'ffi'. The library is compiled + by the C compiler: it gives you C-level API compatibility + (including calling macros). This is unlike 'ffi.dlopen()', + which requires binary compatibility in the signatures. + """ + from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). + tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. + self.verifier = Verifier(self, source, tmpdir, **kwargs) + lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). + self._libraries.append(lib) + return lib + + def _get_errno(self): + return self._backend.get_errno() + def _set_errno(self, errno): + self._backend.set_errno(errno) + errno = property(_get_errno, _set_errno, None, + "the value of 'errno' from/to the C calls") + + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + + def _pointer_to(self, ctype): + with self._lock: + return model.pointer_cache(self, ctype) + + def addressof(self, cdata, *fields_or_indexes): + """Return the address of a . + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. 
+ """ + try: + ctype = self._backend.typeof(cdata) + except TypeError: + if '__addressof__' in type(cdata).__dict__: + return type(cdata).__addressof__(cdata, *fields_or_indexes) + raise + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 + ctypeptr = self._pointer_to(ctype) + return self._backend.rawaddressof(ctypeptr, cdata, offset) + + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + + def include(self, ffi_to_include): + """Includes the typedefs, structs, unions and enums defined + in another FFI instance. Usage is similar to a #include in C, + where a part of the program might include types defined in + another part for its own usage. Note that the include() + method has no effect on functions, constants and global + variables, which must anyway be accessed directly from the + lib object returned by the original FFI instance. + """ + if not isinstance(ffi_to_include, FFI): + raise TypeError("ffi.include() expects an argument that is also of" + " type cffi.FFI, not %r" % ( + type(ffi_to_include).__name__,)) + if ffi_to_include is self: + raise ValueError("self.include(self)") + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') + self._included_ffis.append(ffi_to_include) + + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + + def release(self, x): + self._backend.release(x) + + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + def ensure(key, value): + lst = kwds.setdefault(key, []) + if value not in lst: + lst.append(value) + # + if '__pypy__' in sys.builtin_module_names: + import os + if sys.platform == "win32": + # we need 'libpypy-c.lib'. Current distributions of + # pypy (>= 4.1) contain it as 'libs/python27.lib'. + pythonlib = "python{0[0]}{0[1]}".format(sys.version_info) + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'libs')) + else: + # we need 'libpypy-c.{so,dylib}', which should be by + # default located in 'sys.prefix/bin' for installed + # systems. + if sys.version_info < (3,): + pythonlib = "pypy-c" + else: + pythonlib = "pypy3-c" + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) + else: + if sys.platform == "win32": + template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' + else: + try: + import sysconfig + except ImportError: # 2.6 + from distutils import sysconfig + template = "python%d.%d" + if sysconfig.get_config_var('DEBUG_EXT'): + template += sysconfig.get_config_var('DEBUG_EXT') + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + ensure('libraries', pythonlib) + if sys.platform == "win32": + ensure('extra_link_args', '/MANIFEST') + + def set_source(self, module_name, source, source_extension='.c', **kwds): + import os + if hasattr(self, '_assigned_source'): + raise ValueError("set_source() cannot be called several times " + "per ffi object") + if not isinstance(module_name, basestring): + raise TypeError("'module_name' must be a string") + if os.sep in module_name or (os.altsep and os.altsep in module_name): + raise ValueError("'module_name' must not contain '/': use a dotted " + "name to make a 'package.module' location") + self._assigned_source = (str(module_name), source, + source_extension, kwds) + + def set_source_pkgconfig(self, module_name, pkgconfig_libs, source, + source_extension='.c', **kwds): + from . 
import pkgconfig + if not isinstance(pkgconfig_libs, list): + raise TypeError("the pkgconfig_libs argument must be a list " + "of package names") + kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs) + pkgconfig.merge_flags(kwds, kwds2) + self.set_source(module_name, source, source_extension, **kwds) + + def distutils_extension(self, tmpdir='build', verbose=True): + from distutils.dir_util import mkpath + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored + return self.verifier.get_extension() + raise ValueError("set_source() must be called before" + " distutils_extension()") + module_name, source, source_extension, kwds = self._assigned_source + if source is None: + raise TypeError("distutils_extension() is only for C extension " + "modules, not for dlopen()-style pure Python " + "modules") + mkpath(tmpdir) + ext, updated = recompile(self, module_name, + source, tmpdir=tmpdir, extradir=tmpdir, + source_extension=source_extension, + call_c_compiler=False, **kwds) + if verbose: + if updated: + sys.stderr.write("regenerated: %r\n" % (ext.sources[0],)) + else: + sys.stderr.write("not modified: %r\n" % (ext.sources[0],)) + return ext + + def emit_c_code(self, filename): + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before emit_c_code()") + module_name, source, source_extension, kwds = self._assigned_source + if source is None: + raise TypeError("emit_c_code() is only for C extension modules, " + "not for dlopen()-style pure Python modules") + recompile(self, module_name, source, + c_file=filename, call_c_compiler=False, **kwds) + + def emit_python_code(self, filename): + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before emit_c_code()") + module_name, source, source_extension, kwds = self._assigned_source + if source is not None: + raise TypeError("emit_python_code() is only for dlopen()-style " + "pure Python modules, not for C extension modules") + recompile(self, module_name, source, + c_file=filename, call_c_compiler=False, **kwds) + + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll/.dylib). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before compile()") + module_name, source, source_extension, kwds = self._assigned_source + return recompile(self, module_name, source, tmpdir=tmpdir, + target=target, source_extension=source_extension, + compiler_verbose=verbose, debug=debug, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefaut() to avoid + # races. 
+ try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. + result = func() + self._init_once_cache[tag] = (True, result) + return result + + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + + def def_extern(self, *args, **kwds): + raise ValueError("ffi.def_extern() is only available on API-mode FFI " + "objects") + + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + + +def _load_backend_lib(backend, name, flags): + import os + if not isinstance(name, basestring): + if sys.platform != "win32" or name is not None: + return backend.load_library(name, flags) + name = "c" # Windows: load_library(None) fails, but this works + # on Python 2 (backward compatibility hack only) + first_error = None + if '.' in name or '/' in name or os.sep in name: + try: + return backend.load_library(name, flags) + except OSError as e: + first_error = e + import ctypes.util + path = ctypes.util.find_library(name) + if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") + msg = ("ctypes.util.find_library() did not manage " + "to locate a library called %r" % (name,)) + if first_error is not None: + msg = "%s. 
Additionally, %s" % (first_error, msg) + raise OSError(msg) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) + # + def accessor_function(name): + key = 'function ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + value = backendlib.load_function(BType, name) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def addressof_var(name): + try: + return addr_variables[name] + except KeyError: + with ffi._lock: + if name not in addr_variables: + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + if BType.kind != 'array': + BType = model.pointer_cache(ffi, BType) + p = backendlib.load_function(BType, name) + addr_variables[name] = p + return addr_variables[name] + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + addr_variables = {} + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: + return + # + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version + # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) + # + class FFILibrary(object): + def __getattr__(self, name): + make_accessor(name) + return getattr(self, name) + def __setattr__(self, name, value): + try: + property = getattr(self.__class__, name) + except AttributeError: + make_accessor(name) + setattr(self, name, value) + else: + property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() + def __addressof__(self, name): + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + make_accessor(name) + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + raise AttributeError("cffi library has no function or " + "global variable named '%s'" % (name,)) + def __cffi_close__(self): + backendlib.close_lib() + self.__dict__.clear() + # + if isinstance(libname, basestring): + try: + if not 
isinstance(libname, str): # unicode, on Python 2 + libname = libname.encode('utf-8') + FFILibrary.__name__ = 'FFILibrary_%s' % libname + except UnicodeError: + pass + library = FFILibrary() + return library, library.__dict__ + +def _builtin_function_type(func): + # a hack to make at least ffi.typeof(builtin_function) work, + # if the builtin function was obtained by 'vengine_cpy'. + import sys + try: + module = sys.modules[func.__module__] + ffi = module._cffi_original_ffi + types_of_builtin_funcs = module._cffi_types_of_builtin_funcs + tp = types_of_builtin_funcs[func] + except (KeyError, AttributeError, TypeError): + return None + else: + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/myenv/lib/python3.9/site-packages/cffi/backend_ctypes.py b/myenv/lib/python3.9/site-packages/cffi/backend_ctypes.py new file mode 100644 index 0000000..e7956a7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/backend_ctypes.py @@ -0,0 +1,1121 @@ +import ctypes, ctypes.util, operator, sys +from . import model + +if sys.version_info < (3,): + bytechr = chr +else: + unicode = str + long = int + xrange = range + bytechr = lambda num: bytes([num]) + +class CTypesType(type): + pass + +class CTypesData(object): + __metaclass__ = CTypesType + __slots__ = ['__weakref__'] + __name__ = '' + + def __init__(self, *args): + raise TypeError("cannot instantiate %r" % (self.__class__,)) + + @classmethod + def _newp(cls, init): + raise TypeError("expected a pointer or array ctype, got '%s'" + % (cls._get_c_name(),)) + + @staticmethod + def _to_ctypes(value): + raise TypeError + + @classmethod + def _arg_to_ctypes(cls, *value): + try: + ctype = cls._ctype + except AttributeError: + raise TypeError("cannot create an instance of %r" % (cls,)) + if value: + res = cls._to_ctypes(*value) + if not isinstance(res, ctype): + res = cls._ctype(res) + else: + res = cls._ctype() + return res + + @classmethod + def _create_ctype_obj(cls, init): + if init is None: + return cls._arg_to_ctypes() + else: + return cls._arg_to_ctypes(init) + + @staticmethod + def _from_ctypes(ctypes_value): + raise TypeError + + @classmethod + def _get_c_name(cls, replace_with=''): + return cls._reftypename.replace(' &', replace_with) + + @classmethod + def _fix_class(cls): + cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__module__ = 'ffi' + + def _get_own_repr(self): + raise NotImplementedError + + def _addr_repr(self, address): + if address == 0: + return 'NULL' + else: + if address < 0: + address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) + return '0x%x' % address + + def __repr__(self, c_name=None): + own = self._get_own_repr() + return '' % (c_name or self._get_c_name(), own) + + def _convert_to_address(self, BClass): + if BClass is None: + raise TypeError("cannot convert %r to an address" % ( + self._get_c_name(),)) + else: + raise TypeError("cannot convert %r to %r" % ( + self._get_c_name(), BClass._get_c_name())) + + @classmethod + def _get_size(cls): + return ctypes.sizeof(cls._ctype) + + def _get_size_of_instance(self): + return ctypes.sizeof(self._ctype) + + @classmethod + def _cast_from(cls, source): + raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) + + def _cast_to_integer(self): + return self._convert_to_address(None) + + @classmethod + def _alignment(cls): + return ctypes.alignment(cls._ctype) + + def __iter__(self): + raise TypeError("cdata %r does not support iteration" % ( + self._get_c_name()),) + + def _make_cmp(name): + cmpfunc = 
getattr(operator, name) + def cmp(self, other): + v_is_ptr = not isinstance(self, CTypesGenericPrimitive) + w_is_ptr = (isinstance(other, CTypesData) and + not isinstance(other, CTypesGenericPrimitive)) + if v_is_ptr and w_is_ptr: + return cmpfunc(self._convert_to_address(None), + other._convert_to_address(None)) + elif v_is_ptr or w_is_ptr: + return NotImplemented + else: + if isinstance(self, CTypesGenericPrimitive): + self = self._value + if isinstance(other, CTypesGenericPrimitive): + other = other._value + return cmpfunc(self, other) + cmp.func_name = name + return cmp + + __eq__ = _make_cmp('__eq__') + __ne__ = _make_cmp('__ne__') + __lt__ = _make_cmp('__lt__') + __le__ = _make_cmp('__le__') + __gt__ = _make_cmp('__gt__') + __ge__ = _make_cmp('__ge__') + + def __hash__(self): + return hash(self._convert_to_address(None)) + + def _to_string(self, maxlen): + raise TypeError("string(): %r" % (self,)) + + +class CTypesGenericPrimitive(CTypesData): + __slots__ = [] + + def __hash__(self): + return hash(self._value) + + def _get_own_repr(self): + return repr(self._from_ctypes(self._value)) + + +class CTypesGenericArray(CTypesData): + __slots__ = [] + + @classmethod + def _newp(cls, init): + return cls(init) + + def __iter__(self): + for i in xrange(len(self)): + yield self[i] + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + +class CTypesGenericPtr(CTypesData): + __slots__ = ['_address', '_as_ctype_ptr'] + _automatic_casts = False + kind = "pointer" + + @classmethod + def _newp(cls, init): + return cls(init) + + @classmethod + def _cast_from(cls, source): + if source is None: + address = 0 + elif isinstance(source, CTypesData): + address = source._cast_to_integer() + elif isinstance(source, (int, long)): + address = source + else: + raise TypeError("bad type for cast to %r: %r" % + (cls, type(source).__name__)) + return cls._new_pointer_at(address) + + @classmethod + def _new_pointer_at(cls, address): + self = cls.__new__(cls) + self._address = address + self._as_ctype_ptr = ctypes.cast(address, cls._ctype) + return self + + def _get_own_repr(self): + try: + return self._addr_repr(self._address) + except AttributeError: + return '???' 
+ + def _cast_to_integer(self): + return self._address + + def __nonzero__(self): + return bool(self._address) + __bool__ = __nonzero__ + + @classmethod + def _to_ctypes(cls, value): + if not isinstance(value, CTypesData): + raise TypeError("unexpected %s object" % type(value).__name__) + address = value._convert_to_address(cls) + return ctypes.cast(address, cls._ctype) + + @classmethod + def _from_ctypes(cls, ctypes_ptr): + address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 + return cls._new_pointer_at(address) + + @classmethod + def _initialize(cls, ctypes_ptr, value): + if value: + ctypes_ptr.contents = cls._to_ctypes(value).contents + + def _convert_to_address(self, BClass): + if (BClass in (self.__class__, None) or BClass._automatic_casts + or self._automatic_casts): + return self._address + else: + return CTypesData._convert_to_address(self, BClass) + + +class CTypesBaseStructOrUnion(CTypesData): + __slots__ = ['_blob'] + + @classmethod + def _create_ctype_obj(cls, init): + # may be overridden + raise TypeError("cannot instantiate opaque type %s" % (cls,)) + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + @classmethod + def _offsetof(cls, fieldname): + return getattr(cls._ctype, fieldname).offset + + def _convert_to_address(self, BClass): + if getattr(BClass, '_BItem', None) is self.__class__: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @classmethod + def _from_ctypes(cls, ctypes_struct_or_union): + self = cls.__new__(cls) + self._blob = ctypes_struct_or_union + return self + + @classmethod + def _to_ctypes(cls, value): + return value._blob + + def __repr__(self, c_name=None): + return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) + + +class CTypesBackend(object): + + PRIMITIVE_TYPES = { + 'char': ctypes.c_char, + 'short': ctypes.c_short, + 'int': ctypes.c_int, + 'long': ctypes.c_long, + 'long long': ctypes.c_longlong, + 'signed char': ctypes.c_byte, + 'unsigned char': ctypes.c_ubyte, + 'unsigned short': ctypes.c_ushort, + 'unsigned int': ctypes.c_uint, + 'unsigned long': ctypes.c_ulong, + 'unsigned long long': ctypes.c_ulonglong, + 'float': ctypes.c_float, + 'double': ctypes.c_double, + '_Bool': ctypes.c_bool, + } + + for _name in ['unsigned long long', 'unsigned long', + 'unsigned int', 'unsigned short', 'unsigned char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] + + for _name in ['long long', 'long', 'int', 'short', 'signed char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] + PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] + + + def __init__(self): + self.RTLD_LAZY = 0 # not supported anyway by ctypes + self.RTLD_NOW = 0 + self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL + self.RTLD_LOCAL = ctypes.RTLD_LOCAL + + def set_ffi(self, ffi): + self.ffi = ffi + + def _get_types(self): + return CTypesData, CTypesType + + def load_library(self, path, flags=0): + cdll = ctypes.CDLL(path, flags) + return CTypesLibrary(self, cdll) + + def 
new_void_type(self): + class CTypesVoid(CTypesData): + __slots__ = [] + _reftypename = 'void &' + @staticmethod + def _from_ctypes(novalue): + return None + @staticmethod + def _to_ctypes(novalue): + if novalue is not None: + raise TypeError("None expected, got %s object" % + (type(novalue).__name__,)) + return None + CTypesVoid._fix_class() + return CTypesVoid + + def new_primitive_type(self, name): + if name == 'wchar_t': + raise NotImplementedError(name) + ctype = self.PRIMITIVE_TYPES[name] + if name == 'char': + kind = 'char' + elif name in ('float', 'double'): + kind = 'float' + else: + if name in ('signed char', 'unsigned char'): + kind = 'byte' + elif name == '_Bool': + kind = 'bool' + else: + kind = 'int' + is_signed = (ctype(-1).value == -1) + # + def _cast_source_to_int(source): + if isinstance(source, (int, long, float)): + source = int(source) + elif isinstance(source, CTypesData): + source = source._cast_to_integer() + elif isinstance(source, bytes): + source = ord(source) + elif source is None: + source = 0 + else: + raise TypeError("bad type for cast to %r: %r" % + (CTypesPrimitive, type(source).__name__)) + return source + # + kind1 = kind + class CTypesPrimitive(CTypesGenericPrimitive): + __slots__ = ['_value'] + _ctype = ctype + _reftypename = '%s &' % name + kind = kind1 + + def __init__(self, value): + self._value = value + + @staticmethod + def _create_ctype_obj(init): + if init is None: + return ctype() + return ctype(CTypesPrimitive._to_ctypes(init)) + + if kind == 'int' or kind == 'byte': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = ctype(source).value # cast within range + return cls(source) + def __int__(self): + return self._value + + if kind == 'bool': + @classmethod + def _cast_from(cls, source): + if not isinstance(source, (int, long, float)): + source = _cast_source_to_int(source) + return cls(bool(source)) + def __int__(self): + return int(self._value) + + if kind == 'char': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = bytechr(source & 0xFF) + return cls(source) + def __int__(self): + return ord(self._value) + + if kind == 'float': + @classmethod + def _cast_from(cls, source): + if isinstance(source, float): + pass + elif isinstance(source, CTypesGenericPrimitive): + if hasattr(source, '__float__'): + source = float(source) + else: + source = int(source) + else: + source = _cast_source_to_int(source) + source = ctype(source).value # fix precision + return cls(source) + def __int__(self): + return int(self._value) + def __float__(self): + return self._value + + _cast_to_integer = __int__ + + if kind == 'int' or kind == 'byte' or kind == 'bool': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long)): + if isinstance(x, CTypesData): + x = int(x) + else: + raise TypeError("integer expected, got %s" % + type(x).__name__) + if ctype(x).value != x: + if not is_signed and x < 0: + raise OverflowError("%s: negative integer" % name) + else: + raise OverflowError("%s: integer out of bounds" + % name) + return x + + if kind == 'char': + @staticmethod + def _to_ctypes(x): + if isinstance(x, bytes) and len(x) == 1: + return x + if isinstance(x, CTypesPrimitive): # > + return x._value + raise TypeError("character expected, got %s" % + type(x).__name__) + def __nonzero__(self): + return ord(self._value) != 0 + else: + def __nonzero__(self): + return self._value != 0 + __bool__ = __nonzero__ + + if kind == 'float': + @staticmethod + def _to_ctypes(x): + if 
not isinstance(x, (int, long, float, CTypesData)): + raise TypeError("float expected, got %s" % + type(x).__name__) + return ctype(x).value + + @staticmethod + def _from_ctypes(value): + return getattr(value, 'value', value) + + @staticmethod + def _initialize(blob, init): + blob.value = CTypesPrimitive._to_ctypes(init) + + if kind == 'char': + def _to_string(self, maxlen): + return self._value + if kind == 'byte': + def _to_string(self, maxlen): + return chr(self._value & 0xff) + # + CTypesPrimitive._fix_class() + return CTypesPrimitive + + def new_pointer_type(self, BItem): + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'charp' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' + else: + kind = 'generic' + # + class CTypesPtr(CTypesGenericPtr): + __slots__ = ['_own'] + if kind == 'charp': + __slots__ += ['__as_strbuf'] + _BItem = BItem + if hasattr(BItem, '_ctype'): + _ctype = ctypes.POINTER(BItem._ctype) + _bitem_size = ctypes.sizeof(BItem._ctype) + else: + _ctype = ctypes.c_void_p + if issubclass(BItem, CTypesGenericArray): + _reftypename = BItem._get_c_name('(* &)') + else: + _reftypename = BItem._get_c_name(' * &') + + def __init__(self, init): + ctypeobj = BItem._create_ctype_obj(init) + if kind == 'charp': + self.__as_strbuf = ctypes.create_string_buffer( + ctypeobj.value + b'\x00') + self._as_ctype_ptr = ctypes.cast( + self.__as_strbuf, self._ctype) + else: + self._as_ctype_ptr = ctypes.pointer(ctypeobj) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own = True + + def __add__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address + + other * self._bitem_size) + else: + return NotImplemented + + def __sub__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address - + other * self._bitem_size) + elif type(self) is type(other): + return (self._address - other._address) // self._bitem_size + else: + return NotImplemented + + def __getitem__(self, index): + if getattr(self, '_own', False) and index != 0: + raise IndexError + return BItem._from_ctypes(self._as_ctype_ptr[index]) + + def __setitem__(self, index, value): + self._as_ctype_ptr[index] = BItem._to_ctypes(value) + + if kind == 'charp' or kind == 'voidp': + @classmethod + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) + else: + return super(CTypesPtr, cls)._arg_to_ctypes(*value) + + if kind == 'charp' or kind == 'bytep': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = sys.maxsize + p = ctypes.cast(self._as_ctype_ptr, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % ( + ctypes.sizeof(self._as_ctype_ptr.contents),) + return super(CTypesPtr, self)._get_own_repr() + # + if (BItem is self.ffi._get_cached_btype(model.void_type) or + BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): + CTypesPtr._automatic_casts = True + # + CTypesPtr._fix_class() + return CTypesPtr + + def new_array_type(self, CTypesPtr, length): + if length is None: + brackets = ' &[]' + else: + brackets = ' &[%d]' % length + BItem = CTypesPtr._BItem + getbtype = self.ffi._get_cached_btype + if 
BItem is getbtype(model.PrimitiveType('char')): + kind = 'char' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'byte' + else: + kind = 'generic' + # + class CTypesArray(CTypesGenericArray): + __slots__ = ['_blob', '_own'] + if length is not None: + _ctype = BItem._ctype * length + else: + __slots__.append('_ctype') + _reftypename = BItem._get_c_name(brackets) + _declared_length = length + _CTPtr = CTypesPtr + + def __init__(self, init): + if length is None: + if isinstance(init, (int, long)): + len1 = init + init = None + elif kind == 'char' and isinstance(init, bytes): + len1 = len(init) + 1 # extra null + else: + init = tuple(init) + len1 = len(init) + self._ctype = BItem._ctype * len1 + self._blob = self._ctype() + self._own = True + if init is not None: + self._initialize(self._blob, init) + + @staticmethod + def _initialize(blob, init): + if isinstance(init, bytes): + init = [init[i:i+1] for i in range(len(init))] + else: + if isinstance(init, CTypesGenericArray): + if (len(init) != len(blob) or + not isinstance(init, CTypesArray)): + raise TypeError("length/type mismatch: %s" % (init,)) + init = tuple(init) + if len(init) > len(blob): + raise IndexError("too many initializers") + addr = ctypes.cast(blob, ctypes.c_void_p).value + PTR = ctypes.POINTER(BItem._ctype) + itemsize = ctypes.sizeof(BItem._ctype) + for i, value in enumerate(init): + p = ctypes.cast(addr + i * itemsize, PTR) + BItem._initialize(p.contents, value) + + def __len__(self): + return len(self._blob) + + def __getitem__(self, index): + if not (0 <= index < len(self._blob)): + raise IndexError + return BItem._from_ctypes(self._blob[index]) + + def __setitem__(self, index, value): + if not (0 <= index < len(self._blob)): + raise IndexError + self._blob[index] = BItem._to_ctypes(value) + + if kind == 'char' or kind == 'byte': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = len(self._blob) + p = ctypes.cast(self._blob, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % (ctypes.sizeof(self._blob),) + return super(CTypesArray, self)._get_own_repr() + + def _convert_to_address(self, BClass): + if BClass in (CTypesPtr, None) or BClass._automatic_casts: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @staticmethod + def _from_ctypes(ctypes_array): + self = CTypesArray.__new__(CTypesArray) + self._blob = ctypes_array + return self + + @staticmethod + def _arg_to_ctypes(value): + return CTypesPtr._arg_to_ctypes(value) + + def __add__(self, other): + if isinstance(other, (int, long)): + return CTypesPtr._new_pointer_at( + ctypes.addressof(self._blob) + + other * ctypes.sizeof(BItem._ctype)) + else: + return NotImplemented + + @classmethod + def _cast_from(cls, source): + raise NotImplementedError("casting to %r" % ( + cls._get_c_name(),)) + # + CTypesArray._fix_class() + return CTypesArray + + def _new_struct_or_union(self, kind, name, base_ctypes_class): + # + class struct_or_union(base_ctypes_class): + pass + struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind + # + class CTypesStructOrUnion(CTypesBaseStructOrUnion): + __slots__ = ['_blob'] + _ctype = struct_or_union + _reftypename = '%s &' % (name,) + _kind = kind = kind1 + # + CTypesStructOrUnion._fix_class() + return CTypesStructOrUnion + + def 
new_struct_type(self, name): + return self._new_struct_or_union('struct', name, ctypes.Structure) + + def new_union_type(self, name): + return self._new_struct_or_union('union', name, ctypes.Union) + + def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, + totalsize=-1, totalalignment=-1, sflags=0, + pack=0): + if totalsize >= 0 or totalalignment >= 0: + raise NotImplementedError("the ctypes backend of CFFI does not support " + "structures completed by verify(); please " + "compile and install the _cffi_backend module.") + struct_or_union = CTypesStructOrUnion._ctype + fnames = [fname for (fname, BField, bitsize) in fields] + btypes = [BField for (fname, BField, bitsize) in fields] + bitfields = [bitsize for (fname, BField, bitsize) in fields] + # + bfield_types = {} + cfields = [] + for (fname, BField, bitsize) in fields: + if bitsize < 0: + cfields.append((fname, BField._ctype)) + bfield_types[fname] = BField + else: + cfields.append((fname, BField._ctype, bitsize)) + bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 + elif pack: + struct_or_union._pack_ = pack + struct_or_union._fields_ = cfields + CTypesStructOrUnion._bfield_types = bfield_types + # + @staticmethod + def _create_ctype_obj(init): + result = struct_or_union() + if init is not None: + initialize(result, init) + return result + CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj + # + def initialize(blob, init): + if is_union: + if len(init) > 1: + raise ValueError("union initializer: %d items given, but " + "only one supported (use a dict if needed)" + % (len(init),)) + if not isinstance(init, dict): + if isinstance(init, (bytes, unicode)): + raise TypeError("union initializer: got a str") + init = tuple(init) + if len(init) > len(fnames): + raise ValueError("too many values for %s initializer" % + CTypesStructOrUnion._get_c_name()) + init = dict(zip(fnames, init)) + addr = ctypes.addressof(blob) + for fname, value in init.items(): + BField, bitsize = name2fieldtype[fname] + assert bitsize < 0, \ + "not implemented: initializer with bit fields" + offset = CTypesStructOrUnion._offsetof(fname) + PTR = ctypes.POINTER(BField._ctype) + p = ctypes.cast(addr + offset, PTR) + BField._initialize(p.contents, value) + is_union = CTypesStructOrUnion._kind == 'union' + name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) + # + for fname, BField, bitsize in fields: + if fname == '': + raise NotImplementedError("nested anonymous structs/unions") + if hasattr(CTypesStructOrUnion, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + if bitsize < 0: + def getter(self, fname=fname, BField=BField, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BField._from_ctypes(p.contents) + def setter(self, value, fname=fname, BField=BField): + setattr(self._blob, fname, BField._to_ctypes(value)) + # + if issubclass(BField, CTypesGenericArray): + setter = None + if BField._declared_length == 0: + def getter(self, fname=fname, BFieldPtr=BField._CTPtr, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BFieldPtr._from_ctypes(p) + # + else: + def getter(self, fname=fname, BField=BField): + return BField._from_ctypes(getattr(self._blob, fname)) + def setter(self, value, fname=fname, BField=BField): + # xxx obscure workaround + 
value = BField._to_ctypes(value) + oldvalue = getattr(self._blob, fname) + setattr(self._blob, fname, value) + if value != getattr(self._blob, fname): + setattr(self._blob, fname, oldvalue) + raise OverflowError("value too large for bitfield") + setattr(CTypesStructOrUnion, fname, property(getter, setter)) + # + CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) + for fname in fnames: + if hasattr(CTypesPtr, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + def getter(self, fname=fname): + return getattr(self[0], fname) + def setter(self, value, fname=fname): + setattr(self[0], fname, value) + setattr(CTypesPtr, fname, property(getter, setter)) + + def new_function_type(self, BArgs, BResult, has_varargs): + nameargs = [BArg._get_c_name() for BArg in BArgs] + if has_varargs: + nameargs.append('...') + nameargs = ', '.join(nameargs) + # + class CTypesFunctionPtr(CTypesGenericPtr): + __slots__ = ['_own_callback', '_name'] + _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), + *[BArg._ctype for BArg in BArgs], + use_errno=True) + _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) + + def __init__(self, init, error=None): + # create a callback to the Python callable init() + import traceback + assert not has_varargs, "varargs not supported for callbacks" + if getattr(BResult, '_ctype', None) is not None: + error = BResult._from_ctypes( + BResult._create_ctype_obj(error)) + else: + error = None + def callback(*args): + args2 = [] + for arg, BArg in zip(args, BArgs): + args2.append(BArg._from_ctypes(arg)) + try: + res2 = init(*args2) + res2 = BResult._to_ctypes(res2) + except: + traceback.print_exc() + res2 = error + if issubclass(BResult, CTypesGenericPtr): + if res2: + res2 = ctypes.cast(res2, ctypes.c_void_p).value + # .value: http://bugs.python.org/issue1574593 + else: + res2 = None + #print repr(res2) + return res2 + if issubclass(BResult, CTypesGenericPtr): + # The only pointers callbacks can return are void*s: + # http://bugs.python.org/issue5710 + callback_ctype = ctypes.CFUNCTYPE( + ctypes.c_void_p, + *[BArg._ctype for BArg in BArgs], + use_errno=True) + else: + callback_ctype = CTypesFunctionPtr._ctype + self._as_ctype_ptr = callback_ctype(callback) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own_callback = init + + @staticmethod + def _initialize(ctypes_ptr, value): + if value: + raise NotImplementedError("ctypes backend: not supported: " + "initializers for function pointers") + + def __repr__(self): + c_name = getattr(self, '_name', None) + if c_name: + i = self._reftypename.index('(* &)') + if self._reftypename[i-1] not in ' )*': + c_name = ' ' + c_name + c_name = self._reftypename.replace('(* &)', c_name) + return CTypesData.__repr__(self, c_name) + + def _get_own_repr(self): + if getattr(self, '_own_callback', None) is not None: + return 'calling %r' % (self._own_callback,) + return super(CTypesFunctionPtr, self)._get_own_repr() + + def __call__(self, *args): + if has_varargs: + assert len(args) >= len(BArgs) + extraargs = args[len(BArgs):] + args = args[:len(BArgs)] + else: + assert len(args) == len(BArgs) + ctypes_args = [] + for arg, BArg in zip(args, BArgs): + ctypes_args.append(BArg._arg_to_ctypes(arg)) + if has_varargs: + for i, arg in enumerate(extraargs): + if arg is None: + ctypes_args.append(ctypes.c_void_p(0)) # NULL + continue + if not isinstance(arg, CTypesData): + raise TypeError( + "argument %d passed in the variadic part " + "needs to be a cdata object 
(got %s)" % + (1 + len(BArgs) + i, type(arg).__name__)) + ctypes_args.append(arg._arg_to_ctypes(arg)) + result = self._as_ctype_ptr(*ctypes_args) + return BResult._from_ctypes(result) + # + CTypesFunctionPtr._fix_class() + return CTypesFunctionPtr + + def new_enum_type(self, name, enumerators, enumvalues, CTypesInt): + assert isinstance(name, str) + reverse_mapping = dict(zip(reversed(enumvalues), + reversed(enumerators))) + # + class CTypesEnum(CTypesInt): + __slots__ = [] + _reftypename = '%s &' % name + + def _get_own_repr(self): + value = self._value + try: + return '%d: %s' % (value, reverse_mapping[value]) + except KeyError: + return str(value) + + def _to_string(self, maxlen): + value = self._value + try: + return reverse_mapping[value] + except KeyError: + return str(value) + # + CTypesEnum._fix_class() + return CTypesEnum + + def get_errno(self): + return ctypes.get_errno() + + def set_errno(self, value): + ctypes.set_errno(value) + + def string(self, b, maxlen=-1): + return b._to_string(maxlen) + + def buffer(self, bptr, size=-1): + raise NotImplementedError("buffer() with ctypes backend") + + def sizeof(self, cdata_or_BType): + if isinstance(cdata_or_BType, CTypesData): + return cdata_or_BType._get_size_of_instance() + else: + assert issubclass(cdata_or_BType, CTypesData) + return cdata_or_BType._get_size() + + def alignof(self, BType): + assert issubclass(BType, CTypesData) + return BType._alignment() + + def newp(self, BType, source): + if not issubclass(BType, CTypesData): + raise TypeError + return BType._newp(source) + + def cast(self, BType, source): + return BType._cast_from(source) + + def callback(self, BType, source, error, onerror): + assert onerror is None # XXX not implemented + return BType(source, error) + + _weakref_cache_ref = None + + def gcp(self, cdata, destructor, size=0): + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref + + if destructor is None: + try: + del weak_cache[MyRef(cdata)] + except KeyError: + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + return None + + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) + return new_cdata + + typeof = type + + def getcname(self, BType, replace_with): + return BType._get_c_name(replace_with) + + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") + BField = BType._bfield_types[fieldname] + if BField is Ellipsis: + raise TypeError("not supported for bitfields") + return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset 
= BItem._get_size() * fieldname
+            if offset > sys.maxsize:
+                raise OverflowError
+            return (BItem, offset)
+        else:
+            raise TypeError(type(fieldname))
+
+    def rawaddressof(self, BTypePtr, cdata, offset=None):
+        if isinstance(cdata, CTypesBaseStructOrUnion):
+            ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
+        elif isinstance(cdata, CTypesGenericPtr):
+            if offset is None or not issubclass(type(cdata)._BItem,
+                                                CTypesBaseStructOrUnion):
+                raise TypeError("unexpected cdata type")
+            ptr = type(cdata)._to_ctypes(cdata)
+        elif isinstance(cdata, CTypesGenericArray):
+            ptr = type(cdata)._to_ctypes(cdata)
+        else:
+            raise TypeError("expected a <cdata 'struct-or-union'>")
+        if offset:
+            ptr = ctypes.cast(
+                ctypes.c_void_p(
+                    ctypes.cast(ptr, ctypes.c_void_p).value + offset),
+                type(ptr))
+        return BTypePtr._from_ctypes(ptr)
+
+
+class CTypesLibrary(object):
+
+    def __init__(self, backend, cdll):
+        self.backend = backend
+        self.cdll = cdll
+
+    def load_function(self, BType, name):
+        c_func = getattr(self.cdll, name)
+        funcobj = BType._from_ctypes(c_func)
+        funcobj._name = name
+        return funcobj
+
+    def read_variable(self, BType, name):
+        try:
+            ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        except AttributeError as e:
+            raise NotImplementedError(e)
+        return BType._from_ctypes(ctypes_obj)
+
+    def write_variable(self, BType, name, value):
+        new_ctypes_obj = BType._to_ctypes(value)
+        ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        ctypes.memmove(ctypes.addressof(ctypes_obj),
+                       ctypes.addressof(new_ctypes_obj),
+                       ctypes.sizeof(BType._ctype))
diff --git a/myenv/lib/python3.9/site-packages/cffi/cffi_opcode.py b/myenv/lib/python3.9/site-packages/cffi/cffi_opcode.py
new file mode 100644
index 0000000..a0df98d
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/cffi/cffi_opcode.py
@@ -0,0 +1,187 @@
+from .error import VerificationError
+
+class CffiOp(object):
+    def __init__(self, op, arg):
+        self.op = op
+        self.arg = arg
+
+    def as_c_expr(self):
+        if self.op is None:
+            assert isinstance(self.arg, str)
+            return '(_cffi_opcode_t)(%s)' % (self.arg,)
+        classname = CLASS_NAME[self.op]
+        return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
+
+    def as_python_bytes(self):
+        if self.op is None and self.arg.isdigit():
+            value = int(self.arg) # non-negative: '-' not in self.arg
+            if value >= 2**31:
+                raise OverflowError("cannot emit %r: limited to 2**31-1"
+                                    % (self.arg,))
+            return format_four_bytes(value)
+        if isinstance(self.arg, str):
+            raise VerificationError("cannot emit to Python: %r" % (self.arg,))
+        return format_four_bytes((self.arg << 8) | self.op)
+
+    def __str__(self):
+        classname = CLASS_NAME.get(self.op, self.op)
+        return '(%s %s)' % (classname, self.arg)
+
+def format_four_bytes(num):
+    return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
+        (num >> 24) & 0xFF,
+        (num >> 16) & 0xFF,
+        (num >> 8) & 0xFF,
+        (num ) & 0xFF)
+
+OP_PRIMITIVE = 1
+OP_POINTER = 3
+OP_ARRAY = 5
+OP_OPEN_ARRAY = 7
+OP_STRUCT_UNION = 9
+OP_ENUM = 11
+OP_FUNCTION = 13
+OP_FUNCTION_END = 15
+OP_NOOP = 17
+OP_BITFIELD = 19
+OP_TYPENAME = 21
+OP_CPYTHON_BLTN_V = 23 # varargs
+OP_CPYTHON_BLTN_N = 25 # noargs
+OP_CPYTHON_BLTN_O = 27 # O (i.e.
a single arg) +OP_CONSTANT = 29 +OP_CONSTANT_INT = 31 +OP_GLOBAL_VAR = 33 +OP_DLOPEN_FUNC = 35 +OP_DLOPEN_CONST = 37 +OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 + +PRIM_VOID = 0 +PRIM_BOOL = 1 +PRIM_CHAR = 2 +PRIM_SCHAR = 3 +PRIM_UCHAR = 4 +PRIM_SHORT = 5 +PRIM_USHORT = 6 +PRIM_INT = 7 +PRIM_UINT = 8 +PRIM_LONG = 9 +PRIM_ULONG = 10 +PRIM_LONGLONG = 11 +PRIM_ULONGLONG = 12 +PRIM_FLOAT = 13 +PRIM_DOUBLE = 14 +PRIM_LONGDOUBLE = 15 + +PRIM_WCHAR = 16 +PRIM_INT8 = 17 +PRIM_UINT8 = 18 +PRIM_INT16 = 19 +PRIM_UINT16 = 20 +PRIM_INT32 = 21 +PRIM_UINT32 = 22 +PRIM_INT64 = 23 +PRIM_UINT64 = 24 +PRIM_INTPTR = 25 +PRIM_UINTPTR = 26 +PRIM_PTRDIFF = 27 +PRIM_SIZE = 28 +PRIM_SSIZE = 29 +PRIM_INT_LEAST8 = 30 +PRIM_UINT_LEAST8 = 31 +PRIM_INT_LEAST16 = 32 +PRIM_UINT_LEAST16 = 33 +PRIM_INT_LEAST32 = 34 +PRIM_UINT_LEAST32 = 35 +PRIM_INT_LEAST64 = 36 +PRIM_UINT_LEAST64 = 37 +PRIM_INT_FAST8 = 38 +PRIM_UINT_FAST8 = 39 +PRIM_INT_FAST16 = 40 +PRIM_UINT_FAST16 = 41 +PRIM_INT_FAST32 = 42 +PRIM_UINT_FAST32 = 43 +PRIM_INT_FAST64 = 44 +PRIM_UINT_FAST64 = 45 +PRIM_INTMAX = 46 +PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 + +_NUM_PRIM = 52 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 + +_IO_FILE_STRUCT = -1 + +PRIMITIVE_TO_INDEX = { + 'char': PRIM_CHAR, + 'short': PRIM_SHORT, + 'int': PRIM_INT, + 'long': PRIM_LONG, + 'long long': PRIM_LONGLONG, + 'signed char': PRIM_SCHAR, + 'unsigned char': PRIM_UCHAR, + 'unsigned short': PRIM_USHORT, + 'unsigned int': PRIM_UINT, + 'unsigned long': PRIM_ULONG, + 'unsigned long long': PRIM_ULONGLONG, + 'float': PRIM_FLOAT, + 'double': PRIM_DOUBLE, + 'long double': PRIM_LONGDOUBLE, + 'float _Complex': PRIM_FLOATCOMPLEX, + 'double _Complex': PRIM_DOUBLECOMPLEX, + '_Bool': PRIM_BOOL, + 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, + 'int8_t': PRIM_INT8, + 'uint8_t': PRIM_UINT8, + 'int16_t': PRIM_INT16, + 'uint16_t': PRIM_UINT16, + 'int32_t': PRIM_INT32, + 'uint32_t': PRIM_UINT32, + 'int64_t': PRIM_INT64, + 'uint64_t': PRIM_UINT64, + 'intptr_t': PRIM_INTPTR, + 'uintptr_t': PRIM_UINTPTR, + 'ptrdiff_t': PRIM_PTRDIFF, + 'size_t': PRIM_SIZE, + 'ssize_t': PRIM_SSIZE, + 'int_least8_t': PRIM_INT_LEAST8, + 'uint_least8_t': PRIM_UINT_LEAST8, + 'int_least16_t': PRIM_INT_LEAST16, + 'uint_least16_t': PRIM_UINT_LEAST16, + 'int_least32_t': PRIM_INT_LEAST32, + 'uint_least32_t': PRIM_UINT_LEAST32, + 'int_least64_t': PRIM_INT_LEAST64, + 'uint_least64_t': PRIM_UINT_LEAST64, + 'int_fast8_t': PRIM_INT_FAST8, + 'uint_fast8_t': PRIM_UINT_FAST8, + 'int_fast16_t': PRIM_INT_FAST16, + 'uint_fast16_t': PRIM_UINT_FAST16, + 'int_fast32_t': PRIM_INT_FAST32, + 'uint_fast32_t': PRIM_UINT_FAST32, + 'int_fast64_t': PRIM_INT_FAST64, + 'uint_fast64_t': PRIM_UINT_FAST64, + 'intmax_t': PRIM_INTMAX, + 'uintmax_t': PRIM_UINTMAX, + } + +F_UNION = 0x01 +F_CHECK_FIELDS = 0x02 +F_PACKED = 0x04 +F_EXTERNAL = 0x08 +F_OPAQUE = 0x10 + +G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) + for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', + 'F_EXTERNAL', 'F_OPAQUE']]) + +CLASS_NAME = {} +for _name, _value in list(globals().items()): + if _name.startswith('OP_') and isinstance(_value, int): + CLASS_NAME[_value] = _name[3:] diff --git a/myenv/lib/python3.9/site-packages/cffi/commontypes.py b/myenv/lib/python3.9/site-packages/cffi/commontypes.py new file mode 100644 index 0000000..8ec97c7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/commontypes.py @@ -0,0 +1,80 @@ +import sys +from . 
import model +from .error import FFIError + + +COMMON_TYPES = {} + +try: + # fetch "bool" and all simple Windows types + from _cffi_backend import _get_common_types + _get_common_types(COMMON_TYPES) +except ImportError: + pass + +COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE') +COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above + +for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + if _type.endswith('_t'): + COMMON_TYPES[_type] = _type +del _type + +_CACHE = {} + +def resolve_common_type(parser, commontype): + try: + return _CACHE[commontype] + except KeyError: + cdecl = COMMON_TYPES.get(commontype, commontype) + if not isinstance(cdecl, str): + result, quals = cdecl, 0 # cdecl is already a BaseType + elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + result, quals = model.PrimitiveType(cdecl), 0 + elif cdecl == 'set-unicode-needed': + raise FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) + else: + if commontype == cdecl: + raise FFIError( + "Unsupported type: %r. Please look at " + "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations " + "and file an issue if you think this type should really " + "be supported." % (commontype,)) + result, quals = parser.parse_type_and_quals(cdecl) # recursive + + assert isinstance(result, model.BaseTypeByIdentity) + _CACHE[commontype] = result, quals + return result, quals + + +# ____________________________________________________________ +# extra types for Windows (most of them are in commontypes.c) + + +def win_common_types(): + return { + "UNICODE_STRING": model.StructType( + "_UNICODE_STRING", + ["Length", + "MaximumLength", + "Buffer"], + [model.PrimitiveType("unsigned short"), + model.PrimitiveType("unsigned short"), + model.PointerType(model.PrimitiveType("wchar_t"))], + [-1, -1, -1]), + "PUNICODE_STRING": "UNICODE_STRING *", + "PCUNICODE_STRING": "const UNICODE_STRING *", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", + } + +if sys.platform == 'win32': + COMMON_TYPES.update(win_common_types()) diff --git a/myenv/lib/python3.9/site-packages/cffi/cparser.py b/myenv/lib/python3.9/site-packages/cffi/cparser.py new file mode 100644 index 0000000..74830e9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/cparser.py @@ -0,0 +1,1006 @@ +from . import model +from .commontypes import COMMON_TYPES, resolve_common_type +from .error import FFIError, CDefError +try: + from . import _pycparser as pycparser +except ImportError: + import pycparser +import weakref, re, sys + +try: + if sys.version_info < (3,): + import thread as _thread + else: + import _thread + lock = _thread.allocate_lock() +except ImportError: + lock = None + +def _workaround_for_static_import_finders(): + # Issue #392: packaging tools like cx_Freeze can not find these + # because pycparser uses exec dynamic import. This is an obscure + # workaround. This function is never called. 
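+    # Illustrative note (editor's addition, not from the upstream cffi
+    # sources): a dynamic import such as exec("import pycparser.yacctab")
+    # is invisible to static module finders, whereas the literal import
+    # statements below are not, so freezers like cx_Freeze end up bundling
+    # the generated parser tables.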
+    import pycparser.yacctab
+    import pycparser.lextab
+
+CDEF_SOURCE_STRING = "<cdef source string>"
+_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
+                        re.DOTALL | re.MULTILINE)
+_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)"
+                       r"\b((?:[^\n\\]|\\.)*?)$",
+                       re.DOTALL | re.MULTILINE)
+_r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE)
+_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}")
+_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$")
+_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
+_r_words = re.compile(r"\w+|\S")
+_parser_cache = None
+_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
+_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
+_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
+_r_cdecl = re.compile(r"\b__cdecl\b")
+_r_extern_python = re.compile(r'\bextern\s*"'
+                              r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
+_r_star_const_space = re.compile( # matches "* const "
+    r"[*]\s*((const|volatile|restrict)\b\s*)+")
+_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+"
+                              r"\.\.\.")
+_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.")
+
+def _get_parser():
+    global _parser_cache
+    if _parser_cache is None:
+        _parser_cache = pycparser.CParser()
+    return _parser_cache
+
+def _workaround_for_old_pycparser(csource):
+    # Workaround for a pycparser issue (fixed between pycparser 2.10 and
+    # 2.14): "char*const***" gives us a wrong syntax tree, the same as
+    # for "char***(*const)". This means we can't tell the difference
+    # afterwards. But "char(*const(***))" gives us the right syntax
+    # tree. The issue only occurs if there are several stars in
+    # sequence with no parenthesis in between, just possibly qualifiers.
+    # Attempt to fix it by adding some parentheses in the source: each
+    # time we see "* const" or "* const *", we add an opening
+    # parenthesis before each star---the hard part is figuring out where
+    # to close them.
+    parts = []
+    while True:
+        match = _r_star_const_space.search(csource)
+        if not match:
+            break
+        #print repr(''.join(parts)+csource), '=>',
+        parts.append(csource[:match.start()])
+        parts.append('('); closing = ')'
+        parts.append(match.group()) # e.g.
"* const " + endpos = match.end() + if csource.startswith('*', endpos): + parts.append('('); closing += ')' + level = 0 + i = endpos + while i < len(csource): + c = csource[i] + if c == '(': + level += 1 + elif c == ')': + if level == 0: + break + level -= 1 + elif c in ',;=': + if level == 0: + break + i += 1 + csource = csource[endpos:i] + closing + csource[i:] + #print repr(''.join(parts)+csource) + parts.append(csource) + return ''.join(parts) + +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + +def _warn_for_string_literal(csource): + if '"' not in csource: + return + for line in csource.splitlines(): + if '"' in line and not line.lstrip().startswith('#'): + import warnings + warnings.warn("String literal found in cdef() or type source. " + "String literals are ignored here, but you should " + "remove them anyway because some character sequences " + "confuse pre-parsing.") + break + +def _warn_for_non_extern_non_static_global_variable(decl): + if not decl.storage: + import warnings + warnings.warn("Global variable '%s' in cdef(): for consistency " + "with C it should have a storage class specifier " + "(usually 'extern')" % (decl.name,)) + +def _remove_line_directives(csource): + # _r_line_directive matches whole lines, without the final \n, if they + # start with '#line' with some spacing allowed, or '#NUMBER'. This + # function stores them away and replaces them with exactly the string + # '#line@N', where N is the index in the list 'line_directives'. 
+    line_directives = []
+    def replace(m):
+        i = len(line_directives)
+        line_directives.append(m.group())
+        return '#line@%d' % i
+    csource = _r_line_directive.sub(replace, csource)
+    return csource, line_directives
+
+def _put_back_line_directives(csource, line_directives):
+    def replace(m):
+        s = m.group()
+        if not s.startswith('#line@'):
+            raise AssertionError("unexpected #line directive "
+                                 "(should have been processed and removed)")
+        return line_directives[int(s[6:])]
+    return _r_line_directive.sub(replace, csource)
+
+def _preprocess(csource):
+    # First, remove the lines of the form '#line N "filename"' because
+    # the "filename" part could confuse the rest
+    csource, line_directives = _remove_line_directives(csource)
+    # Remove comments. NOTE: this only works because the cdef() section
+    # should not contain any string literals (except in line directives)!
+    def replace_keeping_newlines(m):
+        return ' ' + m.group().count('\n') * '\n'
+    csource = _r_comment.sub(replace_keeping_newlines, csource)
+    # Remove the "#define FOO x" lines
+    macros = {}
+    for match in _r_define.finditer(csource):
+        macroname, macrovalue = match.groups()
+        macrovalue = macrovalue.replace('\\\n', '').strip()
+        macros[macroname] = macrovalue
+    csource = _r_define.sub('', csource)
+    #
+    if pycparser.__version__ < '2.14':
+        csource = _workaround_for_old_pycparser(csource)
+    #
+    # BIG HACK: replace WINAPI or __stdcall with "volatile const".
+    # It doesn't make sense for the return type of a function to be
+    # "volatile volatile const", so we abuse it to detect __stdcall...
+    # Hack number 2 is that "int(volatile *fptr)();" is not valid C
+    # syntax, so we place the "volatile" before the opening parenthesis.
+    csource = _r_stdcall2.sub(' volatile volatile const(', csource)
+    csource = _r_stdcall1.sub(' volatile volatile const ', csource)
+    csource = _r_cdecl.sub(' ', csource)
+    #
+    # Replace `extern "Python"` with start/end markers
+    csource = _preprocess_extern_python(csource)
+    #
+    # Now there should not be any string literal left; warn if we get one
+    _warn_for_string_literal(csource)
+    #
+    # Replace "[...]" with "[__dotdotdotarray__]"
+    csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
+    #
+    # Replace "...}" with "__dotdotdotNUM__}". This construction should
+    # occur only at the end of enums; at the end of structs we have "...;}"
+    # and at the end of vararg functions "...);". Also replace "=...[,}]"
+    # with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when
+    # giving an unknown value.
+    matches = list(_r_partial_enum.finditer(csource))
+    for number, match in enumerate(reversed(matches)):
+        p = match.start()
+        if csource[p] == '=':
+            p2 = csource.find('...', p, match.end())
+            assert p2 > p
+            csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number,
+                                                 csource[p2+3:])
+        else:
+            assert csource[p:p+3] == '...'
+            csource = '%s __dotdotdot%d__ %s' % (csource[:p], number,
+                                                 csource[p+3:])
+    # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__"
+    csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource)
+    # Replace "float ..." or "double..." with "__dotdotdotfloat__"
+    csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource)
+    # Replace all remaining "..." with the same name, "__dotdotdot__",
+    # which is declared with a typedef for the purpose of C parsing.
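+    # Illustrative example (editor's addition, not from the upstream cffi
+    # sources): 'int printf(const char *, ...);' becomes
+    # 'int printf(const char *,  __dotdotdot__ );', and _parse_function_type()
+    # further down later recognizes a trailing __dotdotdot__ parameter as a
+    # C ellipsis.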
+    csource = csource.replace('...', ' __dotdotdot__ ')
+    # Finally, put back the line directives
+    csource = _put_back_line_directives(csource, line_directives)
+    return csource, macros

+def _common_type_names(csource):
+    # Look in the source for what looks like usages of types from the
+    # list of common types. A "usage" is approximated here as the
+    # appearance of the word, minus a "definition" of the type, which
+    # is the last word in a "typedef" statement. Approximate only
+    # but should be fine for all the common types.
+    look_for_words = set(COMMON_TYPES)
+    look_for_words.add(';')
+    look_for_words.add(',')
+    look_for_words.add('(')
+    look_for_words.add(')')
+    look_for_words.add('typedef')
+    words_used = set()
+    is_typedef = False
+    paren = 0
+    previous_word = ''
+    for word in _r_words.findall(csource):
+        if word in look_for_words:
+            if word == ';':
+                if is_typedef:
+                    words_used.discard(previous_word)
+                    look_for_words.discard(previous_word)
+                is_typedef = False
+            elif word == 'typedef':
+                is_typedef = True
+                paren = 0
+            elif word == '(':
+                paren += 1
+            elif word == ')':
+                paren -= 1
+            elif word == ',':
+                if is_typedef and paren == 0:
+                    words_used.discard(previous_word)
+                    look_for_words.discard(previous_word)
+            else: # word in COMMON_TYPES
+                words_used.add(word)
+        previous_word = word
+    return words_used
+
+
+class Parser(object):
+
+    def __init__(self):
+        self._declarations = {}
+        self._included_declarations = set()
+        self._anonymous_counter = 0
+        self._structnode2type = weakref.WeakKeyDictionary()
+        self._options = {}
+        self._int_constants = {}
+        self._recomplete = []
+        self._uses_new_feature = None
+
+    def _parse(self, csource):
+        csource, macros = _preprocess(csource)
+        # XXX: for more efficiency we would need to poke into the
+        # internals of CParser... the following registers the
+        # typedefs, because their presence or absence influences the
+        # parsing itself (but what they are typedef'ed to plays no role)
+        ctn = _common_type_names(csource)
+        typenames = []
+        for name in sorted(self._declarations):
+            if name.startswith('typedef '):
+                name = name[8:]
+                typenames.append(name)
+                ctn.discard(name)
+        typenames += sorted(ctn)
+        #
+        csourcelines = []
+        csourcelines.append('# 1 "<cdef automatic initialization code>"')
+        for typename in typenames:
+            csourcelines.append('typedef int %s;' % typename)
+        csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,'
+                            ' __dotdotdot__;')
+        # this forces pycparser to consider the following in the file
+        # called <cdef source string> from line 1
+        csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,))
+        csourcelines.append(csource)
+        fullcsource = '\n'.join(csourcelines)
+        if lock is not None:
+            lock.acquire() # pycparser is not thread-safe...
+        try:
+            ast = _get_parser().parse(fullcsource)
+        except pycparser.c_parser.ParseError as e:
+            self.convert_pycparser_error(e, csource)
+        finally:
+            if lock is not None:
+                lock.release()
+        # csource will be used to find buggy source text
+        return ast, macros, csource
+
+    def _convert_pycparser_error(self, e, csource):
+        # xxx look for "<cdef source string>:NUM:" at the start of str(e)
+        # and interpret that as a line number. This will not work if
+        # the user gives explicit ``# NUM "FILE"`` directives.
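+        # Illustrative example (editor's addition, not from the upstream
+        # cffi sources): for str(e) == '<cdef source string>:3: before: )',
+        # the regex below extracts line number 3 and returns the third line
+        # of the preprocessed csource.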
+ line = None + msg = str(e) + match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) + if match: + linenum = int(match.group(1), 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) + if line: + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) + else: + msg = 'parse error\n%s' % (msg,) + raise CDefError(msg) + + def parse(self, csource, override=False, packed=False, pack=None, + dllexport=False): + if packed: + if packed != True: + raise ValueError("'packed' should be False or True; use " + "'pack' to give another value") + if pack: + raise ValueError("cannot give both 'pack' and 'packed'") + pack = 1 + elif pack: + if pack & (pack - 1): + raise ValueError("'pack' must be a power of two, not %r" % + (pack,)) + else: + pack = 0 + prev_options = self._options + try: + self._options = {'override': override, + 'packed': pack, + 'dllexport': dllexport} + self._internal_parse(csource) + finally: + self._options = prev_options + + def _internal_parse(self, csource): + ast, macros, csource = self._parse(csource) + # add the macros + self._process_macros(macros) + # find the first "__dotdotdot__" and use that as a separator + # between the repeated typedefs and the real csource + iterator = iter(ast.ext) + for decl in iterator: + if decl.name == '__dotdotdot__': + break + else: + assert 0 + current_decl = None + # + try: + self._inside_extern_python = '__cffi_extern_python_stop' + for decl in iterator: + current_decl = decl + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise CDefError("typedef does not declare any name", + decl) + quals = 0 + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and + decl.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_type(decl) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_ptr_type(decl) + else: + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name, partial_length_ok=True, + typedef_example="*(%s *)0" % (decl.name,)) + self._declare('typedef ' + decl.name, realtype, quals=quals) + elif decl.__class__.__name__ == 'Pragma': + pass # skip pragma, only in pycparser 2.15 + else: + raise CDefError("unexpected <%s>: this construct is valid " + "C but not valid in cdef()" % + decl.__class__.__name__, decl) + except CDefError as e: + if len(e.args) == 1: + e.args = e.args + (current_decl,) + raise + except FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations + raise FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _add_integer_constant(self, name, int_str): + int_str = int_str.lower().rstrip("ul") + neg = int_str.startswith('-') + if neg: + int_str = int_str[1:] + # "010" is not valid oct in py3 + if (int_str.startswith("0") and int_str != '0' + and not int_str.startswith("0x")): + int_str = "0o" + 
int_str[1:] + pyvalue = int(int_str, 0) + if neg: + pyvalue = -pyvalue + self._add_constants(name, pyvalue) + self._declare('macro ' + name, pyvalue) + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + if _r_int_literal.match(value): + self._add_integer_constant(key, value) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) + + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._options.get('dllexport'): + tag = 'dllexport_python ' + elif self._inside_extern_python == '__cffi_extern_python_start': + tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' + else: + tag = 'function ' + self._declare(tag + decl.name, tp) + + def _parse_decl(self, decl): + node = decl.type + if isinstance(node, pycparser.c_ast.FuncDecl): + tp, quals = self._get_type_and_quals(node, name=decl.name) + assert isinstance(tp, model.RawFunctionType) + self._declare_function(tp, quals, decl) + else: + if isinstance(node, pycparser.c_ast.Struct): + self._get_struct_union_enum_type('struct', node) + elif isinstance(node, pycparser.c_ast.Union): + self._get_struct_union_enum_type('union', node) + elif isinstance(node, pycparser.c_ast.Enum): + self._get_struct_union_enum_type('enum', node) + elif not decl.name: + raise CDefError("construct does not declare any variable", + decl) + # + if decl.name: + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) + if tp.is_raw_function: + self._declare_function(tp, quals, decl) + elif (tp.is_integer_type() and + hasattr(decl, 'init') and + hasattr(decl.init, 'value') and + _r_int_literal.match(decl.init.value)): + self._add_integer_constant(decl.name, decl.init.value) + elif (tp.is_integer_type() and + isinstance(decl.init, pycparser.c_ast.UnaryOp) and + decl.init.op == '-' and + hasattr(decl.init.expr, 'value') and + _r_int_literal.match(decl.init.expr.value)): + self._add_integer_constant(decl.name, + '-' + decl.init.expr.value) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = decl.name + else: + if self._inside_extern_python !='__cffi_extern_python_stop': + raise CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + _warn_for_non_extern_non_static_global_variable(decl) + self._declare('variable ' + decl.name, tp, quals=quals) + + def parse_type(self, cdecl): + return self.parse_type_and_quals(cdecl)[0] + + def parse_type_and_quals(self, cdecl): + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] + assert not macros + exprnode = ast.ext[-1].type.args.params[0] + if isinstance(exprnode, pycparser.c_ast.ID): + raise CDefError("unknown identifier '%s'" % (exprnode.name,)) + return self._get_type_and_quals(exprnode.type) + + def _declare(self, name, obj, included=False, quals=0): + if name in self._declarations: + prevobj, prevquals = self._declarations[name] + if 
prevobj is obj and prevquals == quals: + return + if not self._options.get('override'): + raise FFIError( + "multiple declarations of %s (for interactive usage, " + "try cdef(xx, override=True))" % (name,)) + assert '__dotdotdot__' not in name.split() + self._declarations[name] = (obj, quals) + if included: + self._included_declarations.add(obj) + + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'volatile' in type.quals: + quals |= model.Q_VOLATILE + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): + if isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + if (isinstance(type, model.StructOrUnionOrEnum) and + type.name.startswith('$') and type.name[1:].isdigit() and + type.forcename is None and declname is not None): + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) + + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False, + typedef_example=None): + # first, dereference typedefs, if we have it already parsed, we're good + if (isinstance(typenode, pycparser.c_ast.TypeDecl) and + isinstance(typenode.type, pycparser.c_ast.IdentifierType) and + len(typenode.type.names) == 1 and + ('typedef ' + typenode.type.names[0]) in self._declarations): + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.ArrayDecl): + # array type + if typenode.dim is None: + length = None + else: + length = self._parse_constant( + typenode.dim, partial_length_ok=partial_length_ok) + # a hack: in 'typedef int foo_t[...][...];', don't use '...' as + # the length but use directly the C expression that would be + # generated by recompiler.py. This lets the typedef be used in + # many more places within recompiler.py + if typedef_example is not None: + if length == '...': + length = '_cffi_array_len(%s)' % (typedef_example,) + typedef_example = "*" + typedef_example + # + tp, quals = self._get_type_and_quals(typenode.type, + partial_length_ok=partial_length_ok, + typedef_example=typedef_example) + return model.ArrayType(tp, length), quals + # + if isinstance(typenode, pycparser.c_ast.PtrDecl): + # pointer type + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) + type = typenode.type + if isinstance(type, pycparser.c_ast.IdentifierType): + # assume a primitive type. 
get it from .names, but reduce
+                # synonyms to a single chosen combination
+                names = list(type.names)
+                if names != ['signed', 'char']: # keep this unmodified
+                    prefixes = {}
+                    while names:
+                        name = names[0]
+                        if name in ('short', 'long', 'signed', 'unsigned'):
+                            prefixes[name] = prefixes.get(name, 0) + 1
+                            del names[0]
+                        else:
+                            break
+                    # ignore the 'signed' prefix below, and reorder the others
+                    newnames = []
+                    for prefix in ('unsigned', 'short', 'long'):
+                        for i in range(prefixes.get(prefix, 0)):
+                            newnames.append(prefix)
+                    if not names:
+                        names = ['int'] # implicitly
+                    if names == ['int']: # but kill it if 'short' or 'long'
+                        if 'short' in prefixes or 'long' in prefixes:
+                            names = []
+                    names = newnames + names
+                ident = ' '.join(names)
+                if ident == 'void':
+                    return model.void_type, quals
+                if ident == '__dotdotdot__':
+                    raise FFIError(':%d: bad usage of "..."' %
+                                   typenode.coord.line)
+                tp0, quals0 = resolve_common_type(self, ident)
+                return tp0, (quals | quals0)
+            #
+            if isinstance(type, pycparser.c_ast.Struct):
+                # 'struct foobar'
+                tp = self._get_struct_union_enum_type('struct', type, name)
+                return tp, quals
+            #
+            if isinstance(type, pycparser.c_ast.Union):
+                # 'union foobar'
+                tp = self._get_struct_union_enum_type('union', type, name)
+                return tp, quals
+            #
+            if isinstance(type, pycparser.c_ast.Enum):
+                # 'enum foobar'
+                tp = self._get_struct_union_enum_type('enum', type, name)
+                return tp, quals
+        #
+        if isinstance(typenode, pycparser.c_ast.FuncDecl):
+            # a function type
+            return self._parse_function_type(typenode, name), 0
+        #
+        # nested anonymous structs or unions end up here
+        if isinstance(typenode, pycparser.c_ast.Struct):
+            return self._get_struct_union_enum_type('struct', typenode, name,
+                                                    nested=True), 0
+        if isinstance(typenode, pycparser.c_ast.Union):
+            return self._get_struct_union_enum_type('union', typenode, name,
+                                                    nested=True), 0
+        #
+        raise FFIError(":%d: bad or unsupported type declaration" %
+                       typenode.coord.line)
+
+    def _parse_function_type(self, typenode, funcname=None):
+        params = list(getattr(typenode.args, 'params', []))
+        for i, arg in enumerate(params):
+            if not hasattr(arg, 'type'):
+                raise CDefError("%s arg %d: unknown type '%s'"
+                    " (if you meant to use the old C syntax of giving"
+                    " untyped arguments, it is not supported)"
+                    % (funcname or 'in expression', i + 1,
+                       getattr(arg, 'name', '?')))
+        ellipsis = (
+            len(params) > 0 and
+            isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and
+            isinstance(params[-1].type.type,
+                       pycparser.c_ast.IdentifierType) and
+            params[-1].type.type.names == ['__dotdotdot__'])
+        if ellipsis:
+            params.pop()
+            if not params:
+                raise CDefError(
+                    "%s: a function with only '(...)' as argument"
+                    " is not correct C" % (funcname or 'in expression'))
+        args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
+                for argdeclnode in params]
+        if not ellipsis and args == [model.void_type]:
+            args = []
+        result, quals = self._get_type_and_quals(typenode.type)
+        # the 'quals' on the result type are ignored. HACK: we abuse them
+        # to detect __stdcall functions: we textually replace "__stdcall"
+        # with "volatile volatile const" above.
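+        # Illustrative example (editor's addition, not from the upstream
+        # cffi sources): 'int __stdcall f(long);' was rewritten by
+        # _preprocess() to 'int  volatile volatile const  f(long);', so the
+        # calling convention is recovered here from the last three
+        # qualifiers of the return type.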
+ abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) + + def _as_func_arg(self, type, quals): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item, quals) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): + # First, a level of caching on the exact 'type' node of the AST. + # This is obscure, but needed because pycparser "unrolls" declarations + # such as "typedef struct { } foo_t, *foo_p" and we end up with + # an AST that is not a tree, but a DAG, with the "type" node of the + # two branches foo_t and foo_p of the trees being the same node. + # It's a bit silly but detecting "DAG-ness" in the AST tree seems + # to be the only way to distinguish this case from two independent + # structs. See test_struct_with_two_usages. + try: + return self._structnode2type[type] + except KeyError: + pass + # + # Note that this must handle parsing "struct foo" any number of + # times and always return the same StructType object. Additionally, + # one of these times (not necessarily the first), the fields of + # the struct can be specified with "struct foo { ...fields... }". + # If no name is given, then we have to create a new anonymous struct + # with no caching; in this case, the fields are either specified + # right now or never. + # + force_name = name + name = type.name + # + # get the type or create it if needed + if name is None: + # 'force_name' is used to guess a more readable name for + # anonymous structs, for the common case "typedef struct { } foo". + if force_name is not None: + explicit_name = '$%s' % force_name + else: + self._anonymous_counter += 1 + explicit_name = '$%d' % self._anonymous_counter + tp = None + else: + explicit_name = name + key = '%s %s' % (kind, name) + tp, _ = self._declarations.get(key, (None, None)) + # + if tp is None: + if kind == 'struct': + tp = model.StructType(explicit_name, None, None, None) + elif kind == 'union': + tp = model.UnionType(explicit_name, None, None, None) + elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") + tp = self._build_enum_type(explicit_name, type.values) + else: + raise AssertionError("kind = %r" % (kind,)) + if name is not None: + self._declare(key, tp) + else: + if kind == 'enum' and type.values is not None: + raise NotImplementedError( + "enum %s: the '{}' declaration should appear on the first " + "time the enum is mentioned, not later" % explicit_name) + if not tp.forcename: + tp.force_the_name(force_name) + if tp.forcename and '$' in tp.name: + self._declare('anonymous %s' % tp.forcename, tp) + # + self._structnode2type[type] = tp + # + # enums: done here + if kind == 'enum': + return tp + # + # is there a 'type.decls'? If yes, then this is the place in the + # C sources that declare the fields. If no, then just return the + # existing type, possibly still incomplete. 
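+        # Illustrative example (editor's addition, not from the upstream
+        # cffi sources): 'struct foo;' arrives here with type.decls set to
+        # None (an opaque forward declaration), while
+        # 'struct foo { int a; };' carries the field declarations and
+        # completes the type below.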
+ if type.decls is None: + return tp + # + if tp.fldnames is not None: + raise CDefError("duplicate declaration of struct %s" % name) + fldnames = [] + fldtypes = [] + fldbitsize = [] + fldquals = [] + for decl in type.decls: + if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and + ''.join(decl.type.names) == '__dotdotdot__'): + # XXX pycparser is inconsistent: 'names' should be a list + # of strings, but is sometimes just one string. Use + # str.join() as a way to cope with both. + self._make_partial(tp, nested) + continue + if decl.bitsize is None: + bitsize = -1 + else: + bitsize = self._parse_constant(decl.bitsize) + self._partial_length = False + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) + if self._partial_length: + self._make_partial(tp, nested) + if isinstance(type, model.StructType) and type.partial: + self._make_partial(tp, nested) + fldnames.append(decl.name or '') + fldtypes.append(type) + fldbitsize.append(bitsize) + fldquals.append(fqual) + tp.fldnames = tuple(fldnames) + tp.fldtypes = tuple(fldtypes) + tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) + if fldbitsize != [-1] * len(fldbitsize): + if isinstance(tp, model.StructType) and tp.partial: + raise NotImplementedError("%s: using both bitfields and '...;'" + % (tp,)) + tp.packed = self._options.get('packed') + if tp.completed: # must be re-completed: it is not opaque any more + tp.completed = 0 + self._recomplete.append(tp) + return tp + + def _make_partial(self, tp, nested): + if not isinstance(tp, model.StructOrUnion): + raise CDefError("%s cannot be partial" % (tp,)) + if not tp.has_c_name() and not nested: + raise NotImplementedError("%s is partial but has no C name" %(tp,)) + tp.partial = True + + def _parse_constant(self, exprnode, partial_length_ok=False): + # for now, limited to expressions that are an immediate number + # or positive/negative number + if isinstance(exprnode, pycparser.c_ast.Constant): + s = exprnode.value + if '0' <= s[0] <= '9': + s = s.rstrip('uUlL') + try: + if s.startswith('0'): + return int(s, 8) + else: + return int(s, 10) + except ValueError: + if len(s) > 1: + if s.lower()[0:2] == '0x': + return int(s, 16) + elif s.lower()[0:2] == '0b': + return int(s, 2) + raise CDefError("invalid constant %r" % (s,)) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '-'): + return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] + # + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name == '__dotdotdotarray__'): + if partial_length_ok: + self._partial_length = True + return '...' 
+ raise FFIError(":%d: unsupported '[...]' here, cannot derive " + "the actual array length in this context" + % exprnode.coord.line) + # + if isinstance(exprnode, pycparser.c_ast.BinaryOp): + left = self._parse_constant(exprnode.left) + right = self._parse_constant(exprnode.right) + if exprnode.op == '+': + return left + right + elif exprnode.op == '-': + return left - right + elif exprnode.op == '*': + return left * right + elif exprnode.op == '/': + return self._c_div(left, right) + elif exprnode.op == '%': + return left - self._c_div(left, right) * right + elif exprnode.op == '<<': + return left << right + elif exprnode.op == '>>': + return left >> right + elif exprnode.op == '&': + return left & right + elif exprnode.op == '|': + return left | right + elif exprnode.op == '^': + return left ^ right + # + raise FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) + + def _c_div(self, a, b): + result = a // b + if ((a < 0) ^ (b < 0)) and (a % b) != 0: + result += 1 + return result + + def _build_enum_type(self, explicit_name, decls): + if decls is not None: + partial = False + enumerators = [] + enumvalues = [] + nextenumvalue = 0 + for enum in decls.enumerators: + if _r_enum_dotdotdot.match(enum.name): + partial = True + continue + if enum.value is not None: + nextenumvalue = self._parse_constant(enum.value) + enumerators.append(enum.name) + enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) + nextenumvalue += 1 + enumerators = tuple(enumerators) + enumvalues = tuple(enumvalues) + tp = model.EnumType(explicit_name, enumerators, enumvalues) + tp.partial = partial + else: # opaque enum + tp = model.EnumType(explicit_name, (), ()) + return tp + + def include(self, other): + for name, (tp, quals) in other._declarations.items(): + if name.startswith('anonymous $enum_$'): + continue # fix for test_anonymous_enum_include + kind = name.split(' ', 1)[0] + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) + for k, v in other._int_constants.items(): + self._add_constants(k, v) + + def _get_unknown_type(self, decl): + typenames = decl.type.type.names + if typenames == ['__dotdotdot__']: + return model.unknown_type(decl.name) + + if typenames == ['__dotdotdotint__']: + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef int... %s'" % decl.name + return model.UnknownIntegerType(decl.name) + + if typenames == ['__dotdotdotfloat__']: + # note: not for 'long double' so far + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef float... %s'" % decl.name + return model.UnknownFloatType(decl.name) + + raise FFIError(':%d: unsupported usage of "..." in typedef' + % decl.coord.line) + + def _get_unknown_ptr_type(self, decl): + if decl.type.type.type.names == ['__dotdotdot__']: + return model.unknown_ptr_type(decl.name) + raise FFIError(':%d: unsupported usage of "..." 
in typedef' + % decl.coord.line) diff --git a/myenv/lib/python3.9/site-packages/cffi/error.py b/myenv/lib/python3.9/site-packages/cffi/error.py new file mode 100644 index 0000000..0a27247 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/error.py @@ -0,0 +1,31 @@ + +class FFIError(Exception): + __module__ = 'cffi' + +class CDefError(Exception): + __module__ = 'cffi' + def __str__(self): + try: + current_decl = self.args[1] + filename = current_decl.coord.file + linenum = current_decl.coord.line + prefix = '%s:%d: ' % (filename, linenum) + except (AttributeError, TypeError, IndexError): + prefix = '' + return '%s%s' % (prefix, self.args[0]) + +class VerificationError(Exception): + """ An error raised when verification fails + """ + __module__ = 'cffi' + +class VerificationMissing(Exception): + """ An error raised when incomplete structures are passed into + cdef, but no verification has been done + """ + __module__ = 'cffi' + +class PkgConfigError(Exception): + """ An error raised for missing modules in pkg-config + """ + __module__ = 'cffi' diff --git a/myenv/lib/python3.9/site-packages/cffi/ffiplatform.py b/myenv/lib/python3.9/site-packages/cffi/ffiplatform.py new file mode 100644 index 0000000..8531346 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/ffiplatform.py @@ -0,0 +1,127 @@ +import sys, os +from .error import VerificationError + + +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + +def get_extension(srcfilename, modname, sources=(), **kwds): + _hack_at_distutils() + from distutils.core import Extension + allsources = [srcfilename] + for src in sources: + allsources.append(os.path.normpath(src)) + return Extension(name=modname, sources=allsources, **kwds) + +def compile(tmpdir, ext, compiler_verbose=0, debug=None): + """Compile a C extension module using distutils.""" + + _hack_at_distutils() + saved_environ = os.environ.copy() + try: + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) + outputfilename = os.path.abspath(outputfilename) + finally: + # workaround for a distutils bugs where some env vars can + # become longer and longer every time it is used + for key, value in saved_environ.items(): + if os.environ.get(key) != value: + os.environ[key] = value + return outputfilename + +def _build(tmpdir, ext, compiler_verbose=0, debug=None): + # XXX compact but horrible :-( + from distutils.core import Distribution + import distutils.errors, distutils.log + # + dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() + options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) + options['force'] = ('ffiplatform', True) + options['build_lib'] = ('ffiplatform', tmpdir) + options['build_temp'] = ('ffiplatform', tmpdir) + # + try: + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() + finally: + distutils.log.set_threshold(old_level) + except (distutils.errors.CompileError, + distutils.errors.LinkError) as e: + raise VerificationError('%s: %s' % (e.__class__.__name__, e)) + # + return soname + +try: + from os.path import samefile +except ImportError: + def samefile(f1, f2): + return os.path.abspath(f1) == os.path.abspath(f2) + +def maybe_relative_path(path): + if not os.path.isabs(path): + return path # already relative + dir = path + names = [] + while True: 
+ prevdir = dir + dir, name = os.path.split(prevdir) + if dir == prevdir or not dir: + return path # failed to make it relative + names.append(name) + try: + if samefile(dir, os.curdir): + names.reverse() + return os.path.join(*names) + except OSError: + pass + +# ____________________________________________________________ + +try: + int_or_long = (int, long) + import cStringIO +except NameError: + int_or_long = int # Python 3 + import io as cStringIO + +def _flatten(x, f): + if isinstance(x, str): + f.write('%ds%s' % (len(x), x)) + elif isinstance(x, dict): + keys = sorted(x.keys()) + f.write('%dd' % len(keys)) + for key in keys: + _flatten(key, f) + _flatten(x[key], f) + elif isinstance(x, (list, tuple)): + f.write('%dl' % len(x)) + for value in x: + _flatten(value, f) + elif isinstance(x, int_or_long): + f.write('%di' % (x,)) + else: + raise TypeError( + "the keywords to verify() contains unsupported object %r" % (x,)) + +def flatten(x): + f = cStringIO.StringIO() + _flatten(x, f) + return f.getvalue() + +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass diff --git a/myenv/lib/python3.9/site-packages/cffi/lock.py b/myenv/lib/python3.9/site-packages/cffi/lock.py new file mode 100644 index 0000000..db91b71 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/myenv/lib/python3.9/site-packages/cffi/model.py b/myenv/lib/python3.9/site-packages/cffi/model.py new file mode 100644 index 0000000..ad1c176 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/model.py @@ -0,0 +1,617 @@ +import types +import weakref + +from .lock import allocate_lock +from .error import CDefError, VerificationError, VerificationMissing + +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 +Q_VOLATILE = 0x04 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_VOLATILE: + replace_with = ' volatile ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + +class BaseTypeByIdentity(object): + is_array_type = False + is_raw_function = False + + def get_c_name(self, replace_with='', context='a C file', quals=0): + result = self.c_name_with_marker + assert result.count('&') == 1 + # some logic duplication with ffi.getctype()... 
:-( + replace_with = replace_with.strip() + if replace_with: + if replace_with.startswith('*') and '&[' in result: + replace_with = '(%s)' % replace_with + elif not replace_with[0] in '[(': + replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) + result = result.replace('&', replace_with) + if '$' in result: + raise VerificationError( + "cannot generate '%s' in %s: unknown type name" + % (self._get_c_name(), context)) + return result + + def _get_c_name(self): + return self.c_name_with_marker.replace('&', '') + + def has_c_name(self): + return '$' not in self._get_c_name() + + def is_integer_type(self): + return False + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + try: + BType = ffi._cached_btypes[self] + except KeyError: + BType = self.build_backend_type(ffi, finishlist) + BType2 = ffi._cached_btypes.setdefault(self, BType) + assert BType2 is BType + return BType + + def __repr__(self): + return '<%s>' % (self._get_c_name(),) + + def _get_items(self): + return [(name, getattr(self, name)) for name in self._attrs_] + + +class BaseType(BaseTypeByIdentity): + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self._get_items() == other._get_items()) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.__class__, tuple(self._get_items()))) + + +class VoidType(BaseType): + _attrs_ = () + + def __init__(self): + self.c_name_with_marker = 'void&' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_void_type') + +void_type = VoidType() + + +class BasePrimitiveType(BaseType): + def is_complex_type(self): + return False + + +class PrimitiveType(BasePrimitiveType): + _attrs_ = ('name',) + + ALL_PRIMITIVE_TYPES = { + 'char': 'c', + 'short': 'i', + 'int': 'i', + 'long': 'i', + 'long long': 'i', + 'signed char': 'i', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', + 'float': 'f', + 'double': 'f', + 'long double': 'f', + 'float _Complex': 'j', + 'double _Complex': 'j', + '_Bool': 'i', + # the following types are not primitive in the C sense + 'wchar_t': 'c', + 'char16_t': 'c', + 'char32_t': 'c', + 'int8_t': 'i', + 'uint8_t': 'i', + 'int16_t': 'i', + 'uint16_t': 'i', + 'int32_t': 'i', + 'uint32_t': 'i', + 'int64_t': 'i', + 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', + 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', + 'intptr_t': 'i', + 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', + 'ptrdiff_t': 'i', + 'size_t': 'i', + 'ssize_t': 'i', + } + + def __init__(self, name): + assert name in self.ALL_PRIMITIVE_TYPES + self.name = name + self.c_name_with_marker = name + '&' + + def is_char_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' + def is_integer_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' + def is_float_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' + def is_complex_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_primitive_type', self.name) + + +class UnknownIntegerType(BasePrimitiveType): + _attrs_ = ('name',) 
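# Aside: a simplified sketch (not cffi's actual helper) of the '&' marker
# convention behind c_name_with_marker above -- each type stores its C syntax
# with '&' where the declared name belongs, so rendering a full declaration
# is a single replace():
def declare(c_name_with_marker, varname):
    return c_name_with_marker.replace('&', ' ' + varname if varname else '')

assert declare('int&', 'x') == 'int x'
assert declare('int(*&)(int)', 'fp') == 'int(* fp)(int)'   # function pointer
assert declare('int&[5]', 'a') == 'int a[5]'               # array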
+ + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def is_integer_type(self): + return True + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("integer type '%s' can only be used after " + "compilation" % self.name) + +class UnknownFloatType(BasePrimitiveType): + _attrs_ = ('name', ) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("float type '%s' can only be used after " + "compilation" % self.name) + + +class BaseFunctionType(BaseType): + _attrs_ = ('args', 'result', 'ellipsis', 'abi') + + def __init__(self, args, result, ellipsis, abi=None): + self.args = args + self.result = result + self.ellipsis = ellipsis + self.abi = abi + # + reprargs = [arg._get_c_name() for arg in self.args] + if self.ellipsis: + reprargs.append('...') + reprargs = reprargs or ['void'] + replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] + self.c_name_with_marker = ( + self.result.c_name_with_marker.replace('&', replace_with)) + + +class RawFunctionType(BaseFunctionType): + # Corresponds to a C type like 'int(int)', which is the C type of + # a function, but not a pointer-to-function. The backend has no + # notion of such a type; it's used temporarily by parsing. + _base_pattern = '(&)(%s)' + is_raw_function = True + + def build_backend_type(self, ffi, finishlist): + raise CDefError("cannot render the type %r: it is a function " + "type, not a pointer-to-function type" % (self,)) + + def as_function_pointer(self): + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) + + +class FunctionPtrType(BaseFunctionType): + _base_pattern = '(*&)(%s)' + + def build_backend_type(self, ffi, finishlist): + result = self.result.get_cached_btype(ffi, finishlist) + args = [] + for tp in self.args: + args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass + return global_cache(self, ffi, 'new_function_type', + tuple(args), result, self.ellipsis, *abi_args) + + def as_raw_function(self): + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) + + +class PointerType(BaseType): + _attrs_ = ('totype', 'quals') + + def __init__(self, totype, quals=0): + self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") + if totype.is_array_type: + extra = "(%s)" % (extra.lstrip(),) + self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) + + def build_backend_type(self, ffi, finishlist): + BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) + return global_cache(self, ffi, 'new_pointer_type', BItem) + +voidp_type = PointerType(void_type) + +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) + +const_voidp_type = ConstPointerType(void_type) + + +class NamedPointerType(PointerType): + _attrs_ = ('totype', 'name') + + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) + self.name = name + self.c_name_with_marker = name + '&' + + +class ArrayType(BaseType): + _attrs_ = ('item', 'length') + is_array_type = True + + def __init__(self, item, length): + self.item = item + self.length = length + # + if length is None: + brackets = '&[]' + elif length == 
'...': + brackets = '&[/*...*/]' + else: + brackets = '&[%s]' % length + self.c_name_with_marker = ( + self.item.c_name_with_marker.replace('&', brackets)) + + def length_is_unknown(self): + return isinstance(self.length, str) + + def resolve_length(self, newlength): + return ArrayType(self.item, newlength) + + def build_backend_type(self, ffi, finishlist): + if self.length_is_unknown(): + raise CDefError("cannot render the type %r: unknown length" % + (self,)) + self.item.get_cached_btype(ffi, finishlist) # force the item BType + BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) + return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) + +char_array_type = ArrayType(PrimitiveType('char'), None) + + +class StructOrUnionOrEnum(BaseTypeByIdentity): + _attrs_ = ('name',) + forcename = None + + def build_c_name_with_marker(self): + name = self.forcename or '%s %s' % (self.kind, self.name) + self.c_name_with_marker = name + '&' + + def force_the_name(self, forcename): + self.forcename = forcename + self.build_c_name_with_marker() + + def get_official_name(self): + assert self.c_name_with_marker.endswith('&') + return self.c_name_with_marker[:-1] + + +class StructOrUnion(StructOrUnionOrEnum): + fixedlayout = None + completed = 0 + partial = False + packed = 0 + + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): + self.name = name + self.fldnames = fldnames + self.fldtypes = fldtypes + self.fldbitsize = fldbitsize + self.fldquals = fldquals + self.build_c_name_with_marker() + + def anonymous_struct_fields(self): + if self.fldtypes is not None: + for name, type in zip(self.fldnames, self.fldtypes): + if name == '' and isinstance(type, StructOrUnion): + yield type + + def enumfields(self, expand_anonymous_struct_union=True): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): + if (name == '' and isinstance(type, StructOrUnion) + and expand_anonymous_struct_union): + # nested anonymous struct/union + for result in type.enumfields(): + yield result + else: + yield (name, type, bitsize, quals) + + def force_flatten(self): + # force the struct or union to have a declaration that lists + # directly all fields returned by enumfields(), flattening + # nested anonymous structs/unions. 
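# Aside: a self-contained sketch (hypothetical names) of the flattening that
# enumfields()/force_flatten() perform here -- fields of anonymous nested
# structs (empty name) are hoisted into the parent's field list:
def iter_flat(fields):
    for name, sub in fields:             # sub: nested field list or None
        if name == '' and sub is not None:
            for item in iter_flat(sub):  # recurse into the anonymous member
                yield item
        else:
            yield name, sub

nested = [('a', None), ('', [('b', None), ('c', None)])]
assert [n for n, _ in iter_flat(nested)] == ['a', 'b', 'c']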
+ names = [] + types = [] + bitsizes = [] + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): + names.append(name) + types.append(type) + bitsizes.append(bitsize) + fldquals.append(quals) + self.fldnames = tuple(names) + self.fldtypes = tuple(types) + self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, + can_delay) + if not can_delay: + self.finish_backend_type(ffi, finishlist) + return BType + + def finish_backend_type(self, ffi, finishlist): + if self.completed: + if self.completed != 2: + raise NotImplementedError("recursive structure declaration " + "for '%s'" % (self.name,)) + return + BType = ffi._cached_btypes[self] + # + self.completed = 1 + # + if self.fldtypes is None: + pass # not completing it: it's an opaque struct + # + elif self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) + extra_flags = () + if self.packed: + if self.packed == 1: + extra_flags = (8,) # SF_PACKED + else: + extra_flags = (0, self.packed) + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, *extra_flags) + # + else: + fldtypes = [] + fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout + for i in range(len(self.fldnames)): + fsize = fieldsize[i] + ftype = self.fldtypes[i] + # + if isinstance(ftype, ArrayType) and ftype.length_is_unknown(): + # fix the length to match the total size + BItemType = ftype.item.get_cached_btype(ffi, finishlist) + nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) + if nrest != 0: + self._verification_error( + "field '%s.%s' has a bogus size?" 
% ( + self.name, self.fldnames[i] or '{}')) + ftype = ftype.resolve_length(nlen) + self.fldtypes = (self.fldtypes[:i] + (ftype,) + + self.fldtypes[i+1:]) + # + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) + ffi._backend.complete_struct_or_union(BType, lst, self, + totalsize, totalalignment) + self.completed = 2 + + def _verification_error(self, msg): + raise VerificationError(msg) + + def check_not_partial(self): + if self.partial and self.fixedlayout is None: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + finishlist.append(self) + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, + self.get_official_name(), key=self) + + +class StructType(StructOrUnion): + kind = 'struct' + + +class UnionType(StructOrUnion): + kind = 'union' + + +class EnumType(StructOrUnionOrEnum): + kind = 'enum' + partial = False + partial_resolved = False + + def __init__(self, name, enumerators, enumvalues, baseinttype=None): + self.name = name + self.enumerators = enumerators + self.enumvalues = enumvalues + self.baseinttype = baseinttype + self.build_c_name_with_marker() + + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + + def check_not_partial(self): + if self.partial and not self.partial_resolved: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + base_btype = self.build_baseinttype(ffi, finishlist) + return global_cache(self, ffi, 'new_enum_type', + self.get_official_name(), + self.enumerators, self.enumvalues, + base_btype, key=self) + + def build_baseinttype(self, ffi, finishlist): + if self.baseinttype is not None: + return self.baseinttype.get_cached_btype(ffi, finishlist) + # + if self.enumvalues: + smallest_value = min(self.enumvalues) + largest_value = max(self.enumvalues) + else: + import warnings + try: + # XXX! The goal is to ensure that the warnings.warn() + # will not suppress the warning. We want to get it + # several times if we reach this point several times. 
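# Aside: a sketch (hypothetical helper) of the range test build_baseinttype
# applies here to pick the smallest C integer type covering all enumerator
# values; 'signed' mirrors the sign flag in the original expression:
def fits(smallest, largest, size_bytes, signed):
    lo = (-1) << (8 * size_bytes - 1) if signed else 0
    return smallest >= lo and largest < (1 << (8 * size_bytes - signed))

assert fits(-1, 100, 4, signed=True)               # plain 'int' works
assert not fits(0, 2**32 - 1, 4, signed=True)      # too big for 'int'
assert fits(0, 2**32 - 1, 4, signed=False)         # 'unsigned int' works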
+ __warningregistry__.clear() + except NameError: + pass + warnings.warn("%r has no values explicitly defined; " + "guessing that it is equivalent to 'unsigned int'" + % self._get_c_name()) + smallest_value = largest_value = 0 + if smallest_value < 0: # needs a signed type + sign = 1 + candidate1 = PrimitiveType("int") + candidate2 = PrimitiveType("long") + else: + sign = 0 + candidate1 = PrimitiveType("unsigned int") + candidate2 = PrimitiveType("unsigned long") + btype1 = candidate1.get_cached_btype(ffi, finishlist) + btype2 = candidate2.get_cached_btype(ffi, finishlist) + size1 = ffi.sizeof(btype1) + size2 = ffi.sizeof(btype2) + if (smallest_value >= ((-1) << (8*size1-1)) and + largest_value < (1 << (8*size1-sign))): + return btype1 + if (smallest_value >= ((-1) << (8*size2-1)) and + largest_value < (1 << (8*size2-sign))): + return btype2 + raise CDefError("%s values don't all fit into either 'long' " + "or 'unsigned long'" % self._get_c_name()) + +def unknown_type(name, structname=None): + if structname is None: + structname = '$%s' % name + tp = StructType(structname, None, None, None) + tp.force_the_name(name) + tp.origin = "unknown_type" + return tp + +def unknown_ptr_type(name, structname=None): + if structname is None: + structname = '$$%s' % name + tp = StructType(structname, None, None, None) + return NamedPointerType(tp, name) + + +global_lock = allocate_lock() +_typecache_cffi_backend = weakref.WeakValueDictionary() + +def get_typecache(backend): + # returns _typecache_cffi_backend if backend is the _cffi_backend + # module, or type(backend).__typecache if backend is an instance of + # CTypesBackend (or some FakeBackend class during tests) + if isinstance(backend, types.ModuleType): + return _typecache_cffi_backend + with global_lock: + if not hasattr(type(backend), '__typecache'): + type(backend).__typecache = weakref.WeakValueDictionary() + return type(backend).__typecache + +def global_cache(srctype, ffi, funcname, *args, **kwds): + key = kwds.pop('key', (funcname, args)) + assert not kwds + try: + return ffi._typecache[key] + except KeyError: + pass + try: + res = getattr(ffi._backend, funcname)(*args) + except NotImplementedError as e: + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 + +def pointer_cache(ffi, BType): + return global_cache('?', ffi, 'new_pointer_type', BType) + +def attach_exception_info(e, name): + if e.args and type(e.args[0]) is str: + e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:] diff --git a/myenv/lib/python3.9/site-packages/cffi/parse_c_type.h b/myenv/lib/python3.9/site-packages/cffi/parse_c_type.h new file mode 100644 index 0000000..84e4ef8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/parse_c_type.h @@ -0,0 +1,181 @@ + +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). 
*/ + +typedef void *_cffi_opcode_t; + +#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) +#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) + +#define _CFFI_OP_PRIMITIVE 1 +#define _CFFI_OP_POINTER 3 +#define _CFFI_OP_ARRAY 5 +#define _CFFI_OP_OPEN_ARRAY 7 +#define _CFFI_OP_STRUCT_UNION 9 +#define _CFFI_OP_ENUM 11 +#define _CFFI_OP_FUNCTION 13 +#define _CFFI_OP_FUNCTION_END 15 +#define _CFFI_OP_NOOP 17 +#define _CFFI_OP_BITFIELD 19 +#define _CFFI_OP_TYPENAME 21 +#define _CFFI_OP_CPYTHON_BLTN_V 23 // varargs +#define _CFFI_OP_CPYTHON_BLTN_N 25 // noargs +#define _CFFI_OP_CPYTHON_BLTN_O 27 // O (i.e. a single arg) +#define _CFFI_OP_CONSTANT 29 +#define _CFFI_OP_CONSTANT_INT 31 +#define _CFFI_OP_GLOBAL_VAR 33 +#define _CFFI_OP_DLOPEN_FUNC 35 +#define _CFFI_OP_DLOPEN_CONST 37 +#define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 + +#define _CFFI_PRIM_VOID 0 +#define _CFFI_PRIM_BOOL 1 +#define _CFFI_PRIM_CHAR 2 +#define _CFFI_PRIM_SCHAR 3 +#define _CFFI_PRIM_UCHAR 4 +#define _CFFI_PRIM_SHORT 5 +#define _CFFI_PRIM_USHORT 6 +#define _CFFI_PRIM_INT 7 +#define _CFFI_PRIM_UINT 8 +#define _CFFI_PRIM_LONG 9 +#define _CFFI_PRIM_ULONG 10 +#define _CFFI_PRIM_LONGLONG 11 +#define _CFFI_PRIM_ULONGLONG 12 +#define _CFFI_PRIM_FLOAT 13 +#define _CFFI_PRIM_DOUBLE 14 +#define _CFFI_PRIM_LONGDOUBLE 15 + +#define _CFFI_PRIM_WCHAR 16 +#define _CFFI_PRIM_INT8 17 +#define _CFFI_PRIM_UINT8 18 +#define _CFFI_PRIM_INT16 19 +#define _CFFI_PRIM_UINT16 20 +#define _CFFI_PRIM_INT32 21 +#define _CFFI_PRIM_UINT32 22 +#define _CFFI_PRIM_INT64 23 +#define _CFFI_PRIM_UINT64 24 +#define _CFFI_PRIM_INTPTR 25 +#define _CFFI_PRIM_UINTPTR 26 +#define _CFFI_PRIM_PTRDIFF 27 +#define _CFFI_PRIM_SIZE 28 +#define _CFFI_PRIM_SSIZE 29 +#define _CFFI_PRIM_INT_LEAST8 30 +#define _CFFI_PRIM_UINT_LEAST8 31 +#define _CFFI_PRIM_INT_LEAST16 32 +#define _CFFI_PRIM_UINT_LEAST16 33 +#define _CFFI_PRIM_INT_LEAST32 34 +#define _CFFI_PRIM_UINT_LEAST32 35 +#define _CFFI_PRIM_INT_LEAST64 36 +#define _CFFI_PRIM_UINT_LEAST64 37 +#define _CFFI_PRIM_INT_FAST8 38 +#define _CFFI_PRIM_UINT_FAST8 39 +#define _CFFI_PRIM_INT_FAST16 40 +#define _CFFI_PRIM_UINT_FAST16 41 +#define _CFFI_PRIM_INT_FAST32 42 +#define _CFFI_PRIM_UINT_FAST32 43 +#define _CFFI_PRIM_INT_FAST64 44 +#define _CFFI_PRIM_UINT_FAST64 45 +#define _CFFI_PRIM_INTMAX 46 +#define _CFFI_PRIM_UINTMAX 47 +#define _CFFI_PRIM_FLOATCOMPLEX 48 +#define _CFFI_PRIM_DOUBLECOMPLEX 49 +#define _CFFI_PRIM_CHAR16 50 +#define _CFFI_PRIM_CHAR32 51 + +#define _CFFI__NUM_PRIM 52 +#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_FLOAT_PRIM (-2) +#define _CFFI__UNKNOWN_LONG_DOUBLE (-3) + +#define _CFFI__IO_FILE_STRUCT (-1) + + +struct _cffi_global_s { + const char *name; + void *address; + _cffi_opcode_t type_op; + void *size_or_direct_fn; // OP_GLOBAL_VAR: size, or 0 if unknown + // OP_CPYTHON_BLTN_*: addr of direct function +}; + +struct _cffi_getconst_s { + unsigned long long value; + const struct _cffi_type_context_s *ctx; + int gindex; +}; + +struct _cffi_struct_union_s { + const char *name; + int type_index; // -> _cffi_types, on a OP_STRUCT_UNION + int flags; // _CFFI_F_* flags below + size_t size; + int alignment; + int first_field_index; // -> _cffi_fields array + int num_fields; +}; +#define _CFFI_F_UNION 0x01 // is a union, not a struct +#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the + // "standard layout" or if some are missing +#define 
_CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct +#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include() +#define _CFFI_F_OPAQUE 0x10 // opaque + +struct _cffi_field_s { + const char *name; + size_t field_offset; + size_t field_size; + _cffi_opcode_t field_type_op; +}; + +struct _cffi_enum_s { + const char *name; + int type_index; // -> _cffi_types, on a OP_ENUM + int type_prim; // _CFFI_PRIM_xxx + const char *enumerators; // comma-delimited string +}; + +struct _cffi_typename_s { + const char *name; + int type_index; /* if opaque, points to a possibly artificial + OP_STRUCT which is itself opaque */ +}; + +struct _cffi_type_context_s { + _cffi_opcode_t *types; + const struct _cffi_global_s *globals; + const struct _cffi_field_s *fields; + const struct _cffi_struct_union_s *struct_unions; + const struct _cffi_enum_s *enums; + const struct _cffi_typename_s *typenames; + int num_globals; + int num_struct_unions; + int num_enums; + int num_typenames; + const char *const *includes; + int num_types; + int flags; /* future extension */ +}; + +struct _cffi_parse_info_s { + const struct _cffi_type_context_s *ctx; + _cffi_opcode_t *output; + unsigned int output_size; + size_t error_location; + const char *error_message; +}; + +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + +#ifdef _CFFI_INTERNAL +static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); +static int search_in_globals(const struct _cffi_type_context_s *ctx, + const char *search, size_t search_len); +static int search_in_struct_unions(const struct _cffi_type_context_s *ctx, + const char *search, size_t search_len); +#endif diff --git a/myenv/lib/python3.9/site-packages/cffi/pkgconfig.py b/myenv/lib/python3.9/site-packages/cffi/pkgconfig.py new file mode 100644 index 0000000..5c93f15 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/pkgconfig.py @@ -0,0 +1,121 @@ +# pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi +import sys, os, subprocess + +from .error import PkgConfigError + + +def merge_flags(cfg1, cfg2): + """Merge values from cffi config flags cfg2 to cf1 + + Example: + merge_flags({"libraries": ["one"]}, {"libraries": ["two"]}) + {"libraries": ["one", "two"]} + """ + for key, value in cfg2.items(): + if key not in cfg1: + cfg1[key] = value + else: + if not isinstance(cfg1[key], list): + raise TypeError("cfg1[%r] should be a list of strings" % (key,)) + if not isinstance(value, list): + raise TypeError("cfg2[%r] should be a list of strings" % (key,)) + cfg1[key].extend(value) + return cfg1 + + +def call(libname, flag, encoding=sys.getfilesystemencoding()): + """Calls pkg-config and returns the output if found + """ + a = ["pkg-config", "--print-errors"] + a.append(flag) + a.append(libname) + try: + pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except EnvironmentError as e: + raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),)) + + bout, berr = pc.communicate() + if pc.returncode != 0: + try: + berr = berr.decode(encoding) + except Exception: + pass + raise PkgConfigError(berr.strip()) + + if sys.version_info >= (3,) and not isinstance(bout, str): # Python 3.x + try: + bout = bout.decode(encoding) + except UnicodeDecodeError: + raise PkgConfigError("pkg-config %s %s returned bytes that cannot " + "be decoded with encoding %r:\n%r" % + (flag, libname, encoding, bout)) + + if os.altsep != '\\' and '\\' in bout: + raise 
PkgConfigError("pkg-config %s %s returned an unsupported " + "backslash-escaped output:\n%r" % + (flag, libname, bout)) + return bout + + +def flags_from_pkgconfig(libs): + r"""Return compiler line flags for FFI.set_source based on pkg-config output + + Usage + ... + ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"]) + + If pkg-config is installed on build machine, then arguments include_dirs, + library_dirs, libraries, define_macros, extra_compile_args and + extra_link_args are extended with an output of pkg-config for libfoo and + libbar. + + Raises PkgConfigError in case the pkg-config call fails. + """ + + def get_include_dirs(string): + return [x[2:] for x in string.split() if x.startswith("-I")] + + def get_library_dirs(string): + return [x[2:] for x in string.split() if x.startswith("-L")] + + def get_libraries(string): + return [x[2:] for x in string.split() if x.startswith("-l")] + + # convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils + def get_macros(string): + def _macro(x): + x = x[2:] # drop "-D" + if '=' in x: + return tuple(x.split("=", 1)) # "-Dfoo=bar" => ("foo", "bar") + else: + return (x, None) # "-Dfoo" => ("foo", None) + return [_macro(x) for x in string.split() if x.startswith("-D")] + + def get_other_cflags(string): + return [x for x in string.split() if not x.startswith("-I") and + not x.startswith("-D")] + + def get_other_libs(string): + return [x for x in string.split() if not x.startswith("-L") and + not x.startswith("-l")] + + # return kwargs for given libname + def kwargs(libname): + fse = sys.getfilesystemencoding() + all_cflags = call(libname, "--cflags") + all_libs = call(libname, "--libs") + return { + "include_dirs": get_include_dirs(all_cflags), + "library_dirs": get_library_dirs(all_libs), + "libraries": get_libraries(all_libs), + "define_macros": get_macros(all_cflags), + "extra_compile_args": get_other_cflags(all_cflags), + "extra_link_args": get_other_libs(all_libs), + } + + # merge all arguments together + ret = {} + for libname in libs: + lib_flags = kwargs(libname) + merge_flags(ret, lib_flags) + return ret diff --git a/myenv/lib/python3.9/site-packages/cffi/recompiler.py b/myenv/lib/python3.9/site-packages/cffi/recompiler.py new file mode 100644 index 0000000..5d9d32d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/recompiler.py @@ -0,0 +1,1581 @@ +import os, sys, io +from . 
import ffiplatform, model +from .error import VerificationError +from .cffi_opcode import * + +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 + +USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or + sys.version_info >= (3, 5)) + + +class GlobalExpr: + def __init__(self, name, address, type_op, size=0, check_value=0): + self.name = name + self.address = address + self.type_op = type_op + self.size = size + self.check_value = check_value + + def as_c_expr(self): + return ' { "%s", (void *)%s, %s, (void *)%s },' % ( + self.name, self.address, self.type_op.as_c_expr(), self.size) + + def as_python_expr(self): + return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, + self.check_value) + +class FieldExpr: + def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): + self.name = name + self.field_offset = field_offset + self.field_size = field_size + self.fbitsize = fbitsize + self.field_type_op = field_type_op + + def as_c_expr(self): + spaces = " " * len(self.name) + return (' { "%s", %s,\n' % (self.name, self.field_offset) + + ' %s %s,\n' % (spaces, self.field_size) + + ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) + + def as_python_expr(self): + raise NotImplementedError + + def as_field_python_expr(self): + if self.field_type_op.op == OP_NOOP: + size_expr = '' + elif self.field_type_op.op == OP_BITFIELD: + size_expr = format_four_bytes(self.fbitsize) + else: + raise NotImplementedError + return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), + size_expr, + self.name) + +class StructUnionExpr: + def __init__(self, name, type_index, flags, size, alignment, comment, + first_field_index, c_fields): + self.name = name + self.type_index = type_index + self.flags = flags + self.size = size + self.alignment = alignment + self.comment = comment + self.first_field_index = first_field_index + self.c_fields = c_fields + + def as_c_expr(self): + return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) + + '\n %s, %s, ' % (self.size, self.alignment) + + '%d, %d ' % (self.first_field_index, len(self.c_fields)) + + ('/* %s */ ' % self.comment if self.comment else '') + + '},') + + def as_python_expr(self): + flags = eval(self.flags, G_FLAGS) + fields_expr = [c_field.as_field_python_expr() + for c_field in self.c_fields] + return "(b'%s%s%s',%s)" % ( + format_four_bytes(self.type_index), + format_four_bytes(flags), + self.name, + ','.join(fields_expr)) + +class EnumExpr: + def __init__(self, name, type_index, size, signed, allenums): + self.name = name + self.type_index = type_index + self.size = size + self.signed = signed + self.allenums = allenums + + def as_c_expr(self): + return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' + ' "%s" },' % (self.name, self.type_index, + self.size, self.signed, self.allenums)) + + def as_python_expr(self): + prim_index = { + (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, + (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, + (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, + (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, + }[self.size, self.signed] + return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), + format_four_bytes(prim_index), + self.name, self.allenums) + +class TypenameExpr: + def __init__(self, name, type_index): + self.name = name + self.type_index = type_index + + def as_c_expr(self): + return ' { "%s", %d },' % (self.name, self.type_index) + + def as_python_expr(self): + return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) + + +# 
____________________________________________________________ + + +class Recompiler: + _num_externpy = 0 + + def __init__(self, ffi, module_name, target_is_python=False): + self.ffi = ffi + self.module_name = module_name + self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) + + def collect_type_table(self): + self._typesdict = {} + self._generate("collecttype") + # + all_decls = sorted(self._typesdict, key=str) + # + # prepare all FUNCTION bytecode sequences first + self.cffi_types = [] + for tp in all_decls: + if tp.is_raw_function: + assert self._typesdict[tp] is None + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + for tp1 in tp.args: + assert isinstance(tp1, (model.VoidType, + model.BasePrimitiveType, + model.PointerType, + model.StructOrUnionOrEnum, + model.FunctionPtrType)) + if self._typesdict[tp1] is None: + self._typesdict[tp1] = len(self.cffi_types) + self.cffi_types.append(tp1) # placeholder + self.cffi_types.append('END') # placeholder + # + # prepare all OTHER bytecode sequences + for tp in all_decls: + if not tp.is_raw_function and self._typesdict[tp] is None: + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + if tp.is_array_type and tp.length is not None: + self.cffi_types.append('LEN') # placeholder + assert None not in self._typesdict.values() + # + # collect all structs and unions and enums + self._struct_unions = {} + self._enums = {} + for tp in all_decls: + if isinstance(tp, model.StructOrUnion): + self._struct_unions[tp] = None + elif isinstance(tp, model.EnumType): + self._enums[tp] = None + for i, tp in enumerate(sorted(self._struct_unions, + key=lambda tp: tp.name)): + self._struct_unions[tp] = i + for i, tp in enumerate(sorted(self._enums, + key=lambda tp: tp.name)): + self._enums[tp] = i + # + # emit all bytecode sequences now + for tp in all_decls: + method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) + method(tp, self._typesdict[tp]) + # + # consistency check + for op in self.cffi_types: + assert isinstance(op, CffiOp) + self.cffi_types = tuple(self.cffi_types) # don't change any more + + def _enum_fields(self, tp): + # When producing C, expand all anonymous struct/union fields. + # That's necessary to have C code checking the offsets of the + # individual fields contained in them. When producing Python, + # don't do it and instead write it like it is, with the + # corresponding fields having an empty name. Empty names are + # recognized at runtime when we import the generated Python + # file. 
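# Aside: a minimal sketch (hypothetical names) of the interning idea behind
# collect_type_table() above -- each distinct type gets one slot in a flat
# table, and all later opcodes refer to it by that index:
def intern_types(decls):
    table, index = [], {}
    for tp in decls:
        if tp not in index:
            index[tp] = len(table)
            table.append(tp)
    return table, index

table, index = intern_types(['int', 'char *', 'int', 'double'])
assert table == ['int', 'char *', 'double'] and index['double'] == 2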
+ expand_anonymous_struct_union = not self.target_is_python + return tp.enumfields(expand_anonymous_struct_union) + + def _do_collect_type(self, tp): + if not isinstance(tp, model.BaseTypeByIdentity): + if isinstance(tp, tuple): + for x in tp: + self._do_collect_type(x) + return + if tp not in self._typesdict: + self._typesdict[tp] = None + if isinstance(tp, model.FunctionPtrType): + self._do_collect_type(tp.as_raw_function()) + elif isinstance(tp, model.StructOrUnion): + if tp.fldtypes is not None and ( + tp not in self.ffi._parser._included_declarations): + for name1, tp1, _, _ in self._enum_fields(tp): + self._do_collect_type(self._field_type(tp, name1, tp1)) + else: + for _, x in tp._get_items(): + self._do_collect_type(x) + + def _generate(self, step_name): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in recompile(): %r" % name) + try: + self._current_quals = quals + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + # ---------- + + ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] + + def collect_step_tables(self): + # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. + self._lsts = {} + for step_name in self.ALL_STEPS: + self._lsts[step_name] = [] + self._seen_struct_unions = set() + self._generate("ctx") + self._add_missing_struct_unions() + # + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if step_name != "field": + lst.sort(key=lambda entry: entry.name) + self._lsts[step_name] = tuple(lst) # don't change any more + # + # check for a possible internal inconsistency: _cffi_struct_unions + # should have been generated with exactly self._struct_unions + lst = self._lsts["struct_union"] + for tp, i in self._struct_unions.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._struct_unions) + # same with enums + lst = self._lsts["enum"] + for tp, i in self._enums.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._enums) + + # ---------- + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self, f, preamble): + if self.target_is_python: + assert preamble is None + self.write_py_source_to_f(f) + else: + assert preamble is not None + self.write_c_source_to_f(f, preamble) + + def _rel_readlines(self, filename): + g = open(os.path.join(os.path.dirname(__file__), filename), 'r') + lines = g.readlines() + g.close() + return lines + + def write_c_source_to_f(self, f, preamble): + self._f = f + prnt = self._prnt + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') + if not USE_LIMITED_API: + prnt('#define _CFFI_NO_LIMITED_API') + # + # first the '#include' (actually done by inlining the file's content) + lines = self._rel_readlines('_cffi_include.h') + i = lines.index('#include "parse_c_type.h"\n') + lines[i:i+1] = self._rel_readlines('parse_c_type.h') + prnt(''.join(lines)) + # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') + 
self._print_string_literal_in_array(self.ffi._embedding) + prnt('0 };') + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') + prnt(''.join(lines)) + self.needs_version(VERSION_EMBEDDED) + # + # then paste the C source given by the user, verbatim. + prnt('/************************************************************/') + prnt() + prnt(preamble) + prnt() + prnt('/************************************************************/') + prnt() + # + # the declaration of '_cffi_types' + prnt('static void *_cffi_types[] = {') + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + for i, op in enumerate(self.cffi_types): + comment = '' + if i in typeindex2type: + comment = ' // ' + typeindex2type[i]._get_c_name() + prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) + if not self.cffi_types: + prnt(' 0') + prnt('};') + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. + self._seen_constants = set() + self._generate("decl") + # + # the declaration of '_cffi_globals' and '_cffi_typenames' + nums = {} + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + nums[step_name] = len(lst) + if nums[step_name] > 0: + prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( + step_name, step_name)) + for entry in lst: + prnt(entry.as_c_expr()) + prnt('};') + prnt() + # + # the declaration of '_cffi_includes' + if self.ffi._included_ffis: + prnt('static const char * const _cffi_includes[] = {') + for ffi_to_include in self.ffi._included_ffis: + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is None: + raise VerificationError( + "not implemented yet: ffi.include() of a Python-based " + "ffi inside a C-based ffi") + prnt(' "%s",' % (included_module_name,)) + prnt(' NULL') + prnt('};') + prnt() + # + # the declaration of '_cffi_type_context' + prnt('static const struct _cffi_type_context_s _cffi_type_context = {') + prnt(' _cffi_types,') + for step_name in self.ALL_STEPS: + if nums[step_name] > 0: + prnt(' _cffi_%ss,' % step_name) + else: + prnt(' NULL, /* no %ss */' % step_name) + for step_name in self.ALL_STEPS: + if step_name != "field": + prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) + if self.ffi._included_ffis: + prnt(' _cffi_includes,') + else: + prnt(' NULL, /* no includes */') + prnt(' %d, /* num_types */' % (len(self.cffi_types),)) + flags = 0 + if self._num_externpy > 0 or self.ffi._embedding is not None: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) + prnt('};') + prnt() + # + # the init function + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') + prnt('#endif') + prnt() + prnt('#ifdef PYPY_VERSION') + prnt('PyMODINIT_FUNC') + prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) + prnt('{') + if flags & 
1: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python_org = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') + prnt(' p[0] = (const void *)0x%x;' % self._version) + prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') + prnt('}') + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + prnt('# ifdef _MSC_VER') + prnt(' PyMODINIT_FUNC') + prnt('# if PY_MAJOR_VERSION >= 3') + prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) + prnt('# else') + prnt(' init%s(void) { }' % (base_module_name,)) + prnt('# endif') + prnt('# endif') + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % (base_module_name,)) + prnt('{') + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#else') + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % (base_module_name,)) + prnt('{') + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#endif') + prnt() + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility pop') + prnt('#endif') + self._version = None + + def _to_py(self, x): + if isinstance(x, str): + return "b'%s'" % (x,) + if isinstance(x, (list, tuple)): + rep = [self._to_py(item) for item in x] + if len(rep) == 1: + rep.append('') + return "(%s)" % (','.join(rep),) + return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. + + def write_py_source_to_f(self, f): + self._f = f + prnt = self._prnt + # + # header + prnt("# auto-generated file") + prnt("import _cffi_backend") + # + # the 'import' of the included ffis + num_includes = len(self.ffi._included_ffis or ()) + for i in range(num_includes): + ffi_to_include = self.ffi._included_ffis[i] + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is not None: + raise VerificationError( + "not implemented yet: ffi.include() of a C-based " + "ffi inside a Python-based ffi") + prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) + prnt() + prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None + # + # the '_types' keyword argument + self.cffi_types = tuple(self.cffi_types) # don't change any more + types_lst = [op.as_python_bytes() for op in self.cffi_types] + prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + # + # the keyword arguments from ALL_STEPS + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if len(lst) > 0 and step_name != "field": + prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) + # + # the '_includes' keyword argument + if num_includes > 0: + prnt(' _includes = (%s,),' % ( + ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) + # + # the footer + prnt(')') + + # ---------- + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! 
:-) + return self._typesdict[type] + + def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): + extraarg = '' + if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): + if tp.is_integer_type() and tp.name != '_Bool': + converter = '_cffi_to_c_int' + extraarg = ', %s' % tp.name + elif isinstance(tp, model.UnknownFloatType): + # don't check with is_float_type(): it may be a 'long + # double' here, and _cffi_to_c_double would loose precision + converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) + else: + cname = tp.get_c_name('') + converter = '(%s)_cffi_to_c_%s' % (cname, + tp.name.replace(' ', '_')) + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + errvalue = '-1' + # + elif isinstance(tp, model.PointerType): + self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, + tovar, errcode) + return + # + elif (isinstance(tp, model.StructOrUnionOrEnum) or + isinstance(tp, model.BasePrimitiveType)): + # a struct (not a struct pointer) as a function argument; + # or, a complex (the same code works) + self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' + % (tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + return + # + elif isinstance(tp, model.FunctionPtrType): + converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') + extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) + errvalue = 'NULL' + # + else: + raise NotImplementedError(tp) + # + self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) + self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( + tovar, tp.get_c_name(''), errvalue)) + self._prnt(' %s;' % errcode) + + def _extra_local_variables(self, tp, localvars, freelines): + if isinstance(tp, model.PointerType): + localvars.add('Py_ssize_t datasize') + localvars.add('struct _cffi_freeme_s *large_args_free = NULL') + freelines.add('if (large_args_free != NULL)' + ' _cffi_free_array_arguments(large_args_free);') + + def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): + self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') + self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( + self._gettypenum(tp), fromvar, tovar)) + self._prnt(' if (datasize != 0) {') + self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' + '(%s)alloca((size_t)datasize) : NULL;' % ( + tovar, tp.get_c_name(''))) + self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' + '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) + self._prnt(' datasize, &large_args_free) < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.BasePrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) + elif tp.name != 'long double' and not tp.is_complex_type(): + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs + + def _typedef_type(self, tp, name): + return self._global_type(tp, "(*(%s *)0)" % (name,)) + + def _generate_cpy_typedef_collecttype(self, tp, name): + self._do_collect_type(self._typedef_type(tp, name)) + + def _generate_cpy_typedef_decl(self, tp, name): + pass + + def _typedef_ctx(self, tp, name): + type_index = self._typesdict[tp] + self._lsts["typename"].append(TypenameExpr(name, type_index)) + + def _generate_cpy_typedef_ctx(self, tp, name): + tp = self._typedef_type(tp, name) + self._typedef_ctx(tp, name) + if getattr(tp, "origin", None) == "unknown_type": + self._struct_ctx(tp, tp.name, approxname=None) + elif isinstance(tp, model.NamedPointerType): + self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, + named_ptr=tp) + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + self._do_collect_type(tp.as_raw_function()) + if tp.ellipsis and not self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_function_decl(self, tp, name): + assert not self.target_is_python + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_constant_decl(tp, name) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + # + # ------------------------------ + # the 'd' version of the function, only for addressof(lib, 'func') + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arguments.append(type.get_c_name(' x%d' % i, context)) + call_arguments.append('x%d' % i) + repr_arguments = ', 
'.join(arguments) + repr_arguments = repr_arguments or 'void' + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) + prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) + prnt('{') + call_arguments = ', '.join(call_arguments) + result_code = 'return ' + if isinstance(tp.result, model.VoidType): + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, call_arguments)) + prnt('}') + # + prnt('#ifndef PYPY_VERSION') # ------------------------------ + # + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' x%d' % i, context) + prnt(' %s;' % arg) + # + localvars = set() + freelines = set() + for type in tp.args: + self._extra_local_variables(type, localvars, freelines) + for decl in sorted(localvars): + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + result_decl = ' %s;' % tp.result.get_c_name(' result', context) + prnt(result_decl) + prnt(' PyObject *pyresult;') + else: + result_decl = None + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( + name, len(rng), len(rng), + ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + call_arguments = ['x%d' % i for i in range(len(tp.args))] + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' pyresult = %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + for freeline in freelines: + prnt(' ' + freeline) + prnt(' return pyresult;') + else: + for freeline in freelines: + prnt(' ' + freeline) + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + # + prnt('#else') # ------------------------------ + # + # the PyPy version: need to replace struct/union arguments with + # pointers, and if the result is a struct/union, insert a first + # arg that is a pointer to the result. We also do that for + # complex args and return type. 
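# Aside: a sketch (hypothetical helper) of the rewrite described in the
# comment above -- struct-by-value parameters become pointers, and a struct
# result becomes a leading out-pointer parameter, as the code below does
# with need_indirection():
def rewrite_signature(args, result, is_struct):
    new_args = [a + ' *' if is_struct(a) else a for a in args]
    if is_struct(result):
        new_args.insert(0, result + ' *result')
        result = 'void'
    return new_args, result

args, res = rewrite_signature(['int', 'struct point'], 'struct rect',
                              lambda t: t.startswith('struct'))
assert res == 'void'
assert args == ['struct rect *result', 'int', 'struct point *']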
+ def need_indirection(type): + return (isinstance(type, model.StructOrUnion) or + (isinstance(type, model.PrimitiveType) and + type.is_complex_type())) + difference = False + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + indirection = '' + if need_indirection(type): + indirection = '*' + difference = True + arg = type.get_c_name(' %sx%d' % (indirection, i), context) + arguments.append(arg) + call_arguments.append('%sx%d' % (indirection, i)) + tp_result = tp.result + if need_indirection(tp_result): + context = 'result of %s' % name + arg = tp_result.get_c_name(' *result', context) + arguments.insert(0, arg) + tp_result = model.void_type + result_decl = None + result_code = '*result = ' + difference = True + if difference: + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) + prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) + prnt('{') + if result_decl: + prnt(result_decl) + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + if result_decl: + prnt(' return result;') + prnt('}') + else: + prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) + # + prnt('#endif') # ------------------------------ + prnt() + + def _generate_cpy_function_ctx(self, tp, name): + if tp.ellipsis and not self.target_is_python: + self._generate_cpy_constant_ctx(tp, name) + return + type_index = self._typesdict[tp.as_raw_function()] + numargs = len(tp.args) + if self.target_is_python: + meth_kind = OP_DLOPEN_FUNC + elif numargs == 0: + meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' + elif numargs == 1: + meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' + else: + meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' + self._lsts["global"].append( + GlobalExpr(name, '_cffi_f_%s' % name, + CffiOp(meth_kind, type_index), + size='_cffi_d_%s' % name)) + + # ---------- + # named structs or unions + + def _field_type(self, tp_struct, field_name, tp_field): + if isinstance(tp_field, model.ArrayType): + actual_length = tp_field.length + if actual_length == '...': + ptr_struct_name = tp_struct.get_c_name('*') + actual_length = '_cffi_array_len(((%s)0)->%s)' % ( + ptr_struct_name, field_name) + tp_item = self._field_type(tp_struct, '%s[0]' % field_name, + tp_field.item) + tp_field = model.ArrayType(tp_item, actual_length) + return tp_field + + def _struct_collecttype(self, tp): + self._do_collect_type(tp) + if self.target_is_python: + # also requires nested anon struct/unions in ABI mode, recursively + for fldtype in tp.anonymous_struct_fields(): + self._struct_collecttype(fldtype) + + def _struct_decl(self, tp, cname, approxname): + if tp.fldtypes is None: + return + prnt = self._prnt + checkfuncname = '_cffi_checkfld_%s' % (approxname,) + prnt('_CFFI_UNUSED_FN') + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in self._enum_fields(tp): + try: + if ftype.is_integer_type() or fbitsize >= 0: + # accept all integers, but complain on float or double + if fname != '': + prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " + "an integer */" % (fname, cname, fname)) + continue + # only accept exactly the type declared, except that '[]' + # is interpreted as a '*' and so will match any array length. + # (It would also match '*', but that's harder to detect...) 
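`_field_type()` above is where a `...` array length in a cdef is replaced by a `_cffi_array_len(...)` expression, so the actual length is computed by the C compiler rather than declared in Python. A sketch of a cdef that relies on this, using a hypothetical struct `record_t`:

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("""
        typedef struct {
            int tag;
            char name[...];    /* length filled in by the C compiler */
        } record_t;
    """)
    ffi.set_source("_record", """
        typedef struct { int tag; char name[32]; } record_t;
    """)

    if __name__ == "__main__":
        ffi.compile()  # afterwards, ffi.sizeof("record_t") reflects name[32]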
+ while (isinstance(ftype, model.ArrayType) + and (ftype.length is None or ftype.length == '...')): + ftype = ftype.item + fname = fname + '[0]' + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) + prnt() + + def _struct_ctx(self, tp, cname, approxname, named_ptr=None): + type_index = self._typesdict[tp] + reason_for_not_expanding = None + flags = [] + if isinstance(tp, model.UnionType): + flags.append("_CFFI_F_UNION") + if tp.fldtypes is None: + flags.append("_CFFI_F_OPAQUE") + reason_for_not_expanding = "opaque" + if (tp not in self.ffi._parser._included_declarations and + (named_ptr is None or + named_ptr not in self.ffi._parser._included_declarations)): + if tp.fldtypes is None: + pass # opaque + elif tp.partial or any(tp.anonymous_struct_fields()): + pass # field layout obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + if tp.packed > 1: + raise NotImplementedError( + "%r is declared with 'pack=%r'; only 0 or 1 are " + "supported in API mode (try to use \"...;\", which " + "does not require a 'pack' declaration)" % + (tp, tp.packed)) + flags.append("_CFFI_F_PACKED") + else: + flags.append("_CFFI_F_EXTERNAL") + reason_for_not_expanding = "external" + flags = '|'.join(flags) or '0' + c_fields = [] + if reason_for_not_expanding is None: + enumfields = list(self._enum_fields(tp)) + for fldname, fldtype, fbitsize, fqual in enumfields: + fldtype = self._field_type(tp, fldname, fldtype) + self._check_not_opaque(fldtype, + "field '%s.%s'" % (tp.name, fldname)) + # cname is None for _add_missing_struct_unions() only + op = OP_NOOP + if fbitsize >= 0: + op = OP_BITFIELD + size = '%d /* bits */' % fbitsize + elif cname is None or ( + isinstance(fldtype, model.ArrayType) and + fldtype.length is None): + size = '(size_t)-1' + else: + size = 'sizeof(((%s)0)->%s)' % ( + tp.get_c_name('*') if named_ptr is None + else named_ptr.name, + fldname) + if cname is None or fbitsize >= 0: + offset = '(size_t)-1' + elif named_ptr is not None: + offset = '((char *)&((%s)0)->%s) - (char *)0' % ( + named_ptr.name, fldname) + else: + offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) + c_fields.append( + FieldExpr(fldname, offset, size, fbitsize, + CffiOp(op, self._typesdict[fldtype]))) + first_field_index = len(self._lsts["field"]) + self._lsts["field"].extend(c_fields) + # + if cname is None: # unknown name, for _add_missing_struct_unions + size = '(size_t)-2' + align = -2 + comment = "unnamed" + else: + if named_ptr is not None: + size = 'sizeof(*(%s)0)' % (named_ptr.name,) + align = '-1 /* unknown alignment */' + else: + size = 'sizeof(%s)' % (cname,) + align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) + comment = None + else: + size = '(size_t)-1' + align = -1 + first_field_index = -1 + comment = reason_for_not_expanding + self._lsts["struct_union"].append( + StructUnionExpr(tp.name, type_index, flags, size, align, comment, + first_field_index, c_fields)) + self._seen_struct_unions.add(tp) + + def _check_not_opaque(self, tp, location): + while isinstance(tp, model.ArrayType): + tp = tp.item + if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: + raise TypeError( + "%s is of an opaque type (not declared in cdef())" % location) + + def _add_missing_struct_unions(self): + # not very nice, but some 
struct declarations might be missing + # because they don't have any known C name. Check that they are + # not partial (we can't complete or verify them!) and emit them + # anonymously. + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: + if tp not in self._seen_struct_unions: + if tp.partial: + raise NotImplementedError("internal inconsistency: %r is " + "partial but was not seen at " + "this point" % (tp,)) + if tp.name.startswith('$') and tp.name[1:].isdigit(): + approxname = tp.name[1:] + elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': + approxname = 'FILE' + self._typedef_ctx(tp, 'FILE') + else: + raise NotImplementedError("internal inconsistency: %r" % + (tp,)) + self._struct_ctx(tp, None, approxname) + + def _generate_cpy_struct_collecttype(self, tp, name): + self._struct_collecttype(tp) + _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype + + def _struct_names(self, tp): + cname = tp.get_c_name('') + if ' ' in cname: + return cname, cname.replace(' ', '_') + else: + return cname, '_' + cname + + def _generate_cpy_struct_decl(self, tp, name): + self._struct_decl(tp, *self._struct_names(tp)) + _generate_cpy_union_decl = _generate_cpy_struct_decl + + def _generate_cpy_struct_ctx(self, tp, name): + self._struct_ctx(tp, *self._struct_names(tp)) + _generate_cpy_union_ctx = _generate_cpy_struct_ctx + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + def _generate_cpy_anonymous_collecttype(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_collecttype(tp, name) + else: + self._struct_collecttype(tp) + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp) + else: + self._struct_decl(tp, name, 'typedef_' + name) + + def _generate_cpy_anonymous_ctx(self, tp, name): + if isinstance(tp, model.EnumType): + self._enum_ctx(tp, name) + else: + self._struct_ctx(tp, name, 'typedef_' + name) + + # ---------- + # constants, declared with "static const ..." 
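`_generate_cpy_const()` below emits one small checker function per constant; integer constants are additionally range-checked through `_cffi_check_int`. A sketch of the two cdef forms this covers (the names `_config`, `MAX_USERS`, and `VERSION` are hypothetical):

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("""
        static const int MAX_USERS;   /* value taken from the C side */
        #define VERSION ...           /* integer macro, checked at runtime */
    """)
    ffi.set_source("_config", """
        #define VERSION 3
        static const int MAX_USERS = 64;
    """)

    if __name__ == "__main__":
        ffi.compile()

After building, `from _config import lib` exposes `lib.MAX_USERS == 64` and `lib.VERSION == 3`.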
+ + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + check_value=None): + if (category, name) in self._seen_constants: + raise VerificationError( + "duplicate declaration of %s '%s'" % (category, name)) + self._seen_constants.add((category, name)) + # + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + if is_int: + prnt('static int %s(unsigned long long *o)' % funcname) + prnt('{') + prnt(' int n = (%s) <= 0;' % (name,)) + prnt(' *o = (unsigned long long)((%s) | 0);' + ' /* check that %s is an integer */' % (name, name)) + if check_value is not None: + if check_value > 0: + check_value = '%dU' % (check_value,) + prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) + prnt(' n |= 2;') + prnt(' return n;') + prnt('}') + else: + assert check_value is None + prnt('static void %s(char *o)' % funcname) + prnt('{') + prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = tp.is_integer_type() + if not is_int or self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + def _generate_cpy_constant_ctx(self, tp, name): + if not self.target_is_python and tp.is_integer_type(): + type_op = CffiOp(OP_CONSTANT_INT, -1) + else: + if self.target_is_python: + const_kind = OP_DLOPEN_CONST + else: + const_kind = OP_CONSTANT + type_index = self._typesdict[tp] + type_op = CffiOp(const_kind, type_index) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op)) + + # ---------- + # enums + + def _generate_cpy_enum_collecttype(self, tp, name): + self._do_collect_type(tp) + + def _generate_cpy_enum_decl(self, tp, name=None): + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator) + + def _enum_ctx(self, tp, cname): + type_index = self._typesdict[tp] + type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._lsts["global"].append( + GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, + check_value=enumvalue)) + # + if cname is not None and '$' not in cname and not self.target_is_python: + size = "sizeof(%s)" % cname + signed = "((%s)-1) <= 0" % cname + else: + basetp = tp.build_baseinttype(self.ffi, []) + size = self.ffi.sizeof(basetp) + signed = int(int(self.ffi.cast(basetp, -1)) < 0) + allenums = ",".join(tp.enumerators) + self._lsts["enum"].append( + EnumExpr(tp.name, type_index, size, signed, allenums)) + + def _generate_cpy_enum_ctx(self, tp, name): + self._enum_ctx(tp, tp._get_c_name()) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_collecttype(self, tp, name): + pass + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + def _generate_cpy_macro_ctx(self, tp, name): + if tp == '...': + if self.target_is_python: + raise VerificationError( + "cannot use the syntax '...' in '#define %s ...' 
when " + "using the ABI mode" % (name,)) + check_value = None + else: + check_value = tp # an integer + type_op = CffiOp(OP_CONSTANT_INT, -1) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op, + check_value=check_value)) + + # ---------- + # global variables + + def _global_type(self, tp, global_name): + if isinstance(tp, model.ArrayType): + actual_length = tp.length + if actual_length == '...': + actual_length = '_cffi_array_len(%s)' % (global_name,) + tp_item = self._global_type(tp.item, '%s[0]' % global_name) + tp = model.ArrayType(tp_item, actual_length) + return tp + + def _generate_cpy_variable_collecttype(self, tp, name): + self._do_collect_type(self._global_type(tp, name)) + + def _generate_cpy_variable_decl(self, tp, name): + prnt = self._prnt + tp = self._global_type(tp, name) + if isinstance(tp, model.ArrayType) and tp.length is None: + tp = tp.item + ampersand = '' + else: + ampersand = '&' + # This code assumes that casts from "tp *" to "void *" is a + # no-op, i.e. a function that returns a "tp *" can be called + # as if it returned a "void *". This should be generally true + # on any modern machine. The only exception to that rule (on + # uncommon architectures, and as far as I can tell) might be + # if 'tp' were a function type, but that is not possible here. + # (If 'tp' is a function _pointer_ type, then casts from "fn_t + # **" to "void *" are again no-ops, as far as I can tell.) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) + prnt('{') + prnt(' return %s(%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_cpy_variable_ctx(self, tp, name): + tp = self._global_type(tp, name) + type_index = self._typesdict[tp] + if self.target_is_python: + op = OP_GLOBAL_VAR + else: + op = OP_GLOBAL_VAR_F + self._lsts["global"].append( + GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) + + # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype + + def _extern_python_decl(self, tp, name, tag_and_space): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s.%s", %s, 0, 0 };' % ( + self.module_name, name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + if tp.abi == "__stdcall": + name_and_arguments = '_cffi_stdcall ' + name_and_arguments + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx + + def _print_string_literal_in_array(self, s): + prnt = self._prnt + prnt('// # NB. this is not a string because of a size limit in MSVC') + if not isinstance(s, bytes): # unicode + s = s.encode('utf-8') # -> bytes + else: + s.decode('utf-8') # got bytes, check for valid utf-8 + try: + s.decode('ascii') + except UnicodeDecodeError: + s = b'# -*- encoding: utf8 -*-\n' + s + for line in s.splitlines(True): + comment = line + if type('//') is bytes: # python2 + line = map(ord, line) # make a list of integers + else: # python3 + # type(line) is bytes, which enumerates like a list of integers + comment = ascii(comment)[1:-1] + prnt(('// ' + comment).rstrip()) + printed_line = '' + for c in line: + if len(printed_line) >= 76: + prnt(printed_line) + printed_line = '' + printed_line += '%d,' % (c,) + prnt(printed_line) + + # ---------- + # emitting the opcodes for individual types + + def _emit_bytecode_VoidType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) + + def _emit_bytecode_PrimitiveType(self, tp, index): + prim_index = PRIMITIVE_TO_INDEX[tp.name] + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) + + def _emit_bytecode_UnknownIntegerType(self, tp, index): + s = ('_cffi_prim_int(sizeof(%s), (\n' + ' ((%s)-1) | 0 /* check that %s is an integer type */\n' + ' ) <= 0)' % (tp.name, tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_RawFunctionType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) + index += 1 + for tp1 in tp.args: + realindex = self._typesdict[tp1] + if index != realindex: + if isinstance(tp1, model.PrimitiveType): + self._emit_bytecode_PrimitiveType(tp1, index) + 
else: + self.cffi_types[index] = CffiOp(OP_NOOP, realindex) + index += 1 + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) + + def _emit_bytecode_PointerType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) + + _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType + _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType + + def _emit_bytecode_FunctionPtrType(self, tp, index): + raw = tp.as_raw_function() + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) + + def _emit_bytecode_ArrayType(self, tp, index): + item_index = self._typesdict[tp.item] + if tp.length is None: + self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) + elif tp.length == '...': + raise VerificationError( + "type %s badly placed: the '...' array length can only be " + "used on global arrays or on fields of structures" % ( + str(tp).replace('/*...*/', '...'),)) + else: + assert self.cffi_types[index + 1] == 'LEN' + self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) + self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) + + def _emit_bytecode_StructType(self, tp, index): + struct_index = self._struct_unions[tp] + self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) + _emit_bytecode_UnionType = _emit_bytecode_StructType + + def _emit_bytecode_EnumType(self, tp, index): + enum_index = self._enums[tp] + self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) + recompiler = Recompiler(ffi, module_name, + target_is_python=(preamble is None)) + recompiler.collect_type_table() + recompiler.collect_step_tables() + f = NativeIO() + recompiler.write_source_to_f(f, preamble) + output = f.getvalue() + try: + with open(target_file, 'r') as f1: + if f1.read(len(output) + 1) != output: + raise IOError + if verbose: + print("(already up-to-date)") + return False # already up-to-date + except IOError: + tmp_file = '%s.~%d' % (target_file, os.getpid()) + with open(tmp_file, 'w') as f1: + f1.write(output) + try: + os.rename(tmp_file, target_file) + except OSError: + os.unlink(target_file) + os.rename(tmp_file, target_file) + return True + +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): + assert preamble is not None + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) + +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) + +def _modname_to_file(outputdir, modname, extension): + parts = modname.split('.') + try: + os.makedirs(os.path.join(outputdir, *parts[:-1])) + except OSError: + pass + parts[-1] += extension + return os.path.join(outputdir, *parts), parts + + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. Here are some hacks to work +# around that, in the _patch_for_*() functions... 
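`_make_c_or_py_source()` above dispatches on `preamble is None`: with a preamble it writes C source via `make_c_source()`, without one it writes a pure-Python module via `make_py_source()` (ABI out-of-line mode). A sketch of the ABI path, assuming a POSIX libc is available at runtime:

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("int printf(const char *format, ...);")
    ffi.set_source("_abi_example", None)   # None => make_py_source(), no C compiler

    if __name__ == "__main__":
        ffi.compile()                      # writes _abi_example.py

The generated module is then loaded with dlopen, e.g. `from _abi_example import ffi; lib = ffi.dlopen(None); lib.printf(b"hello\n")`.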
+ +def _patch_meth(patchlist, cls, name, new_meth): + old = getattr(cls, name) + patchlist.append((cls, name, old)) + setattr(cls, name, new_meth) + return old + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! + from distutils.msvc9compiler import MSVCCompiler + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from distutils.ccompiler import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) + +def _patch_for_target(patchlist, target): + from distutils.command.build_ext import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + +def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, target=None, debug=None, **kwds): + if not isinstance(module_name, str): + module_name = module_name.encode('ascii') + if ffi._windows_unicode: + ffi._apply_windows_unicode(kwds) + if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) + if c_file is None: + c_file, parts = _modname_to_file(tmpdir, module_name, + source_extension) + if extradir: + parts = [extradir] + parts + ext_c_file = os.path.join(*parts) + else: + ext_c_file = c_file + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + # + ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) + if call_c_compiler: + patchlist = [] + cwd = os.getcwd() + try: + if embedding: + _patch_for_embedding(patchlist) + if target != '*': + _patch_for_target(patchlist, target) + if compiler_verbose: + if tmpdir == '.': + msg = 'the current directory is' + else: + msg = 'setting the current directory to' + print('%s %r' % (msg, os.path.abspath(tmpdir))) + os.chdir(tmpdir) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) + finally: + os.chdir(cwd) + _unpatch_meths(patchlist) + return outputfilename + else: + return ext, updated + else: + if c_file is None: + c_file, _ = _modname_to_file(tmpdir, module_name, '.py') + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) + if call_c_compiler: + return c_file + else: + return None, updated + diff --git a/myenv/lib/python3.9/site-packages/cffi/setuptools_ext.py b/myenv/lib/python3.9/site-packages/cffi/setuptools_ext.py new file mode 100644 index 0000000..8fe3614 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/cffi/setuptools_ext.py @@ -0,0 +1,219 @@ +import os +import sys + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +def error(msg): + from distutils.errors import DistutilsSetupError + raise DistutilsSetupError(msg) + + +def execfile(filename, glob): + # We use execfile() (here rewritten for Python 3) instead of + # __import__() to load the build script. The problem with + # a normal import is that in some packages, the intermediate + # __init__.py files may already try to import the file that + # we are generating. + with open(filename) as f: + src = f.read() + src += '\n' # Python 2.6 compatibility + code = compile(src, filename, 'exec') + exec(code, glob, glob) + + +def add_cffi_module(dist, mod_spec): + from cffi.api import FFI + + if not isinstance(mod_spec, basestring): + error("argument to 'cffi_modules=...' must be a str or a list of str," + " not %r" % (type(mod_spec).__name__,)) + mod_spec = str(mod_spec) + try: + build_file_name, ffi_var_name = mod_spec.split(':') + except ValueError: + error("%r must be of the form 'path/build.py:ffi_variable'" % + (mod_spec,)) + if not os.path.exists(build_file_name): + ext = '' + rewritten = build_file_name.replace('.', '/') + '.py' + if os.path.exists(rewritten): + ext = ' (rewrite cffi_modules to [%r])' % ( + rewritten + ':' + ffi_var_name,) + error("%r does not name an existing file%s" % (build_file_name, ext)) + + mod_vars = {'__name__': '__cffi__', '__file__': build_file_name} + execfile(build_file_name, mod_vars) + + try: + ffi = mod_vars[ffi_var_name] + except KeyError: + error("%r: object %r not found in module" % (mod_spec, + ffi_var_name)) + if not isinstance(ffi, FFI): + ffi = ffi() # maybe it's a function instead of directly an ffi + if not isinstance(ffi, FFI): + error("%r is not an FFI instance (got %r)" % (mod_spec, + type(ffi).__name__)) + if not hasattr(ffi, '_assigned_source'): + error("%r: the set_source() method was not called" % (mod_spec,)) + module_name, source, source_extension, kwds = ffi._assigned_source + if ffi._windows_unicode: + kwds = kwds.copy() + ffi._apply_windows_unicode(kwds) + + if source is None: + _add_py_module(dist, ffi, module_name) + else: + _add_c_module(dist, ffi, module_name, source, source_extension, kwds) + +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + + CPython itself should ignore the flag in a debugging version + (by not listing .abi3.so in the extensions it supports), but + it doesn't so far, creating troubles. That's why we check + for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent + of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401) + + On Windows, with CPython <= 3.4, it's better not to use py_limited_api + because virtualenv *still* doesn't copy PYTHON3.DLL on these versions. + Recently (2020) we started shipping only >= 3.5 wheels, though. So + we'll give it another try and set py_limited_api on Windows >= 3.5. 
+ """ + from cffi import recompiler + + if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount') + and recompiler.USE_LIMITED_API): + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds + +def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): + from distutils.core import Extension + # We are a setuptools extension. Need this build_ext for py_limited_api. + from setuptools.command.build_ext import build_ext + from distutils.dir_util import mkpath + from distutils import log + from cffi import recompiler + + allsources = ['$PLACEHOLDER'] + allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) + ext = Extension(name=module_name, sources=allsources, **kwds) + + def make_mod(tmpdir, pre_run=None): + c_file = os.path.join(tmpdir, module_name + source_extension) + log.info("generating cffi module %r" % c_file) + mkpath(tmpdir) + # a setuptools-only, API-only hook: called with the "ext" and "ffi" + # arguments just before we turn the ffi into C code. To use it, + # subclass the 'distutils.command.build_ext.build_ext' class and + # add a method 'def pre_run(self, ext, ffi)'. + if pre_run is not None: + pre_run(ext, ffi) + updated = recompiler.make_c_source(ffi, module_name, source, c_file) + if not updated: + log.info("already up-to-date") + return c_file + + if dist.ext_modules is None: + dist.ext_modules = [] + dist.ext_modules.append(ext) + + base_class = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class): + def run(self): + if ext.sources[0] == '$PLACEHOLDER': + pre_run = getattr(self, 'pre_run', None) + ext.sources[0] = make_mod(self.build_temp, pre_run) + base_class.run(self) + dist.cmdclass['build_ext'] = build_ext_make_mod + # NB. multiple runs here will create multiple 'build_ext_make_mod' + # classes. Even in this case the 'build_ext' command should be + # run once; but just in case, the logic above does nothing if + # called again. + + +def _add_py_module(dist, ffi, module_name): + from distutils.dir_util import mkpath + from setuptools.command.build_py import build_py + from setuptools.command.build_ext import build_ext + from distutils import log + from cffi import recompiler + + def generate_mod(py_file): + log.info("generating cffi module %r" % py_file) + mkpath(os.path.dirname(py_file)) + updated = recompiler.make_py_source(ffi, module_name, py_file) + if not updated: + log.info("already up-to-date") + + base_class = dist.cmdclass.get('build_py', build_py) + class build_py_make_mod(base_class): + def run(self): + base_class.run(self) + module_path = module_name.split('.') + module_path[-1] += '.py' + generate_mod(os.path.join(self.build_lib, *module_path)) + def get_source_files(self): + # This is called from 'setup.py sdist' only. Exclude + # the generate .py module in this case. 
+ saved_py_modules = self.py_modules + try: + if saved_py_modules: + self.py_modules = [m for m in saved_py_modules + if m != module_name] + return base_class.get_source_files(self) + finally: + self.py_modules = saved_py_modules + dist.cmdclass['build_py'] = build_py_make_mod + + # distutils and setuptools have no notion I could find of a + # generated python module. If we don't add module_name to + # dist.py_modules, then things mostly work but there are some + # combination of options (--root and --record) that will miss + # the module. So we add it here, which gives a few apparently + # harmless warnings about not finding the file outside the + # build directory. + # Then we need to hack more in get_source_files(); see above. + if dist.py_modules is None: + dist.py_modules = [] + dist.py_modules.append(module_name) + + # the following is only for "build_ext -i" + base_class_2 = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class_2): + def run(self): + base_class_2.run(self) + if self.inplace: + # from get_ext_fullpath() in distutils/command/build_ext.py + module_path = module_name.split('.') + package = '.'.join(module_path[:-1]) + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + file_name = module_path[-1] + '.py' + generate_mod(os.path.join(package_dir, file_name)) + dist.cmdclass['build_ext'] = build_ext_make_mod + +def cffi_modules(dist, attr, value): + assert attr == 'cffi_modules' + if isinstance(value, basestring): + value = [value] + + for cffi_module in value: + add_cffi_module(dist, cffi_module) diff --git a/myenv/lib/python3.9/site-packages/cffi/vengine_cpy.py b/myenv/lib/python3.9/site-packages/cffi/vengine_cpy.py new file mode 100644 index 0000000..6de0df0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/vengine_cpy.py @@ -0,0 +1,1076 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, imp +from . import model +from .error import VerificationError + + +class VCPythonEngine(object): + _class_key = 'x' + _gen_python_module = True + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self._struct_pending_verification = {} + self._types_of_builtin_functions = {} + + def patch_extension_kwds(self, kwds): + pass + + def find_module(self, module_name, path, so_suffixes): + try: + f, filename, descr = imp.find_module(module_name, path) + except ImportError: + return None + if f is not None: + f.close() + # Note that after a setuptools installation, there are both .py + # and .so files with the same basename. The code here relies on + # imp.find_module() locating the .so in priority. + if descr[0] not in so_suffixes: + return None + return filename + + def collect_types(self): + self._typesdict = {} + self._generate("collecttype") + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! :-) + return self._typesdict[type] + + def _do_collect_type(self, tp): + if ((not isinstance(tp, model.PrimitiveType) + or tp.name == 'long double') + and tp not in self._typesdict): + num = len(self._typesdict) + self._typesdict[tp] = num + + def write_source_to_f(self): + self.collect_types() + # + # The new module will have a _cffi_setup() function that receives + # objects from the ffi world, and that calls some setup code in + # the module. This setup code is split in several independent + # functions, e.g. one per constant. 
The functions are "chained" + # by ending in a tail call to each other. + # + # This is further split in two chained lists, depending on if we + # can do it at import-time or if we must wait for _cffi_setup() to + # provide us with the objects. This is needed because we + # need the values of the enum constants in order to build the + # that we may have to pass to _cffi_setup(). + # + # The following two 'chained_list_constants' items contains + # the head of these two chained lists, as a string that gives the + # call to do, if any. + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] + # + prnt = self._prnt + # first paste some standard set of lines that are mostly '#define' + prnt(cffimod_header) + prnt() + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. + self._generate("decl") + # + # implement the function _cffi_setup_custom() as calling the + # head of the chained list. + self._generate_setup_custom() + prnt() + # + # produce the method table, including the entries for the + # generated Python->C function wrappers, which are done + # by generate_cpy_function_method(). + prnt('static PyMethodDef _cffi_methods[] = {') + self._generate("method") + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') + prnt('};') + prnt() + # + # standard init. + modname = self.verifier.get_module_name() + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') + prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') + + def load_library(self, flags=None): + # XXX review all usages of 'self' here! + # import it as a new extension module + imp.acquire_lock() + try: + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() + try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) + module = imp.load_dynamic(self.verifier.get_module_name(), + self.verifier.modulefilename) + except ImportError as e: + error = "importing %r: %s" % (self.verifier.modulefilename, e) + raise VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) + finally: + imp.release_lock() + # + # call loading_cpy_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + # + # the C code will need the objects. Collect them in + # order in a list. 
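Everything in this engine supports the deprecated `ffi.verify()` API: the compiled module is imported with `imp.load_dynamic()` and its `_cffi_setup()` then populates the FFILibrary instance through the chained constant initializers described above. A minimal sketch of the calling side, kept only for old code:

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("int add(int a, int b);")
    lib = ffi.verify("static int add(int a, int b) { return a + b; }")
    assert lib.add(2, 3) == 5

New code should prefer the set_source()/compile() flow handled by recompiler.py.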
+ revmapping = dict([(value, key) + for (key, value) in self._typesdict.items()]) + lst = [revmapping[i] for i in range(len(revmapping))] + lst = list(map(self.ffi._get_cached_btype, lst)) + # + # build the FFILibrary class and instance and call _cffi_setup(). + # this will set up some fields like '_cffi_types', and only then + # it will invoke the chained list of functions that will really + # build (notably) the constant objects, as if they are + # pointers, and store them as attributes on the 'library' object. + class FFILibrary(object): + _cffi_python_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) + library = FFILibrary() + if module._cffi_setup(lst, VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) + # + # finally, call the loaded_cpy_xxx() functions. This will perform + # the final adjustments, like copying the Python->C wrapper + # functions from the module to the 'library' object, and setting + # up the FFILibrary class with properties for the global C variables. + self._load(module, 'loaded', library=library) + module._cffi_original_ffi = self.ffi + module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_cpy_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + + def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): + extraarg = '' + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + converter = '_cffi_to_c_int' + extraarg = ', %s' % tp.name + else: + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) + errvalue = '-1' + # + elif isinstance(tp, model.PointerType): + self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, + tovar, errcode) + return + # + elif isinstance(tp, (model.StructOrUnion, model.EnumType)): + # a struct (not a struct pointer) as a function argument + self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' + % (tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + return + # + elif isinstance(tp, model.FunctionPtrType): + converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') + extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) + errvalue = 'NULL' + # + else: + raise NotImplementedError(tp) + # + self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) + self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( + tovar, tp.get_c_name(''), errvalue)) + self._prnt(' %s;' % errcode) + + def _extra_local_variables(self, tp, 
localvars, freelines): + if isinstance(tp, model.PointerType): + localvars.add('Py_ssize_t datasize') + localvars.add('struct _cffi_freeme_s *large_args_free = NULL') + freelines.add('if (large_args_free != NULL)' + ' _cffi_free_array_arguments(large_args_free);') + + def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): + self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') + self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( + self._gettypenum(tp), fromvar, tovar)) + self._prnt(' if (datasize != 0) {') + self._prnt(' %s = ((size_t)datasize) <= 640 ? ' + 'alloca((size_t)datasize) : NULL;' % (tovar,)) + self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' + '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) + self._prnt(' datasize, &large_args_free) < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif tp.name != 'long double': + return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs: generates no code so far + + _generate_cpy_typedef_collecttype = _generate_nothing + _generate_cpy_typedef_decl = _generate_nothing + _generate_cpy_typedef_method = _generate_nothing + _loading_cpy_typedef = _loaded_noop + _loaded_cpy_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + self._do_collect_type(tp) + else: + # don't call _do_collect_type(tp) in this common case, + # otherwise test_autofilled_struct_as_argument fails + for type in tp.args: + self._do_collect_type(type) + self._do_collect_type(tp.result) + + def _generate_cpy_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + prnt(' %s;' % type.get_c_name(' x%d' % i, context)) + # + localvars = set() + freelines = set() + for type in tp.args: + self._extra_local_variables(type, 
localvars, freelines) + for decl in sorted(localvars): + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + prnt(' %s;' % tp.result.get_c_name(' result', context)) + prnt(' PyObject *pyresult;') + else: + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % ( + 'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + prnt(' { %s%s(%s); }' % ( + result_code, name, + ', '.join(['x%d' % i for i in range(len(tp.args))]))) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' pyresult = %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + for freeline in freelines: + prnt(' ' + freeline) + prnt(' return pyresult;') + else: + for freeline in freelines: + prnt(' ' + freeline) + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + prnt() + + def _generate_cpy_function_method(self, tp, name): + if tp.ellipsis: + return + numargs = len(tp.args) + if numargs == 0: + meth = 'METH_NOARGS' + elif numargs == 1: + meth = 'METH_O' + else: + meth = 'METH_VARARGS' + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) + + _loading_cpy_function = _loaded_noop + + def _loaded_cpy_function(self, tp, name, module, library): + if tp.ellipsis: + return + func = getattr(module, name) + setattr(library, name, func) + self._types_of_builtin_functions[func] = tp + + # ---------- + # named structs + + _generate_cpy_struct_collecttype = _generate_nothing + def _generate_cpy_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + def _generate_cpy_struct_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'struct', name) + def _loading_cpy_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + def _loaded_cpy_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + _generate_cpy_union_collecttype = _generate_nothing + def _generate_cpy_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + def _generate_cpy_union_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'union', name) + def _loading_cpy_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + def _loaded_cpy_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + and ftype.is_integer_type()) or fbitsize >= 
0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. + try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('static PyObject *') + prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static Py_ssize_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') + prnt(' return _cffi_get_struct_layout(nums);') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _generate_struct_or_union_method(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + function = getattr(module, layoutfuncname) + layout = function() + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. 
These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + _generate_cpy_anonymous_collecttype = _generate_nothing + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _generate_cpy_anonymous_method(self, tp, name): + if not isinstance(tp, model.EnumType): + self._generate_struct_or_union_method(tp, '', name) + + def _loading_cpy_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_cpy_enum(tp, name, module) + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_cpy_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_cpy_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + vartp=None, delayed=True, size_too=False, + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + prnt('static int %s(PyObject *lib)' % funcname) + prnt('{') + prnt(' PyObject *o;') + prnt(' int res;') + if not is_int: + prnt(' %s;' % (vartp or tp).get_c_name(' i', name)) + else: + assert category == 'const' + # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # + if not is_int: + if category == 'var': + realexpr = '&' + name + else: + realexpr = name + prnt(' i = (%s);' % (realexpr,)) + prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i', + 'variable type'),)) + assert delayed + else: + prnt(' o = _cffi_from_c_int_const(%s);' % name) + prnt(' if (o == NULL)') + prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') + prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) + prnt(' Py_DECREF(o);') + prnt(' if (res < 0)') + prnt(' return -1;') + prnt(' return %s;' % self._chained_list_constants[delayed]) + self._chained_list_constants[delayed] = funcname + '(lib)' + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + if not is_int: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + _generate_cpy_constant_method = _generate_nothing + _loading_cpy_constant = _loaded_noop + _loaded_cpy_constant = _loaded_noop + + # ---------- + # enums + + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = 
name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator, delayed=False) + return + # + funcname = self._enum_funcname(prefix, name) + prnt = self._prnt + prnt('static int %s(PyObject *lib)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) + prnt(' return %s;' % self._chained_list_constants[True]) + self._chained_list_constants[True] = funcname + '(lib)' + prnt('}') + prnt() + + _generate_cpy_enum_collecttype = _generate_nothing + _generate_cpy_enum_method = _generate_nothing + + def _loading_cpy_enum(self, tp, name, module): + if tp.partial: + enumvalues = [getattr(module, enumerator) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + + def _loaded_cpy_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + _generate_cpy_macro_collecttype = _generate_nothing + _generate_cpy_macro_method = _generate_nothing + _loading_cpy_macro = _loaded_noop + _loaded_cpy_macro = _loaded_noop + + # ---------- + # global variables + + def _generate_cpy_variable_collecttype(self, tp, name): + if isinstance(tp, model.ArrayType): + tp_ptr = model.PointerType(tp.item) + else: + tp_ptr = model.PointerType(tp) + self._do_collect_type(tp_ptr) + + def _generate_cpy_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + tp_ptr = model.PointerType(tp.item) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = tp.length_is_unknown()) + else: + tp_ptr = model.PointerType(tp) + self._generate_cpy_const(False, name, tp_ptr, category='var') + + _generate_cpy_variable_method = _generate_nothing + _loading_cpy_variable = _loaded_noop + + def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." is forbidden + if tp.length_is_unknown(): + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return + # remove ptr= from the library instance, and replace + # it by a property on the class, which reads/writes into ptr[0]. 
+ ptr = value + delattr(library, name) + def getter(library): + return ptr[0] + def setter(library, value): + ptr[0] = value + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) + + # ---------- + + def _generate_setup_custom(self): + prnt = self._prnt + prnt('static int _cffi_setup_custom(PyObject *lib)') + prnt('{') + prnt(' return %s;' % self._chained_list_constants[True]) + prnt('}') + +cffimod_header = r''' +#include +#include + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include +# endif +#endif + +#if PY_MAJOR_VERSION < 3 +# undef PyCapsule_CheckExact +# undef PyCapsule_GetPointer +# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule)) +# define PyCapsule_GetPointer(capsule, name) \ + (PyCObject_AsVoidPtr(capsule)) +#endif + +#if PY_MAJOR_VERSION >= 3 +# define PyInt_FromLong PyLong_FromLong +#endif + +#define _cffi_from_c_double PyFloat_FromDouble +#define _cffi_from_c_float PyFloat_FromDouble +#define _cffi_from_c_long PyInt_FromLong +#define _cffi_from_c_ulong PyLong_FromUnsignedLong +#define _cffi_from_c_longlong PyLong_FromLongLong +#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong + +#define _cffi_to_c_double PyFloat_AsDouble +#define _cffi_to_c_float PyFloat_AsDouble + +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) + +#define _cffi_to_c_int(o, type) \ + ((type)( \ + sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? 
(((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + ((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12]) +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_NUM_EXPORTS 25 + +typedef struct _ctypedescr CTypeDescrObject; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; +static PyObject *_cffi_types, *_cffi_VerificationError; + +static int _cffi_setup_custom(PyObject *lib); /* forward */ + +static PyObject *_cffi_setup(PyObject *self, PyObject *args) +{ + PyObject *library; + int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ + if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, + &library)) + return NULL; + Py_INCREF(_cffi_types); + Py_INCREF(_cffi_VerificationError); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); +} + +union _cffi_union_alignment_u { + unsigned char m_char; + unsigned short m_short; + unsigned int m_int; + unsigned long m_long; + unsigned long long m_longlong; + float m_float; + double m_double; + long double m_longdouble; +}; + +struct _cffi_freeme_s { + struct _cffi_freeme_s *next; + union _cffi_union_alignment_u alignment; +}; + +#ifdef __GNUC__ + __attribute__((unused)) +#endif +static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg, + char **output_data, Py_ssize_t datasize, + struct _cffi_freeme_s **freeme) +{ + char *p; + if (datasize < 0) + return -1; + + 
p = *output_data; + if (p == NULL) { + struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc( + offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize); + if (fp == NULL) + return -1; + fp->next = *freeme; + *freeme = fp; + p = *output_data = (char *)&fp->alignment; + } + memset((void *)p, 0, (size_t)datasize); + return _cffi_convert_array_from_object(p, ctptr, arg); +} + +#ifdef __GNUC__ + __attribute__((unused)) +#endif +static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme) +{ + do { + void *p = (void *)freeme; + freeme = freeme->next; + PyObject_Free(p); + } while (freeme != NULL); +} + +static int _cffi_init(void) +{ + PyObject *module, *c_api_object = NULL; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + c_api_object = PyObject_GetAttrString(module, "_C_API"); + if (c_api_object == NULL) + goto failure; + if (!PyCapsule_CheckExact(c_api_object)) { + PyErr_SetNone(PyExc_ImportError); + goto failure; + } + memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), + _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); + Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; +} + +#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) + +/**********/ +''' diff --git a/myenv/lib/python3.9/site-packages/cffi/vengine_gen.py b/myenv/lib/python3.9/site-packages/cffi/vengine_gen.py new file mode 100644 index 0000000..2642152 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/vengine_gen.py @@ -0,0 +1,675 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os +import types + +from . import model +from .error import VerificationError + + +class VGenericEngine(object): + _class_key = 'g' + _gen_python_module = False + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self.export_symbols = [] + self._struct_pending_verification = {} + + def patch_extension_kwds(self, kwds): + # add 'export_symbols' to the dictionary. Note that we add the + # list before filling it. When we fill it, it will thus also show + # up in kwds['export_symbols']. + kwds.setdefault('export_symbols', self.export_symbols) + + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + + def collect_types(self): + pass # not needed in the generic engine + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self): + prnt = self._prnt + # first paste some standard set of lines that are mostly '#include' + prnt(cffimod_header) + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + # + # call generate_gen_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. 
+ self._generate('decl') + # + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + if sys.platform == 'win32': + if sys.version_info >= (3,): + prefix = 'PyInit_' + else: + prefix = 'init' + modname = self.verifier.get_module_name() + prnt("void %s%s(void) { }\n" % (prefix, modname)) + + def load_library(self, flags=0): + # import it with the CFFI backend + backend = self.ffi._backend + # needs to make a path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename, flags) + # + # call loading_gen_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + + # build the FFILibrary class and instance, this is a module subclass + # because modules are expected to have usually-constant-attributes and + # in PyPy this means the JIT is able to treat attributes as constant, + # which we want. + class FFILibrary(types.ModuleType): + _cffi_generic_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + library = FFILibrary("") + # + # finally, call the loaded_gen_xxx() functions. This will set + # up the 'library' object. + self._load(module, 'loaded', library=library) + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_gen_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_gen_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + # typedefs: generates no code so far + + _generate_gen_typedef_decl = _generate_nothing + _loading_gen_typedef = _loaded_noop + _loaded_gen_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_gen_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no _cffi_f_%s wrapper) + self._generate_gen_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + argnames = [] + for i, type in enumerate(tp.args): + indirection = '' + if isinstance(type, model.StructOrUnion): + indirection = '*' + argnames.append('%sx%d' % (indirection, i)) + context = 'argument of %s' % name + arglist = [type.get_c_name(' %s' % arg, context) + for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type + arglist = ', '.join(arglist) or 'void' + wrappername = '_cffi_f_%s' % name + self.export_symbols.append(wrappername) + if tp.abi: + abi = 
tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) + context = 'result of %s' % name + prnt(tpresult.get_c_name(funcdecl, context)) + prnt('{') + # + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): + result_code = 'return ' + else: + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) + prnt('}') + prnt() + + _loading_gen_function = _loaded_noop + + def _loaded_gen_function(self, tp, name, module, library): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + newfunction = self._load_constant(False, tp, name, module) + else: + indirections = [] + base_tp = tp + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): + indirect_args = [] + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) + indirect_result = model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type + tp = model.FunctionPtrType(tuple(indirect_args), + indirect_result, tp.ellipsis) + BFunc = self.ffi._get_cached_btype(tp) + wrappername = '_cffi_f_%s' % name + newfunction = module.load_function(BFunc, wrappername) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) + setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) + + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): + backend = self.ffi._backend + BType = self.ffi._get_cached_btype(tp) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) + newfunc._cffi_base_type = base_tp + return newfunc + + # ---------- + # named structs + + def _generate_gen_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + + def _loading_gen_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + + def _loaded_gen_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_gen_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + + def _loading_gen_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + + def _loaded_gen_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + 
and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. + try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + self.export_symbols.append(layoutfuncname) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static intptr_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' return nums[i];') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] + function = module.load_function(BFunc, layoutfuncname) + layout = [] + num = 0 + while True: + x = function(num) + if x < 0: break + layout.append(x) + num += 1 + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
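For orientation, a minimal sketch of how the partial-struct layout helpers above (the generated _cffi_layout_* function plus the fixedlayout bookkeeping) are exercised through the deprecated ffi.verify() flow. The struct name and fields are made up for illustration and are not part of the vendored code:

    import cffi

    ffi = cffi.FFI()
    # The trailing "..." marks the struct as partial, so its offsets and sizes
    # come back from the C compiler via the generated layout helper.
    ffi.cdef("struct point_s { int x; ...; };")
    lib = ffi.verify("struct point_s { int pad; int x; };")

    p = ffi.new("struct point_s *")
    p.x = 42   # 'x' lands at the compiler-reported offset, after 'pad'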
+ + def _generate_gen_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_gen_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _loading_gen_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_gen_enum(tp, name, module, '') + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_gen_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_gen_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + self.export_symbols.append(funcname) + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: + assert category == 'const' + prnt('int %s(long long *out_value)' % funcname) + prnt('{') + prnt(' *out_value = (long long)(%s);' % (name,)) + prnt(' return (%s) <= 0;' % (name,)) + prnt('}') + else: + assert tp is not None + assert check_value is None + if category == 'var': + ampersand = '&' + else: + ampersand = '' + extra = '' + if category == 'const' and isinstance(tp, model.StructOrUnion): + extra = 'const *' + ampersand = '&' + prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) + prnt('{') + prnt(' return (%s%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_gen_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_gen_const(is_int, name, tp) + + _loading_gen_constant = _loaded_noop + + def _load_constant(self, is_int, tp, name, module, check_value=None): + funcname = '_cffi_const_%s' % name + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType) + negative = function(p) + value = int(p[0]) + if value < 0 and not negative: + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) + else: + assert check_value is None + fntypeextra = '(*)(void)' + if isinstance(tp, model.StructOrUnion): + fntypeextra = '*' + fntypeextra + BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] + function = module.load_function(BFunc, funcname) + value = function() + if isinstance(tp, model.StructOrUnion): + value = value[0] + return value + + def _loaded_gen_constant(self, tp, name, module, library): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + value = self._load_constant(is_int, tp, name, module) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # enums + + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", 
(unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise VerificationError(error) + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_gen_const(True, enumerator) + return + # + funcname = self._enum_funcname(prefix, name) + self.export_symbols.append(funcname) + prnt = self._prnt + prnt('int %s(char *out_error)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue) + prnt(' return 0;') + prnt('}') + prnt() + + def _loading_gen_enum(self, tp, name, module, prefix='enum'): + if tp.partial: + enumvalues = [self._load_constant(True, tp, enumerator, module) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + else: + funcname = self._enum_funcname(prefix, name) + self._load_known_int_constant(module, funcname) + + def _loaded_gen_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) + + # ---------- + # macros: for now only for integers + + def _generate_gen_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) + + _loading_gen_macro = _loaded_noop + + def _loaded_gen_macro(self, tp, name, module, library): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # global variables + + def _generate_gen_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + if tp.length_is_unknown(): + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") + tp_ptr = model.PointerType(tp.item) + self._generate_gen_const(False, name, tp_ptr) + else: + tp_ptr = model.PointerType(tp) + self._generate_gen_const(False, name, tp_ptr, category='var') + + _loading_gen_variable = _loaded_noop + + def _loaded_gen_variable(self, tp, name, module, library): + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." 
is forbidden + if tp.length_is_unknown(): + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + tp_ptr = model.PointerType(tp.item) + value = self._load_constant(False, tp_ptr, name, module) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + return + # remove ptr= from the library instance, and replace + # it by a property on the class, which reads/writes into ptr[0]. + funcname = '_cffi_var_%s' % name + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] + function = module.load_function(BFunc, funcname) + ptr = function() + def getter(library): + return ptr[0] + def setter(library, value): + ptr[0] = value + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) + +cffimod_header = r''' +#include +#include +#include +#include +#include /* XXX for ssize_t on some platforms */ + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include +# endif +#endif +''' diff --git a/myenv/lib/python3.9/site-packages/cffi/verifier.py b/myenv/lib/python3.9/site-packages/cffi/verifier.py new file mode 100644 index 0000000..a500c78 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cffi/verifier.py @@ -0,0 +1,307 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os, binascii, shutil, io +from . import __version_verifier_modules__ +from . 
import ffiplatform +from .error import VerificationError + +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + + +class Verifier(object): + + def __init__(self, ffi, preamble, tmpdir=None, modulename=None, + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): + if ffi._parser._uses_new_feature: + raise VerificationError( + "feature not supported with ffi.verify(), but only " + "with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,)) + self.ffi = ffi + self.preamble = preamble + if not modulename: + flattened_kwds = ffiplatform.flatten(kwds) + vengine_class = _locate_engine_class(ffi, force_generic_engine) + self._vengine = vengine_class(self) + self._vengine.patch_extension_kwds(kwds) + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) + # + if modulename: + if tag: + raise TypeError("can't specify both 'modulename' and 'tag'") + else: + key = '\x00'.join(['%d.%d' % sys.version_info[:2], + __version_verifier_modules__, + preamble, flattened_kwds] + + ffi._cdefsources) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, + k1, k2) + suffix = _get_so_suffixes()[0] + self.tmpdir = tmpdir or _caller_dir_pycache() + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) + self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) + self.ext_package = ext_package + self._has_source = False + self._has_module = False + + def write_source(self, file=None): + """Write the C source code. It is produced in 'self.sourcefilename', + which can be tweaked beforehand.""" + with self.ffi._lock: + if self._has_source and file is None: + raise VerificationError( + "source code already written") + self._write_source(file) + + def compile_module(self): + """Write the C source code (if not done already) and compile it. + This produces a dynamic link library in 'self.modulefilename'.""" + with self.ffi._lock: + if self._has_module: + raise VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() + + def load_library(self): + """Get a C module from this Verifier instance. + Returns an instance of a FFILibrary class that behaves like the + objects returned by ffi.dlopen(), but that delegates all + operations to the C module. If necessary, the C code is written + and compiled first. 
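A minimal usage sketch of the load_library() path described above, following the canonical ffi.verify() example (assumes a POSIX toolchain and libm; not part of the vendored code):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("double sqrt(double x);")
    # verify() writes the C source, compiles a _cffi_* extension module under
    # __pycache__ (or the configured tmpdir), and loads it via the Verifier.
    lib = ffi.verify("#include <math.h>", libraries=["m"])
    print(lib.sqrt(2.0))   # 1.4142...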
+ """ + with self.ffi._lock: + if not self._has_module: + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() + + def get_module_name(self): + basename = os.path.basename(self.modulefilename) + # kill both the .so extension and the other .'s, as introduced + # by Python 3: 'basename.cpython-33m.so' + basename = basename.split('.', 1)[0] + # and the _d added in Python 2 debug builds --- but try to be + # conservative and not kill a legitimate _d + if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'): + basename = basename[:-2] + return basename + + def get_extension(self): + ffiplatform._hack_at_distutils() # backward compatibility hack + if not self._has_source: + with self.ffi._lock: + if not self._has_source: + self._write_source() + sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) + modname = self.get_module_name() + return ffiplatform.get_extension(sourcename, modname, **self.kwds) + + def generates_python_module(self): + return self._vengine._gen_python_module + + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + + # ---------- + + def _locate_module(self): + if not os.path.isfile(self.modulefilename): + if self.ext_package: + try: + pkg = __import__(self.ext_package, None, None, ['__doc__']) + except ImportError: + return # cannot import the package itself, give up + # (e.g. it might be called differently before installation) + path = pkg.__path__ + else: + path = None + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffixes()) + if filename is None: + return + self.modulefilename = filename + self._vengine.collect_types() + self._has_module = True + + def _write_source_to(self, file): + self._vengine._f = file + try: + self._vengine.write_source_to_f() + finally: + del self._vengine._f + + def _write_source(self, file=None): + if file is not None: + self._write_source_to(file) + else: + # Write our source file to an in memory file. 
+ f = NativeIO() + self._write_source_to(f) + source_data = f.getvalue() + + # Determine if this matches the current file + if os.path.exists(self.sourcefilename): + with open(self.sourcefilename, "r") as fp: + needs_written = not (fp.read() == source_data) + else: + needs_written = True + + # Actually write the file out if it doesn't match + if needs_written: + _ensure_dir(self.sourcefilename) + with open(self.sourcefilename, "w") as fp: + fp.write(source_data) + + # Set this flag + self._has_source = True + + def _compile_module(self): + # compile this C source + tmpdir = os.path.dirname(self.sourcefilename) + outputfilename = ffiplatform.compile(tmpdir, self.get_extension()) + try: + same = ffiplatform.samefile(outputfilename, self.modulefilename) + except OSError: + same = False + if not same: + _ensure_dir(self.modulefilename) + shutil.move(outputfilename, self.modulefilename) + self._has_module = True + + def _load_library(self): + assert self._has_module + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() + +# ____________________________________________________________ + +_FORCE_GENERIC_ENGINE = False # for tests + +def _locate_engine_class(ffi, force_generic_engine): + if _FORCE_GENERIC_ENGINE: + force_generic_engine = True + if not force_generic_engine: + if '__pypy__' in sys.builtin_module_names: + force_generic_engine = True + else: + try: + import _cffi_backend + except ImportError: + _cffi_backend = '?' + if ffi._backend is not _cffi_backend: + force_generic_engine = True + if force_generic_engine: + from . import vengine_gen + return vengine_gen.VGenericEngine + else: + from . import vengine_cpy + return vengine_cpy.VCPythonEngine + +# ____________________________________________________________ + +_TMPDIR = None + +def _caller_dir_pycache(): + if _TMPDIR: + return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result + filename = sys._getframe(2).f_code.co_filename + return os.path.abspath(os.path.join(os.path.dirname(filename), + '__pycache__')) + +def set_tmpdir(dirname): + """Set the temporary directory to use instead of __pycache__.""" + global _TMPDIR + _TMPDIR = dirname + +def cleanup_tmpdir(tmpdir=None, keep_so=False): + """Clean up the temporary directory by removing all files in it + called `_cffi_*.{c,so}` as well as the `build` subdirectory.""" + tmpdir = tmpdir or _caller_dir_pycache() + try: + filelist = os.listdir(tmpdir) + except OSError: + return + if keep_so: + suffix = '.c' # only remove .c files + else: + suffix = _get_so_suffixes()[0].lower() + for fn in filelist: + if fn.lower().startswith('_cffi_') and ( + fn.lower().endswith(suffix) or fn.lower().endswith('.c')): + try: + os.unlink(os.path.join(tmpdir, fn)) + except OSError: + pass + clean_dir = [os.path.join(tmpdir, 'build')] + for dir in clean_dir: + try: + for fn in os.listdir(dir): + fn = os.path.join(dir, fn) + if os.path.isdir(fn): + clean_dir.append(fn) + else: + os.unlink(fn) + except OSError: + pass + +def _get_so_suffixes(): + suffixes = _extension_suffixes() + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes + +def _ensure_dir(filename): + dirname = os.path.dirname(filename) + if dirname and not os.path.isdir(dirname): + os.makedirs(dirname) diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/LICENSE.rst b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/LICENSE.rst new file mode 100644 index 0000000..d12a849 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2014 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
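As a usage note on the verifier helpers added just above (set_tmpdir, cleanup_tmpdir), a small sketch with a made-up build directory; not part of the vendored code:

    from cffi import verifier

    verifier.set_tmpdir("/tmp/cffi_build")   # used instead of __pycache__
    # ... ffi.verify(...) calls now build their modules under /tmp/cffi_build ...
    verifier.cleanup_tmpdir(keep_so=True)    # remove generated _cffi_*.c, keep the .so files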
diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/METADATA b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/METADATA new file mode 100644 index 0000000..8e5dc1e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/METADATA @@ -0,0 +1,111 @@ +Metadata-Version: 2.1 +Name: click +Version: 8.1.3 +Summary: Composable command line interface toolkit +Home-page: https://palletsprojects.com/p/click/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Changes, https://click.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/click/ +Project-URL: Issue Tracker, https://github.com/pallets/click/issues/ +Project-URL: Twitter, https://twitter.com/PalletsTeam +Project-URL: Chat, https://discord.gg/pallets +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst +Requires-Dist: colorama ; platform_system == "Windows" +Requires-Dist: importlib-metadata ; python_version < "3.8" + +\$ click\_ +========== + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install -U click + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +A Simple Example +---------------- + +.. code-block:: python + + import click + + @click.command() + @click.option("--count", default=1, help="Number of greetings.") + @click.option("--name", prompt="Your name", help="The person to greet.") + def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo(f"Hello, {name}!") + + if __name__ == '__main__': + hello() + +.. code-block:: text + + $ python hello.py --count=3 + Your name: Click + Hello, Click! + Hello, Click! + Hello, Click! + + +Donate +------ + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. 
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://click.palletsprojects.com/ +- Changes: https://click.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/click/ +- Source Code: https://github.com/pallets/click +- Issue Tracker: https://github.com/pallets/click/issues +- Website: https://palletsprojects.com/p/click +- Twitter: https://twitter.com/PalletsTeam +- Chat: https://discord.gg/pallets + + diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/RECORD b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/RECORD new file mode 100644 index 0000000..14981e3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/RECORD @@ -0,0 +1,23 @@ +click/__init__.py,sha256=rQBLutqg-z6m8nOzivIfigDn_emijB_dKv9BZ2FNi5s,3138 +click/_compat.py,sha256=JIHLYs7Jzz4KT9t-ds4o4jBzLjnwCiJQKqur-5iwCKI,18810 +click/_termui_impl.py,sha256=qK6Cfy4mRFxvxE8dya8RBhLpSC8HjF-lvBc6aNrPdwg,23451 +click/_textwrap.py,sha256=10fQ64OcBUMuK7mFvh8363_uoOxPlRItZBmKzRJDgoY,1353 +click/_winconsole.py,sha256=5ju3jQkcZD0W27WEMGqmEP4y_crUVzPCqsX_FYb7BO0,7860 +click/core.py,sha256=mz87bYEKzIoNYEa56BFAiOJnvt1Y0L-i7wD4_ZecieE,112782 +click/decorators.py,sha256=yo3zvzgUm5q7h5CXjyV6q3h_PJAiUaem178zXwdWUFI,16350 +click/exceptions.py,sha256=7gDaLGuFZBeCNwY9ERMsF2-Z3R9Fvq09Zc6IZSKjseo,9167 +click/formatting.py,sha256=Frf0-5W33-loyY_i9qrwXR8-STnW3m5gvyxLVUdyxyk,9706 +click/globals.py,sha256=TP-qM88STzc7f127h35TD_v920FgfOD2EwzqA0oE8XU,1961 +click/parser.py,sha256=cAEt1uQR8gq3-S9ysqbVU-fdAZNvilxw4ReJ_T1OQMk,19044 +click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click/shell_completion.py,sha256=qOp_BeC9esEOSZKyu5G7RIxEUaLsXUX-mTb7hB1r4QY,18018 +click/termui.py,sha256=ACBQVOvFCTSqtD5VREeCAdRtlHd-Imla-Lte4wSfMjA,28355 +click/testing.py,sha256=ptpMYgRY7dVfE3UDgkgwayu9ePw98sQI3D7zZXiCpj4,16063 +click/types.py,sha256=rEb1aZSQKq3ciCMmjpG2Uva9vk498XRL7ThrcK2GRss,35805 +click/utils.py,sha256=33D6E7poH_nrKB-xr-UyDEXnxOcCiQqxuRLtrqeVv6o,18682 +click-8.1.3.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 +click-8.1.3.dist-info/METADATA,sha256=tFJIX5lOjx7c5LjZbdTPFVDJSgyv9F74XY0XCPp_gnc,3247 +click-8.1.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +click-8.1.3.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click-8.1.3.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +click-8.1.3.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/top_level.txt new file mode 100644 index 0000000..dca9a90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click-8.1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/myenv/lib/python3.9/site-packages/click/__init__.py b/myenv/lib/python3.9/site-packages/click/__init__.py new file mode 100644 index 0000000..e3ef423 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/__init__.py @@ -0,0 +1,73 @@ +""" +Click is a simple Python module inspired by the stdlib optparse to make +writing command line 
scripts fun. Unlike other modules, it's based +around a simple API that does not come with too much magic and is +composable. +""" +from .core import Argument as Argument +from .core import BaseCommand as BaseCommand +from .core import Command as Command +from .core import CommandCollection as CommandCollection +from .core import Context as Context +from .core import Group as Group +from .core import MultiCommand as MultiCommand +from .core import Option as Option +from .core import Parameter as Parameter +from .decorators import argument as argument +from .decorators import command as command +from .decorators import confirmation_option as confirmation_option +from .decorators import group as group +from .decorators import help_option as help_option +from .decorators import make_pass_decorator as make_pass_decorator +from .decorators import option as option +from .decorators import pass_context as pass_context +from .decorators import pass_obj as pass_obj +from .decorators import password_option as password_option +from .decorators import version_option as version_option +from .exceptions import Abort as Abort +from .exceptions import BadArgumentUsage as BadArgumentUsage +from .exceptions import BadOptionUsage as BadOptionUsage +from .exceptions import BadParameter as BadParameter +from .exceptions import ClickException as ClickException +from .exceptions import FileError as FileError +from .exceptions import MissingParameter as MissingParameter +from .exceptions import NoSuchOption as NoSuchOption +from .exceptions import UsageError as UsageError +from .formatting import HelpFormatter as HelpFormatter +from .formatting import wrap_text as wrap_text +from .globals import get_current_context as get_current_context +from .parser import OptionParser as OptionParser +from .termui import clear as clear +from .termui import confirm as confirm +from .termui import echo_via_pager as echo_via_pager +from .termui import edit as edit +from .termui import getchar as getchar +from .termui import launch as launch +from .termui import pause as pause +from .termui import progressbar as progressbar +from .termui import prompt as prompt +from .termui import secho as secho +from .termui import style as style +from .termui import unstyle as unstyle +from .types import BOOL as BOOL +from .types import Choice as Choice +from .types import DateTime as DateTime +from .types import File as File +from .types import FLOAT as FLOAT +from .types import FloatRange as FloatRange +from .types import INT as INT +from .types import IntRange as IntRange +from .types import ParamType as ParamType +from .types import Path as Path +from .types import STRING as STRING +from .types import Tuple as Tuple +from .types import UNPROCESSED as UNPROCESSED +from .types import UUID as UUID +from .utils import echo as echo +from .utils import format_filename as format_filename +from .utils import get_app_dir as get_app_dir +from .utils import get_binary_stream as get_binary_stream +from .utils import get_text_stream as get_text_stream +from .utils import open_file as open_file + +__version__ = "8.1.3" diff --git a/myenv/lib/python3.9/site-packages/click/_compat.py b/myenv/lib/python3.9/site-packages/click/_compat.py new file mode 100644 index 0000000..766d286 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/_compat.py @@ -0,0 +1,626 @@ +import codecs +import io +import os +import re +import sys +import typing as t +from weakref import WeakKeyDictionary + +CYGWIN = sys.platform.startswith("cygwin") +MSYS2 = 
sys.platform.startswith("win") and ("GCC" in sys.version) +# Determine local App Engine environment, per Google's own suggestion +APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get( + "SERVER_SOFTWARE", "" +) +WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2 +auto_wrap_for_ansi: t.Optional[t.Callable[[t.TextIO], t.TextIO]] = None +_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + + +def get_filesystem_encoding() -> str: + return sys.getfilesystemencoding() or sys.getdefaultencoding() + + +def _make_text_stream( + stream: t.BinaryIO, + encoding: t.Optional[str], + errors: t.Optional[str], + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if encoding is None: + encoding = get_best_encoding(stream) + if errors is None: + errors = "replace" + return _NonClosingTextIOWrapper( + stream, + encoding, + errors, + line_buffering=True, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def is_ascii_encoding(encoding: str) -> bool: + """Checks if a given encoding is ascii.""" + try: + return codecs.lookup(encoding).name == "ascii" + except LookupError: + return False + + +def get_best_encoding(stream: t.IO) -> str: + """Returns the default stream encoding if not found.""" + rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() + if is_ascii_encoding(rv): + return "utf-8" + return rv + + +class _NonClosingTextIOWrapper(io.TextIOWrapper): + def __init__( + self, + stream: t.BinaryIO, + encoding: t.Optional[str], + errors: t.Optional[str], + force_readable: bool = False, + force_writable: bool = False, + **extra: t.Any, + ) -> None: + self._stream = stream = t.cast( + t.BinaryIO, _FixupStream(stream, force_readable, force_writable) + ) + super().__init__(stream, encoding, errors, **extra) + + def __del__(self) -> None: + try: + self.detach() + except Exception: + pass + + def isatty(self) -> bool: + # https://bitbucket.org/pypy/pypy/issue/1803 + return self._stream.isatty() + + +class _FixupStream: + """The new io interface needs more from streams than streams + traditionally implement. As such, this fix-up code is necessary in + some circumstances. + + The forcing of readable and writable flags are there because some tools + put badly patched objects on sys (one such offender are certain version + of jupyter notebook). 
+ """ + + def __init__( + self, + stream: t.BinaryIO, + force_readable: bool = False, + force_writable: bool = False, + ): + self._stream = stream + self._force_readable = force_readable + self._force_writable = force_writable + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._stream, name) + + def read1(self, size: int) -> bytes: + f = getattr(self._stream, "read1", None) + + if f is not None: + return t.cast(bytes, f(size)) + + return self._stream.read(size) + + def readable(self) -> bool: + if self._force_readable: + return True + x = getattr(self._stream, "readable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.read(0) + except Exception: + return False + return True + + def writable(self) -> bool: + if self._force_writable: + return True + x = getattr(self._stream, "writable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.write("") # type: ignore + except Exception: + try: + self._stream.write(b"") + except Exception: + return False + return True + + def seekable(self) -> bool: + x = getattr(self._stream, "seekable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.seek(self._stream.tell()) + except Exception: + return False + return True + + +def _is_binary_reader(stream: t.IO, default: bool = False) -> bool: + try: + return isinstance(stream.read(0), bytes) + except Exception: + return default + # This happens in some cases where the stream was already + # closed. In this case, we assume the default. + + +def _is_binary_writer(stream: t.IO, default: bool = False) -> bool: + try: + stream.write(b"") + except Exception: + try: + stream.write("") + return False + except Exception: + pass + return default + return True + + +def _find_binary_reader(stream: t.IO) -> t.Optional[t.BinaryIO]: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_reader(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_reader(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _find_binary_writer(stream: t.IO) -> t.Optional[t.BinaryIO]: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_writer(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_writer(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _stream_is_misconfigured(stream: t.TextIO) -> bool: + """A stream is misconfigured if its encoding is ASCII.""" + # If the stream does not have an encoding set, we assume it's set + # to ASCII. This appears to happen in certain unittest + # environments. It's not quite clear what the correct behavior is + # but this at least will force Click to recover somehow. 
+ return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") + + +def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: t.Optional[str]) -> bool: + """A stream attribute is compatible if it is equal to the + desired value or the desired value is unset and the attribute + has a value. + """ + stream_value = getattr(stream, attr, None) + return stream_value == value or (value is None and stream_value is not None) + + +def _is_compatible_text_stream( + stream: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] +) -> bool: + """Check if a stream's encoding and errors attributes are + compatible with the desired values. + """ + return _is_compat_stream_attr( + stream, "encoding", encoding + ) and _is_compat_stream_attr(stream, "errors", errors) + + +def _force_correct_text_stream( + text_stream: t.IO, + encoding: t.Optional[str], + errors: t.Optional[str], + is_binary: t.Callable[[t.IO, bool], bool], + find_binary: t.Callable[[t.IO], t.Optional[t.BinaryIO]], + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if is_binary(text_stream, False): + binary_reader = t.cast(t.BinaryIO, text_stream) + else: + text_stream = t.cast(t.TextIO, text_stream) + # If the stream looks compatible, and won't default to a + # misconfigured ascii encoding, return it as-is. + if _is_compatible_text_stream(text_stream, encoding, errors) and not ( + encoding is None and _stream_is_misconfigured(text_stream) + ): + return text_stream + + # Otherwise, get the underlying binary reader. + possible_binary_reader = find_binary(text_stream) + + # If that's not possible, silently use the original reader + # and get mojibake instead of exceptions. + if possible_binary_reader is None: + return text_stream + + binary_reader = possible_binary_reader + + # Default errors to replace instead of strict in order to get + # something that works. + if errors is None: + errors = "replace" + + # Wrap the binary stream in a text stream with the correct + # encoding parameters. 
+ return _make_text_stream( + binary_reader, + encoding, + errors, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def _force_correct_text_reader( + text_reader: t.IO, + encoding: t.Optional[str], + errors: t.Optional[str], + force_readable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_reader, + encoding, + errors, + _is_binary_reader, + _find_binary_reader, + force_readable=force_readable, + ) + + +def _force_correct_text_writer( + text_writer: t.IO, + encoding: t.Optional[str], + errors: t.Optional[str], + force_writable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_writer, + encoding, + errors, + _is_binary_writer, + _find_binary_writer, + force_writable=force_writable, + ) + + +def get_binary_stdin() -> t.BinaryIO: + reader = _find_binary_reader(sys.stdin) + if reader is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdin.") + return reader + + +def get_binary_stdout() -> t.BinaryIO: + writer = _find_binary_writer(sys.stdout) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdout.") + return writer + + +def get_binary_stderr() -> t.BinaryIO: + writer = _find_binary_writer(sys.stderr) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stderr.") + return writer + + +def get_text_stdin( + encoding: t.Optional[str] = None, errors: t.Optional[str] = None +) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True) + + +def get_text_stdout( + encoding: t.Optional[str] = None, errors: t.Optional[str] = None +) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True) + + +def get_text_stderr( + encoding: t.Optional[str] = None, errors: t.Optional[str] = None +) -> t.TextIO: + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True) + + +def _wrap_io_open( + file: t.Union[str, os.PathLike, int], + mode: str, + encoding: t.Optional[str], + errors: t.Optional[str], +) -> t.IO: + """Handles not passing ``encoding`` and ``errors`` in binary mode.""" + if "b" in mode: + return open(file, mode) + + return open(file, mode, encoding=encoding, errors=errors) + + +def open_stream( + filename: str, + mode: str = "r", + encoding: t.Optional[str] = None, + errors: t.Optional[str] = "strict", + atomic: bool = False, +) -> t.Tuple[t.IO, bool]: + binary = "b" in mode + + # Standard streams first. These are simple because they ignore the + # atomic flag. Use fsdecode to handle Path("-"). + if os.fsdecode(filename) == "-": + if any(m in mode for m in ["w", "a", "x"]): + if binary: + return get_binary_stdout(), False + return get_text_stdout(encoding=encoding, errors=errors), False + if binary: + return get_binary_stdin(), False + return get_text_stdin(encoding=encoding, errors=errors), False + + # Non-atomic writes directly go out through the regular open functions. 
+ if not atomic: + return _wrap_io_open(filename, mode, encoding, errors), True + + # Some usability stuff for atomic writes + if "a" in mode: + raise ValueError( + "Appending to an existing file is not supported, because that" + " would involve an expensive `copy`-operation to a temporary" + " file. Open the file in normal `w`-mode and copy explicitly" + " if that's what you're after." + ) + if "x" in mode: + raise ValueError("Use the `overwrite`-parameter instead.") + if "w" not in mode: + raise ValueError("Atomic writes only make sense with `w`-mode.") + + # Atomic writes are more complicated. They work by opening a file + # as a proxy in the same folder and then using the fdopen + # functionality to wrap it in a Python file. Then we wrap it in an + # atomic file that moves the file over on close. + import errno + import random + + try: + perm: t.Optional[int] = os.stat(filename).st_mode + except OSError: + perm = None + + flags = os.O_RDWR | os.O_CREAT | os.O_EXCL + + if binary: + flags |= getattr(os, "O_BINARY", 0) + + while True: + tmp_filename = os.path.join( + os.path.dirname(filename), + f".__atomic-write{random.randrange(1 << 32):08x}", + ) + try: + fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) + break + except OSError as e: + if e.errno == errno.EEXIST or ( + os.name == "nt" + and e.errno == errno.EACCES + and os.path.isdir(e.filename) + and os.access(e.filename, os.W_OK) + ): + continue + raise + + if perm is not None: + os.chmod(tmp_filename, perm) # in case perm includes bits in umask + + f = _wrap_io_open(fd, mode, encoding, errors) + af = _AtomicFile(f, tmp_filename, os.path.realpath(filename)) + return t.cast(t.IO, af), True + + +class _AtomicFile: + def __init__(self, f: t.IO, tmp_filename: str, real_filename: str) -> None: + self._f = f + self._tmp_filename = tmp_filename + self._real_filename = real_filename + self.closed = False + + @property + def name(self) -> str: + return self._real_filename + + def close(self, delete: bool = False) -> None: + if self.closed: + return + self._f.close() + os.replace(self._tmp_filename, self._real_filename) + self.closed = True + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._f, name) + + def __enter__(self) -> "_AtomicFile": + return self + + def __exit__(self, exc_type, exc_value, tb): # type: ignore + self.close(delete=exc_type is not None) + + def __repr__(self) -> str: + return repr(self._f) + + +def strip_ansi(value: str) -> str: + return _ansi_re.sub("", value) + + +def _is_jupyter_kernel_output(stream: t.IO) -> bool: + while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): + stream = stream._stream + + return stream.__class__.__module__.startswith("ipykernel.") + + +def should_strip_ansi( + stream: t.Optional[t.IO] = None, color: t.Optional[bool] = None +) -> bool: + if color is None: + if stream is None: + stream = sys.stdin + return not isatty(stream) and not _is_jupyter_kernel_output(stream) + return not color + + +# On Windows, wrap the output streams with colorama to support ANSI +# color codes. 
+# NOTE: double check is needed so mypy does not analyze this on Linux +if sys.platform.startswith("win") and WIN: + from ._winconsole import _get_windows_console_stream + + def _get_argv_encoding() -> str: + import locale + + return locale.getpreferredencoding() + + _ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def auto_wrap_for_ansi( + stream: t.TextIO, color: t.Optional[bool] = None + ) -> t.TextIO: + """Support ANSI color and style codes on Windows by wrapping a + stream with colorama. + """ + try: + cached = _ansi_stream_wrappers.get(stream) + except Exception: + cached = None + + if cached is not None: + return cached + + import colorama + + strip = should_strip_ansi(stream, color) + ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) + rv = t.cast(t.TextIO, ansi_wrapper.stream) + _write = rv.write + + def _safe_write(s): + try: + return _write(s) + except BaseException: + ansi_wrapper.reset_all() + raise + + rv.write = _safe_write + + try: + _ansi_stream_wrappers[stream] = rv + except Exception: + pass + + return rv + +else: + + def _get_argv_encoding() -> str: + return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding() + + def _get_windows_console_stream( + f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] + ) -> t.Optional[t.TextIO]: + return None + + +def term_len(x: str) -> int: + return len(strip_ansi(x)) + + +def isatty(stream: t.IO) -> bool: + try: + return stream.isatty() + except Exception: + return False + + +def _make_cached_stream_func( + src_func: t.Callable[[], t.TextIO], wrapper_func: t.Callable[[], t.TextIO] +) -> t.Callable[[], t.TextIO]: + cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def func() -> t.TextIO: + stream = src_func() + try: + rv = cache.get(stream) + except Exception: + rv = None + if rv is not None: + return rv + rv = wrapper_func() + try: + cache[stream] = rv + except Exception: + pass + return rv + + return func + + +_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) + + +binary_streams: t.Mapping[str, t.Callable[[], t.BinaryIO]] = { + "stdin": get_binary_stdin, + "stdout": get_binary_stdout, + "stderr": get_binary_stderr, +} + +text_streams: t.Mapping[ + str, t.Callable[[t.Optional[str], t.Optional[str]], t.TextIO] +] = { + "stdin": get_text_stdin, + "stdout": get_text_stdout, + "stderr": get_text_stderr, +} diff --git a/myenv/lib/python3.9/site-packages/click/_termui_impl.py b/myenv/lib/python3.9/site-packages/click/_termui_impl.py new file mode 100644 index 0000000..4b979bc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/_termui_impl.py @@ -0,0 +1,717 @@ +""" +This module contains implementations for the termui module. To keep the +import time of Click down, some infrequently used functionality is +placed in this module and only imported as needed. 
+""" +import contextlib +import math +import os +import sys +import time +import typing as t +from gettext import gettext as _ + +from ._compat import _default_text_stdout +from ._compat import CYGWIN +from ._compat import get_best_encoding +from ._compat import isatty +from ._compat import open_stream +from ._compat import strip_ansi +from ._compat import term_len +from ._compat import WIN +from .exceptions import ClickException +from .utils import echo + +V = t.TypeVar("V") + +if os.name == "nt": + BEFORE_BAR = "\r" + AFTER_BAR = "\n" +else: + BEFORE_BAR = "\r\033[?25l" + AFTER_BAR = "\033[?25h\n" + + +class ProgressBar(t.Generic[V]): + def __init__( + self, + iterable: t.Optional[t.Iterable[V]], + length: t.Optional[int] = None, + fill_char: str = "#", + empty_char: str = " ", + bar_template: str = "%(bar)s", + info_sep: str = " ", + show_eta: bool = True, + show_percent: t.Optional[bool] = None, + show_pos: bool = False, + item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None, + label: t.Optional[str] = None, + file: t.Optional[t.TextIO] = None, + color: t.Optional[bool] = None, + update_min_steps: int = 1, + width: int = 30, + ) -> None: + self.fill_char = fill_char + self.empty_char = empty_char + self.bar_template = bar_template + self.info_sep = info_sep + self.show_eta = show_eta + self.show_percent = show_percent + self.show_pos = show_pos + self.item_show_func = item_show_func + self.label = label or "" + if file is None: + file = _default_text_stdout() + self.file = file + self.color = color + self.update_min_steps = update_min_steps + self._completed_intervals = 0 + self.width = width + self.autowidth = width == 0 + + if length is None: + from operator import length_hint + + length = length_hint(iterable, -1) + + if length == -1: + length = None + if iterable is None: + if length is None: + raise TypeError("iterable or length is required") + iterable = t.cast(t.Iterable[V], range(length)) + self.iter = iter(iterable) + self.length = length + self.pos = 0 + self.avg: t.List[float] = [] + self.start = self.last_eta = time.time() + self.eta_known = False + self.finished = False + self.max_width: t.Optional[int] = None + self.entered = False + self.current_item: t.Optional[V] = None + self.is_hidden = not isatty(self.file) + self._last_line: t.Optional[str] = None + + def __enter__(self) -> "ProgressBar": + self.entered = True + self.render_progress() + return self + + def __exit__(self, exc_type, exc_value, tb): # type: ignore + self.render_finish() + + def __iter__(self) -> t.Iterator[V]: + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + self.render_progress() + return self.generator() + + def __next__(self) -> V: + # Iteration is defined in terms of a generator function, + # returned by iter(self); use that to define next(). This works + # because `self.iter` is an iterable consumed by that generator, + # so it is re-entry safe. Calling `next(self.generator())` + # twice works and does "what you want". 
+ return next(iter(self)) + + def render_finish(self) -> None: + if self.is_hidden: + return + self.file.write(AFTER_BAR) + self.file.flush() + + @property + def pct(self) -> float: + if self.finished: + return 1.0 + return min(self.pos / (float(self.length or 1) or 1), 1.0) + + @property + def time_per_iteration(self) -> float: + if not self.avg: + return 0.0 + return sum(self.avg) / float(len(self.avg)) + + @property + def eta(self) -> float: + if self.length is not None and not self.finished: + return self.time_per_iteration * (self.length - self.pos) + return 0.0 + + def format_eta(self) -> str: + if self.eta_known: + t = int(self.eta) + seconds = t % 60 + t //= 60 + minutes = t % 60 + t //= 60 + hours = t % 24 + t //= 24 + if t > 0: + return f"{t}d {hours:02}:{minutes:02}:{seconds:02}" + else: + return f"{hours:02}:{minutes:02}:{seconds:02}" + return "" + + def format_pos(self) -> str: + pos = str(self.pos) + if self.length is not None: + pos += f"/{self.length}" + return pos + + def format_pct(self) -> str: + return f"{int(self.pct * 100): 4}%"[1:] + + def format_bar(self) -> str: + if self.length is not None: + bar_length = int(self.pct * self.width) + bar = self.fill_char * bar_length + bar += self.empty_char * (self.width - bar_length) + elif self.finished: + bar = self.fill_char * self.width + else: + chars = list(self.empty_char * (self.width or 1)) + if self.time_per_iteration != 0: + chars[ + int( + (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) + * self.width + ) + ] = self.fill_char + bar = "".join(chars) + return bar + + def format_progress_line(self) -> str: + show_percent = self.show_percent + + info_bits = [] + if self.length is not None and show_percent is None: + show_percent = not self.show_pos + + if self.show_pos: + info_bits.append(self.format_pos()) + if show_percent: + info_bits.append(self.format_pct()) + if self.show_eta and self.eta_known and not self.finished: + info_bits.append(self.format_eta()) + if self.item_show_func is not None: + item_info = self.item_show_func(self.current_item) + if item_info is not None: + info_bits.append(item_info) + + return ( + self.bar_template + % { + "label": self.label, + "bar": self.format_bar(), + "info": self.info_sep.join(info_bits), + } + ).rstrip() + + def render_progress(self) -> None: + import shutil + + if self.is_hidden: + # Only output the label as it changes if the output is not a + # TTY. Use file=stderr if you expect to be piping stdout. + if self._last_line != self.label: + self._last_line = self.label + echo(self.label, file=self.file, color=self.color) + + return + + buf = [] + # Update width in case the terminal has been resized + if self.autowidth: + old_width = self.width + self.width = 0 + clutter_length = term_len(self.format_progress_line()) + new_width = max(0, shutil.get_terminal_size().columns - clutter_length) + if new_width < old_width: + buf.append(BEFORE_BAR) + buf.append(" " * self.max_width) # type: ignore + self.max_width = new_width + self.width = new_width + + clear_width = self.width + if self.max_width is not None: + clear_width = self.max_width + + buf.append(BEFORE_BAR) + line = self.format_progress_line() + line_len = term_len(line) + if self.max_width is None or self.max_width < line_len: + self.max_width = line_len + + buf.append(line) + buf.append(" " * (clear_width - line_len)) + line = "".join(buf) + # Render the line only if it changed. 
+ + if line != self._last_line: + self._last_line = line + echo(line, file=self.file, color=self.color, nl=False) + self.file.flush() + + def make_step(self, n_steps: int) -> None: + self.pos += n_steps + if self.length is not None and self.pos >= self.length: + self.finished = True + + if (time.time() - self.last_eta) < 1.0: + return + + self.last_eta = time.time() + + # self.avg is a rolling list of length <= 7 of steps where steps are + # defined as time elapsed divided by the total progress through + # self.length. + if self.pos: + step = (time.time() - self.start) / self.pos + else: + step = time.time() - self.start + + self.avg = self.avg[-6:] + [step] + + self.eta_known = self.length is not None + + def update(self, n_steps: int, current_item: t.Optional[V] = None) -> None: + """Update the progress bar by advancing a specified number of + steps, and optionally set the ``current_item`` for this new + position. + + :param n_steps: Number of steps to advance. + :param current_item: Optional item to set as ``current_item`` + for the updated position. + + .. versionchanged:: 8.0 + Added the ``current_item`` optional parameter. + + .. versionchanged:: 8.0 + Only render when the number of steps meets the + ``update_min_steps`` threshold. + """ + if current_item is not None: + self.current_item = current_item + + self._completed_intervals += n_steps + + if self._completed_intervals >= self.update_min_steps: + self.make_step(self._completed_intervals) + self.render_progress() + self._completed_intervals = 0 + + def finish(self) -> None: + self.eta_known = False + self.current_item = None + self.finished = True + + def generator(self) -> t.Iterator[V]: + """Return a generator which yields the items added to the bar + during construction, and updates the progress bar *after* the + yielded block returns. + """ + # WARNING: the iterator interface for `ProgressBar` relies on + # this and only works because this is a simple generator which + # doesn't create or manage additional state. If this function + # changes, the impact should be evaluated both against + # `iter(bar)` and `next(bar)`. `next()` in particular may call + # `self.generator()` repeatedly, and this must remain safe in + # order for that interface to work. + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + + if self.is_hidden: + yield from self.iter + else: + for rv in self.iter: + self.current_item = rv + + # This allows show_item_func to be updated before the + # item is processed. Only trigger at the beginning of + # the update interval. 
+ if self._completed_intervals == 0: + self.render_progress() + + yield rv + self.update(1) + + self.finish() + self.render_progress() + + +def pager(generator: t.Iterable[str], color: t.Optional[bool] = None) -> None: + """Decide what method to use for paging through text.""" + stdout = _default_text_stdout() + if not isatty(sys.stdin) or not isatty(stdout): + return _nullpager(stdout, generator, color) + pager_cmd = (os.environ.get("PAGER", None) or "").strip() + if pager_cmd: + if WIN: + return _tempfilepager(generator, pager_cmd, color) + return _pipepager(generator, pager_cmd, color) + if os.environ.get("TERM") in ("dumb", "emacs"): + return _nullpager(stdout, generator, color) + if WIN or sys.platform.startswith("os2"): + return _tempfilepager(generator, "more <", color) + if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: + return _pipepager(generator, "less", color) + + import tempfile + + fd, filename = tempfile.mkstemp() + os.close(fd) + try: + if hasattr(os, "system") and os.system(f'more "{filename}"') == 0: + return _pipepager(generator, "more", color) + return _nullpager(stdout, generator, color) + finally: + os.unlink(filename) + + +def _pipepager(generator: t.Iterable[str], cmd: str, color: t.Optional[bool]) -> None: + """Page through text by feeding it to another program. Invoking a + pager through this might support colors. + """ + import subprocess + + env = dict(os.environ) + + # If we're piping to less we might support colors under the + # condition that + cmd_detail = cmd.rsplit("/", 1)[-1].split() + if color is None and cmd_detail[0] == "less": + less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}" + if not less_flags: + env["LESS"] = "-R" + color = True + elif "r" in less_flags or "R" in less_flags: + color = True + + c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env) + stdin = t.cast(t.BinaryIO, c.stdin) + encoding = get_best_encoding(stdin) + try: + for text in generator: + if not color: + text = strip_ansi(text) + + stdin.write(text.encode(encoding, "replace")) + except (OSError, KeyboardInterrupt): + pass + else: + stdin.close() + + # Less doesn't respect ^C, but catches it for its own UI purposes (aborting + # search or other commands inside less). + # + # That means when the user hits ^C, the parent process (click) terminates, + # but less is still alive, paging the output and messing up the terminal. + # + # If the user wants to make the pager exit on ^C, they should set + # `LESS='-K'`. It's not our decision to make. + while True: + try: + c.wait() + except KeyboardInterrupt: + pass + else: + break + + +def _tempfilepager( + generator: t.Iterable[str], cmd: str, color: t.Optional[bool] +) -> None: + """Page through text by invoking a program on a temporary file.""" + import tempfile + + fd, filename = tempfile.mkstemp() + # TODO: This never terminates if the passed generator never terminates. + text = "".join(generator) + if not color: + text = strip_ansi(text) + encoding = get_best_encoding(sys.stdout) + with open_stream(filename, "wb")[0] as f: + f.write(text.encode(encoding)) + try: + os.system(f'{cmd} "{filename}"') + finally: + os.close(fd) + os.unlink(filename) + + +def _nullpager( + stream: t.TextIO, generator: t.Iterable[str], color: t.Optional[bool] +) -> None: + """Simply print unformatted text. 
This is the ultimate fallback.""" + for text in generator: + if not color: + text = strip_ansi(text) + stream.write(text) + + +class Editor: + def __init__( + self, + editor: t.Optional[str] = None, + env: t.Optional[t.Mapping[str, str]] = None, + require_save: bool = True, + extension: str = ".txt", + ) -> None: + self.editor = editor + self.env = env + self.require_save = require_save + self.extension = extension + + def get_editor(self) -> str: + if self.editor is not None: + return self.editor + for key in "VISUAL", "EDITOR": + rv = os.environ.get(key) + if rv: + return rv + if WIN: + return "notepad" + for editor in "sensible-editor", "vim", "nano": + if os.system(f"which {editor} >/dev/null 2>&1") == 0: + return editor + return "vi" + + def edit_file(self, filename: str) -> None: + import subprocess + + editor = self.get_editor() + environ: t.Optional[t.Dict[str, str]] = None + + if self.env: + environ = os.environ.copy() + environ.update(self.env) + + try: + c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True) + exit_code = c.wait() + if exit_code != 0: + raise ClickException( + _("{editor}: Editing failed").format(editor=editor) + ) + except OSError as e: + raise ClickException( + _("{editor}: Editing failed: {e}").format(editor=editor, e=e) + ) from e + + def edit(self, text: t.Optional[t.AnyStr]) -> t.Optional[t.AnyStr]: + import tempfile + + if not text: + data = b"" + elif isinstance(text, (bytes, bytearray)): + data = text + else: + if text and not text.endswith("\n"): + text += "\n" + + if WIN: + data = text.replace("\n", "\r\n").encode("utf-8-sig") + else: + data = text.encode("utf-8") + + fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) + f: t.BinaryIO + + try: + with os.fdopen(fd, "wb") as f: + f.write(data) + + # If the filesystem resolution is 1 second, like Mac OS + # 10.12 Extended, or 2 seconds, like FAT32, and the editor + # closes very fast, require_save can fail. Set the modified + # time to be 2 seconds in the past to work around this. + os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2)) + # Depending on the resolution, the exact value might not be + # recorded, so get the new recorded value. 
+ timestamp = os.path.getmtime(name) + + self.edit_file(name) + + if self.require_save and os.path.getmtime(name) == timestamp: + return None + + with open(name, "rb") as f: + rv = f.read() + + if isinstance(text, (bytes, bytearray)): + return rv + + return rv.decode("utf-8-sig").replace("\r\n", "\n") # type: ignore + finally: + os.unlink(name) + + +def open_url(url: str, wait: bool = False, locate: bool = False) -> int: + import subprocess + + def _unquote_file(url: str) -> str: + from urllib.parse import unquote + + if url.startswith("file://"): + url = unquote(url[7:]) + + return url + + if sys.platform == "darwin": + args = ["open"] + if wait: + args.append("-W") + if locate: + args.append("-R") + args.append(_unquote_file(url)) + null = open("/dev/null", "w") + try: + return subprocess.Popen(args, stderr=null).wait() + finally: + null.close() + elif WIN: + if locate: + url = _unquote_file(url.replace('"', "")) + args = f'explorer /select,"{url}"' + else: + url = url.replace('"', "") + wait_str = "/WAIT" if wait else "" + args = f'start {wait_str} "" "{url}"' + return os.system(args) + elif CYGWIN: + if locate: + url = os.path.dirname(_unquote_file(url).replace('"', "")) + args = f'cygstart "{url}"' + else: + url = url.replace('"', "") + wait_str = "-w" if wait else "" + args = f'cygstart {wait_str} "{url}"' + return os.system(args) + + try: + if locate: + url = os.path.dirname(_unquote_file(url)) or "." + else: + url = _unquote_file(url) + c = subprocess.Popen(["xdg-open", url]) + if wait: + return c.wait() + return 0 + except OSError: + if url.startswith(("http://", "https://")) and not locate and not wait: + import webbrowser + + webbrowser.open(url) + return 0 + return 1 + + +def _translate_ch_to_exc(ch: str) -> t.Optional[BaseException]: + if ch == "\x03": + raise KeyboardInterrupt() + + if ch == "\x04" and not WIN: # Unix-like, Ctrl+D + raise EOFError() + + if ch == "\x1a" and WIN: # Windows, Ctrl+Z + raise EOFError() + + return None + + +if WIN: + import msvcrt + + @contextlib.contextmanager + def raw_terminal() -> t.Iterator[int]: + yield -1 + + def getchar(echo: bool) -> str: + # The function `getch` will return a bytes object corresponding to + # the pressed character. Since Windows 10 build 1803, it will also + # return \x00 when called a second time after pressing a regular key. + # + # `getwch` does not share this probably-bugged behavior. Moreover, it + # returns a Unicode object by default, which is what we want. + # + # Either of these functions will return \x00 or \xe0 to indicate + # a special key, and you need to call the same function again to get + # the "rest" of the code. The fun part is that \u00e0 is + # "latin small letter a with grave", so if you type that on a French + # keyboard, you _also_ get a \xe0. + # E.g., consider the Up arrow. This returns \xe0 and then \x48. The + # resulting Unicode string reads as "a with grave" + "capital H". + # This is indistinguishable from when the user actually types + # "a with grave" and then "capital H". + # + # When \xe0 is returned, we assume it's part of a special-key sequence + # and call `getwch` again, but that means that when the user types + # the \u00e0 character, `getchar` doesn't return until a second + # character is typed. + # The alternative is returning immediately, but that would mess up + # cross-platform handling of arrow keys and others that start with + # \xe0. 
Another option is using `getch`, but then we can't reliably + # read non-ASCII characters, because return values of `getch` are + # limited to the current 8-bit codepage. + # + # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` + # is doing the right thing in more situations than with `getch`. + func: t.Callable[[], str] + + if echo: + func = msvcrt.getwche # type: ignore + else: + func = msvcrt.getwch # type: ignore + + rv = func() + + if rv in ("\x00", "\xe0"): + # \x00 and \xe0 are control characters that indicate special key, + # see above. + rv += func() + + _translate_ch_to_exc(rv) + return rv + +else: + import tty + import termios + + @contextlib.contextmanager + def raw_terminal() -> t.Iterator[int]: + f: t.Optional[t.TextIO] + fd: int + + if not isatty(sys.stdin): + f = open("/dev/tty") + fd = f.fileno() + else: + fd = sys.stdin.fileno() + f = None + + try: + old_settings = termios.tcgetattr(fd) + + try: + tty.setraw(fd) + yield fd + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + sys.stdout.flush() + + if f is not None: + f.close() + except termios.error: + pass + + def getchar(echo: bool) -> str: + with raw_terminal() as fd: + ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace") + + if echo and isatty(sys.stdout): + sys.stdout.write(ch) + + _translate_ch_to_exc(ch) + return ch diff --git a/myenv/lib/python3.9/site-packages/click/_textwrap.py b/myenv/lib/python3.9/site-packages/click/_textwrap.py new file mode 100644 index 0000000..b47dcbd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/_textwrap.py @@ -0,0 +1,49 @@ +import textwrap +import typing as t +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + def _handle_long_word( + self, + reversed_chunks: t.List[str], + cur_line: t.List[str], + cur_len: int, + width: int, + ) -> None: + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + @contextmanager + def extra_indent(self, indent: str) -> t.Iterator[None]: + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text: str) -> str: + rv = [] + + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + + if idx > 0: + indent = self.subsequent_indent + + rv.append(f"{indent}{line}") + + return "\n".join(rv) diff --git a/myenv/lib/python3.9/site-packages/click/_winconsole.py b/myenv/lib/python3.9/site-packages/click/_winconsole.py new file mode 100644 index 0000000..6b20df3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/_winconsole.py @@ -0,0 +1,279 @@ +# This module is based on the excellent work by Adam Bartoš who +# provided a lot of what went into the implementation here in +# the discussion to issue1602 in the Python bug tracker. +# +# There are some general differences in regards to how this works +# compared to the original patches as we do not need to patch +# the entire interpreter but just work in our little world of +# echo and prompt. 
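+#
+# The general shape of the code below: the Windows console natively
+# speaks UTF-16-LE through the kernel32 ReadConsoleW/WriteConsoleW
+# calls, so this module wraps those calls in io.RawIOBase subclasses,
+# layers buffered UTF-16-LE text wrappers on top of them, and only
+# hands such a stream back when GetConsoleMode confirms that the
+# file descriptor really is an attached console.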
+import io +import sys +import time +import typing as t +from ctypes import byref +from ctypes import c_char +from ctypes import c_char_p +from ctypes import c_int +from ctypes import c_ssize_t +from ctypes import c_ulong +from ctypes import c_void_p +from ctypes import POINTER +from ctypes import py_object +from ctypes import Structure +from ctypes.wintypes import DWORD +from ctypes.wintypes import HANDLE +from ctypes.wintypes import LPCWSTR +from ctypes.wintypes import LPWSTR + +from ._compat import _NonClosingTextIOWrapper + +assert sys.platform == "win32" +import msvcrt # noqa: E402 +from ctypes import windll # noqa: E402 +from ctypes import WINFUNCTYPE # noqa: E402 + +c_ssize_p = POINTER(c_ssize_t) + +kernel32 = windll.kernel32 +GetStdHandle = kernel32.GetStdHandle +ReadConsoleW = kernel32.ReadConsoleW +WriteConsoleW = kernel32.WriteConsoleW +GetConsoleMode = kernel32.GetConsoleMode +GetLastError = kernel32.GetLastError +GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) +CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( + ("CommandLineToArgvW", windll.shell32) +) +LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32)) + +STDIN_HANDLE = GetStdHandle(-10) +STDOUT_HANDLE = GetStdHandle(-11) +STDERR_HANDLE = GetStdHandle(-12) + +PyBUF_SIMPLE = 0 +PyBUF_WRITABLE = 1 + +ERROR_SUCCESS = 0 +ERROR_NOT_ENOUGH_MEMORY = 8 +ERROR_OPERATION_ABORTED = 995 + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +EOF = b"\x1a" +MAX_BYTES_WRITTEN = 32767 + +try: + from ctypes import pythonapi +except ImportError: + # On PyPy we cannot get buffers so our ability to operate here is + # severely limited. + get_buffer = None +else: + + class Py_buffer(Structure): + _fields_ = [ + ("buf", c_void_p), + ("obj", py_object), + ("len", c_ssize_t), + ("itemsize", c_ssize_t), + ("readonly", c_int), + ("ndim", c_int), + ("format", c_char_p), + ("shape", c_ssize_p), + ("strides", c_ssize_p), + ("suboffsets", c_ssize_p), + ("internal", c_void_p), + ] + + PyObject_GetBuffer = pythonapi.PyObject_GetBuffer + PyBuffer_Release = pythonapi.PyBuffer_Release + + def get_buffer(obj, writable=False): + buf = Py_buffer() + flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE + PyObject_GetBuffer(py_object(obj), byref(buf), flags) + + try: + buffer_type = c_char * buf.len + return buffer_type.from_address(buf.buf) + finally: + PyBuffer_Release(byref(buf)) + + +class _WindowsConsoleRawIOBase(io.RawIOBase): + def __init__(self, handle): + self.handle = handle + + def isatty(self): + super().isatty() + return True + + +class _WindowsConsoleReader(_WindowsConsoleRawIOBase): + def readable(self): + return True + + def readinto(self, b): + bytes_to_be_read = len(b) + if not bytes_to_be_read: + return 0 + elif bytes_to_be_read % 2: + raise ValueError( + "cannot read odd number of bytes from UTF-16-LE encoded console" + ) + + buffer = get_buffer(b, writable=True) + code_units_to_be_read = bytes_to_be_read // 2 + code_units_read = c_ulong() + + rv = ReadConsoleW( + HANDLE(self.handle), + buffer, + code_units_to_be_read, + byref(code_units_read), + None, + ) + if GetLastError() == ERROR_OPERATION_ABORTED: + # wait for KeyboardInterrupt + time.sleep(0.1) + if not rv: + raise OSError(f"Windows error: {GetLastError()}") + + if buffer[0] == EOF: + return 0 + return 2 * code_units_read.value + + +class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): + def writable(self): + return True + + @staticmethod + def _get_error_message(errno): + if errno == ERROR_SUCCESS: + 
return "ERROR_SUCCESS" + elif errno == ERROR_NOT_ENOUGH_MEMORY: + return "ERROR_NOT_ENOUGH_MEMORY" + return f"Windows error {errno}" + + def write(self, b): + bytes_to_be_written = len(b) + buf = get_buffer(b) + code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 + code_units_written = c_ulong() + + WriteConsoleW( + HANDLE(self.handle), + buf, + code_units_to_be_written, + byref(code_units_written), + None, + ) + bytes_written = 2 * code_units_written.value + + if bytes_written == 0 and bytes_to_be_written > 0: + raise OSError(self._get_error_message(GetLastError())) + return bytes_written + + +class ConsoleStream: + def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None: + self._text_stream = text_stream + self.buffer = byte_stream + + @property + def name(self) -> str: + return self.buffer.name + + def write(self, x: t.AnyStr) -> int: + if isinstance(x, str): + return self._text_stream.write(x) + try: + self.flush() + except Exception: + pass + return self.buffer.write(x) + + def writelines(self, lines: t.Iterable[t.AnyStr]) -> None: + for line in lines: + self.write(line) + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._text_stream, name) + + def isatty(self) -> bool: + return self.buffer.isatty() + + def __repr__(self): + return f"" + + +def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +_stream_factories: t.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = { + 0: _get_text_stdin, + 1: _get_text_stdout, + 2: _get_text_stderr, +} + + +def _is_console(f: t.TextIO) -> bool: + if not hasattr(f, "fileno"): + return False + + try: + fileno = f.fileno() + except (OSError, io.UnsupportedOperation): + return False + + handle = msvcrt.get_osfhandle(fileno) + return bool(GetConsoleMode(handle, byref(DWORD()))) + + +def _get_windows_console_stream( + f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] +) -> t.Optional[t.TextIO]: + if ( + get_buffer is not None + and encoding in {"utf-16-le", None} + and errors in {"strict", None} + and _is_console(f) + ): + func = _stream_factories.get(f.fileno()) + if func is not None: + b = getattr(f, "buffer", None) + + if b is None: + return None + + return func(b) diff --git a/myenv/lib/python3.9/site-packages/click/core.py b/myenv/lib/python3.9/site-packages/click/core.py new file mode 100644 index 0000000..5abfb0f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/core.py @@ -0,0 +1,2998 @@ +import enum +import errno +import inspect +import os +import sys +import typing as t +from collections import abc +from contextlib import contextmanager +from contextlib import ExitStack +from functools import partial +from functools import update_wrapper +from gettext import gettext 
as _ +from gettext import ngettext +from itertools import repeat + +from . import types +from .exceptions import Abort +from .exceptions import BadParameter +from .exceptions import ClickException +from .exceptions import Exit +from .exceptions import MissingParameter +from .exceptions import UsageError +from .formatting import HelpFormatter +from .formatting import join_options +from .globals import pop_context +from .globals import push_context +from .parser import _flag_needs_value +from .parser import OptionParser +from .parser import split_opt +from .termui import confirm +from .termui import prompt +from .termui import style +from .utils import _detect_program_name +from .utils import _expand_args +from .utils import echo +from .utils import make_default_short_help +from .utils import make_str +from .utils import PacifyFlushWrapper + +if t.TYPE_CHECKING: + import typing_extensions as te + from .shell_completion import CompletionItem + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) +V = t.TypeVar("V") + + +def _complete_visible_commands( + ctx: "Context", incomplete: str +) -> t.Iterator[t.Tuple[str, "Command"]]: + """List all the subcommands of a group that start with the + incomplete value and aren't hidden. + + :param ctx: Invocation context for the group. + :param incomplete: Value being completed. May be empty. + """ + multi = t.cast(MultiCommand, ctx.command) + + for name in multi.list_commands(ctx): + if name.startswith(incomplete): + command = multi.get_command(ctx, name) + + if command is not None and not command.hidden: + yield name, command + + +def _check_multicommand( + base_command: "MultiCommand", cmd_name: str, cmd: "Command", register: bool = False +) -> None: + if not base_command.chain or not isinstance(cmd, MultiCommand): + return + if register: + hint = ( + "It is not possible to add multi commands as children to" + " another multi command that is in chain mode." + ) + else: + hint = ( + "Found a multi command as subcommand to a multi command" + " that is in chain mode. This is not supported." + ) + raise RuntimeError( + f"{hint}. Command {base_command.name!r} is set to chain and" + f" {cmd_name!r} was added as a subcommand but it in itself is a" + f" multi command. ({cmd_name!r} is a {type(cmd).__name__}" + f" within a chained {type(base_command).__name__} named" + f" {base_command.name!r})." + ) + + +def batch(iterable: t.Iterable[V], batch_size: int) -> t.List[t.Tuple[V, ...]]: + return list(zip(*repeat(iter(iterable), batch_size))) + + +@contextmanager +def augment_usage_errors( + ctx: "Context", param: t.Optional["Parameter"] = None +) -> t.Iterator[None]: + """Context manager that attaches extra information to exceptions.""" + try: + yield + except BadParameter as e: + if e.ctx is None: + e.ctx = ctx + if param is not None and e.param is None: + e.param = param + raise + except UsageError as e: + if e.ctx is None: + e.ctx = ctx + raise + + +def iter_params_for_processing( + invocation_order: t.Sequence["Parameter"], + declaration_order: t.Sequence["Parameter"], +) -> t.List["Parameter"]: + """Given a sequence of parameters in the order as should be considered + for processing and an iterable of parameters that exist, this returns + a list in the correct order as they should be processed. 
+ """ + + def sort_key(item: "Parameter") -> t.Tuple[bool, float]: + try: + idx: float = invocation_order.index(item) + except ValueError: + idx = float("inf") + + return not item.is_eager, idx + + return sorted(declaration_order, key=sort_key) + + +class ParameterSource(enum.Enum): + """This is an :class:`~enum.Enum` that indicates the source of a + parameter's value. + + Use :meth:`click.Context.get_parameter_source` to get the + source for a parameter by name. + + .. versionchanged:: 8.0 + Use :class:`~enum.Enum` and drop the ``validate`` method. + + .. versionchanged:: 8.0 + Added the ``PROMPT`` value. + """ + + COMMANDLINE = enum.auto() + """The value was provided by the command line args.""" + ENVIRONMENT = enum.auto() + """The value was provided with an environment variable.""" + DEFAULT = enum.auto() + """Used the default specified by the parameter.""" + DEFAULT_MAP = enum.auto() + """Used a default provided by :attr:`Context.default_map`.""" + PROMPT = enum.auto() + """Used a prompt to confirm a default or provide a value.""" + + +class Context: + """The context is a special internal object that holds state relevant + for the script execution at every single level. It's normally invisible + to commands unless they opt-in to getting access to it. + + The context is useful as it can pass internal objects around and can + control special execution features such as reading data from + environment variables. + + A context can be used as context manager in which case it will call + :meth:`close` on teardown. + + :param command: the command class for this context. + :param parent: the parent context. + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it is usually + the name of the script, for commands below it it's + the name of the script. + :param obj: an arbitrary object of user data. + :param auto_envvar_prefix: the prefix to use for automatic environment + variables. If this is `None` then reading + from environment variables is disabled. This + does not affect manually set environment + variables which are always read. + :param default_map: a dictionary (like object) with default values + for parameters. + :param terminal_width: the width of the terminal. The default is + inherit from parent context. If no context + defines the terminal width then auto + detection will be applied. + :param max_content_width: the maximum width for content rendered by + Click (this currently only affects help + pages). This defaults to 80 characters if + not overridden. In other words: even if the + terminal is larger than that, Click will not + format things wider than 80 characters by + default. In addition to that, formatters might + add some safety mapping on the right. + :param resilient_parsing: if this flag is enabled then Click will + parse without any interactivity or callback + invocation. Default values will also be + ignored. This is useful for implementing + things such as completion support. + :param allow_extra_args: if this is set to `True` then extra arguments + at the end will not raise an error and will be + kept on the context. The default is to inherit + from the command. + :param allow_interspersed_args: if this is set to `False` then options + and arguments cannot be mixed. The + default is to inherit from the command. + :param ignore_unknown_options: instructs click to ignore options it does + not know and keeps them for later + processing. 
+ :param help_option_names: optionally a list of strings that define how + the default help parameter is named. The + default is ``['--help']``. + :param token_normalize_func: an optional function that is used to + normalize tokens (options, choices, + etc.). This for instance can be used to + implement case insensitive behavior. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are used in texts that Click prints which is by + default not the case. This for instance would affect + help output. + :param show_default: Show the default value for commands. If this + value is not set, it defaults to the value from the parent + context. ``Command.show_default`` overrides this default for the + specific command. + + .. versionchanged:: 8.1 + The ``show_default`` parameter is overridden by + ``Command.show_default``, instead of the other way around. + + .. versionchanged:: 8.0 + The ``show_default`` parameter defaults to the value from the + parent context. + + .. versionchanged:: 7.1 + Added the ``show_default`` parameter. + + .. versionchanged:: 4.0 + Added the ``color``, ``ignore_unknown_options``, and + ``max_content_width`` parameters. + + .. versionchanged:: 3.0 + Added the ``allow_extra_args`` and ``allow_interspersed_args`` + parameters. + + .. versionchanged:: 2.0 + Added the ``resilient_parsing``, ``help_option_names``, and + ``token_normalize_func`` parameters. + """ + + #: The formatter class to create with :meth:`make_formatter`. + #: + #: .. versionadded:: 8.0 + formatter_class: t.Type["HelpFormatter"] = HelpFormatter + + def __init__( + self, + command: "Command", + parent: t.Optional["Context"] = None, + info_name: t.Optional[str] = None, + obj: t.Optional[t.Any] = None, + auto_envvar_prefix: t.Optional[str] = None, + default_map: t.Optional[t.Dict[str, t.Any]] = None, + terminal_width: t.Optional[int] = None, + max_content_width: t.Optional[int] = None, + resilient_parsing: bool = False, + allow_extra_args: t.Optional[bool] = None, + allow_interspersed_args: t.Optional[bool] = None, + ignore_unknown_options: t.Optional[bool] = None, + help_option_names: t.Optional[t.List[str]] = None, + token_normalize_func: t.Optional[t.Callable[[str], str]] = None, + color: t.Optional[bool] = None, + show_default: t.Optional[bool] = None, + ) -> None: + #: the parent context or `None` if none exists. + self.parent = parent + #: the :class:`Command` for this context. + self.command = command + #: the descriptive information name + self.info_name = info_name + #: Map of parameter names to their parsed values. Parameters + #: with ``expose_value=False`` are not stored. + self.params: t.Dict[str, t.Any] = {} + #: the leftover arguments. + self.args: t.List[str] = [] + #: protected arguments. These are arguments that are prepended + #: to `args` when certain parsing scenarios are encountered but + #: must be never propagated to another arguments. This is used + #: to implement nested parsing. + self.protected_args: t.List[str] = [] + #: the collected prefixes of the command's options. + self._opt_prefixes: t.Set[str] = set(parent._opt_prefixes) if parent else set() + + if obj is None and parent is not None: + obj = parent.obj + + #: the user object stored. + self.obj: t.Any = obj + self._meta: t.Dict[str, t.Any] = getattr(parent, "meta", {}) + + #: A dictionary (-like object) with defaults for parameters. 
+ if ( + default_map is None + and info_name is not None + and parent is not None + and parent.default_map is not None + ): + default_map = parent.default_map.get(info_name) + + self.default_map: t.Optional[t.Dict[str, t.Any]] = default_map + + #: This flag indicates if a subcommand is going to be executed. A + #: group callback can use this information to figure out if it's + #: being executed directly or because the execution flow passes + #: onwards to a subcommand. By default it's None, but it can be + #: the name of the subcommand to execute. + #: + #: If chaining is enabled this will be set to ``'*'`` in case + #: any commands are executed. It is however not possible to + #: figure out which ones. If you require this knowledge you + #: should use a :func:`result_callback`. + self.invoked_subcommand: t.Optional[str] = None + + if terminal_width is None and parent is not None: + terminal_width = parent.terminal_width + + #: The width of the terminal (None is autodetection). + self.terminal_width: t.Optional[int] = terminal_width + + if max_content_width is None and parent is not None: + max_content_width = parent.max_content_width + + #: The maximum width of formatted content (None implies a sensible + #: default which is 80 for most things). + self.max_content_width: t.Optional[int] = max_content_width + + if allow_extra_args is None: + allow_extra_args = command.allow_extra_args + + #: Indicates if the context allows extra args or if it should + #: fail on parsing. + #: + #: .. versionadded:: 3.0 + self.allow_extra_args = allow_extra_args + + if allow_interspersed_args is None: + allow_interspersed_args = command.allow_interspersed_args + + #: Indicates if the context allows mixing of arguments and + #: options or not. + #: + #: .. versionadded:: 3.0 + self.allow_interspersed_args: bool = allow_interspersed_args + + if ignore_unknown_options is None: + ignore_unknown_options = command.ignore_unknown_options + + #: Instructs click to ignore options that a command does not + #: understand and will store it on the context for later + #: processing. This is primarily useful for situations where you + #: want to call into external programs. Generally this pattern is + #: strongly discouraged because it's not possibly to losslessly + #: forward all arguments. + #: + #: .. versionadded:: 4.0 + self.ignore_unknown_options: bool = ignore_unknown_options + + if help_option_names is None: + if parent is not None: + help_option_names = parent.help_option_names + else: + help_option_names = ["--help"] + + #: The names for the help options. + self.help_option_names: t.List[str] = help_option_names + + if token_normalize_func is None and parent is not None: + token_normalize_func = parent.token_normalize_func + + #: An optional normalization function for tokens. This is + #: options, choices, commands etc. + self.token_normalize_func: t.Optional[ + t.Callable[[str], str] + ] = token_normalize_func + + #: Indicates if resilient parsing is enabled. In that case Click + #: will do its best to not cause any failures and default values + #: will be ignored. Useful for completion. + self.resilient_parsing: bool = resilient_parsing + + # If there is no envvar prefix yet, but the parent has one and + # the command on this level has a name, we can expand the envvar + # prefix automatically. 
+ if auto_envvar_prefix is None: + if ( + parent is not None + and parent.auto_envvar_prefix is not None + and self.info_name is not None + ): + auto_envvar_prefix = ( + f"{parent.auto_envvar_prefix}_{self.info_name.upper()}" + ) + else: + auto_envvar_prefix = auto_envvar_prefix.upper() + + if auto_envvar_prefix is not None: + auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") + + self.auto_envvar_prefix: t.Optional[str] = auto_envvar_prefix + + if color is None and parent is not None: + color = parent.color + + #: Controls if styling output is wanted or not. + self.color: t.Optional[bool] = color + + if show_default is None and parent is not None: + show_default = parent.show_default + + #: Show option default values when formatting help text. + self.show_default: t.Optional[bool] = show_default + + self._close_callbacks: t.List[t.Callable[[], t.Any]] = [] + self._depth = 0 + self._parameter_source: t.Dict[str, ParameterSource] = {} + self._exit_stack = ExitStack() + + def to_info_dict(self) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. This traverses the entire CLI + structure. + + .. code-block:: python + + with Context(cli) as ctx: + info = ctx.to_info_dict() + + .. versionadded:: 8.0 + """ + return { + "command": self.command.to_info_dict(self), + "info_name": self.info_name, + "allow_extra_args": self.allow_extra_args, + "allow_interspersed_args": self.allow_interspersed_args, + "ignore_unknown_options": self.ignore_unknown_options, + "auto_envvar_prefix": self.auto_envvar_prefix, + } + + def __enter__(self) -> "Context": + self._depth += 1 + push_context(self) + return self + + def __exit__(self, exc_type, exc_value, tb): # type: ignore + self._depth -= 1 + if self._depth == 0: + self.close() + pop_context() + + @contextmanager + def scope(self, cleanup: bool = True) -> t.Iterator["Context"]: + """This helper method can be used with the context object to promote + it to the current thread local (see :func:`get_current_context`). + The default behavior of this is to invoke the cleanup functions which + can be disabled by setting `cleanup` to `False`. The cleanup + functions are typically used for things such as closing file handles. + + If the cleanup is intended the context object can also be directly + used as a context manager. + + Example usage:: + + with ctx.scope(): + assert get_current_context() is ctx + + This is equivalent:: + + with ctx: + assert get_current_context() is ctx + + .. versionadded:: 5.0 + + :param cleanup: controls if the cleanup functions should be run or + not. The default is to run these functions. In + some situations the context only wants to be + temporarily pushed in which case this can be disabled. + Nested pushes automatically defer the cleanup. + """ + if not cleanup: + self._depth += 1 + try: + with self as rv: + yield rv + finally: + if not cleanup: + self._depth -= 1 + + @property + def meta(self) -> t.Dict[str, t.Any]: + """This is a dictionary which is shared with all the contexts + that are nested. It exists so that click utilities can store some + state here if they need to. It is however the responsibility of + that code to manage this dictionary well. + + The keys are supposed to be unique dotted strings. For instance + module paths are a good choice for it. What is stored in there is + irrelevant for the operation of click. However what is important is + that code that places data here adheres to the general semantics of + the system. 
+ + Example usage:: + + LANG_KEY = f'{__name__}.lang' + + def set_language(value): + ctx = get_current_context() + ctx.meta[LANG_KEY] = value + + def get_language(): + return get_current_context().meta.get(LANG_KEY, 'en_US') + + .. versionadded:: 5.0 + """ + return self._meta + + def make_formatter(self) -> HelpFormatter: + """Creates the :class:`~click.HelpFormatter` for the help and + usage output. + + To quickly customize the formatter class used without overriding + this method, set the :attr:`formatter_class` attribute. + + .. versionchanged:: 8.0 + Added the :attr:`formatter_class` attribute. + """ + return self.formatter_class( + width=self.terminal_width, max_width=self.max_content_width + ) + + def with_resource(self, context_manager: t.ContextManager[V]) -> V: + """Register a resource as if it were used in a ``with`` + statement. The resource will be cleaned up when the context is + popped. + + Uses :meth:`contextlib.ExitStack.enter_context`. It calls the + resource's ``__enter__()`` method and returns the result. When + the context is popped, it closes the stack, which calls the + resource's ``__exit__()`` method. + + To register a cleanup function for something that isn't a + context manager, use :meth:`call_on_close`. Or use something + from :mod:`contextlib` to turn it into a context manager first. + + .. code-block:: python + + @click.group() + @click.option("--name") + @click.pass_context + def cli(ctx): + ctx.obj = ctx.with_resource(connect_db(name)) + + :param context_manager: The context manager to enter. + :return: Whatever ``context_manager.__enter__()`` returns. + + .. versionadded:: 8.0 + """ + return self._exit_stack.enter_context(context_manager) + + def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: + """Register a function to be called when the context tears down. + + This can be used to close resources opened during the script + execution. Resources that support Python's context manager + protocol which would be used in a ``with`` statement should be + registered with :meth:`with_resource` instead. + + :param f: The function to execute on teardown. + """ + return self._exit_stack.callback(f) + + def close(self) -> None: + """Invoke all close callbacks registered with + :meth:`call_on_close`, and exit all context managers entered + with :meth:`with_resource`. + """ + self._exit_stack.close() + # In case the context is reused, create a new exit stack. + self._exit_stack = ExitStack() + + @property + def command_path(self) -> str: + """The computed command path. This is used for the ``usage`` + information on the help page. It's automatically created by + combining the info names of the chain of contexts to the root. 
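+
+        For example (illustrative names): for a subcommand ``sync`` of a
+        group ``tool``, this evaluates to ``"tool sync"``.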
+ """ + rv = "" + if self.info_name is not None: + rv = self.info_name + if self.parent is not None: + parent_command_path = [self.parent.command_path] + + if isinstance(self.parent.command, Command): + for param in self.parent.command.get_params(self): + parent_command_path.extend(param.get_usage_pieces(self)) + + rv = f"{' '.join(parent_command_path)} {rv}" + return rv.lstrip() + + def find_root(self) -> "Context": + """Finds the outermost context.""" + node = self + while node.parent is not None: + node = node.parent + return node + + def find_object(self, object_type: t.Type[V]) -> t.Optional[V]: + """Finds the closest object of a given type.""" + node: t.Optional["Context"] = self + + while node is not None: + if isinstance(node.obj, object_type): + return node.obj + + node = node.parent + + return None + + def ensure_object(self, object_type: t.Type[V]) -> V: + """Like :meth:`find_object` but sets the innermost object to a + new instance of `object_type` if it does not exist. + """ + rv = self.find_object(object_type) + if rv is None: + self.obj = rv = object_type() + return rv + + @t.overload + def lookup_default( + self, name: str, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def lookup_default( + self, name: str, call: "te.Literal[False]" = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def lookup_default(self, name: str, call: bool = True) -> t.Optional[t.Any]: + """Get the default for a parameter from :attr:`default_map`. + + :param name: Name of the parameter. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. + """ + if self.default_map is not None: + value = self.default_map.get(name) + + if call and callable(value): + return value() + + return value + + return None + + def fail(self, message: str) -> "te.NoReturn": + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self) -> "te.NoReturn": + """Aborts the script.""" + raise Abort() + + def exit(self, code: int = 0) -> "te.NoReturn": + """Exits the application with a given exit code.""" + raise Exit(code) + + def get_usage(self) -> str: + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self) -> str: + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def _make_sub_context(self, command: "Command") -> "Context": + """Create a new context of the same type as this context, but + for a new command. + + :meta private: + """ + return type(self)(command, info_name=command.name, parent=self) + + def invoke( + __self, # noqa: B902 + __callback: t.Union["Command", t.Callable[..., t.Any]], + *args: t.Any, + **kwargs: t.Any, + ) -> t.Any: + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. 
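+
+        Example usage (illustrative; ``other_cmd`` and ``name`` are
+        placeholders for another command object and one of its
+        parameters)::
+
+            @click.command()
+            @click.pass_context
+            def cli(ctx):
+                # Invoke the other command; Click fills in defaults for
+                # any of its parameters not passed here.
+                ctx.invoke(other_cmd, name="value")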
+ + Note that before Click 3.2 keyword arguments were not properly filled + in against the intention of this code and no context was created. For + more information about this change and why it was done in a bugfix + release see :ref:`upgrade-to-3.2`. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if :meth:`forward` is called at multiple levels. + """ + if isinstance(__callback, Command): + other_cmd = __callback + + if other_cmd.callback is None: + raise TypeError( + "The given command does not have a callback that can be invoked." + ) + else: + __callback = other_cmd.callback + + ctx = __self._make_sub_context(other_cmd) + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.type_cast_value( # type: ignore + ctx, param.get_default(ctx) + ) + + # Track all kwargs as params, so that forward() will pass + # them on in subsequent calls. + ctx.params.update(kwargs) + else: + ctx = __self + + with augment_usage_errors(__self): + with ctx: + return __callback(*args, **kwargs) + + def forward( + __self, __cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902 + ) -> t.Any: + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if ``forward`` is called at multiple levels. + """ + # Can only forward to other commands, not direct callbacks. + if not isinstance(__cmd, Command): + raise TypeError("Callback is not a command.") + + for param in __self.params: + if param not in kwargs: + kwargs[param] = __self.params[param] + + return __self.invoke(__cmd, *args, **kwargs) + + def set_parameter_source(self, name: str, source: ParameterSource) -> None: + """Set the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + :param name: The name of the parameter. + :param source: A member of :class:`~click.core.ParameterSource`. + """ + self._parameter_source[name] = source + + def get_parameter_source(self, name: str) -> t.Optional[ParameterSource]: + """Get the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + This can be useful for determining when a user specified a value + on the command line that is the same as the default value. It + will be :attr:`~click.core.ParameterSource.DEFAULT` only if the + value was actually taken from the default. + + :param name: The name of the parameter. + :rtype: ParameterSource + + .. versionchanged:: 8.0 + Returns ``None`` if the parameter was not provided from any + source. + """ + return self._parameter_source.get(name) + + +class BaseCommand: + """The base command implements the minimal API contract of commands. + Most code will never use this as it does not implement a lot of useful + functionality but it can act as the direct subclass of alternative + parsing methods that do not depend on the Click parser. + + For instance, this can be used to bridge Click and other systems like + argparse or docopt. + + Because base commands do not implement a lot of the API that other + parts of Click take for granted, they are not supported for all + operations. For instance, they cannot be used with the decorators + usually and they have no built-in callback system. + + .. 
versionchanged:: 2.0 + Added the `context_settings` parameter. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + """ + + #: The context class to create with :meth:`make_context`. + #: + #: .. versionadded:: 8.0 + context_class: t.Type[Context] = Context + #: the default for the :attr:`Context.allow_extra_args` flag. + allow_extra_args = False + #: the default for the :attr:`Context.allow_interspersed_args` flag. + allow_interspersed_args = True + #: the default for the :attr:`Context.ignore_unknown_options` flag. + ignore_unknown_options = False + + def __init__( + self, + name: t.Optional[str], + context_settings: t.Optional[t.Dict[str, t.Any]] = None, + ) -> None: + #: the name the command thinks it has. Upon registering a command + #: on a :class:`Group` the group will default the command name + #: with this information. You should instead use the + #: :class:`Context`\'s :attr:`~Context.info_name` attribute. + self.name = name + + if context_settings is None: + context_settings = {} + + #: an optional dictionary with defaults passed to the context. + self.context_settings: t.Dict[str, t.Any] = context_settings + + def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. This traverses the entire structure + below this command. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + :param ctx: A :class:`Context` representing this command. + + .. versionadded:: 8.0 + """ + return {"name": self.name} + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def get_usage(self, ctx: Context) -> str: + raise NotImplementedError("Base commands cannot get usage") + + def get_help(self, ctx: Context) -> str: + raise NotImplementedError("Base commands cannot get help") + + def make_context( + self, + info_name: t.Optional[str], + args: t.List[str], + parent: t.Optional[Context] = None, + **extra: t.Any, + ) -> Context: + """This function when given an info name and arguments will kick + off the parsing and create a new :class:`Context`. It does not + invoke the actual command callback though. + + To quickly customize the context class used without overriding + this method, set the :attr:`context_class` attribute. + + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it's usually + the name of the script, for commands below it it's + the name of the command. + :param args: the arguments to parse as list of strings. + :param parent: the parent context if available. + :param extra: extra keyword arguments forwarded to the context + constructor. + + .. versionchanged:: 8.0 + Added the :attr:`context_class` attribute. + """ + for key, value in self.context_settings.items(): + if key not in extra: + extra[key] = value + + ctx = self.context_class( + self, info_name=info_name, parent=parent, **extra # type: ignore + ) + + with ctx.scope(cleanup=False): + self.parse_args(ctx, args) + return ctx + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + """Given a context and a list of arguments this creates the parser + and parses the arguments, then modifies the context as necessary. + This is automatically invoked by :meth:`make_context`. 
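+
+        A subclass that bridges another parser might override it along
+        these lines (an illustrative sketch; ``self._parser`` is a
+        hypothetical :mod:`argparse` parser the subclass would set up)::
+
+            class ArgparseCommand(BaseCommand):
+                def parse_args(self, ctx, args):
+                    # Delegate parsing, copy the values onto the context,
+                    # and report no leftover arguments.
+                    ns = self._parser.parse_args(args)
+                    ctx.params.update(vars(ns))
+                    return []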
+        """
+        raise NotImplementedError("Base commands do not know how to parse arguments.")
+
+    def invoke(self, ctx: Context) -> t.Any:
+        """Given a context, this invokes the command. The default
+        implementation is raising a not implemented error.
+        """
+        raise NotImplementedError("Base commands are not invokable by default")
+
+    def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
+        """Return a list of completions for the incomplete value. Looks
+        at the names of chained multi-commands.
+
+        Any command could be part of a chained multi-command, so sibling
+        commands are valid at any point during command completion. Other
+        command classes will return more completions.
+
+        :param ctx: Invocation context for this command.
+        :param incomplete: Value being completed. May be empty.
+
+        .. versionadded:: 8.0
+        """
+        from click.shell_completion import CompletionItem
+
+        results: t.List["CompletionItem"] = []
+
+        while ctx.parent is not None:
+            ctx = ctx.parent
+
+            if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+                results.extend(
+                    CompletionItem(name, help=command.get_short_help_str())
+                    for name, command in _complete_visible_commands(ctx, incomplete)
+                    if name not in ctx.protected_args
+                )
+
+        return results
+
+    @t.overload
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: "te.Literal[True]" = True,
+        **extra: t.Any,
+    ) -> "te.NoReturn":
+        ...
+
+    @t.overload
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: bool = ...,
+        **extra: t.Any,
+    ) -> t.Any:
+        ...
+
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: bool = True,
+        windows_expand_args: bool = True,
+        **extra: t.Any,
+    ) -> t.Any:
+        """This is the way to invoke a script with all the bells and
+        whistles as a command line application. This will always terminate
+        the application after a call. If this is not wanted, ``SystemExit``
+        needs to be caught.
+
+        This method is also available by directly calling the instance of
+        a :class:`Command`.
+
+        :param args: the arguments that should be used for parsing. If not
+                     provided, ``sys.argv[1:]`` is used.
+        :param prog_name: the program name that should be used. By default
+                          the program name is constructed by taking the file
+                          name from ``sys.argv[0]``.
+        :param complete_var: the environment variable that controls the
+                             bash completion support. The default is
+                             ``"_<prog_name>_COMPLETE"`` with prog_name in
+                             uppercase.
+        :param standalone_mode: the default behavior is to invoke the script
+                                in standalone mode. Click will then
+                                handle exceptions and convert them into
+                                error messages and the function will never
+                                return but shut down the interpreter. If
+                                this is set to `False` they will be
+                                propagated to the caller and the return
+                                value of this function is the return value
+                                of :meth:`invoke`.
+        :param windows_expand_args: Expand glob patterns, user dir, and
+                                    env vars in command line args on Windows.
+        :param extra: extra keyword arguments are forwarded to the context
+                      constructor. See :class:`Context` for more information.
+
+        .. versionchanged:: 8.0.1
+            Added the ``windows_expand_args`` parameter to allow
+            disabling command line arg expansion on Windows.
+
+        ..
versionchanged:: 8.0 + When taking arguments from ``sys.argv`` on Windows, glob + patterns, user dir, and env vars are expanded. + + .. versionchanged:: 3.0 + Added the ``standalone_mode`` parameter. + """ + if args is None: + args = sys.argv[1:] + + if os.name == "nt" and windows_expand_args: + args = _expand_args(args) + else: + args = list(args) + + if prog_name is None: + prog_name = _detect_program_name() + + # Process shell completion requests and exit early. + self._main_shell_completion(extra, prog_name, complete_var) + + try: + try: + with self.make_context(prog_name, args, **extra) as ctx: + rv = self.invoke(ctx) + if not standalone_mode: + return rv + # it's not safe to `ctx.exit(rv)` here! + # note that `rv` may actually contain data like "1" which + # has obvious effects + # more subtle case: `rv=[None, None]` can come out of + # chained commands which all returned `None` -- so it's not + # even always obvious that `rv` indicates success/failure + # by its truthiness/falsiness + ctx.exit() + except (EOFError, KeyboardInterrupt): + echo(file=sys.stderr) + raise Abort() from None + except ClickException as e: + if not standalone_mode: + raise + e.show() + sys.exit(e.exit_code) + except OSError as e: + if e.errno == errno.EPIPE: + sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) + sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) + sys.exit(1) + else: + raise + except Exit as e: + if standalone_mode: + sys.exit(e.exit_code) + else: + # in non-standalone mode, return the exit code + # note that this is only reached if `self.invoke` above raises + # an Exit explicitly -- thus bypassing the check there which + # would return its result + # the results of non-standalone execution may therefore be + # somewhat ambiguous: if there are codepaths which lead to + # `ctx.exit(1)` and to `return 1`, the caller won't be able to + # tell the difference between the two + return e.exit_code + except Abort: + if not standalone_mode: + raise + echo(_("Aborted!"), file=sys.stderr) + sys.exit(1) + + def _main_shell_completion( + self, + ctx_args: t.Dict[str, t.Any], + prog_name: str, + complete_var: t.Optional[str] = None, + ) -> None: + """Check if the shell is asking for tab completion, process + that, then exit early. Called from :meth:`main` before the + program is invoked. + + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. Defaults to + ``_{PROG_NAME}_COMPLETE``. + """ + if complete_var is None: + complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() + + instruction = os.environ.get(complete_var) + + if not instruction: + return + + from .shell_completion import shell_complete + + rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) + sys.exit(rv) + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Alias for :meth:`main`.""" + return self.main(*args, **kwargs) + + +class Command(BaseCommand): + """Commands are the basic building block of command line interfaces in + Click. A basic command handles command line parsing and might dispatch + more parsing to commands nested below it. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + :param callback: the callback to invoke. This is optional. + :param params: the parameters to register with this command. 
This can + be either :class:`Option` or :class:`Argument` objects. + :param help: the help string to use for this command. + :param epilog: like the help string but it's printed at the end of the + help page after everything else. + :param short_help: the short help to use for this command. This is + shown on the command listing of the parent command. + :param add_help_option: by default each command registers a ``--help`` + option. This can be disabled by this parameter. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is disabled by default. + If enabled this will add ``--help`` as argument + if no arguments are passed + :param hidden: hide this command from help outputs. + + :param deprecated: issues a message indicating that + the command is deprecated. + + .. versionchanged:: 8.1 + ``help``, ``epilog``, and ``short_help`` are stored unprocessed, + all formatting is done when outputting help text, not at init, + and is done even if not using the ``@command`` decorator. + + .. versionchanged:: 8.0 + Added a ``repr`` showing the command name. + + .. versionchanged:: 7.1 + Added the ``no_args_is_help`` parameter. + + .. versionchanged:: 2.0 + Added the ``context_settings`` parameter. + """ + + def __init__( + self, + name: t.Optional[str], + context_settings: t.Optional[t.Dict[str, t.Any]] = None, + callback: t.Optional[t.Callable[..., t.Any]] = None, + params: t.Optional[t.List["Parameter"]] = None, + help: t.Optional[str] = None, + epilog: t.Optional[str] = None, + short_help: t.Optional[str] = None, + options_metavar: t.Optional[str] = "[OPTIONS]", + add_help_option: bool = True, + no_args_is_help: bool = False, + hidden: bool = False, + deprecated: bool = False, + ) -> None: + super().__init__(name, context_settings) + #: the callback to execute when the command fires. This might be + #: `None` in which case nothing happens. + self.callback = callback + #: the list of parameters for this command in the order they + #: should show up in the help page and execute. Eager parameters + #: will automatically be handled before non eager ones. + self.params: t.List["Parameter"] = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + self.short_help = short_help + self.add_help_option = add_help_option + self.no_args_is_help = no_args_is_help + self.hidden = hidden + self.deprecated = deprecated + + def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict(ctx) + info_dict.update( + params=[param.to_info_dict() for param in self.get_params(ctx)], + help=self.help, + epilog=self.epilog, + short_help=self.short_help, + hidden=self.hidden, + deprecated=self.deprecated, + ) + return info_dict + + def get_usage(self, ctx: Context) -> str: + """Formats the usage line into a string and returns it. + + Calls :meth:`format_usage` internally. + """ + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_params(self, ctx: Context) -> t.List["Parameter"]: + rv = self.params + help_option = self.get_help_option(ctx) + + if help_option is not None: + rv = [*rv, help_option] + + return rv + + def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the usage line into the formatter. + + This is a low-level method called by :meth:`get_usage`. 
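+
+        A formatted usage line looks roughly like this (illustrative
+        program and parameter names)::
+
+            Usage: tool sync [OPTIONS] SRC DST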
+ """ + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, " ".join(pieces)) + + def collect_usage_pieces(self, ctx: Context) -> t.List[str]: + """Returns all the pieces that go into the usage line and returns + it as a list of strings. + """ + rv = [self.options_metavar] if self.options_metavar else [] + + for param in self.get_params(ctx): + rv.extend(param.get_usage_pieces(ctx)) + + return rv + + def get_help_option_names(self, ctx: Context) -> t.List[str]: + """Returns the names for the help option.""" + all_names = set(ctx.help_option_names) + for param in self.params: + all_names.difference_update(param.opts) + all_names.difference_update(param.secondary_opts) + return list(all_names) + + def get_help_option(self, ctx: Context) -> t.Optional["Option"]: + """Returns the help option object.""" + help_options = self.get_help_option_names(ctx) + + if not help_options or not self.add_help_option: + return None + + def show_help(ctx: Context, param: "Parameter", value: str) -> None: + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + return Option( + help_options, + is_flag=True, + is_eager=True, + expose_value=False, + callback=show_help, + help=_("Show this message and exit."), + ) + + def make_parser(self, ctx: Context) -> OptionParser: + """Creates the underlying option parser for this command.""" + parser = OptionParser(ctx) + for param in self.get_params(ctx): + param.add_to_parser(parser, ctx) + return parser + + def get_help(self, ctx: Context) -> str: + """Formats the help into a string and returns it. + + Calls :meth:`format_help` internally. + """ + formatter = ctx.make_formatter() + self.format_help(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_short_help_str(self, limit: int = 45) -> str: + """Gets short help for the command or makes it by shortening the + long help string. + """ + if self.short_help: + text = inspect.cleandoc(self.short_help) + elif self.help: + text = make_default_short_help(self.help, limit) + else: + text = "" + + if self.deprecated: + text = _("(Deprecated) {text}").format(text=text) + + return text.strip() + + def format_help(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help into the formatter if it exists. + + This is a low-level method called by :meth:`get_help`. 
+ + This calls the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help text to the formatter if it exists.""" + text = self.help if self.help is not None else "" + + if self.deprecated: + text = _("(Deprecated) {text}").format(text=text) + + if text: + text = inspect.cleandoc(text).partition("\f")[0] + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(text) + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section(_("Options")): + formatter.write_dl(opts) + + def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + epilog = inspect.cleandoc(self.epilog) + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(epilog) + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing(param_order, self.get_params(ctx)): + value, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail( + ngettext( + "Got unexpected extra argument ({args})", + "Got unexpected extra arguments ({args})", + len(args), + ).format(args=" ".join(map(str, args))) + ) + + ctx.args = args + ctx._opt_prefixes.update(parser._opt_prefixes) + return args + + def invoke(self, ctx: Context) -> t.Any: + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.deprecated: + message = _( + "DeprecationWarning: The command {name!r} is deprecated." + ).format(name=self.name) + echo(style(message, fg="red"), err=True) + + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. Looks + at the names of options and chained multi-commands. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results: t.List["CompletionItem"] = [] + + if incomplete and not incomplete[0].isalnum(): + for param in self.get_params(ctx): + if ( + not isinstance(param, Option) + or param.hidden + or ( + not param.multiple + and ctx.get_parameter_source(param.name) # type: ignore + is ParameterSource.COMMANDLINE + ) + ): + continue + + results.extend( + CompletionItem(name, help=param.help) + for name in [*param.opts, *param.secondary_opts] + if name.startswith(incomplete) + ) + + results.extend(super().shell_complete(ctx, incomplete)) + return results + + +class MultiCommand(Command): + """A multi command is the basic implementation of a command that + dispatches to subcommands. The most common version is the + :class:`Group`. + + :param invoke_without_command: this controls how the multi command itself + is invoked. By default it's only invoked + if a subcommand is provided. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is enabled by default if + `invoke_without_command` is disabled or disabled + if it's enabled. If enabled this will add + ``--help`` as argument if no arguments are + passed. + :param subcommand_metavar: the string that is used in the documentation + to indicate the subcommand place. + :param chain: if this is set to `True` chaining of multiple subcommands + is enabled. This restricts the form of commands in that + they cannot have optional arguments but it allows + multiple commands to be chained together. + :param result_callback: The result callback to attach to this multi + command. This can be set or changed later with the + :meth:`result_callback` decorator. + """ + + allow_extra_args = True + allow_interspersed_args = False + + def __init__( + self, + name: t.Optional[str] = None, + invoke_without_command: bool = False, + no_args_is_help: t.Optional[bool] = None, + subcommand_metavar: t.Optional[str] = None, + chain: bool = False, + result_callback: t.Optional[t.Callable[..., t.Any]] = None, + **attrs: t.Any, + ) -> None: + super().__init__(name, **attrs) + + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + + if subcommand_metavar is None: + if chain: + subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." + else: + subcommand_metavar = "COMMAND [ARGS]..." + + self.subcommand_metavar = subcommand_metavar + self.chain = chain + # The result callback that is stored. This can be set or + # overridden with the :func:`result_callback` decorator. + self._result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError( + "Multi commands in chain mode cannot have" + " optional arguments." 
+ ) + + def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict(ctx) + commands = {} + + for name in self.list_commands(ctx): + command = self.get_command(ctx, name) + + if command is None: + continue + + sub_ctx = ctx._make_sub_context(command) + + with sub_ctx.scope(cleanup=False): + commands[name] = command.to_info_dict(sub_ctx) + + info_dict.update(commands=commands, chain=self.chain) + return info_dict + + def collect_usage_pieces(self, ctx: Context) -> t.List[str]: + rv = super().collect_usage_pieces(ctx) + rv.append(self.subcommand_metavar) + return rv + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + super().format_options(ctx, formatter) + self.format_commands(ctx, formatter) + + def result_callback(self, replace: bool = False) -> t.Callable[[F], F]: + """Adds a result callback to the command. By default if a + result callback is already registered this will chain them but + this can be disabled with the `replace` parameter. The result + callback is invoked with the return value of the subcommand + (or the list of return values from all subcommands if chaining + is enabled) as well as the parameters as they would be passed + to the main callback. + + Example:: + + @click.group() + @click.option('-i', '--input', default=23) + def cli(input): + return 42 + + @cli.result_callback() + def process_result(result, input): + return result + input + + :param replace: if set to `True` an already existing result + callback will be removed. + + .. versionchanged:: 8.0 + Renamed from ``resultcallback``. + + .. versionadded:: 3.0 + """ + + def decorator(f: F) -> F: + old_callback = self._result_callback + + if old_callback is None or replace: + self._result_callback = f + return f + + def function(__value, *args, **kwargs): # type: ignore + inner = old_callback(__value, *args, **kwargs) # type: ignore + return f(inner, *args, **kwargs) + + self._result_callback = rv = update_wrapper(t.cast(F, function), f) + return rv + + return decorator + + def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None: + """Extra format methods for multi methods that adds all the commands + after the options. + """ + commands = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. 
Ignore it + if cmd is None: + continue + if cmd.hidden: + continue + + commands.append((subcommand, cmd)) + + # allow for 3 times the default spacing + if len(commands): + limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) + + rows = [] + for subcommand, cmd in commands: + help = cmd.get_short_help_str(limit) + rows.append((subcommand, help)) + + if rows: + with formatter.section(_("Commands")): + formatter.write_dl(rows) + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + rest = super().parse_args(ctx, args) + + if self.chain: + ctx.protected_args = rest + ctx.args = [] + elif rest: + ctx.protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx: Context) -> t.Any: + def _process_result(value: t.Any) -> t.Any: + if self._result_callback is not None: + value = ctx.invoke(self._result_callback, value, **ctx.params) + return value + + if not ctx.protected_args: + if self.invoke_without_command: + # No subcommand was invoked, so the result callback is + # invoked with the group return value for regular + # groups, or an empty list for chained groups. + with ctx: + rv = super().invoke(ctx) + return _process_result([] if self.chain else rv) + ctx.fail(_("Missing command.")) + + # Fetch args back out + args = [*ctx.protected_args, *ctx.args] + ctx.args = [] + ctx.protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. + if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + ctx.invoked_subcommand = cmd_name + super().invoke(ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = "*" if args else None + super().invoke(ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + ) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command( + self, ctx: Context, args: t.List[str] + ) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]: + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. 
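+        # For example (hypothetical setup): with token_normalize_func set
+        # to str.lower on the context, a user typing "SYNC" is normalized
+        # to "sync" before this second lookup, making command names
+        # effectively case insensitive.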
+        if cmd is None and ctx.token_normalize_func is not None:
+            cmd_name = ctx.token_normalize_func(cmd_name)
+            cmd = self.get_command(ctx, cmd_name)
+
+        # If we don't find the command we want to show an error message
+        # to the user that it was not provided. However, there is
+        # something else we should do: if the first argument looks like
+        # an option we want to kick off parsing again for arguments to
+        # resolve things like --help which now should go to the main
+        # place.
+        if cmd is None and not ctx.resilient_parsing:
+            if split_opt(cmd_name)[0]:
+                self.parse_args(ctx, ctx.args)
+            ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name))
+        return cmd_name if cmd else None, cmd, args[1:]
+
+    def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
+        """Given a context and a command name, this returns a
+        :class:`Command` object if it exists or returns `None`.
+        """
+        raise NotImplementedError
+
+    def list_commands(self, ctx: Context) -> t.List[str]:
+        """Returns a list of subcommand names in the order they should
+        appear.
+        """
+        return []
+
+    def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
+        """Return a list of completions for the incomplete value. Looks
+        at the names of options, subcommands, and chained
+        multi-commands.
+
+        :param ctx: Invocation context for this command.
+        :param incomplete: Value being completed. May be empty.
+
+        .. versionadded:: 8.0
+        """
+        from click.shell_completion import CompletionItem
+
+        results = [
+            CompletionItem(name, help=command.get_short_help_str())
+            for name, command in _complete_visible_commands(ctx, incomplete)
+        ]
+        results.extend(super().shell_complete(ctx, incomplete))
+        return results
+
+
+class Group(MultiCommand):
+    """A group allows a command to have subcommands attached. This is
+    the most common way to implement nesting in Click.
+
+    :param name: The name of the group command.
+    :param commands: A dict mapping names to :class:`Command` objects.
+        Can also be a list of :class:`Command`, which will use
+        :attr:`Command.name` to create the dict.
+    :param attrs: Other command arguments described in
+        :class:`MultiCommand`, :class:`Command`, and
+        :class:`BaseCommand`.
+
+    .. versionchanged:: 8.0
+        The ``commands`` argument can be a list of command objects.
+    """
+
+    #: If set, this is used by the group's :meth:`command` decorator
+    #: as the default :class:`Command` class. This is useful to make all
+    #: subcommands use a custom command class.
+    #:
+    #: .. versionadded:: 8.0
+    command_class: t.Optional[t.Type[Command]] = None
+
+    #: If set, this is used by the group's :meth:`group` decorator
+    #: as the default :class:`Group` class. This is useful to make all
+    #: subgroups use a custom group class.
+    #:
+    #: If set to the special value :class:`type` (literally
+    #: ``group_class = type``), this group's class will be used as the
+    #: default class. This makes a custom group class continue to make
+    #: custom groups.
+    #:
+    #: ..
versionadded:: 8.0 + group_class: t.Optional[t.Union[t.Type["Group"], t.Type[type]]] = None + # Literal[type] isn't valid, so use Type[type] + + def __init__( + self, + name: t.Optional[str] = None, + commands: t.Optional[t.Union[t.Dict[str, Command], t.Sequence[Command]]] = None, + **attrs: t.Any, + ) -> None: + super().__init__(name, **attrs) + + if commands is None: + commands = {} + elif isinstance(commands, abc.Sequence): + commands = {c.name: c for c in commands if c.name is not None} + + #: The registered subcommands by their exported names. + self.commands: t.Dict[str, Command] = commands + + def add_command(self, cmd: Command, name: t.Optional[str] = None) -> None: + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError("Command has no name.") + _check_multicommand(self, name, cmd, register=True) + self.commands[name] = cmd + + @t.overload + def command(self, __func: t.Callable[..., t.Any]) -> Command: + ... + + @t.overload + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command]: + ... + + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], Command], Command]: + """A shortcut decorator for declaring and attaching a command to + the group. This takes the same arguments as :func:`command` and + immediately registers the created command with this group by + calling :meth:`add_command`. + + To customize the command class used, set the + :attr:`command_class` attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`command_class` attribute. + """ + from .decorators import command + + if self.command_class and kwargs.get("cls") is None: + kwargs["cls"] = self.command_class + + func: t.Optional[t.Callable] = None + + if args and callable(args[0]): + assert ( + len(args) == 1 and not kwargs + ), "Use 'command(**kwargs)(callable)' to provide arguments." + (func,) = args + args = () + + def decorator(f: t.Callable[..., t.Any]) -> Command: + cmd: Command = command(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + @t.overload + def group(self, __func: t.Callable[..., t.Any]) -> "Group": + ... + + @t.overload + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], "Group"]: + ... + + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], "Group"], "Group"]: + """A shortcut decorator for declaring and attaching a group to + the group. This takes the same arguments as :func:`group` and + immediately registers the created group with this group by + calling :meth:`add_command`. + + To customize the group class used, set the :attr:`group_class` + attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`group_class` attribute. + """ + from .decorators import group + + func: t.Optional[t.Callable] = None + + if args and callable(args[0]): + assert ( + len(args) == 1 and not kwargs + ), "Use 'group(**kwargs)(callable)' to provide arguments." 
+            (func,) = args
+            args = ()
+
+        if self.group_class is not None and kwargs.get("cls") is None:
+            if self.group_class is type:
+                kwargs["cls"] = type(self)
+            else:
+                kwargs["cls"] = self.group_class
+
+        def decorator(f: t.Callable[..., t.Any]) -> "Group":
+            cmd: Group = group(*args, **kwargs)(f)
+            self.add_command(cmd)
+            return cmd
+
+        if func is not None:
+            return decorator(func)
+
+        return decorator
+
+    def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
+        return self.commands.get(cmd_name)
+
+    def list_commands(self, ctx: Context) -> t.List[str]:
+        return sorted(self.commands)
+
+
+class CommandCollection(MultiCommand):
+    """A command collection is a multi command that merges multiple multi
+    commands together into one. This is a straightforward implementation
+    that accepts a list of different multi commands as sources and
+    provides all the commands for each of them.
+    """
+
+    def __init__(
+        self,
+        name: t.Optional[str] = None,
+        sources: t.Optional[t.List[MultiCommand]] = None,
+        **attrs: t.Any,
+    ) -> None:
+        super().__init__(name, **attrs)
+        #: The list of registered multi commands.
+        self.sources: t.List[MultiCommand] = sources or []
+
+    def add_source(self, multi_cmd: MultiCommand) -> None:
+        """Adds a new multi command to the chain dispatcher."""
+        self.sources.append(multi_cmd)
+
+    def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
+        for source in self.sources:
+            rv = source.get_command(ctx, cmd_name)
+
+            if rv is not None:
+                if self.chain:
+                    _check_multicommand(self, cmd_name, rv)
+
+                return rv
+
+        return None
+
+    def list_commands(self, ctx: Context) -> t.List[str]:
+        rv: t.Set[str] = set()
+
+        for source in self.sources:
+            rv.update(source.list_commands(ctx))
+
+        return sorted(rv)
+
+
+def _check_iter(value: t.Any) -> t.Iterator[t.Any]:
+    """Check if the value is iterable but not a string. Raises a
+    ``TypeError``, or returns an iterator over the value.
+    """
+    if isinstance(value, str):
+        raise TypeError
+
+    return iter(value)
+
+
+class Parameter:
+    r"""A parameter to a command comes in two versions: they are either
+    :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+    not supported by design as some of the internals for parsing are
+    intentionally not finalized.
+
+    Some settings are supported by both options and arguments.
+
+    :param param_decls: the parameter declarations for this option or
+                        argument. This is a list of flags or argument
+                        names.
+    :param type: the type that should be used. Either a :class:`ParamType`
+                 or a Python type. The latter is converted into the former
+                 automatically if supported.
+    :param required: controls if this is optional or not.
+    :param default: the default value if omitted. This can also be a callable,
+                    in which case it's invoked when the default is needed
+                    without any arguments.
+    :param callback: A function to further process or validate the value
+        after type conversion. It is called as ``f(ctx, param, value)``
+        and must return the value. It is called for all sources,
+        including prompts.
+    :param nargs: the number of arguments to match. If not ``1`` the return
+                  value is a tuple instead of single value. The default for
+                  nargs is ``1`` (except if the type is a tuple, then it's
+                  the arity of the tuple). If ``nargs=-1``, all remaining
+                  parameters are collected.
+    :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards + to the command callback and stored on the context, + otherwise it's skipped. + :param is_eager: eager values are processed before non eager ones. This + should not be set for arguments or it will inverse the + order of processing. + :param envvar: a string or list of strings that are environment variables + that should be checked. + :param shell_complete: A function that returns custom shell + completions. Used instead of the param's type completion if + given. Takes ``ctx, param, incomplete`` and must return a list + of :class:`~click.shell_completion.CompletionItem` or a list of + strings. + + .. versionchanged:: 8.0 + ``process_value`` validates required parameters and bounded + ``nargs``, and invokes the parameter callback before returning + the value. This allows the callback to validate prompts. + ``full_process_value`` is removed. + + .. versionchanged:: 8.0 + ``autocompletion`` is renamed to ``shell_complete`` and has new + semantics described above. The old name is deprecated and will + be removed in 8.1, until then it will be wrapped to match the + new requirements. + + .. versionchanged:: 8.0 + For ``multiple=True, nargs>1``, the default must be a list of + tuples. + + .. versionchanged:: 8.0 + Setting a default is no longer required for ``nargs>1``, it will + default to ``None``. ``multiple=True`` or ``nargs=-1`` will + default to ``()``. + + .. versionchanged:: 7.1 + Empty environment variables are ignored rather than taking the + empty string value. This makes it possible for scripts to clear + variables if they can't unset them. + + .. versionchanged:: 2.0 + Changed signature for parameter callback to also be passed the + parameter. The old callback format will still work, but it will + raise a warning to give you a chance to migrate the code easier. + """ + + param_type_name = "parameter" + + def __init__( + self, + param_decls: t.Optional[t.Sequence[str]] = None, + type: t.Optional[t.Union[types.ParamType, t.Any]] = None, + required: bool = False, + default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None, + callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None, + nargs: t.Optional[int] = None, + multiple: bool = False, + metavar: t.Optional[str] = None, + expose_value: bool = True, + is_eager: bool = False, + envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None, + shell_complete: t.Optional[ + t.Callable[ + [Context, "Parameter", str], + t.Union[t.List["CompletionItem"], t.List[str]], + ] + ] = None, + ) -> None: + self.name, self.opts, self.secondary_opts = self._parse_decls( + param_decls or (), expose_value + ) + self.type = types.convert_type(type, default) + + # Default nargs to what the type tells us if we have that + # information available. + if nargs is None: + if self.type.is_composite: + nargs = self.type.arity + else: + nargs = 1 + + self.required = required + self.callback = callback + self.nargs = nargs + self.multiple = multiple + self.expose_value = expose_value + self.default = default + self.is_eager = is_eager + self.metavar = metavar + self.envvar = envvar + self._custom_shell_complete = shell_complete + + if __debug__: + if self.type.is_composite and nargs != self.type.arity: + raise ValueError( + f"'nargs' must be {self.type.arity} (or None) for" + f" type {self.type!r}, but it was {nargs}." + ) + + # Skip no default or callable default. 
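+            # Examples of default shapes these checks accept (illustrative):
+            #   multiple=True               -> default=["a", "b"]
+            #   nargs=2                     -> default=("x", "y")
+            #   multiple=True with nargs=2  -> default=[("x", "y"), ("u", "v")]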
+ check_default = default if not callable(default) else None + + if check_default is not None: + if multiple: + try: + # Only check the first value against nargs. + check_default = next(_check_iter(check_default), None) + except TypeError: + raise ValueError( + "'default' must be a list when 'multiple' is true." + ) from None + + # Can be None for multiple with empty default. + if nargs != 1 and check_default is not None: + try: + _check_iter(check_default) + except TypeError: + if multiple: + message = ( + "'default' must be a list of lists when 'multiple' is" + " true and 'nargs' != 1." + ) + else: + message = "'default' must be a list when 'nargs' != 1." + + raise ValueError(message) from None + + if nargs > 1 and len(check_default) != nargs: + subject = "item length" if multiple else "length" + raise ValueError( + f"'default' {subject} must match nargs={nargs}." + ) + + def to_info_dict(self) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + return { + "name": self.name, + "param_type_name": self.param_type_name, + "opts": self.opts, + "secondary_opts": self.secondary_opts, + "type": self.type.to_info_dict(), + "required": self.required, + "nargs": self.nargs, + "multiple": self.multiple, + "default": self.default, + "envvar": self.envvar, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + raise NotImplementedError() + + @property + def human_readable_name(self) -> str: + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name # type: ignore + + def make_metavar(self) -> str: + if self.metavar is not None: + return self.metavar + + metavar = self.type.get_metavar(self) + + if metavar is None: + metavar = self.type.name.upper() + + if self.nargs != 1: + metavar += "..." + + return metavar + + @t.overload + def get_default( + self, ctx: Context, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + """Get the default for the parameter. Tries + :meth:`Context.lookup_default` first, then the local default. + + :param ctx: Current context. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0.2 + Type casting is no longer performed when getting a default. + + .. versionchanged:: 8.0.1 + Type casting can fail in resilient parsing mode. Invalid + defaults will not prevent showing help text. + + .. versionchanged:: 8.0 + Looks at ``ctx.default_map`` first. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. 
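+
+        For example (illustrative), with ``default=lambda: 42``::
+
+            param.get_default(ctx)              # -> 42
+            param.get_default(ctx, call=False)  # -> the lambda itself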
+ """ + value = ctx.lookup_default(self.name, call=False) # type: ignore + + if value is None: + value = self.default + + if call and callable(value): + value = value() + + return value + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + raise NotImplementedError() + + def consume_value( + self, ctx: Context, opts: t.Mapping[str, t.Any] + ) -> t.Tuple[t.Any, ParameterSource]: + value = opts.get(self.name) # type: ignore + source = ParameterSource.COMMANDLINE + + if value is None: + value = self.value_from_envvar(ctx) + source = ParameterSource.ENVIRONMENT + + if value is None: + value = ctx.lookup_default(self.name) # type: ignore + source = ParameterSource.DEFAULT_MAP + + if value is None: + value = self.get_default(ctx) + source = ParameterSource.DEFAULT + + return value, source + + def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: + """Convert and validate a value against the option's + :attr:`type`, :attr:`multiple`, and :attr:`nargs`. + """ + if value is None: + return () if self.multiple or self.nargs == -1 else None + + def check_iter(value: t.Any) -> t.Iterator: + try: + return _check_iter(value) + except TypeError: + # This should only happen when passing in args manually, + # the parser should construct an iterable when parsing + # the command line. + raise BadParameter( + _("Value must be an iterable."), ctx=ctx, param=self + ) from None + + if self.nargs == 1 or self.type.is_composite: + convert: t.Callable[[t.Any], t.Any] = partial( + self.type, param=self, ctx=ctx + ) + elif self.nargs == -1: + + def convert(value: t.Any) -> t.Tuple: + return tuple(self.type(x, self, ctx) for x in check_iter(value)) + + else: # nargs > 1 + + def convert(value: t.Any) -> t.Tuple: + value = tuple(check_iter(value)) + + if len(value) != self.nargs: + raise BadParameter( + ngettext( + "Takes {nargs} values but 1 was given.", + "Takes {nargs} values but {len} were given.", + len(value), + ).format(nargs=self.nargs, len=len(value)), + ctx=ctx, + param=self, + ) + + return tuple(self.type(x, self, ctx) for x in value) + + if self.multiple: + return tuple(convert(x) for x in check_iter(value)) + + return convert(value) + + def value_is_missing(self, value: t.Any) -> bool: + if value is None: + return True + + if (self.nargs != 1 or self.multiple) and value == (): + return True + + return False + + def process_value(self, ctx: Context, value: t.Any) -> t.Any: + value = self.type_cast_value(ctx, value) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + if self.callback is not None: + value = self.callback(ctx, self, value) + + return value + + def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: + if self.envvar is None: + return None + + if isinstance(self.envvar, str): + rv = os.environ.get(self.envvar) + + if rv: + return rv + else: + for envvar in self.envvar: + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: + rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) + + if rv is not None and self.nargs != 1: + rv = self.type.split_envvar_value(rv) + + return rv + + def handle_parse_result( + self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str] + ) -> t.Tuple[t.Any, t.List[str]]: + with augment_usage_errors(ctx, param=self): + value, source = self.consume_value(ctx, opts) + ctx.set_parameter_source(self.name, source) # type: ignore + + try: + value = self.process_value(ctx, value) + except Exception: 
+ if not ctx.resilient_parsing: + raise + + value = None + + if self.expose_value: + ctx.params[self.name] = value # type: ignore + + return value, args + + def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: + pass + + def get_usage_pieces(self, ctx: Context) -> t.List[str]: + return [] + + def get_error_hint(self, ctx: Context) -> str: + """Get a stringified version of the param for use in error messages to + indicate which param caused the error. + """ + hint_list = self.opts or [self.human_readable_name] + return " / ".join(f"'{x}'" for x in hint_list) + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. If a + ``shell_complete`` function was given during init, it is used. + Otherwise, the :attr:`type` + :meth:`~click.types.ParamType.shell_complete` function is used. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + if self._custom_shell_complete is not None: + results = self._custom_shell_complete(ctx, self, incomplete) + + if results and isinstance(results[0], str): + from click.shell_completion import CompletionItem + + results = [CompletionItem(c) for c in results] + + return t.cast(t.List["CompletionItem"], results) + + return self.type.shell_complete(ctx, self, incomplete) + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: Show the default value for this option in its + help text. Values are not shown by default, unless + :attr:`Context.show_default` is ``True``. If this value is a + string, it shows that string in parentheses instead of the + actual value. This is particularly useful for dynamic options. + For single option boolean flags, the default remains hidden if + its value is ``False``. + :param show_envvar: Controls if an environment variable should be + shown on the help page. Normally, environment variables are not + shown. + :param prompt: If set to ``True`` or a non empty string then the + user will be prompted for input. If set to ``True`` the prompt + will be the option name capitalized. + :param confirmation_prompt: Prompt a second time to confirm the + value if it was prompted for. Can be set to a string instead of + ``True`` to customize the message. + :param prompt_required: If set to ``False``, the user will be + prompted for input only when the option was specified as a flag + without a value. + :param hide_input: If this is ``True`` then the input on the prompt + will be hidden from the user. This is useful for password input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. 
+ :param hidden: hide this option from help outputs. + + .. versionchanged:: 8.1.0 + Help text indentation is cleaned here instead of only in the + ``@option`` decorator. + + .. versionchanged:: 8.1.0 + The ``show_default`` parameter overrides + ``Context.show_default``. + + .. versionchanged:: 8.1.0 + The default of a single option boolean flag is not shown if the + default value is ``False``. + + .. versionchanged:: 8.0.1 + ``type`` is detected from ``flag_value`` if given. + """ + + param_type_name = "option" + + def __init__( + self, + param_decls: t.Optional[t.Sequence[str]] = None, + show_default: t.Union[bool, str, None] = None, + prompt: t.Union[bool, str] = False, + confirmation_prompt: t.Union[bool, str] = False, + prompt_required: bool = True, + hide_input: bool = False, + is_flag: t.Optional[bool] = None, + flag_value: t.Optional[t.Any] = None, + multiple: bool = False, + count: bool = False, + allow_from_autoenv: bool = True, + type: t.Optional[t.Union[types.ParamType, t.Any]] = None, + help: t.Optional[str] = None, + hidden: bool = False, + show_choices: bool = True, + show_envvar: bool = False, + **attrs: t.Any, + ) -> None: + if help: + help = inspect.cleandoc(help) + + default_is_missing = "default" not in attrs + super().__init__(param_decls, type=type, multiple=multiple, **attrs) + + if prompt is True: + if self.name is None: + raise TypeError("'name' is required with 'prompt=True'.") + + prompt_text: t.Optional[str] = self.name.replace("_", " ").capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.prompt_required = prompt_required + self.hide_input = hide_input + self.hidden = hidden + + # If prompt is enabled but not required, then the option can be + # used as a flag to indicate using prompt or flag_value. + self._flag_needs_value = self.prompt is not None and not self.prompt_required + + if is_flag is None: + if flag_value is not None: + # Implicitly a flag because flag_value was set. + is_flag = True + elif self._flag_needs_value: + # Not a flag, but when used as a flag it shows a prompt. + is_flag = False + else: + # Implicitly a flag because flag options were given. + is_flag = bool(self.secondary_opts) + elif is_flag is False and not self._flag_needs_value: + # Not a flag, and prompt is not enabled, can be used as a + # flag if flag_value is set. + self._flag_needs_value = flag_value is not None + + if is_flag and default_is_missing and not self.required: + self.default: t.Union[t.Any, t.Callable[[], t.Any]] = False + + if flag_value is None: + flag_value = not self.default + + if is_flag and type is None: + # Re-guess the type from the flag value instead of the + # default. 
+ self.type = types.convert_type(None, flag_value) + + self.is_flag: bool = is_flag + self.is_bool_flag = is_flag and isinstance(self.type, types.BoolParamType) + self.flag_value: t.Any = flag_value + + # Counting + self.count = count + if count: + if type is None: + self.type = types.IntRange(min=0) + if default_is_missing: + self.default = 0 + + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + self.show_choices = show_choices + self.show_envvar = show_envvar + + if __debug__: + if self.nargs == -1: + raise TypeError("nargs=-1 is not supported for options.") + + if self.prompt and self.is_flag and not self.is_bool_flag: + raise TypeError("'prompt' is not valid for non-boolean flag.") + + if not self.is_bool_flag and self.secondary_opts: + raise TypeError("Secondary flag is not valid for non-boolean flag.") + + if self.is_bool_flag and self.hide_input and self.prompt is not None: + raise TypeError( + "'prompt' with 'hide_input' is not valid for boolean flag." + ) + + if self.count: + if self.multiple: + raise TypeError("'count' is not valid with 'multiple'.") + + if self.is_flag: + raise TypeError("'count' is not valid with 'is_flag'.") + + if self.multiple and self.is_flag: + raise TypeError("'multiple' is not valid with 'is_flag', use 'count'.") + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + help=self.help, + prompt=self.prompt, + is_flag=self.is_flag, + flag_value=self.flag_value, + count=self.count, + hidden=self.hidden, + ) + return info_dict + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if decl.isidentifier(): + if name is not None: + raise TypeError(f"Name '{name}' defined twice") + name = decl + else: + split_char = ";" if decl[:1] == "/" else "/" + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + if first == second: + raise ValueError( + f"Boolean option {decl!r} cannot use the" + " same flag for true/false." + ) + else: + possible_names.append(split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: -len(x[0])) # group long options first + name = possible_names[0][1].replace("-", "_").lower() + if not name.isidentifier(): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError("Could not determine name for option") + + if not opts and not secondary_opts: + raise TypeError( + f"No options defined but a name was passed ({name})." + " Did you mean to declare an argument instead? Did" + f" you mean to pass '--{name}'?" 
+ ) + + return name, opts, secondary_opts + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + if self.multiple: + action = "append" + elif self.count: + action = "count" + else: + action = "store" + + if self.is_flag: + action = f"{action}_const" + + if self.is_bool_flag and self.secondary_opts: + parser.add_option( + obj=self, opts=self.opts, dest=self.name, action=action, const=True + ) + parser.add_option( + obj=self, + opts=self.secondary_opts, + dest=self.name, + action=action, + const=False, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + const=self.flag_value, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + nargs=self.nargs, + ) + + def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: + if self.hidden: + return None + + any_prefix_is_slash = False + + def _write_opts(opts: t.Sequence[str]) -> str: + nonlocal any_prefix_is_slash + + rv, any_slashes = join_options(opts) + + if any_slashes: + any_prefix_is_slash = True + + if not self.is_flag and not self.count: + rv += f" {self.make_metavar()}" + + return rv + + rv = [_write_opts(self.opts)] + + if self.secondary_opts: + rv.append(_write_opts(self.secondary_opts)) + + help = self.help or "" + extra = [] + + if self.show_envvar: + envvar = self.envvar + + if envvar is None: + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + + if envvar is not None: + var_str = ( + envvar + if isinstance(envvar, str) + else ", ".join(str(d) for d in envvar) + ) + extra.append(_("env var: {var}").format(var=var_str)) + + # Temporarily enable resilient parsing to avoid type casting + # failing for the default. Might be possible to extend this to + # help formatting in general. + resilient = ctx.resilient_parsing + ctx.resilient_parsing = True + + try: + default_value = self.get_default(ctx, call=False) + finally: + ctx.resilient_parsing = resilient + + show_default = False + show_default_is_str = False + + if self.show_default is not None: + if isinstance(self.show_default, str): + show_default_is_str = show_default = True + else: + show_default = self.show_default + elif ctx.show_default is not None: + show_default = ctx.show_default + + if show_default_is_str or (show_default and (default_value is not None)): + if show_default_is_str: + default_string = f"({self.show_default})" + elif isinstance(default_value, (list, tuple)): + default_string = ", ".join(str(d) for d in default_value) + elif inspect.isfunction(default_value): + default_string = _("(dynamic)") + elif self.is_bool_flag and self.secondary_opts: + # For boolean flags that have distinct True/False opts, + # use the opt without prefix instead of the value. 
+ default_string = split_opt( + (self.opts if self.default else self.secondary_opts)[0] + )[1] + elif self.is_bool_flag and not self.secondary_opts and not default_value: + default_string = "" + else: + default_string = str(default_value) + + if default_string: + extra.append(_("default: {default}").format(default=default_string)) + + if ( + isinstance(self.type, types._NumberRangeBase) + # skip count with default range type + and not (self.count and self.type.min == 0 and self.type.max is None) + ): + range_str = self.type._describe_range() + + if range_str: + extra.append(range_str) + + if self.required: + extra.append(_("required")) + + if extra: + extra_str = "; ".join(extra) + help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" + + return ("; " if any_prefix_is_slash else " / ").join(rv), help + + @t.overload + def get_default( + self, ctx: Context, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + # If we're a non boolean flag our default is more complex because + # we need to look at all flags in the same group to figure out + # if we're the default one in which case we return the flag + # value as default. + if self.is_flag and not self.is_bool_flag: + for param in ctx.command.params: + if param.name == self.name and param.default: + return param.flag_value # type: ignore + + return None + + return super().get_default(ctx, call=call) + + def prompt_for_value(self, ctx: Context) -> t.Any: + """This is an alternative flow that can be activated in the full + value processing if a value does not exist. It will prompt the + user until a valid value exists and then returns the processed + value as result. + """ + assert self.prompt is not None + + # Calculate the default before prompting anything to be stable. + default = self.get_default(ctx) + + # If this is a prompt for a flag we need to handle this + # differently. + if self.is_bool_flag: + return confirm(self.prompt, default) + + return prompt( + self.prompt, + default=default, + type=self.type, + hide_input=self.hide_input, + show_choices=self.show_choices, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x), + ) + + def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: + rv = super().resolve_envvar_value(ctx) + + if rv is not None: + return rv + + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: + rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) + + if rv is None: + return None + + value_depth = (self.nargs != 1) + bool(self.multiple) + + if value_depth > 0: + rv = self.type.split_envvar_value(rv) + + if self.multiple and self.nargs != 1: + rv = batch(rv, self.nargs) + + return rv + + def consume_value( + self, ctx: Context, opts: t.Mapping[str, "Parameter"] + ) -> t.Tuple[t.Any, ParameterSource]: + value, source = super().consume_value(ctx, opts) + + # The parser will emit a sentinel value if the option can be + # given as a flag without a value. This is different from None + # to distinguish from the flag not being given at all. 
+ if value is _flag_needs_value: + if self.prompt is not None and not ctx.resilient_parsing: + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + else: + value = self.flag_value + source = ParameterSource.COMMANDLINE + + elif ( + self.multiple + and value is not None + and any(v is _flag_needs_value for v in value) + ): + value = [self.flag_value if v is _flag_needs_value else v for v in value] + source = ParameterSource.COMMANDLINE + + # The value wasn't set, or used the param's default, prompt if + # prompting is enabled. + elif ( + source in {None, ParameterSource.DEFAULT} + and self.prompt is not None + and (self.required or self.prompt_required) + and not ctx.resilient_parsing + ): + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + + return value, source + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the parameter constructor. + """ + + param_type_name = "argument" + + def __init__( + self, + param_decls: t.Sequence[str], + required: t.Optional[bool] = None, + **attrs: t.Any, + ) -> None: + if required is None: + if attrs.get("default") is not None: + required = False + else: + required = attrs.get("nargs", 1) > 0 + + if "multiple" in attrs: + raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") + + super().__init__(param_decls, required=required, **attrs) + + if __debug__: + if self.default is not None and self.nargs == -1: + raise TypeError("'default' is not supported for nargs=-1.") + + @property + def human_readable_name(self) -> str: + if self.metavar is not None: + return self.metavar + return self.name.upper() # type: ignore + + def make_metavar(self) -> str: + if self.metavar is not None: + return self.metavar + var = self.type.get_metavar(self) + if not var: + var = self.name.upper() # type: ignore + if not self.required: + var = f"[{var}]" + if self.nargs != 1: + var += "..." + return var + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + if not decls: + if not expose_value: + return None, [], [] + raise TypeError("Could not determine name for argument") + if len(decls) == 1: + name = arg = decls[0] + name = name.replace("-", "_").lower() + else: + raise TypeError( + "Arguments take exactly one parameter declaration, got" + f" {len(decls)}." 
+ ) + return name, [arg], [] + + def get_usage_pieces(self, ctx: Context) -> t.List[str]: + return [self.make_metavar()] + + def get_error_hint(self, ctx: Context) -> str: + return f"'{self.make_metavar()}'" + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/myenv/lib/python3.9/site-packages/click/decorators.py b/myenv/lib/python3.9/site-packages/click/decorators.py new file mode 100644 index 0000000..28618dc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/decorators.py @@ -0,0 +1,497 @@ +import inspect +import types +import typing as t +from functools import update_wrapper +from gettext import gettext as _ + +from .core import Argument +from .core import Command +from .core import Context +from .core import Group +from .core import Option +from .core import Parameter +from .globals import get_current_context +from .utils import echo + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) +FC = t.TypeVar("FC", bound=t.Union[t.Callable[..., t.Any], Command]) + + +def pass_context(f: F) -> F: + """Marks a callback as wanting to receive the current context + object as first argument. + """ + + def new_func(*args, **kwargs): # type: ignore + return f(get_current_context(), *args, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + +def pass_obj(f: F) -> F: + """Similar to :func:`pass_context`, but only pass the object on the + context onwards (:attr:`Context.obj`). This is useful if that object + represents the state of a nested system. + """ + + def new_func(*args, **kwargs): # type: ignore + return f(get_current_context().obj, *args, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + +def make_pass_decorator( + object_type: t.Type, ensure: bool = False +) -> "t.Callable[[F], F]": + """Given an object type this creates a decorator that will work + similar to :func:`pass_obj` but instead of passing the object of the + current context, it will find the innermost context of type + :func:`object_type`. + + This generates a decorator that works roughly like this:: + + from functools import update_wrapper + + def decorator(f): + @pass_context + def new_func(ctx, *args, **kwargs): + obj = ctx.find_object(object_type) + return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator + + :param object_type: the type of the object to pass. + :param ensure: if set to `True`, a new object will be created and + remembered on the context if it's not there yet. + """ + + def decorator(f: F) -> F: + def new_func(*args, **kwargs): # type: ignore + ctx = get_current_context() + + if ensure: + obj = ctx.ensure_object(object_type) + else: + obj = ctx.find_object(object_type) + + if obj is None: + raise RuntimeError( + "Managed to invoke callback without a context" + f" object of type {object_type.__name__!r}" + " existing." + ) + + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + return decorator + + +def pass_meta_key( + key: str, *, doc_description: t.Optional[str] = None +) -> "t.Callable[[F], F]": + """Create a decorator that passes a key from + :attr:`click.Context.meta` as the first argument to the decorated + function. + + :param key: Key in ``Context.meta`` to pass. + :param doc_description: Description of the object being passed, + inserted into the decorator's docstring. Defaults to "the 'key' + key from Context.meta". + + .. 
versionadded:: 8.0 + """ + + def decorator(f: F) -> F: + def new_func(*args, **kwargs): # type: ignore + ctx = get_current_context() + obj = ctx.meta[key] + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + if doc_description is None: + doc_description = f"the {key!r} key from :attr:`click.Context.meta`" + + decorator.__doc__ = ( + f"Decorator that passes {doc_description} as the first argument" + " to the decorated function." + ) + return decorator + + +CmdType = t.TypeVar("CmdType", bound=Command) + + +@t.overload +def command( + __func: t.Callable[..., t.Any], +) -> Command: + ... + + +@t.overload +def command( + name: t.Optional[str] = None, + **attrs: t.Any, +) -> t.Callable[..., Command]: + ... + + +@t.overload +def command( + name: t.Optional[str] = None, + cls: t.Type[CmdType] = ..., + **attrs: t.Any, +) -> t.Callable[..., CmdType]: + ... + + +def command( + name: t.Union[str, t.Callable[..., t.Any], None] = None, + cls: t.Optional[t.Type[Command]] = None, + **attrs: t.Any, +) -> t.Union[Command, t.Callable[..., Command]]: + r"""Creates a new :class:`Command` and uses the decorated function as + callback. This will also automatically attach all decorated + :func:`option`\s and :func:`argument`\s as parameters to the command. + + The name of the command defaults to the name of the function with + underscores replaced by dashes. If you want to change that, you can + pass the intended name as the first argument. + + All keyword arguments are forwarded to the underlying command class. + For the ``params`` argument, any decorated params are appended to + the end of the list. + + Once decorated the function turns into a :class:`Command` instance + that can be invoked as a command line utility or be attached to a + command :class:`Group`. + + :param name: the name of the command. This defaults to the function + name with underscores replaced by dashes. + :param cls: the command class to instantiate. This defaults to + :class:`Command`. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.1 + The ``params`` argument can be used. Decorated params are + appended to the end of the list. + """ + + func: t.Optional[t.Callable[..., t.Any]] = None + + if callable(name): + func = name + name = None + assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class." + assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments." + + if cls is None: + cls = Command + + def decorator(f: t.Callable[..., t.Any]) -> Command: + if isinstance(f, Command): + raise TypeError("Attempted to convert a callback into a command twice.") + + attr_params = attrs.pop("params", None) + params = attr_params if attr_params is not None else [] + + try: + decorator_params = f.__click_params__ # type: ignore + except AttributeError: + pass + else: + del f.__click_params__ # type: ignore + params.extend(reversed(decorator_params)) + + if attrs.get("help") is None: + attrs["help"] = f.__doc__ + + cmd = cls( # type: ignore[misc] + name=name or f.__name__.lower().replace("_", "-"), # type: ignore[arg-type] + callback=f, + params=params, + **attrs, + ) + cmd.__doc__ = f.__doc__ + return cmd + + if func is not None: + return decorator(func) + + return decorator + + +@t.overload +def group( + __func: t.Callable[..., t.Any], +) -> Group: + ... + + +@t.overload +def group( + name: t.Optional[str] = None, + **attrs: t.Any, +) -> t.Callable[[F], Group]: + ... 
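
The `command` and `group` decorators being added in this decorators.py are the usual entry points for building a CLI with the vendored click package (the `option` decorator used below is defined a little further down in this same file). A minimal usage sketch, assuming the package is importable as `click`; `cli`, `hello`, and `--name` are made-up names for illustration only, not part of the vendored sources::

    import click

    @click.group()
    def cli():
        """Toy command group built with the decorators above."""

    @cli.command()
    @click.option("--name", default="world", help="Who to greet.")
    def hello(name):
        """Print a small greeting."""
        click.echo(f"Hello, {name}!")

    if __name__ == "__main__":
        cli()
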
+ + +def group( + name: t.Union[str, t.Callable[..., t.Any], None] = None, **attrs: t.Any +) -> t.Union[Group, t.Callable[[F], Group]]: + """Creates a new :class:`Group` with a function as callback. This + works otherwise the same as :func:`command` just that the `cls` + parameter is set to :class:`Group`. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + """ + if attrs.get("cls") is None: + attrs["cls"] = Group + + if callable(name): + grp: t.Callable[[F], Group] = t.cast(Group, command(**attrs)) + return grp(name) + + return t.cast(Group, command(name, **attrs)) + + +def _param_memo(f: FC, param: Parameter) -> None: + if isinstance(f, Command): + f.params.append(param) + else: + if not hasattr(f, "__click_params__"): + f.__click_params__ = [] # type: ignore + + f.__click_params__.append(param) # type: ignore + + +def argument(*param_decls: str, **attrs: t.Any) -> t.Callable[[FC], FC]: + """Attaches an argument to the command. All positional arguments are + passed as parameter declarations to :class:`Argument`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Argument` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the argument class to instantiate. This defaults to + :class:`Argument`. + """ + + def decorator(f: FC) -> FC: + ArgumentClass = attrs.pop("cls", None) or Argument + _param_memo(f, ArgumentClass(param_decls, **attrs)) + return f + + return decorator + + +def option(*param_decls: str, **attrs: t.Any) -> t.Callable[[FC], FC]: + """Attaches an option to the command. All positional arguments are + passed as parameter declarations to :class:`Option`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Option` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the option class to instantiate. This defaults to + :class:`Option`. + """ + + def decorator(f: FC) -> FC: + # Issue 926, copy attrs, so pre-defined options can re-use the same cls= + option_attrs = attrs.copy() + OptionClass = option_attrs.pop("cls", None) or Option + _param_memo(f, OptionClass(param_decls, **option_attrs)) + return f + + return decorator + + +def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--yes`` option which shows a prompt before continuing if + not passed. If the prompt is declined, the program will exit. + + :param param_decls: One or more option names. Defaults to the single + value ``"--yes"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value: + ctx.abort() + + if not param_decls: + param_decls = ("--yes",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("callback", callback) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("prompt", "Do you want to continue?") + kwargs.setdefault("help", "Confirm the action without prompting.") + return option(*param_decls, **kwargs) + + +def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--password`` option which prompts for a password, hiding + input and asking to enter the value again for confirmation. + + :param param_decls: One or more option names. Defaults to the single + value ``"--password"``. + :param kwargs: Extra arguments are passed to :func:`option`. 
+ """ + if not param_decls: + param_decls = ("--password",) + + kwargs.setdefault("prompt", True) + kwargs.setdefault("confirmation_prompt", True) + kwargs.setdefault("hide_input", True) + return option(*param_decls, **kwargs) + + +def version_option( + version: t.Optional[str] = None, + *param_decls: str, + package_name: t.Optional[str] = None, + prog_name: t.Optional[str] = None, + message: t.Optional[str] = None, + **kwargs: t.Any, +) -> t.Callable[[FC], FC]: + """Add a ``--version`` option which immediately prints the version + number and exits the program. + + If ``version`` is not provided, Click will try to detect it using + :func:`importlib.metadata.version` to get the version for the + ``package_name``. On Python < 3.8, the ``importlib_metadata`` + backport must be installed. + + If ``package_name`` is not provided, Click will try to detect it by + inspecting the stack frames. This will be used to detect the + version, so it must match the name of the installed package. + + :param version: The version number to show. If not provided, Click + will try to detect it. + :param param_decls: One or more option names. Defaults to the single + value ``"--version"``. + :param package_name: The package name to detect the version from. If + not provided, Click will try to detect it. + :param prog_name: The name of the CLI to show in the message. If not + provided, it will be detected from the command. + :param message: The message to show. The values ``%(prog)s``, + ``%(package)s``, and ``%(version)s`` are available. Defaults to + ``"%(prog)s, version %(version)s"``. + :param kwargs: Extra arguments are passed to :func:`option`. + :raise RuntimeError: ``version`` could not be detected. + + .. versionchanged:: 8.0 + Add the ``package_name`` parameter, and the ``%(package)s`` + value for messages. + + .. versionchanged:: 8.0 + Use :mod:`importlib.metadata` instead of ``pkg_resources``. The + version is detected based on the package name, not the entry + point name. The Python package name must match the installed + package name, or be passed with ``package_name=``. + """ + if message is None: + message = _("%(prog)s, version %(version)s") + + if version is None and package_name is None: + frame = inspect.currentframe() + f_back = frame.f_back if frame is not None else None + f_globals = f_back.f_globals if f_back is not None else None + # break reference cycle + # https://docs.python.org/3/library/inspect.html#the-interpreter-stack + del frame + + if f_globals is not None: + package_name = f_globals.get("__name__") + + if package_name == "__main__": + package_name = f_globals.get("__package__") + + if package_name: + package_name = package_name.partition(".")[0] + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value or ctx.resilient_parsing: + return + + nonlocal prog_name + nonlocal version + + if prog_name is None: + prog_name = ctx.find_root().info_name + + if version is None and package_name is not None: + metadata: t.Optional[types.ModuleType] + + try: + from importlib import metadata # type: ignore + except ImportError: + # Python < 3.8 + import importlib_metadata as metadata # type: ignore + + try: + version = metadata.version(package_name) # type: ignore + except metadata.PackageNotFoundError: # type: ignore + raise RuntimeError( + f"{package_name!r} is not installed. Try passing" + " 'package_name' instead." + ) from None + + if version is None: + raise RuntimeError( + f"Could not determine the version for {package_name!r} automatically." 
+ ) + + echo( + t.cast(str, message) + % {"prog": prog_name, "package": package_name, "version": version}, + color=ctx.color, + ) + ctx.exit() + + if not param_decls: + param_decls = ("--version",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show the version and exit.")) + kwargs["callback"] = callback + return option(*param_decls, **kwargs) + + +def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--help`` option which immediately prints the help page + and exits the program. + + This is usually unnecessary, as the ``--help`` option is added to + each command automatically unless ``add_help_option=False`` is + passed. + + :param param_decls: One or more option names. Defaults to the single + value ``"--help"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value or ctx.resilient_parsing: + return + + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + if not param_decls: + param_decls = ("--help",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show this message and exit.")) + kwargs["callback"] = callback + return option(*param_decls, **kwargs) diff --git a/myenv/lib/python3.9/site-packages/click/exceptions.py b/myenv/lib/python3.9/site-packages/click/exceptions.py new file mode 100644 index 0000000..9e20b3e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/exceptions.py @@ -0,0 +1,287 @@ +import os +import typing as t +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import get_text_stderr +from .utils import echo + +if t.TYPE_CHECKING: + from .core import Context + from .core import Parameter + + +def _join_param_hints( + param_hint: t.Optional[t.Union[t.Sequence[str], str]] +) -> t.Optional[str]: + if param_hint is not None and not isinstance(param_hint, str): + return " / ".join(repr(x) for x in param_hint) + + return param_hint + + +class ClickException(Exception): + """An exception that Click can handle and show to the user.""" + + #: The exit code for this exception. + exit_code = 1 + + def __init__(self, message: str) -> None: + super().__init__(message) + self.message = message + + def format_message(self) -> str: + return self.message + + def __str__(self) -> str: + return self.message + + def show(self, file: t.Optional[t.IO] = None) -> None: + if file is None: + file = get_text_stderr() + + echo(_("Error: {message}").format(message=self.format_message()), file=file) + + +class UsageError(ClickException): + """An internal exception that signals a usage error. This typically + aborts any further handling. + + :param message: the error message to display. + :param ctx: optionally the context that caused this error. Click will + fill in the context automatically in some situations. 
+ """ + + exit_code = 2 + + def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None: + super().__init__(message) + self.ctx = ctx + self.cmd = self.ctx.command if self.ctx else None + + def show(self, file: t.Optional[t.IO] = None) -> None: + if file is None: + file = get_text_stderr() + color = None + hint = "" + if ( + self.ctx is not None + and self.ctx.command.get_help_option(self.ctx) is not None + ): + hint = _("Try '{command} {option}' for help.").format( + command=self.ctx.command_path, option=self.ctx.help_option_names[0] + ) + hint = f"{hint}\n" + if self.ctx is not None: + color = self.ctx.color + echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color) + echo( + _("Error: {message}").format(message=self.format_message()), + file=file, + color=color, + ) + + +class BadParameter(UsageError): + """An exception that formats out a standardized error message for a + bad parameter. This is useful when thrown from a callback or type as + Click will attach contextual information to it (for instance, which + parameter it is). + + .. versionadded:: 2.0 + + :param param: the parameter object that caused this error. This can + be left out, and Click will attach this info itself + if possible. + :param param_hint: a string that shows up as parameter name. This + can be used as alternative to `param` in cases + where custom validation should happen. If it is + a string it's used as such, if it's a list then + each item is quoted and separated. + """ + + def __init__( + self, + message: str, + ctx: t.Optional["Context"] = None, + param: t.Optional["Parameter"] = None, + param_hint: t.Optional[str] = None, + ) -> None: + super().__init__(message, ctx) + self.param = param + self.param_hint = param_hint + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + return _("Invalid value: {message}").format(message=self.message) + + return _("Invalid value for {param_hint}: {message}").format( + param_hint=_join_param_hints(param_hint), message=self.message + ) + + +class MissingParameter(BadParameter): + """Raised if click required an option or argument but it was not + provided when invoking the script. + + .. versionadded:: 4.0 + + :param param_type: a string that indicates the type of the parameter. + The default is to inherit the parameter type from + the given `param`. Valid values are ``'parameter'``, + ``'option'`` or ``'argument'``. + """ + + def __init__( + self, + message: t.Optional[str] = None, + ctx: t.Optional["Context"] = None, + param: t.Optional["Parameter"] = None, + param_hint: t.Optional[str] = None, + param_type: t.Optional[str] = None, + ) -> None: + super().__init__(message or "", ctx, param, param_hint) + self.param_type = param_type + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint: t.Optional[str] = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + param_hint = None + + param_hint = _join_param_hints(param_hint) + param_hint = f" {param_hint}" if param_hint else "" + + param_type = self.param_type + if param_type is None and self.param is not None: + param_type = self.param.param_type_name + + msg = self.message + if self.param is not None: + msg_extra = self.param.type.get_missing_message(self.param) + if msg_extra: + if msg: + msg += f". 
{msg_extra}" + else: + msg = msg_extra + + msg = f" {msg}" if msg else "" + + # Translate param_type for known types. + if param_type == "argument": + missing = _("Missing argument") + elif param_type == "option": + missing = _("Missing option") + elif param_type == "parameter": + missing = _("Missing parameter") + else: + missing = _("Missing {param_type}").format(param_type=param_type) + + return f"{missing}{param_hint}.{msg}" + + def __str__(self) -> str: + if not self.message: + param_name = self.param.name if self.param else None + return _("Missing parameter: {param_name}").format(param_name=param_name) + else: + return self.message + + +class NoSuchOption(UsageError): + """Raised if click attempted to handle an option that does not + exist. + + .. versionadded:: 4.0 + """ + + def __init__( + self, + option_name: str, + message: t.Optional[str] = None, + possibilities: t.Optional[t.Sequence[str]] = None, + ctx: t.Optional["Context"] = None, + ) -> None: + if message is None: + message = _("No such option: {name}").format(name=option_name) + + super().__init__(message, ctx) + self.option_name = option_name + self.possibilities = possibilities + + def format_message(self) -> str: + if not self.possibilities: + return self.message + + possibility_str = ", ".join(sorted(self.possibilities)) + suggest = ngettext( + "Did you mean {possibility}?", + "(Possible options: {possibilities})", + len(self.possibilities), + ).format(possibility=possibility_str, possibilities=possibility_str) + return f"{self.message} {suggest}" + + +class BadOptionUsage(UsageError): + """Raised if an option is generally supplied but the use of the option + was incorrect. This is for instance raised if the number of arguments + for an option is not correct. + + .. versionadded:: 4.0 + + :param option_name: the name of the option being used incorrectly. + """ + + def __init__( + self, option_name: str, message: str, ctx: t.Optional["Context"] = None + ) -> None: + super().__init__(message, ctx) + self.option_name = option_name + + +class BadArgumentUsage(UsageError): + """Raised if an argument is generally supplied but the use of the argument + was incorrect. This is for instance raised if the number of values + for an argument is not correct. + + .. versionadded:: 6.0 + """ + + +class FileError(ClickException): + """Raised if a file cannot be opened.""" + + def __init__(self, filename: str, hint: t.Optional[str] = None) -> None: + if hint is None: + hint = _("unknown error") + + super().__init__(hint) + self.ui_filename = os.fsdecode(filename) + self.filename = filename + + def format_message(self) -> str: + return _("Could not open file {filename!r}: {message}").format( + filename=self.ui_filename, message=self.message + ) + + +class Abort(RuntimeError): + """An internal signalling exception that signals Click to abort.""" + + +class Exit(RuntimeError): + """An exception that indicates that the application should exit with some + status code. + + :param code: the status code to exit with. 
+ """ + + __slots__ = ("exit_code",) + + def __init__(self, code: int = 0) -> None: + self.exit_code = code diff --git a/myenv/lib/python3.9/site-packages/click/formatting.py b/myenv/lib/python3.9/site-packages/click/formatting.py new file mode 100644 index 0000000..ddd2a2f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/formatting.py @@ -0,0 +1,301 @@ +import typing as t +from contextlib import contextmanager +from gettext import gettext as _ + +from ._compat import term_len +from .parser import split_opt + +# Can force a width. This is used by the test system +FORCED_WIDTH: t.Optional[int] = None + + +def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]: + widths: t.Dict[int, int] = {} + + for row in rows: + for idx, col in enumerate(row): + widths[idx] = max(widths.get(idx, 0), term_len(col)) + + return tuple(y for x, y in sorted(widths.items())) + + +def iter_rows( + rows: t.Iterable[t.Tuple[str, str]], col_count: int +) -> t.Iterator[t.Tuple[str, ...]]: + for row in rows: + yield row + ("",) * (col_count - len(row)) + + +def wrap_text( + text: str, + width: int = 78, + initial_indent: str = "", + subsequent_indent: str = "", + preserve_paragraphs: bool = False, +) -> str: + """A helper function that intelligently wraps text. By default, it + assumes that it operates on a single paragraph of text but if the + `preserve_paragraphs` parameter is provided it will intelligently + handle paragraphs (defined by two empty lines). + + If paragraphs are handled, a paragraph can be prefixed with an empty + line containing the ``\\b`` character (``\\x08``) to indicate that + no rewrapping should happen in that block. + + :param text: the text that should be rewrapped. + :param width: the maximum width for the text. + :param initial_indent: the initial indent that should be placed on the + first line as a string. + :param subsequent_indent: the indent string that should be placed on + each consecutive line. + :param preserve_paragraphs: if this flag is set then the wrapping will + intelligently handle paragraphs. + """ + from ._textwrap import TextWrapper + + text = text.expandtabs() + wrapper = TextWrapper( + width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False, + ) + if not preserve_paragraphs: + return wrapper.fill(text) + + p: t.List[t.Tuple[int, bool, str]] = [] + buf: t.List[str] = [] + indent = None + + def _flush_par() -> None: + if not buf: + return + if buf[0].strip() == "\b": + p.append((indent or 0, True, "\n".join(buf[1:]))) + else: + p.append((indent or 0, False, " ".join(buf))) + del buf[:] + + for line in text.splitlines(): + if not line: + _flush_par() + indent = None + else: + if indent is None: + orig_len = term_len(line) + line = line.lstrip() + indent = orig_len - term_len(line) + buf.append(line) + _flush_par() + + rv = [] + for indent, raw, text in p: + with wrapper.extra_indent(" " * indent): + if raw: + rv.append(wrapper.indent_only(text)) + else: + rv.append(wrapper.fill(text)) + + return "\n\n".join(rv) + + +class HelpFormatter: + """This class helps with formatting text-based help pages. It's + usually just needed for very special internal cases, but it's also + exposed so that developers can write their own fancy outputs. + + At present, it always writes into memory. + + :param indent_increment: the additional increment for each level. + :param width: the width for the text. This defaults to the terminal + width clamped to a maximum of 78. 
+ """ + + def __init__( + self, + indent_increment: int = 2, + width: t.Optional[int] = None, + max_width: t.Optional[int] = None, + ) -> None: + import shutil + + self.indent_increment = indent_increment + if max_width is None: + max_width = 80 + if width is None: + width = FORCED_WIDTH + if width is None: + width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50) + self.width = width + self.current_indent = 0 + self.buffer: t.List[str] = [] + + def write(self, string: str) -> None: + """Writes a unicode string into the internal buffer.""" + self.buffer.append(string) + + def indent(self) -> None: + """Increases the indentation.""" + self.current_indent += self.indent_increment + + def dedent(self) -> None: + """Decreases the indentation.""" + self.current_indent -= self.indent_increment + + def write_usage( + self, prog: str, args: str = "", prefix: t.Optional[str] = None + ) -> None: + """Writes a usage line into the buffer. + + :param prog: the program name. + :param args: whitespace separated list of arguments. + :param prefix: The prefix for the first line. Defaults to + ``"Usage: "``. + """ + if prefix is None: + prefix = f"{_('Usage:')} " + + usage_prefix = f"{prefix:>{self.current_indent}}{prog} " + text_width = self.width - self.current_indent + + if text_width >= (term_len(usage_prefix) + 20): + # The arguments will fit to the right of the prefix. + indent = " " * term_len(usage_prefix) + self.write( + wrap_text( + args, + text_width, + initial_indent=usage_prefix, + subsequent_indent=indent, + ) + ) + else: + # The prefix is too long, put the arguments on the next line. + self.write(usage_prefix) + self.write("\n") + indent = " " * (max(self.current_indent, term_len(prefix)) + 4) + self.write( + wrap_text( + args, text_width, initial_indent=indent, subsequent_indent=indent + ) + ) + + self.write("\n") + + def write_heading(self, heading: str) -> None: + """Writes a heading into the buffer.""" + self.write(f"{'':>{self.current_indent}}{heading}:\n") + + def write_paragraph(self) -> None: + """Writes a paragraph into the buffer.""" + if self.buffer: + self.write("\n") + + def write_text(self, text: str) -> None: + """Writes re-indented text into the buffer. This rewraps and + preserves paragraphs. + """ + indent = " " * self.current_indent + self.write( + wrap_text( + text, + self.width, + initial_indent=indent, + subsequent_indent=indent, + preserve_paragraphs=True, + ) + ) + self.write("\n") + + def write_dl( + self, + rows: t.Sequence[t.Tuple[str, str]], + col_max: int = 30, + col_spacing: int = 2, + ) -> None: + """Writes a definition list into the buffer. This is how options + and commands are usually formatted. + + :param rows: a list of two item tuples for the terms and values. + :param col_max: the maximum width of the first column. + :param col_spacing: the number of spaces between the first and + second column. 
+ """ + rows = list(rows) + widths = measure_table(rows) + if len(widths) != 2: + raise TypeError("Expected two columns for definition list") + + first_col = min(widths[0], col_max) + col_spacing + + for first, second in iter_rows(rows, len(widths)): + self.write(f"{'':>{self.current_indent}}{first}") + if not second: + self.write("\n") + continue + if term_len(first) <= first_col - col_spacing: + self.write(" " * (first_col - term_len(first))) + else: + self.write("\n") + self.write(" " * (first_col + self.current_indent)) + + text_width = max(self.width - first_col - 2, 10) + wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) + lines = wrapped_text.splitlines() + + if lines: + self.write(f"{lines[0]}\n") + + for line in lines[1:]: + self.write(f"{'':>{first_col + self.current_indent}}{line}\n") + else: + self.write("\n") + + @contextmanager + def section(self, name: str) -> t.Iterator[None]: + """Helpful context manager that writes a paragraph, a heading, + and the indents. + + :param name: the section name that is written as heading. + """ + self.write_paragraph() + self.write_heading(name) + self.indent() + try: + yield + finally: + self.dedent() + + @contextmanager + def indentation(self) -> t.Iterator[None]: + """A context manager that increases the indentation.""" + self.indent() + try: + yield + finally: + self.dedent() + + def getvalue(self) -> str: + """Returns the buffer contents.""" + return "".join(self.buffer) + + +def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]: + """Given a list of option strings this joins them in the most appropriate + way and returns them in the form ``(formatted_string, + any_prefix_is_slash)`` where the second item in the tuple is a flag that + indicates if any of the option prefixes was a slash. + """ + rv = [] + any_prefix_is_slash = False + + for opt in options: + prefix = split_opt(opt)[0] + + if prefix == "/": + any_prefix_is_slash = True + + rv.append((len(prefix), opt)) + + rv.sort(key=lambda x: x[0]) + return ", ".join(x[1] for x in rv), any_prefix_is_slash diff --git a/myenv/lib/python3.9/site-packages/click/globals.py b/myenv/lib/python3.9/site-packages/click/globals.py new file mode 100644 index 0000000..480058f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/globals.py @@ -0,0 +1,68 @@ +import typing as t +from threading import local + +if t.TYPE_CHECKING: + import typing_extensions as te + from .core import Context + +_local = local() + + +@t.overload +def get_current_context(silent: "te.Literal[False]" = False) -> "Context": + ... + + +@t.overload +def get_current_context(silent: bool = ...) -> t.Optional["Context"]: + ... + + +def get_current_context(silent: bool = False) -> t.Optional["Context"]: + """Returns the current click context. This can be used as a way to + access the current context object from anywhere. This is a more implicit + alternative to the :func:`pass_context` decorator. This function is + primarily useful for helpers such as :func:`echo` which might be + interested in changing its behavior based on the current context. + + To push the current context, :meth:`Context.scope` can be used. + + .. versionadded:: 5.0 + + :param silent: if set to `True` the return value is `None` if no context + is available. The default behavior is to raise a + :exc:`RuntimeError`. 
+ """ + try: + return t.cast("Context", _local.stack[-1]) + except (AttributeError, IndexError) as e: + if not silent: + raise RuntimeError("There is no active click context.") from e + + return None + + +def push_context(ctx: "Context") -> None: + """Pushes a new context to the current stack.""" + _local.__dict__.setdefault("stack", []).append(ctx) + + +def pop_context() -> None: + """Removes the top level from the stack.""" + _local.stack.pop() + + +def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]: + """Internal helper to get the default value of the color flag. If a + value is passed it's returned unchanged, otherwise it's looked up from + the current context. + """ + if color is not None: + return color + + ctx = get_current_context(silent=True) + + if ctx is not None: + return ctx.color + + return None diff --git a/myenv/lib/python3.9/site-packages/click/parser.py b/myenv/lib/python3.9/site-packages/click/parser.py new file mode 100644 index 0000000..2d5a2ed --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/parser.py @@ -0,0 +1,529 @@ +""" +This module started out as largely a copy paste from the stdlib's +optparse module with the features removed that we do not need from +optparse because we implement them in Click on a higher level (for +instance type handling, help formatting and a lot more). + +The plan is to remove more and more from here over time. + +The reason this is a different module and not optparse from the stdlib +is that there are differences in 2.x and 3.x about the error messages +generated and optparse in the stdlib uses gettext for no good reason +and might cause us issues. + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright 2001-2006 Gregory P. Ward. All rights reserved. +Copyright 2002-2006 Python Software Foundation. All rights reserved. +""" +# This code uses parts of optparse written by Gregory P. Ward and +# maintained by the Python Software Foundation. +# Copyright 2001-2006 Gregory P. Ward +# Copyright 2002-2006 Python Software Foundation +import typing as t +from collections import deque +from gettext import gettext as _ +from gettext import ngettext + +from .exceptions import BadArgumentUsage +from .exceptions import BadOptionUsage +from .exceptions import NoSuchOption +from .exceptions import UsageError + +if t.TYPE_CHECKING: + import typing_extensions as te + from .core import Argument as CoreArgument + from .core import Context + from .core import Option as CoreOption + from .core import Parameter as CoreParameter + +V = t.TypeVar("V") + +# Sentinel value that indicates an option was passed as a flag without a +# value but is not a flag option. Option.consume_value uses this to +# prompt or use the flag_value. +_flag_needs_value = object() + + +def _unpack_args( + args: t.Sequence[str], nargs_spec: t.Sequence[int] +) -> t.Tuple[t.Sequence[t.Union[str, t.Sequence[t.Optional[str]], None]], t.List[str]]: + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with `None`. 
+ """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv: t.List[t.Union[str, t.Tuple[t.Optional[str], ...], None]] = [] + spos: t.Optional[int] = None + + def _fetch(c: "te.Deque[V]") -> t.Optional[V]: + try: + if spos is None: + return c.popleft() + else: + return c.pop() + except IndexError: + return None + + while nargs_spec: + nargs = _fetch(nargs_spec) + + if nargs is None: + continue + + if nargs == 1: + rv.append(_fetch(args)) + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError("Cannot have two nargs < 0") + + spos = len(rv) + rv.append(None) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. + if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1 :] = reversed(rv[spos + 1 :]) + + return tuple(rv), list(args) + + +def split_opt(opt: str) -> t.Tuple[str, str]: + first = opt[:1] + if first.isalnum(): + return "", opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def normalize_opt(opt: str, ctx: t.Optional["Context"]) -> str: + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = split_opt(opt) + return f"{prefix}{ctx.token_normalize_func(opt)}" + + +def split_arg_string(string: str) -> t.List[str]: + """Split an argument string as with :func:`shlex.split`, but don't + fail if the string is incomplete. Ignores a missing closing quote or + incomplete escape sequence and uses the partial token as-is. + + .. code-block:: python + + split_arg_string("example 'my file") + ["example", "my file"] + + split_arg_string("example my\\") + ["example", "my"] + + :param string: String to split. + """ + import shlex + + lex = shlex.shlex(string, posix=True) + lex.whitespace_split = True + lex.commenters = "" + out = [] + + try: + for token in lex: + out.append(token) + except ValueError: + # Raised when end-of-string is reached in an invalid state. Use + # the partial token as-is. The quote or escape character is in + # lex.state, not lex.token. 
+ out.append(lex.token) + + return out + + +class Option: + def __init__( + self, + obj: "CoreOption", + opts: t.Sequence[str], + dest: t.Optional[str], + action: t.Optional[str] = None, + nargs: int = 1, + const: t.Optional[t.Any] = None, + ): + self._short_opts = [] + self._long_opts = [] + self.prefixes = set() + + for opt in opts: + prefix, value = split_opt(opt) + if not prefix: + raise ValueError(f"Invalid start character for option ({opt})") + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = "store" + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self) -> bool: + return self.action in ("store", "append") + + def process(self, value: str, state: "ParsingState") -> None: + if self.action == "store": + state.opts[self.dest] = value # type: ignore + elif self.action == "store_const": + state.opts[self.dest] = self.const # type: ignore + elif self.action == "append": + state.opts.setdefault(self.dest, []).append(value) # type: ignore + elif self.action == "append_const": + state.opts.setdefault(self.dest, []).append(self.const) # type: ignore + elif self.action == "count": + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore + else: + raise ValueError(f"unknown action '{self.action}'") + state.order.append(self.obj) + + +class Argument: + def __init__(self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process( + self, + value: t.Union[t.Optional[str], t.Sequence[t.Optional[str]]], + state: "ParsingState", + ) -> None: + if self.nargs > 1: + assert value is not None + holes = sum(1 for x in value if x is None) + if holes == len(value): + value = None + elif holes != 0: + raise BadArgumentUsage( + _("Argument {name!r} takes {nargs} values.").format( + name=self.dest, nargs=self.nargs + ) + ) + + if self.nargs == -1 and self.obj.envvar is not None and value == (): + # Replace empty tuple with None so that a value from the + # environment may be tried. + value = None + + state.opts[self.dest] = value # type: ignore + state.order.append(self.obj) + + +class ParsingState: + def __init__(self, rargs: t.List[str]) -> None: + self.opts: t.Dict[str, t.Any] = {} + self.largs: t.List[str] = [] + self.rargs = rargs + self.order: t.List["CoreParameter"] = [] + + +class OptionParser: + """The option parser is an internal class that is ultimately used to + parse options and arguments. It's modelled after optparse and brings + a similar but vastly simplified API. It should generally not be used + directly as the high level Click classes wrap it for you. + + It's not nearly as extensible as optparse or argparse as it does not + implement features that are implemented on a higher level (such as + types or defaults). + + :param ctx: optionally the :class:`~click.Context` where this parser + should go with. + """ + + def __init__(self, ctx: t.Optional["Context"] = None) -> None: + #: The :class:`~click.Context` for this parser. This might be + #: `None` for some advanced use cases. + self.ctx = ctx + #: This controls how the parser deals with interspersed arguments. + #: If this is set to `False`, the parser will stop on the first + #: non-option. Click uses this to implement nested subcommands + #: safely. 
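+        #: For example, with ``--a cmd --b`` a parser that stops at the
+        #: first non-option leaves ``--b`` for the ``cmd`` subcommand to
+        #: parse instead of consuming it itself.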
+ self.allow_interspersed_args = True + #: This tells the parser how to deal with unknown options. By + #: default it will error out (which is sensible), but there is a + #: second mode where it will ignore it and continue processing + #: after shifting all the unknown options into the resulting args. + self.ignore_unknown_options = False + + if ctx is not None: + self.allow_interspersed_args = ctx.allow_interspersed_args + self.ignore_unknown_options = ctx.ignore_unknown_options + + self._short_opt: t.Dict[str, Option] = {} + self._long_opt: t.Dict[str, Option] = {} + self._opt_prefixes = {"-", "--"} + self._args: t.List[Argument] = [] + + def add_option( + self, + obj: "CoreOption", + opts: t.Sequence[str], + dest: t.Optional[str], + action: t.Optional[str] = None, + nargs: int = 1, + const: t.Optional[t.Any] = None, + ) -> None: + """Adds a new option named `dest` to the parser. The destination + is not inferred (unlike with optparse) and needs to be explicitly + provided. Action can be any of ``store``, ``store_const``, + ``append``, ``append_const`` or ``count``. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + opts = [normalize_opt(opt, self.ctx) for opt in opts] + option = Option(obj, opts, dest, action=action, nargs=nargs, const=const) + self._opt_prefixes.update(option.prefixes) + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + def add_argument( + self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1 + ) -> None: + """Adds a positional argument named `dest` to the parser. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + self._args.append(Argument(obj, dest=dest, nargs=nargs)) + + def parse_args( + self, args: t.List[str] + ) -> t.Tuple[t.Dict[str, t.Any], t.List[str], t.List["CoreParameter"]]: + """Parses positional arguments and returns ``(values, args, order)`` + for the parsed options and arguments as well as the leftover + arguments if there are any. The order is a list of objects as they + appear on the command line. If arguments appear multiple times they + will be memorized multiple times as well. + """ + state = ParsingState(args) + try: + self._process_args_for_options(state) + self._process_args_for_args(state) + except UsageError: + if self.ctx is None or not self.ctx.resilient_parsing: + raise + return state.opts, state.largs, state.order + + def _process_args_for_args(self, state: ParsingState) -> None: + pargs, args = _unpack_args( + state.largs + state.rargs, [x.nargs for x in self._args] + ) + + for idx, arg in enumerate(self._args): + arg.process(pargs[idx], state) + + state.largs = args + state.rargs = [] + + def _process_args_for_options(self, state: ParsingState) -> None: + while state.rargs: + arg = state.rargs.pop(0) + arglen = len(arg) + # Double dashes always handled explicitly regardless of what + # prefixes are valid. + if arg == "--": + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). 
+ # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt( + self, opt: str, explicit_value: t.Optional[str], state: ParsingState + ) -> None: + if opt not in self._long_opt: + from difflib import get_close_matches + + possibilities = get_close_matches(opt, self._long_opt) + raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + value = self._get_value_from_state(opt, option, state) + + elif explicit_value is not None: + raise BadOptionUsage( + opt, _("Option {name!r} does not take a value.").format(name=opt) + ) + + else: + value = None + + option.process(value, state) + + def _match_short_opt(self, arg: str, state: ParsingState) -> None: + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = normalize_opt(f"{prefix}{ch}", self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt, ctx=self.ctx) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. + if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + value = self._get_value_from_state(opt, option, state) + + else: + value = None + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we re-combinate the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(f"{prefix}{''.join(unknown_options)}") + + def _get_value_from_state( + self, option_name: str, option: Option, state: ParsingState + ) -> t.Any: + nargs = option.nargs + + if len(state.rargs) < nargs: + if option.obj._flag_needs_value: + # Option allows omitting the value. + value = _flag_needs_value + else: + raise BadOptionUsage( + option_name, + ngettext( + "Option {name!r} requires an argument.", + "Option {name!r} requires {nargs} arguments.", + nargs, + ).format(name=option_name, nargs=nargs), + ) + elif nargs == 1: + next_rarg = state.rargs[0] + + if ( + option.obj._flag_needs_value + and isinstance(next_rarg, str) + and next_rarg[:1] in self._opt_prefixes + and len(next_rarg) > 1 + ): + # The next arg looks like the start of an option, don't + # use it as the value if omitting the value is allowed. 
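+                # e.g. for ``--name --other`` where ``--name`` may omit
+                # its value, ``--other`` stays in rargs and is parsed as
+                # an option instead of being consumed as the value.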
+ value = _flag_needs_value + else: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + return value + + def _process_opts(self, arg: str, state: ParsingState) -> None: + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. + if "=" in arg: + long_opt, explicit_value = arg.split("=", 1) + else: + long_opt = arg + norm_long_opt = normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. + if arg[:2] not in self._opt_prefixes: + self._match_short_opt(arg, state) + return + + if not self.ignore_unknown_options: + raise + + state.largs.append(arg) diff --git a/myenv/lib/python3.9/site-packages/click/py.typed b/myenv/lib/python3.9/site-packages/click/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/click/shell_completion.py b/myenv/lib/python3.9/site-packages/click/shell_completion.py new file mode 100644 index 0000000..c17a8e6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/shell_completion.py @@ -0,0 +1,580 @@ +import os +import re +import typing as t +from gettext import gettext as _ + +from .core import Argument +from .core import BaseCommand +from .core import Context +from .core import MultiCommand +from .core import Option +from .core import Parameter +from .core import ParameterSource +from .parser import split_arg_string +from .utils import echo + + +def shell_complete( + cli: BaseCommand, + ctx_args: t.Dict[str, t.Any], + prog_name: str, + complete_var: str, + instruction: str, +) -> int: + """Perform shell completion for the given CLI program. + + :param cli: Command being called. + :param ctx_args: Extra arguments to pass to + ``cli.make_context``. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + :param instruction: Value of ``complete_var`` with the completion + instruction and shell, in the form ``instruction_shell``. + :return: Status code to exit with. + """ + shell, _, instruction = instruction.partition("_") + comp_cls = get_completion_class(shell) + + if comp_cls is None: + return 1 + + comp = comp_cls(cli, ctx_args, prog_name, complete_var) + + if instruction == "source": + echo(comp.source()) + return 0 + + if instruction == "complete": + echo(comp.complete()) + return 0 + + return 1 + + +class CompletionItem: + """Represents a completion value and metadata about the value. The + default metadata is ``type`` to indicate special shell handling, + and ``help`` if a shell supports showing a help string next to the + value. + + Arbitrary parameters can be passed when creating the object, and + accessed using ``item.attr``. If an attribute wasn't passed, + accessing it returns ``None``. + + :param value: The completion suggestion. 
+ :param type: Tells the shell script to provide special completion + support for the type. Click uses ``"dir"`` and ``"file"``. + :param help: String shown next to the value if supported. + :param kwargs: Arbitrary metadata. The built-in implementations + don't use this, but custom type completions paired with custom + shell support could use it. + """ + + __slots__ = ("value", "type", "help", "_info") + + def __init__( + self, + value: t.Any, + type: str = "plain", + help: t.Optional[str] = None, + **kwargs: t.Any, + ) -> None: + self.value = value + self.type = type + self.help = help + self._info = kwargs + + def __getattr__(self, name: str) -> t.Any: + return self._info.get(name) + + +# Only Bash >= 4.4 has the nosort option. +_SOURCE_BASH = """\ +%(complete_func)s() { + local IFS=$'\\n' + local response + + response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \ +%(complete_var)s=bash_complete $1) + + for completion in $response; do + IFS=',' read type value <<< "$completion" + + if [[ $type == 'dir' ]]; then + COMPREPLY=() + compopt -o dirnames + elif [[ $type == 'file' ]]; then + COMPREPLY=() + compopt -o default + elif [[ $type == 'plain' ]]; then + COMPREPLY+=($value) + fi + done + + return 0 +} + +%(complete_func)s_setup() { + complete -o nosort -F %(complete_func)s %(prog_name)s +} + +%(complete_func)s_setup; +""" + +_SOURCE_ZSH = """\ +#compdef %(prog_name)s + +%(complete_func)s() { + local -a completions + local -a completions_with_descriptions + local -a response + (( ! $+commands[%(prog_name)s] )) && return 1 + + response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \ +%(complete_var)s=zsh_complete %(prog_name)s)}") + + for type key descr in ${response}; do + if [[ "$type" == "plain" ]]; then + if [[ "$descr" == "_" ]]; then + completions+=("$key") + else + completions_with_descriptions+=("$key":"$descr") + fi + elif [[ "$type" == "dir" ]]; then + _path_files -/ + elif [[ "$type" == "file" ]]; then + _path_files -f + fi + done + + if [ -n "$completions_with_descriptions" ]; then + _describe -V unsorted completions_with_descriptions -U + fi + + if [ -n "$completions" ]; then + compadd -U -V unsorted -a completions + fi +} + +compdef %(complete_func)s %(prog_name)s; +""" + +_SOURCE_FISH = """\ +function %(complete_func)s; + set -l response; + + for value in (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \ +COMP_CWORD=(commandline -t) %(prog_name)s); + set response $response $value; + end; + + for completion in $response; + set -l metadata (string split "," $completion); + + if test $metadata[1] = "dir"; + __fish_complete_directories $metadata[2]; + else if test $metadata[1] = "file"; + __fish_complete_path $metadata[2]; + else if test $metadata[1] = "plain"; + echo $metadata[2]; + end; + end; +end; + +complete --no-files --command %(prog_name)s --arguments \ +"(%(complete_func)s)"; +""" + + +class ShellComplete: + """Base class for providing shell completion support. A subclass for + a given shell will override attributes and methods to implement the + completion instructions (``source`` and ``complete``). + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + + .. versionadded:: 8.0 + """ + + name: t.ClassVar[str] + """Name to register the shell as with :func:`add_completion_class`. + This is used in completion instructions (``{name}_source`` and + ``{name}_complete``). 
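+
+    A hypothetical PowerShell implementation, for instance, would set
+    ``name = "powershell"`` and be driven by the ``powershell_source``
+    and ``powershell_complete`` instructions.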
+ """ + + source_template: t.ClassVar[str] + """Completion script template formatted by :meth:`source`. This must + be provided by subclasses. + """ + + def __init__( + self, + cli: BaseCommand, + ctx_args: t.Dict[str, t.Any], + prog_name: str, + complete_var: str, + ) -> None: + self.cli = cli + self.ctx_args = ctx_args + self.prog_name = prog_name + self.complete_var = complete_var + + @property + def func_name(self) -> str: + """The name of the shell function defined by the completion + script. + """ + safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), re.ASCII) + return f"_{safe_name}_completion" + + def source_vars(self) -> t.Dict[str, t.Any]: + """Vars for formatting :attr:`source_template`. + + By default this provides ``complete_func``, ``complete_var``, + and ``prog_name``. + """ + return { + "complete_func": self.func_name, + "complete_var": self.complete_var, + "prog_name": self.prog_name, + } + + def source(self) -> str: + """Produce the shell script that defines the completion + function. By default this ``%``-style formats + :attr:`source_template` with the dict returned by + :meth:`source_vars`. + """ + return self.source_template % self.source_vars() + + def get_completion_args(self) -> t.Tuple[t.List[str], str]: + """Use the env vars defined by the shell script to return a + tuple of ``args, incomplete``. This must be implemented by + subclasses. + """ + raise NotImplementedError + + def get_completions( + self, args: t.List[str], incomplete: str + ) -> t.List[CompletionItem]: + """Determine the context and last complete command or parameter + from the complete args. Call that object's ``shell_complete`` + method to get the completions for the incomplete value. + + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args) + obj, incomplete = _resolve_incomplete(ctx, args, incomplete) + return obj.shell_complete(ctx, incomplete) + + def format_completion(self, item: CompletionItem) -> str: + """Format a completion item into the form recognized by the + shell script. This must be implemented by subclasses. + + :param item: Completion item to format. + """ + raise NotImplementedError + + def complete(self) -> str: + """Produce the completion data to send back to the shell. + + By default this calls :meth:`get_completion_args`, gets the + completions, then calls :meth:`format_completion` for each + completion. + """ + args, incomplete = self.get_completion_args() + completions = self.get_completions(args, incomplete) + out = [self.format_completion(item) for item in completions] + return "\n".join(out) + + +class BashComplete(ShellComplete): + """Shell completion for Bash.""" + + name = "bash" + source_template = _SOURCE_BASH + + def _check_version(self) -> None: + import subprocess + + output = subprocess.run( + ["bash", "-c", "echo ${BASH_VERSION}"], stdout=subprocess.PIPE + ) + match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode()) + + if match is not None: + major, minor = match.groups() + + if major < "4" or major == "4" and minor < "4": + raise RuntimeError( + _( + "Shell completion is not supported for Bash" + " versions older than 4.4." 
+ ) + ) + else: + raise RuntimeError( + _("Couldn't detect Bash version, shell completion is not supported.") + ) + + def source(self) -> str: + self._check_version() + return super().source() + + def get_completion_args(self) -> t.Tuple[t.List[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + return f"{item.type},{item.value}" + + +class ZshComplete(ShellComplete): + """Shell completion for Zsh.""" + + name = "zsh" + source_template = _SOURCE_ZSH + + def get_completion_args(self) -> t.Tuple[t.List[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}" + + +class FishComplete(ShellComplete): + """Shell completion for Fish.""" + + name = "fish" + source_template = _SOURCE_FISH + + def get_completion_args(self) -> t.Tuple[t.List[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + incomplete = os.environ["COMP_CWORD"] + args = cwords[1:] + + # Fish stores the partial word in both COMP_WORDS and + # COMP_CWORD, remove it from complete args. + if incomplete and args and args[-1] == incomplete: + args.pop() + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + if item.help: + return f"{item.type},{item.value}\t{item.help}" + + return f"{item.type},{item.value}" + + +_available_shells: t.Dict[str, t.Type[ShellComplete]] = { + "bash": BashComplete, + "fish": FishComplete, + "zsh": ZshComplete, +} + + +def add_completion_class( + cls: t.Type[ShellComplete], name: t.Optional[str] = None +) -> None: + """Register a :class:`ShellComplete` subclass under the given name. + The name will be provided by the completion instruction environment + variable during completion. + + :param cls: The completion class that will handle completion for the + shell. + :param name: Name to register the class under. Defaults to the + class's ``name`` attribute. + """ + if name is None: + name = cls.name + + _available_shells[name] = cls + + +def get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]: + """Look up a registered :class:`ShellComplete` subclass by the name + provided by the completion instruction environment variable. If the + name isn't registered, returns ``None``. + + :param shell: Name the class is registered under. + """ + return _available_shells.get(shell) + + +def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool: + """Determine if the given parameter is an argument that can still + accept values. + + :param ctx: Invocation context for the command represented by the + parsed complete args. + :param param: Argument object being checked. 
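+
+    For example, an argument declared with ``nargs=2`` for which only
+    one value has been parsed so far can still accept a value and
+    should therefore receive the completions.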
+ """ + if not isinstance(param, Argument): + return False + + assert param.name is not None + value = ctx.params[param.name] + return ( + param.nargs == -1 + or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE + or ( + param.nargs > 1 + and isinstance(value, (tuple, list)) + and len(value) < param.nargs + ) + ) + + +def _start_of_option(ctx: Context, value: str) -> bool: + """Check if the value looks like the start of an option.""" + if not value: + return False + + c = value[0] + return c in ctx._opt_prefixes + + +def _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool: + """Determine if the given parameter is an option that needs a value. + + :param args: List of complete args before the incomplete value. + :param param: Option object being checked. + """ + if not isinstance(param, Option): + return False + + if param.is_flag or param.count: + return False + + last_option = None + + for index, arg in enumerate(reversed(args)): + if index + 1 > param.nargs: + break + + if _start_of_option(ctx, arg): + last_option = arg + + return last_option is not None and last_option in param.opts + + +def _resolve_context( + cli: BaseCommand, ctx_args: t.Dict[str, t.Any], prog_name: str, args: t.List[str] +) -> Context: + """Produce the context hierarchy starting with the command and + traversing the complete arguments. This only follows the commands, + it doesn't trigger input prompts or callbacks. + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param args: List of complete args before the incomplete value. + """ + ctx_args["resilient_parsing"] = True + ctx = cli.make_context(prog_name, args.copy(), **ctx_args) + args = ctx.protected_args + ctx.args + + while args: + command = ctx.command + + if isinstance(command, MultiCommand): + if not command.chain: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True) + args = ctx.protected_args + ctx.args + else: + while args: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + sub_ctx = cmd.make_context( + name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + resilient_parsing=True, + ) + args = sub_ctx.args + + ctx = sub_ctx + args = [*sub_ctx.protected_args, *sub_ctx.args] + else: + break + + return ctx + + +def _resolve_incomplete( + ctx: Context, args: t.List[str], incomplete: str +) -> t.Tuple[t.Union[BaseCommand, Parameter], str]: + """Find the Click object that will handle the completion of the + incomplete value. Return the object and the incomplete value. + + :param ctx: Invocation context for the command represented by + the parsed complete args. + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + # Different shells treat an "=" between a long option name and + # value differently. Might keep the value joined, return the "=" + # as a separate item, or return the split name and value. Always + # split and discard the "=" to make completion easier. + if incomplete == "=": + incomplete = "" + elif "=" in incomplete and _start_of_option(ctx, incomplete): + name, _, incomplete = incomplete.partition("=") + args.append(name) + + # The "--" marker tells Click to stop treating values as options + # even if they start with the option character. 
If it hasn't been + # given and the incomplete arg looks like an option, the current + # command will provide option name completions. + if "--" not in args and _start_of_option(ctx, incomplete): + return ctx.command, incomplete + + params = ctx.command.get_params(ctx) + + # If the last complete arg is an option name with an incomplete + # value, the option will provide value completions. + for param in params: + if _is_incomplete_option(ctx, args, param): + return param, incomplete + + # It's not an option name or value. The first argument without a + # parsed value will provide value completions. + for param in params: + if _is_incomplete_argument(ctx, param): + return param, incomplete + + # There were no unparsed arguments, the command may be a group that + # will provide command name completions. + return ctx.command, incomplete diff --git a/myenv/lib/python3.9/site-packages/click/termui.py b/myenv/lib/python3.9/site-packages/click/termui.py new file mode 100644 index 0000000..bfb2f5a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/termui.py @@ -0,0 +1,787 @@ +import inspect +import io +import itertools +import os +import sys +import typing as t +from gettext import gettext as _ + +from ._compat import isatty +from ._compat import strip_ansi +from ._compat import WIN +from .exceptions import Abort +from .exceptions import UsageError +from .globals import resolve_color_default +from .types import Choice +from .types import convert_type +from .types import ParamType +from .utils import echo +from .utils import LazyFile + +if t.TYPE_CHECKING: + from ._termui_impl import ProgressBar + +V = t.TypeVar("V") + +# The prompt functions to use. The doc tools currently override these +# functions to customize how they work. +visible_prompt_func: t.Callable[[str], str] = input + +_ansi_colors = { + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, + "reset": 39, + "bright_black": 90, + "bright_red": 91, + "bright_green": 92, + "bright_yellow": 93, + "bright_blue": 94, + "bright_magenta": 95, + "bright_cyan": 96, + "bright_white": 97, +} +_ansi_reset_all = "\033[0m" + + +def hidden_prompt_func(prompt: str) -> str: + import getpass + + return getpass.getpass(prompt) + + +def _build_prompt( + text: str, + suffix: str, + show_default: bool = False, + default: t.Optional[t.Any] = None, + show_choices: bool = True, + type: t.Optional[ParamType] = None, +) -> str: + prompt = text + if type is not None and show_choices and isinstance(type, Choice): + prompt += f" ({', '.join(map(str, type.choices))})" + if default is not None and show_default: + prompt = f"{prompt} [{_format_default(default)}]" + return f"{prompt}{suffix}" + + +def _format_default(default: t.Any) -> t.Any: + if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): + return default.name # type: ignore + + return default + + +def prompt( + text: str, + default: t.Optional[t.Any] = None, + hide_input: bool = False, + confirmation_prompt: t.Union[bool, str] = False, + type: t.Optional[t.Union[ParamType, t.Any]] = None, + value_proc: t.Optional[t.Callable[[str], t.Any]] = None, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, + show_choices: bool = True, +) -> t.Any: + """Prompts a user for input. This is a convenience function that can + be used to prompt a user for input later. + + If the user aborts the input by sending an interrupt signal, this + function will catch it and raise a :exc:`Abort` exception. 
+ + :param text: the text to show for the prompt. + :param default: the default value to use if no input happens. If this + is not given it will prompt until it's aborted. + :param hide_input: if this is set to true then the input value will + be hidden. + :param confirmation_prompt: Prompt a second time to confirm the + value. Can be set to a string instead of ``True`` to customize + the message. + :param type: the type to use to check the value against. + :param value_proc: if this parameter is provided it's a function that + is invoked instead of the type conversion to + convert a value. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + :param show_choices: Show or hide choices if the passed type is a Choice. + For example if type is a Choice of either day or week, + show_choices is true and text is "Group by" then the + prompt will be "Group by (day, week): ". + + .. versionadded:: 8.0 + ``confirmation_prompt`` can be a custom string. + + .. versionadded:: 7.0 + Added the ``show_choices`` parameter. + + .. versionadded:: 6.0 + Added unicode support for cmd.exe on Windows. + + .. versionadded:: 4.0 + Added the `err` parameter. + + """ + + def prompt_func(text: str) -> str: + f = hidden_prompt_func if hide_input else visible_prompt_func + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(text.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. + return f(" ") + except (KeyboardInterrupt, EOFError): + # getpass doesn't print a newline if the user aborts input with ^C. + # Allegedly this behavior is inherited from getpass(3). + # A doc bug has been filed at https://bugs.python.org/issue24711 + if hide_input: + echo(None, err=err) + raise Abort() from None + + if value_proc is None: + value_proc = convert_type(type, default) + + prompt = _build_prompt( + text, prompt_suffix, show_default, default, show_choices, type + ) + + if confirmation_prompt: + if confirmation_prompt is True: + confirmation_prompt = _("Repeat for confirmation") + + confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix) + + while True: + while True: + value = prompt_func(prompt) + if value: + break + elif default is not None: + value = default + break + try: + result = value_proc(value) + except UsageError as e: + if hide_input: + echo(_("Error: The value you entered was invalid."), err=err) + else: + echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306 + continue + if not confirmation_prompt: + return result + while True: + value2 = prompt_func(confirmation_prompt) + is_empty = not value and not value2 + if value2 or is_empty: + break + if value == value2: + return result + echo(_("Error: The two entered values do not match."), err=err) + + +def confirm( + text: str, + default: t.Optional[bool] = False, + abort: bool = False, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, +) -> bool: + """Prompts for confirmation (yes/no question). + + If the user aborts the input by sending a interrupt signal this + function will catch it and raise a :exc:`Abort` exception. + + :param text: the question to ask. + :param default: The default value to use when no input is given. If + ``None``, repeat until input is given. 
+ :param abort: if this is set to `True` a negative answer aborts the + exception by raising :exc:`Abort`. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + + .. versionchanged:: 8.0 + Repeat until input is given if ``default`` is ``None``. + + .. versionadded:: 4.0 + Added the ``err`` parameter. + """ + prompt = _build_prompt( + text, + prompt_suffix, + show_default, + "y/n" if default is None else ("Y/n" if default else "y/N"), + ) + + while True: + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(prompt.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. + value = visible_prompt_func(" ").lower().strip() + except (KeyboardInterrupt, EOFError): + raise Abort() from None + if value in ("y", "yes"): + rv = True + elif value in ("n", "no"): + rv = False + elif default is not None and value == "": + rv = default + else: + echo(_("Error: invalid input"), err=err) + continue + break + if abort and not rv: + raise Abort() + return rv + + +def echo_via_pager( + text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str], + color: t.Optional[bool] = None, +) -> None: + """This function takes a text and shows it via an environment specific + pager on stdout. + + .. versionchanged:: 3.0 + Added the `color` flag. + + :param text_or_generator: the text to page, or alternatively, a + generator emitting the text to page. + :param color: controls if the pager supports ANSI colors or not. The + default is autodetection. + """ + color = resolve_color_default(color) + + if inspect.isgeneratorfunction(text_or_generator): + i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)() + elif isinstance(text_or_generator, str): + i = [text_or_generator] + else: + i = iter(t.cast(t.Iterable[str], text_or_generator)) + + # convert every element of i to a text type if necessary + text_generator = (el if isinstance(el, str) else str(el) for el in i) + + from ._termui_impl import pager + + return pager(itertools.chain(text_generator, "\n"), color) + + +def progressbar( + iterable: t.Optional[t.Iterable[V]] = None, + length: t.Optional[int] = None, + label: t.Optional[str] = None, + show_eta: bool = True, + show_percent: t.Optional[bool] = None, + show_pos: bool = False, + item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.Optional[t.TextIO] = None, + color: t.Optional[bool] = None, + update_min_steps: int = 1, +) -> "ProgressBar[V]": + """This function creates an iterable context manager that can be used + to iterate over something while showing a progress bar. It will + either iterate over the `iterable` or `length` items (that are counted + up). While iteration happens, this function will print a rendered + progress bar to the given `file` (defaults to stdout) and will attempt + to calculate remaining time and more. By default, this progress bar + will not be rendered if the file is not a terminal. + + The context manager creates the progress bar. When the context + manager is entered the progress bar is already created. 
With every + iteration over the progress bar, the iterable passed to the bar is + advanced and the bar is updated. When the context manager exits, + a newline is printed and the progress bar is finalized on screen. + + Note: The progress bar is currently designed for use cases where the + total progress can be expected to take at least several seconds. + Because of this, the ProgressBar class object won't display + progress that is considered too fast, and progress where the time + between steps is less than a second. + + No printing must happen or the progress bar will be unintentionally + destroyed. + + Example usage:: + + with progressbar(items) as bar: + for item in bar: + do_something_with(item) + + Alternatively, if no iterable is specified, one can manually update the + progress bar through the `update()` method instead of directly + iterating over the progress bar. The update method accepts the number + of steps to increment the bar with:: + + with progressbar(length=chunks.total_bytes) as bar: + for chunk in chunks: + process_chunk(chunk) + bar.update(chunks.bytes) + + The ``update()`` method also takes an optional value specifying the + ``current_item`` at the new position. This is useful when used + together with ``item_show_func`` to customize the output for each + manual step:: + + with click.progressbar( + length=total_size, + label='Unzipping archive', + item_show_func=lambda a: a.filename + ) as bar: + for archive in zip_file: + archive.extract() + bar.update(archive.size, archive) + + :param iterable: an iterable to iterate over. If not provided the length + is required. + :param length: the number of items to iterate over. By default the + progressbar will attempt to ask the iterator about its + length, which might or might not work. If an iterable is + also provided this parameter can be used to override the + length. If an iterable is not provided the progress bar + will iterate over a range of that length. + :param label: the label to show next to the progress bar. + :param show_eta: enables or disables the estimated time display. This is + automatically disabled if the length cannot be + determined. + :param show_percent: enables or disables the percentage display. The + default is `True` if the iterable has a length or + `False` if not. + :param show_pos: enables or disables the absolute position display. The + default is `False`. + :param item_show_func: A function called with the current item which + can return a string to show next to the progress bar. If the + function returns ``None`` nothing is shown. The current item can + be ``None``, such as when entering and exiting the bar. + :param fill_char: the character to use to show the filled part of the + progress bar. + :param empty_char: the character to use to show the non-filled part of + the progress bar. + :param bar_template: the format string to use as template for the bar. + The parameters in it are ``label`` for the label, + ``bar`` for the progress bar and ``info`` for the + info section. + :param info_sep: the separator between multiple info items (eta etc.) + :param width: the width of the progress bar in characters, 0 means full + terminal width + :param file: The file to write to. If this is not a terminal then + only the label is printed. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are included anywhere in the progress bar output + which is not the case by default. 
+ :param update_min_steps: Render only when this many updates have + completed. This allows tuning for very fast iterators. + + .. versionchanged:: 8.0 + Output is shown even if execution time is less than 0.5 seconds. + + .. versionchanged:: 8.0 + ``item_show_func`` shows the current item, not the previous one. + + .. versionchanged:: 8.0 + Labels are echoed if the output is not a TTY. Reverts a change + in 7.0 that removed all output. + + .. versionadded:: 8.0 + Added the ``update_min_steps`` parameter. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. Added the ``update`` method to + the object. + + .. versionadded:: 2.0 + """ + from ._termui_impl import ProgressBar + + color = resolve_color_default(color) + return ProgressBar( + iterable=iterable, + length=length, + show_eta=show_eta, + show_percent=show_percent, + show_pos=show_pos, + item_show_func=item_show_func, + fill_char=fill_char, + empty_char=empty_char, + bar_template=bar_template, + info_sep=info_sep, + file=file, + label=label, + width=width, + color=color, + update_min_steps=update_min_steps, + ) + + +def clear() -> None: + """Clears the terminal screen. This will have the effect of clearing + the whole visible space of the terminal and moving the cursor to the + top left. This does not do anything if not connected to a terminal. + + .. versionadded:: 2.0 + """ + if not isatty(sys.stdout): + return + if WIN: + os.system("cls") + else: + sys.stdout.write("\033[2J\033[1;1H") + + +def _interpret_color( + color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0 +) -> str: + if isinstance(color, int): + return f"{38 + offset};5;{color:d}" + + if isinstance(color, (tuple, list)): + r, g, b = color + return f"{38 + offset};2;{r:d};{g:d};{b:d}" + + return str(_ansi_colors[color] + offset) + + +def style( + text: t.Any, + fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None, + bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None, + bold: t.Optional[bool] = None, + dim: t.Optional[bool] = None, + underline: t.Optional[bool] = None, + overline: t.Optional[bool] = None, + italic: t.Optional[bool] = None, + blink: t.Optional[bool] = None, + reverse: t.Optional[bool] = None, + strikethrough: t.Optional[bool] = None, + reset: bool = True, +) -> str: + """Styles a text with ANSI styles and returns the new string. By + default the styling is self contained which means that at the end + of the string a reset code is issued. This can be prevented by + passing ``reset=False``. + + Examples:: + + click.echo(click.style('Hello World!', fg='green')) + click.echo(click.style('ATTENTION!', blink=True)) + click.echo(click.style('Some things', reverse=True, fg='cyan')) + click.echo(click.style('More colors', fg=(255, 12, 128), bg=117)) + + Supported color names: + + * ``black`` (might be a gray) + * ``red`` + * ``green`` + * ``yellow`` (might be an orange) + * ``blue`` + * ``magenta`` + * ``cyan`` + * ``white`` (might be light gray) + * ``bright_black`` + * ``bright_red`` + * ``bright_green`` + * ``bright_yellow`` + * ``bright_blue`` + * ``bright_magenta`` + * ``bright_cyan`` + * ``bright_white`` + * ``reset`` (reset the color code only) + + If the terminal supports it, color may also be specified as: + + - An integer in the interval [0, 255]. The terminal must support + 8-bit/256-color mode. + - An RGB tuple of three integers in [0, 255]. The terminal must + support 24-bit/true-color mode. + + See https://en.wikipedia.org/wiki/ANSI_color and + https://gist.github.com/XVilka/8346728 for more information. 
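+
+    Because the trailing reset code can be suppressed, styles can also
+    be composed manually (a small sketch; output assumes a terminal)::
+
+        click.echo(click.style("green from here on", fg="green", reset=False))
+        click.echo("this line inherits the color")
+        click.echo(click.style("", reset=True))  # emit only the reset code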
+ + :param text: the string to style with ansi codes. + :param fg: if provided this will become the foreground color. + :param bg: if provided this will become the background color. + :param bold: if provided this will enable or disable bold mode. + :param dim: if provided this will enable or disable dim mode. This is + badly supported. + :param underline: if provided this will enable or disable underline. + :param overline: if provided this will enable or disable overline. + :param italic: if provided this will enable or disable italic. + :param blink: if provided this will enable or disable blinking. + :param reverse: if provided this will enable or disable inverse + rendering (foreground becomes background and the + other way round). + :param strikethrough: if provided this will enable or disable + striking through text. + :param reset: by default a reset-all code is added at the end of the + string which means that styles do not carry over. This + can be disabled to compose styles. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. + + .. versionchanged:: 8.0 + Added support for 256 and RGB color codes. + + .. versionchanged:: 8.0 + Added the ``strikethrough``, ``italic``, and ``overline`` + parameters. + + .. versionchanged:: 7.0 + Added support for bright colors. + + .. versionadded:: 2.0 + """ + if not isinstance(text, str): + text = str(text) + + bits = [] + + if fg: + try: + bits.append(f"\033[{_interpret_color(fg)}m") + except KeyError: + raise TypeError(f"Unknown color {fg!r}") from None + + if bg: + try: + bits.append(f"\033[{_interpret_color(bg, 10)}m") + except KeyError: + raise TypeError(f"Unknown color {bg!r}") from None + + if bold is not None: + bits.append(f"\033[{1 if bold else 22}m") + if dim is not None: + bits.append(f"\033[{2 if dim else 22}m") + if underline is not None: + bits.append(f"\033[{4 if underline else 24}m") + if overline is not None: + bits.append(f"\033[{53 if overline else 55}m") + if italic is not None: + bits.append(f"\033[{3 if italic else 23}m") + if blink is not None: + bits.append(f"\033[{5 if blink else 25}m") + if reverse is not None: + bits.append(f"\033[{7 if reverse else 27}m") + if strikethrough is not None: + bits.append(f"\033[{9 if strikethrough else 29}m") + bits.append(text) + if reset: + bits.append(_ansi_reset_all) + return "".join(bits) + + +def unstyle(text: str) -> str: + """Removes ANSI styling information from a string. Usually it's not + necessary to use this function as Click's echo function will + automatically remove styling if necessary. + + .. versionadded:: 2.0 + + :param text: the text to remove style information from. + """ + return strip_ansi(text) + + +def secho( + message: t.Optional[t.Any] = None, + file: t.Optional[t.IO[t.AnyStr]] = None, + nl: bool = True, + err: bool = False, + color: t.Optional[bool] = None, + **styles: t.Any, +) -> None: + """This function combines :func:`echo` and :func:`style` into one + call. As such the following two calls are the same:: + + click.secho('Hello World!', fg='green') + click.echo(click.style('Hello World!', fg='green')) + + All keyword arguments are forwarded to the underlying functions + depending on which one they go with. + + Non-string types will be converted to :class:`str`. However, + :class:`bytes` are passed directly to :meth:`echo` without applying + style. If you want to style bytes that represent text, call + :meth:`bytes.decode` first. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. 
Bytes are + passed through without style applied. + + .. versionadded:: 2.0 + """ + if message is not None and not isinstance(message, (bytes, bytearray)): + message = style(message, **styles) + + return echo(message, file=file, nl=nl, err=err, color=color) + + +def edit( + text: t.Optional[t.AnyStr] = None, + editor: t.Optional[str] = None, + env: t.Optional[t.Mapping[str, str]] = None, + require_save: bool = True, + extension: str = ".txt", + filename: t.Optional[str] = None, +) -> t.Optional[t.AnyStr]: + r"""Edits the given text in the defined editor. If an editor is given + (should be the full path to the executable but the regular operating + system search path is used for finding the executable) it overrides + the detected editor. Optionally, some environment variables can be + used. If the editor is closed without changes, `None` is returned. In + case a file is edited directly the return value is always `None` and + `require_save` and `extension` are ignored. + + If the editor cannot be opened a :exc:`UsageError` is raised. + + Note for Windows: to simplify cross-platform usage, the newlines are + automatically converted from POSIX to Windows and vice versa. As such, + the message here will have ``\n`` as newline markers. + + :param text: the text to edit. + :param editor: optionally the editor to use. Defaults to automatic + detection. + :param env: environment variables to forward to the editor. + :param require_save: if this is true, then not saving in the editor + will make the return value become `None`. + :param extension: the extension to tell the editor about. This defaults + to `.txt` but changing this might change syntax + highlighting. + :param filename: if provided it will edit this file instead of the + provided text contents. It will not use a temporary + file as an indirection in that case. + """ + from ._termui_impl import Editor + + ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension) + + if filename is None: + return ed.edit(text) + + ed.edit_file(filename) + return None + + +def launch(url: str, wait: bool = False, locate: bool = False) -> int: + """This function launches the given URL (or filename) in the default + viewer application for this file type. If this is an executable, it + might launch the executable in a new session. The return value is + the exit code of the launched application. Usually, ``0`` indicates + success. + + Examples:: + + click.launch('https://click.palletsprojects.com/') + click.launch('/my/downloaded/file', locate=True) + + .. versionadded:: 2.0 + + :param url: URL or filename of the thing to launch. + :param wait: Wait for the program to exit before returning. This + only works if the launched program blocks. In particular, + ``xdg-open`` on Linux does not block. + :param locate: if this is set to `True` then instead of launching the + application associated with the URL it will attempt to + launch a file manager with the file located. This + might have weird effects if the URL does not point to + the filesystem. + """ + from ._termui_impl import open_url + + return open_url(url, wait=wait, locate=locate) + + +# If this is provided, getchar() calls into this instead. This is used +# for unittesting purposes. +_getchar: t.Optional[t.Callable[[bool], str]] = None + + +def getchar(echo: bool = False) -> str: + """Fetches a single character from the terminal and returns it. This + will always return a unicode character and under certain rare + circumstances this might return more than one character. 
The + situations which more than one character is returned is when for + whatever reason multiple characters end up in the terminal buffer or + standard input was not actually a terminal. + + Note that this will always read from the terminal, even if something + is piped into the standard input. + + Note for Windows: in rare cases when typing non-ASCII characters, this + function might wait for a second character and then return both at once. + This is because certain Unicode characters look like special-key markers. + + .. versionadded:: 2.0 + + :param echo: if set to `True`, the character read will also show up on + the terminal. The default is to not show it. + """ + global _getchar + + if _getchar is None: + from ._termui_impl import getchar as f + + _getchar = f + + return _getchar(echo) + + +def raw_terminal() -> t.ContextManager[int]: + from ._termui_impl import raw_terminal as f + + return f() + + +def pause(info: t.Optional[str] = None, err: bool = False) -> None: + """This command stops execution and waits for the user to press any + key to continue. This is similar to the Windows batch "pause" + command. If the program is not run through a terminal, this command + will instead do nothing. + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param info: The message to print before pausing. Defaults to + ``"Press any key to continue..."``. + :param err: if set to message goes to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + if not isatty(sys.stdin) or not isatty(sys.stdout): + return + + if info is None: + info = _("Press any key to continue...") + + try: + if info: + echo(info, nl=False, err=err) + try: + getchar() + except (KeyboardInterrupt, EOFError): + pass + finally: + if info: + echo(err=err) diff --git a/myenv/lib/python3.9/site-packages/click/testing.py b/myenv/lib/python3.9/site-packages/click/testing.py new file mode 100644 index 0000000..e395c2e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/testing.py @@ -0,0 +1,479 @@ +import contextlib +import io +import os +import shlex +import shutil +import sys +import tempfile +import typing as t +from types import TracebackType + +from . import formatting +from . import termui +from . 
import utils +from ._compat import _find_binary_reader + +if t.TYPE_CHECKING: + from .core import BaseCommand + + +class EchoingStdin: + def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None: + self._input = input + self._output = output + self._paused = False + + def __getattr__(self, x: str) -> t.Any: + return getattr(self._input, x) + + def _echo(self, rv: bytes) -> bytes: + if not self._paused: + self._output.write(rv) + + return rv + + def read(self, n: int = -1) -> bytes: + return self._echo(self._input.read(n)) + + def read1(self, n: int = -1) -> bytes: + return self._echo(self._input.read1(n)) # type: ignore + + def readline(self, n: int = -1) -> bytes: + return self._echo(self._input.readline(n)) + + def readlines(self) -> t.List[bytes]: + return [self._echo(x) for x in self._input.readlines()] + + def __iter__(self) -> t.Iterator[bytes]: + return iter(self._echo(x) for x in self._input) + + def __repr__(self) -> str: + return repr(self._input) + + +@contextlib.contextmanager +def _pause_echo(stream: t.Optional[EchoingStdin]) -> t.Iterator[None]: + if stream is None: + yield + else: + stream._paused = True + yield + stream._paused = False + + +class _NamedTextIOWrapper(io.TextIOWrapper): + def __init__( + self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any + ) -> None: + super().__init__(buffer, **kwargs) + self._name = name + self._mode = mode + + @property + def name(self) -> str: + return self._name + + @property + def mode(self) -> str: + return self._mode + + +def make_input_stream( + input: t.Optional[t.Union[str, bytes, t.IO]], charset: str +) -> t.BinaryIO: + # Is already an input stream. + if hasattr(input, "read"): + rv = _find_binary_reader(t.cast(t.IO, input)) + + if rv is not None: + return rv + + raise TypeError("Could not find binary reader for input stream.") + + if input is None: + input = b"" + elif isinstance(input, str): + input = input.encode(charset) + + return io.BytesIO(t.cast(bytes, input)) + + +class Result: + """Holds the captured result of an invoked CLI script.""" + + def __init__( + self, + runner: "CliRunner", + stdout_bytes: bytes, + stderr_bytes: t.Optional[bytes], + return_value: t.Any, + exit_code: int, + exception: t.Optional[BaseException], + exc_info: t.Optional[ + t.Tuple[t.Type[BaseException], BaseException, TracebackType] + ] = None, + ): + #: The runner that created the result + self.runner = runner + #: The standard output as bytes. + self.stdout_bytes = stdout_bytes + #: The standard error as bytes, or None if not available + self.stderr_bytes = stderr_bytes + #: The value returned from the invoked command. + #: + #: .. versionadded:: 8.0 + self.return_value = return_value + #: The exit code as integer. + self.exit_code = exit_code + #: The exception that happened if one did. 
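+        #: ``None`` if the command exited cleanly.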
+ self.exception = exception + #: The traceback + self.exc_info = exc_info + + @property + def output(self) -> str: + """The (standard) output as unicode string.""" + return self.stdout + + @property + def stdout(self) -> str: + """The standard output as unicode string.""" + return self.stdout_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) + + @property + def stderr(self) -> str: + """The standard error as unicode string.""" + if self.stderr_bytes is None: + raise ValueError("stderr not separately captured") + return self.stderr_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) + + def __repr__(self) -> str: + exc_str = repr(self.exception) if self.exception else "okay" + return f"<{type(self).__name__} {exc_str}>" + + +class CliRunner: + """The CLI runner provides functionality to invoke a Click command line + script for unittesting purposes in a isolated environment. This only + works in single-threaded systems without any concurrency as it changes the + global interpreter state. + + :param charset: the character set for the input and output data. + :param env: a dictionary with environment variables for overriding. + :param echo_stdin: if this is set to `True`, then reading from stdin writes + to stdout. This is useful for showing examples in + some circumstances. Note that regular prompts + will automatically echo the input. + :param mix_stderr: if this is set to `False`, then stdout and stderr are + preserved as independent streams. This is useful for + Unix-philosophy apps that have predictable stdout and + noisy stderr, such that each may be measured + independently + """ + + def __init__( + self, + charset: str = "utf-8", + env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, + echo_stdin: bool = False, + mix_stderr: bool = True, + ) -> None: + self.charset = charset + self.env = env or {} + self.echo_stdin = echo_stdin + self.mix_stderr = mix_stderr + + def get_default_prog_name(self, cli: "BaseCommand") -> str: + """Given a command object it will return the default program name + for it. The default is the `name` attribute or ``"root"`` if not + set. + """ + return cli.name or "root" + + def make_env( + self, overrides: t.Optional[t.Mapping[str, t.Optional[str]]] = None + ) -> t.Mapping[str, t.Optional[str]]: + """Returns the environment overrides for invoking a script.""" + rv = dict(self.env) + if overrides: + rv.update(overrides) + return rv + + @contextlib.contextmanager + def isolation( + self, + input: t.Optional[t.Union[str, bytes, t.IO]] = None, + env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, + color: bool = False, + ) -> t.Iterator[t.Tuple[io.BytesIO, t.Optional[io.BytesIO]]]: + """A context manager that sets up the isolation for invoking of a + command line tool. This sets up stdin with the given input data + and `os.environ` with the overrides from the given dictionary. + This also rebinds some internals in Click to be mocked (like the + prompt functionality). + + This is automatically done in the :meth:`invoke` method. + + :param input: the input stream to put into sys.stdin. + :param env: the environment overrides as dictionary. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionchanged:: 8.0 + ``stderr`` is opened with ``errors="backslashreplace"`` + instead of the default ``"strict"``. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. 
+ """ + bytes_input = make_input_stream(input, self.charset) + echo_input = None + + old_stdin = sys.stdin + old_stdout = sys.stdout + old_stderr = sys.stderr + old_forced_width = formatting.FORCED_WIDTH + formatting.FORCED_WIDTH = 80 + + env = self.make_env(env) + + bytes_output = io.BytesIO() + + if self.echo_stdin: + bytes_input = echo_input = t.cast( + t.BinaryIO, EchoingStdin(bytes_input, bytes_output) + ) + + sys.stdin = text_input = _NamedTextIOWrapper( + bytes_input, encoding=self.charset, name="", mode="r" + ) + + if self.echo_stdin: + # Force unbuffered reads, otherwise TextIOWrapper reads a + # large chunk which is echoed early. + text_input._CHUNK_SIZE = 1 # type: ignore + + sys.stdout = _NamedTextIOWrapper( + bytes_output, encoding=self.charset, name="", mode="w" + ) + + bytes_error = None + if self.mix_stderr: + sys.stderr = sys.stdout + else: + bytes_error = io.BytesIO() + sys.stderr = _NamedTextIOWrapper( + bytes_error, + encoding=self.charset, + name="", + mode="w", + errors="backslashreplace", + ) + + @_pause_echo(echo_input) # type: ignore + def visible_input(prompt: t.Optional[str] = None) -> str: + sys.stdout.write(prompt or "") + val = text_input.readline().rstrip("\r\n") + sys.stdout.write(f"{val}\n") + sys.stdout.flush() + return val + + @_pause_echo(echo_input) # type: ignore + def hidden_input(prompt: t.Optional[str] = None) -> str: + sys.stdout.write(f"{prompt or ''}\n") + sys.stdout.flush() + return text_input.readline().rstrip("\r\n") + + @_pause_echo(echo_input) # type: ignore + def _getchar(echo: bool) -> str: + char = sys.stdin.read(1) + + if echo: + sys.stdout.write(char) + + sys.stdout.flush() + return char + + default_color = color + + def should_strip_ansi( + stream: t.Optional[t.IO] = None, color: t.Optional[bool] = None + ) -> bool: + if color is None: + return not default_color + return not color + + old_visible_prompt_func = termui.visible_prompt_func + old_hidden_prompt_func = termui.hidden_prompt_func + old__getchar_func = termui._getchar + old_should_strip_ansi = utils.should_strip_ansi # type: ignore + termui.visible_prompt_func = visible_input + termui.hidden_prompt_func = hidden_input + termui._getchar = _getchar + utils.should_strip_ansi = should_strip_ansi # type: ignore + + old_env = {} + try: + for key, value in env.items(): + old_env[key] = os.environ.get(key) + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + yield (bytes_output, bytes_error) + finally: + for key, value in old_env.items(): + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + sys.stdout = old_stdout + sys.stderr = old_stderr + sys.stdin = old_stdin + termui.visible_prompt_func = old_visible_prompt_func + termui.hidden_prompt_func = old_hidden_prompt_func + termui._getchar = old__getchar_func + utils.should_strip_ansi = old_should_strip_ansi # type: ignore + formatting.FORCED_WIDTH = old_forced_width + + def invoke( + self, + cli: "BaseCommand", + args: t.Optional[t.Union[str, t.Sequence[str]]] = None, + input: t.Optional[t.Union[str, bytes, t.IO]] = None, + env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, + catch_exceptions: bool = True, + color: bool = False, + **extra: t.Any, + ) -> Result: + """Invokes a command in an isolated environment. The arguments are + forwarded directly to the command line script, the `extra` keyword + arguments are passed to the :meth:`~clickpkg.Command.main` function of + the command. 
+ + This returns a :class:`Result` object. + + :param cli: the command to invoke + :param args: the arguments to invoke. It may be given as an iterable + or a string. When given as string it will be interpreted + as a Unix shell command. More details at + :func:`shlex.split`. + :param input: the input data for `sys.stdin`. + :param env: the environment overrides. + :param catch_exceptions: Whether to catch any other exceptions than + ``SystemExit``. + :param extra: the keyword arguments to pass to :meth:`main`. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionchanged:: 8.0 + The result object has the ``return_value`` attribute with + the value returned from the invoked command. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionchanged:: 3.0 + Added the ``catch_exceptions`` parameter. + + .. versionchanged:: 3.0 + The result object has the ``exc_info`` attribute with the + traceback if available. + """ + exc_info = None + with self.isolation(input=input, env=env, color=color) as outstreams: + return_value = None + exception: t.Optional[BaseException] = None + exit_code = 0 + + if isinstance(args, str): + args = shlex.split(args) + + try: + prog_name = extra.pop("prog_name") + except KeyError: + prog_name = self.get_default_prog_name(cli) + + try: + return_value = cli.main(args=args or (), prog_name=prog_name, **extra) + except SystemExit as e: + exc_info = sys.exc_info() + e_code = t.cast(t.Optional[t.Union[int, t.Any]], e.code) + + if e_code is None: + e_code = 0 + + if e_code != 0: + exception = e + + if not isinstance(e_code, int): + sys.stdout.write(str(e_code)) + sys.stdout.write("\n") + e_code = 1 + + exit_code = e_code + + except Exception as e: + if not catch_exceptions: + raise + exception = e + exit_code = 1 + exc_info = sys.exc_info() + finally: + sys.stdout.flush() + stdout = outstreams[0].getvalue() + if self.mix_stderr: + stderr = None + else: + stderr = outstreams[1].getvalue() # type: ignore + + return Result( + runner=self, + stdout_bytes=stdout, + stderr_bytes=stderr, + return_value=return_value, + exit_code=exit_code, + exception=exception, + exc_info=exc_info, # type: ignore + ) + + @contextlib.contextmanager + def isolated_filesystem( + self, temp_dir: t.Optional[t.Union[str, os.PathLike]] = None + ) -> t.Iterator[str]: + """A context manager that creates a temporary directory and + changes the current working directory to it. This isolates tests + that affect the contents of the CWD to prevent them from + interfering with each other. + + :param temp_dir: Create the temporary directory under this + directory. If given, the created directory is not removed + when exiting. + + .. versionchanged:: 8.0 + Added the ``temp_dir`` parameter. 
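+
+        A minimal usage sketch under these assumptions (``hello.txt`` is an
+        illustrative file name, ``runner`` a :class:`CliRunner` instance):
+
+        .. code-block:: python
+
+            runner = CliRunner()
+
+            with runner.isolated_filesystem():
+                # The CWD is now a fresh temporary directory; files written
+                # here are cleaned up when the context manager exits.
+                with open("hello.txt", "w") as f:
+                    f.write("Hello World!")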
+ """ + cwd = os.getcwd() + dt = tempfile.mkdtemp(dir=temp_dir) # type: ignore[type-var] + os.chdir(dt) + + try: + yield t.cast(str, dt) + finally: + os.chdir(cwd) + + if temp_dir is None: + try: + shutil.rmtree(dt) + except OSError: # noqa: B014 + pass diff --git a/myenv/lib/python3.9/site-packages/click/types.py b/myenv/lib/python3.9/site-packages/click/types.py new file mode 100644 index 0000000..b45ee53 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/types.py @@ -0,0 +1,1073 @@ +import os +import stat +import typing as t +from datetime import datetime +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import _get_argv_encoding +from ._compat import get_filesystem_encoding +from ._compat import open_stream +from .exceptions import BadParameter +from .utils import LazyFile +from .utils import safecall + +if t.TYPE_CHECKING: + import typing_extensions as te + from .core import Context + from .core import Parameter + from .shell_completion import CompletionItem + + +class ParamType: + """Represents the type of a parameter. Validates and converts values + from the command line or Python into the correct type. + + To implement a custom type, subclass and implement at least the + following: + + - The :attr:`name` class attribute must be set. + - Calling an instance of the type with ``None`` must return + ``None``. This is already implemented by default. + - :meth:`convert` must convert string values to the correct type. + - :meth:`convert` must accept values that are already the correct + type. + - It must be able to convert a value if the ``ctx`` and ``param`` + arguments are ``None``. This can occur when converting prompt + input. + """ + + is_composite: t.ClassVar[bool] = False + arity: t.ClassVar[int] = 1 + + #: the descriptive name of this type + name: str + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter: t.ClassVar[t.Optional[str]] = None + + def to_info_dict(self) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + # The class name without the "ParamType" suffix. + param_type = type(self).__name__.partition("ParamType")[0] + param_type = param_type.partition("ParameterType")[0] + + # Custom subclasses might not remember to set a name. + if hasattr(self, "name"): + name = self.name + else: + name = param_type + + return {"param_type": param_type, "name": name} + + def __call__( + self, + value: t.Any, + param: t.Optional["Parameter"] = None, + ctx: t.Optional["Context"] = None, + ) -> t.Any: + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param: "Parameter") -> t.Optional[str]: + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param: "Parameter") -> t.Optional[str]: + """Optionally might return extra information about a missing + parameter. + + .. versionadded:: 2.0 + """ + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + """Convert the value to the correct type. 
This is not called if + the value is ``None`` (the missing value). + + This must accept string values from the command line, as well as + values that are already the correct type. It may also convert + other compatible types. + + The ``param`` and ``ctx`` arguments may be ``None`` in certain + situations, such as when converting prompt input. + + If the value cannot be converted, call :meth:`fail` with a + descriptive message. + + :param value: The value to convert. + :param param: The parameter that is using this type to convert + its value. May be ``None``. + :param ctx: The current context that arrived at this value. May + be ``None``. + """ + return value + + def split_envvar_value(self, rv: str) -> t.Sequence[str]: + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or "").split(self.envvar_list_splitter) + + def fail( + self, + message: str, + param: t.Optional["Parameter"] = None, + ctx: t.Optional["Context"] = None, + ) -> "t.NoReturn": + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Return a list of + :class:`~click.shell_completion.CompletionItem` objects for the + incomplete value. Most types do not provide completions, but + some do, and this allows custom types to provide custom + completions as well. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0 + """ + return [] + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self) -> int: # type: ignore + raise NotImplementedError() + + +class FuncParamType(ParamType): + def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None: + self.name = func.__name__ + self.func = func + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["func"] = self.func + return info_dict + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + try: + return self.func(value) + except ValueError: + try: + value = str(value) + except UnicodeError: + value = value.decode("utf-8", "replace") + + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + return value + + def __repr__(self) -> str: + return "UNPROCESSED" + + +class StringParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = get_filesystem_encoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode("utf-8", "replace") + else: + value = value.decode("utf-8", "replace") + return value + return str(value) + + def __repr__(self) -> str: + return "STRING" + + +class Choice(ParamType): + """The choice type allows a value to be checked against a fixed set + of supported values. All of these values have to be strings. + + You should only pass a list or tuple of choices. Other iterables + (like generators) may lead to surprising results. + + The resulting value will always be one of the originally passed choices + regardless of ``case_sensitive`` or any ``ctx.token_normalize_func`` + being specified. + + See :ref:`choice-opts` for an example. + + :param case_sensitive: Set to false to make choices case + insensitive. Defaults to true. + """ + + name = "choice" + + def __init__(self, choices: t.Sequence[str], case_sensitive: bool = True) -> None: + self.choices = choices + self.case_sensitive = case_sensitive + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["choices"] = self.choices + info_dict["case_sensitive"] = self.case_sensitive + return info_dict + + def get_metavar(self, param: "Parameter") -> str: + choices_str = "|".join(self.choices) + + # Use curly braces to indicate a required argument. + if param.required and param.param_type_name == "argument": + return f"{{{choices_str}}}" + + # Use square braces to indicate an option or optional argument. 
+ return f"[{choices_str}]" + + def get_missing_message(self, param: "Parameter") -> str: + return _("Choose from:\n\t{choices}").format(choices=",\n\t".join(self.choices)) + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + # Match through normalization and case sensitivity + # first do token_normalize_func, then lowercase + # preserve original `value` to produce an accurate message in + # `self.fail` + normed_value = value + normed_choices = {choice: choice for choice in self.choices} + + if ctx is not None and ctx.token_normalize_func is not None: + normed_value = ctx.token_normalize_func(value) + normed_choices = { + ctx.token_normalize_func(normed_choice): original + for normed_choice, original in normed_choices.items() + } + + if not self.case_sensitive: + normed_value = normed_value.casefold() + normed_choices = { + normed_choice.casefold(): original + for normed_choice, original in normed_choices.items() + } + + if normed_value in normed_choices: + return normed_choices[normed_value] + + choices_str = ", ".join(map(repr, self.choices)) + self.fail( + ngettext( + "{value!r} is not {choice}.", + "{value!r} is not one of {choices}.", + len(self.choices), + ).format(value=value, choice=choices_str, choices=choices_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return f"Choice({list(self.choices)})" + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Complete choices that start with the incomplete value. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + str_choices = map(str, self.choices) + + if self.case_sensitive: + matched = (c for c in str_choices if c.startswith(incomplete)) + else: + incomplete = incomplete.lower() + matched = (c for c in str_choices if c.lower().startswith(incomplete)) + + return [CompletionItem(c) for c in matched] + + +class DateTime(ParamType): + """The DateTime type converts date strings into `datetime` objects. + + The format strings which are checked are configurable, but default to some + common (non-timezone aware) ISO 8601 formats. + + When specifying *DateTime* formats, you should only pass a list or a tuple. + Other iterables, like generators, may lead to surprising results. + + The format strings are processed using ``datetime.strptime``, and this + consequently defines the format strings which are allowed. + + Parsing is tried using each format, in order, and the first format which + parses successfully is used. + + :param formats: A list or tuple of date format strings, in the order in + which they should be tried. Defaults to + ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, + ``'%Y-%m-%d %H:%M:%S'``. 
+ """ + + name = "datetime" + + def __init__(self, formats: t.Optional[t.Sequence[str]] = None): + self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"] + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["formats"] = self.formats + return info_dict + + def get_metavar(self, param: "Parameter") -> str: + return f"[{'|'.join(self.formats)}]" + + def _try_to_convert_date(self, value: t.Any, format: str) -> t.Optional[datetime]: + try: + return datetime.strptime(value, format) + except ValueError: + return None + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if isinstance(value, datetime): + return value + + for format in self.formats: + converted = self._try_to_convert_date(value, format) + + if converted is not None: + return converted + + formats_str = ", ".join(map(repr, self.formats)) + self.fail( + ngettext( + "{value!r} does not match the format {format}.", + "{value!r} does not match the formats {formats}.", + len(self.formats), + ).format(value=value, format=formats_str, formats=formats_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return "DateTime" + + +class _NumberParamTypeBase(ParamType): + _number_class: t.ClassVar[t.Type] + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + try: + return self._number_class(value) + except ValueError: + self.fail( + _("{value!r} is not a valid {number_type}.").format( + value=value, number_type=self.name + ), + param, + ctx, + ) + + +class _NumberRangeBase(_NumberParamTypeBase): + def __init__( + self, + min: t.Optional[float] = None, + max: t.Optional[float] = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + self.min = min + self.max = max + self.min_open = min_open + self.max_open = max_open + self.clamp = clamp + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + min=self.min, + max=self.max, + min_open=self.min_open, + max_open=self.max_open, + clamp=self.clamp, + ) + return info_dict + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + import operator + + rv = super().convert(value, param, ctx) + lt_min: bool = self.min is not None and ( + operator.le if self.min_open else operator.lt + )(rv, self.min) + gt_max: bool = self.max is not None and ( + operator.ge if self.max_open else operator.gt + )(rv, self.max) + + if self.clamp: + if lt_min: + return self._clamp(self.min, 1, self.min_open) # type: ignore + + if gt_max: + return self._clamp(self.max, -1, self.max_open) # type: ignore + + if lt_min or gt_max: + self.fail( + _("{value} is not in the range {range}.").format( + value=rv, range=self._describe_range() + ), + param, + ctx, + ) + + return rv + + def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float: + """Find the valid value to clamp to bound in the given + direction. + + :param bound: The boundary value. + :param dir: 1 or -1 indicating the direction to move. + :param open: If true, the range does not include the bound. 
+ """ + raise NotImplementedError + + def _describe_range(self) -> str: + """Describe the range for use in help text.""" + if self.min is None: + op = "<" if self.max_open else "<=" + return f"x{op}{self.max}" + + if self.max is None: + op = ">" if self.min_open else ">=" + return f"x{op}{self.min}" + + lop = "<" if self.min_open else "<=" + rop = "<" if self.max_open else "<=" + return f"{self.min}{lop}x{rop}{self.max}" + + def __repr__(self) -> str: + clamp = " clamped" if self.clamp else "" + return f"<{type(self).__name__} {self._describe_range()}{clamp}>" + + +class IntParamType(_NumberParamTypeBase): + name = "integer" + _number_class = int + + def __repr__(self) -> str: + return "INT" + + +class IntRange(_NumberRangeBase, IntParamType): + """Restrict an :data:`click.INT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "integer range" + + def _clamp( # type: ignore + self, bound: int, dir: "te.Literal[1, -1]", open: bool + ) -> int: + if not open: + return bound + + return bound + dir + + +class FloatParamType(_NumberParamTypeBase): + name = "float" + _number_class = float + + def __repr__(self) -> str: + return "FLOAT" + + +class FloatRange(_NumberRangeBase, FloatParamType): + """Restrict a :data:`click.FLOAT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. This is not supported if either + boundary is marked ``open``. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "float range" + + def __init__( + self, + min: t.Optional[float] = None, + max: t.Optional[float] = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + super().__init__( + min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp + ) + + if (min_open or max_open) and clamp: + raise TypeError("Clamping is not supported for open bounds.") + + def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float: + if not open: + return bound + + # Could use Python 3.9's math.nextafter here, but clamping an + # open float range doesn't seem to be particularly useful. It's + # left up to the user to write a callback to do it if needed. 
+ raise RuntimeError("Clamping is not supported for open bounds.") + + +class BoolParamType(ParamType): + name = "boolean" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if value in {False, True}: + return bool(value) + + norm = value.strip().lower() + + if norm in {"1", "true", "t", "yes", "y", "on"}: + return True + + if norm in {"0", "false", "f", "no", "n", "off"}: + return False + + self.fail( + _("{value!r} is not a valid boolean.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "BOOL" + + +class UUIDParameterType(ParamType): + name = "uuid" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + import uuid + + if isinstance(value, uuid.UUID): + return value + + value = value.strip() + + try: + return uuid.UUID(value) + except ValueError: + self.fail( + _("{value!r} is not a valid UUID.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "UUID" + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or upon + first IO. The default is to be non-lazy for standard input and output + streams as well as files opened for reading, `lazy` otherwise. When opening a + file lazily for reading, it is still opened temporarily for validation, but + will not be held open until first IO. lazy is mainly useful when opening + for writing to avoid creating the file until it is needed. + + Starting with Click 2.0, files can also be opened atomically in which + case all writes go into a separate file in the same folder and upon + completion the file will be moved over to the original location. This + is useful if a file regularly read by other users is modified. + + See :ref:`file-args` for more information. 
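+
+    A minimal sketch of typical usage (the ``--out`` option and ``report``
+    command are hypothetical, not part of this module):
+
+    .. code-block:: python
+
+        import click
+
+        @click.command()
+        @click.option("--out", type=click.File("w", lazy=True))
+        def report(out):
+            # With lazy=True the file is only created on first write.
+            out.write("done\n")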
+ """ + + name = "filename" + envvar_list_splitter = os.path.pathsep + + def __init__( + self, + mode: str = "r", + encoding: t.Optional[str] = None, + errors: t.Optional[str] = "strict", + lazy: t.Optional[bool] = None, + atomic: bool = False, + ) -> None: + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update(mode=self.mode, encoding=self.encoding) + return info_dict + + def resolve_lazy_flag(self, value: t.Any) -> bool: + if self.lazy is not None: + return self.lazy + if value == "-": + return False + elif "w" in self.mode: + return True + return False + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + try: + if hasattr(value, "read") or hasattr(value, "write"): + return value + + lazy = self.resolve_lazy_flag(value) + + if lazy: + f: t.IO = t.cast( + t.IO, + LazyFile( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ), + ) + + if ctx is not None: + ctx.call_on_close(f.close_intelligently) # type: ignore + + return f + + f, should_close = open_stream( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + + return f + except OSError as e: # noqa: B014 + self.fail(f"'{os.fsdecode(value)}': {e.strerror}", param, ctx) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Return a special completion marker that tells the completion + system to use the shell to provide file path completions. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + return [CompletionItem(incomplete, type="file")] + + +class Path(ParamType): + """The ``Path`` type is similar to the :class:`File` type, but + returns the filename instead of an open file. Various checks can be + enabled to validate the type of file and permissions. + + :param exists: The file or directory needs to exist for the value to + be valid. If this is not set to ``True``, and the file does not + exist, then all further checks are silently skipped. + :param file_okay: Allow a file as a value. + :param dir_okay: Allow a directory as a value. + :param readable: if true, a readable check is performed. + :param writable: if true, a writable check is performed. + :param executable: if true, an executable check is performed. + :param resolve_path: Make the value absolute and resolve any + symlinks. A ``~`` is not expanded, as this is supposed to be + done by the shell only. + :param allow_dash: Allow a single dash as a value, which indicates + a standard stream (but does not open it). Use + :func:`~click.open_file` to handle opening this value. + :param path_type: Convert the incoming path value to this type. If + ``None``, keep Python's default, which is ``str``. Useful to + convert to :class:`pathlib.Path`. + + .. 
versionchanged:: 8.1 + Added the ``executable`` parameter. + + .. versionchanged:: 8.0 + Allow passing ``type=pathlib.Path``. + + .. versionchanged:: 6.0 + Added the ``allow_dash`` parameter. + """ + + envvar_list_splitter = os.path.pathsep + + def __init__( + self, + exists: bool = False, + file_okay: bool = True, + dir_okay: bool = True, + writable: bool = False, + readable: bool = True, + resolve_path: bool = False, + allow_dash: bool = False, + path_type: t.Optional[t.Type] = None, + executable: bool = False, + ): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.readable = readable + self.writable = writable + self.executable = executable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name = _("file") + elif self.dir_okay and not self.file_okay: + self.name = _("directory") + else: + self.name = _("path") + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + exists=self.exists, + file_okay=self.file_okay, + dir_okay=self.dir_okay, + writable=self.writable, + readable=self.readable, + allow_dash=self.allow_dash, + ) + return info_dict + + def coerce_path_result(self, rv: t.Any) -> t.Any: + if self.type is not None and not isinstance(rv, self.type): + if self.type is str: + rv = os.fsdecode(rv) + elif self.type is bytes: + rv = os.fsencode(rv) + else: + rv = self.type(rv) + + return rv + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") + + if not is_dash: + if self.resolve_path: + # os.path.realpath doesn't resolve symlinks on Windows + # until Python 3.8. Use pathlib for now. + import pathlib + + rv = os.fsdecode(pathlib.Path(rv).resolve()) + + try: + st = os.stat(rv) + except OSError: + if not self.exists: + return self.coerce_path_result(rv) + self.fail( + _("{name} {filename!r} does not exist.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + + if not self.file_okay and stat.S_ISREG(st.st_mode): + self.fail( + _("{name} {filename!r} is a file.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + if not self.dir_okay and stat.S_ISDIR(st.st_mode): + self.fail( + _("{name} '{filename}' is a directory.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + + if self.readable and not os.access(rv, os.R_OK): + self.fail( + _("{name} {filename!r} is not readable.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + + if self.writable and not os.access(rv, os.W_OK): + self.fail( + _("{name} {filename!r} is not writable.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + + if self.executable and not os.access(value, os.X_OK): + self.fail( + _("{name} {filename!r} is not executable.").format( + name=self.name.title(), filename=os.fsdecode(value) + ), + param, + ctx, + ) + + return self.coerce_path_result(rv) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Return a special completion marker that tells the completion + system to use the shell to provide path completions for only + directories or any paths. + + :param ctx: Invocation context for this command. 
+ :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + type = "dir" if self.dir_okay and not self.file_okay else "file" + return [CompletionItem(incomplete, type=type)] + + +class Tuple(CompositeParamType): + """The default behavior of Click is to apply a type on a value directly. + This works well in most cases, except for when `nargs` is set to a fixed + count and different types should be used for different items. In this + case the :class:`Tuple` type can be used. This type can only be used + if `nargs` is set to a fixed number. + + For more information see :ref:`tuple-type`. + + This can be selected by using a Python tuple literal as a type. + + :param types: a list of types that should be used for the tuple items. + """ + + def __init__(self, types: t.Sequence[t.Union[t.Type, ParamType]]) -> None: + self.types = [convert_type(ty) for ty in types] + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["types"] = [t.to_info_dict() for t in self.types] + return info_dict + + @property + def name(self) -> str: # type: ignore + return f"<{' '.join(ty.name for ty in self.types)}>" + + @property + def arity(self) -> int: # type: ignore + return len(self.types) + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + len_type = len(self.types) + len_value = len(value) + + if len_value != len_type: + self.fail( + ngettext( + "{len_type} values are required, but {len_value} was given.", + "{len_type} values are required, but {len_value} were given.", + len_value, + ).format(len_type=len_type, len_value=len_value), + param=param, + ctx=ctx, + ) + + return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) + + +def convert_type(ty: t.Optional[t.Any], default: t.Optional[t.Any] = None) -> ParamType: + """Find the most appropriate :class:`ParamType` for the given Python + type. If the type isn't provided, it can be inferred from a default + value. + """ + guessed_type = False + + if ty is None and default is not None: + if isinstance(default, (tuple, list)): + # If the default is empty, ty will remain None and will + # return STRING. + if default: + item = default[0] + + # A tuple of tuples needs to detect the inner types. + # Can't call convert recursively because that would + # incorrectly unwind the tuple to a single type. + if isinstance(item, (tuple, list)): + ty = tuple(map(type, item)) + else: + ty = type(item) + else: + ty = type(default) + + guessed_type = True + + if isinstance(ty, tuple): + return Tuple(ty) + + if isinstance(ty, ParamType): + return ty + + if ty is str or ty is None: + return STRING + + if ty is int: + return INT + + if ty is float: + return FLOAT + + if ty is bool: + return BOOL + + if guessed_type: + return STRING + + if __debug__: + try: + if issubclass(ty, ParamType): + raise AssertionError( + f"Attempted to use an uninstantiated parameter type ({ty})." + ) + except TypeError: + # ty is an instance (correct), so issubclass fails. + pass + + return FuncParamType(ty) + + +#: A dummy parameter type that just does nothing. From a user's +#: perspective this appears to just be the same as `STRING` but +#: internally no string conversion takes place if the input was bytes. +#: This is usually useful when working with file paths as they can +#: appear in bytes and unicode. 
+#: +#: For path related uses the :class:`Path` type is a better choice but +#: there are situations where an unprocessed type is useful which is why +#: it is is provided. +#: +#: .. versionadded:: 4.0 +UNPROCESSED = UnprocessedParamType() + +#: A unicode string parameter type which is the implicit default. This +#: can also be selected by using ``str`` as type. +STRING = StringParamType() + +#: An integer parameter. This can also be selected by using ``int`` as +#: type. +INT = IntParamType() + +#: A floating point value parameter. This can also be selected by using +#: ``float`` as type. +FLOAT = FloatParamType() + +#: A boolean parameter. This is the default for boolean flags. This can +#: also be selected by using ``bool`` as a type. +BOOL = BoolParamType() + +#: A UUID parameter. +UUID = UUIDParameterType() diff --git a/myenv/lib/python3.9/site-packages/click/utils.py b/myenv/lib/python3.9/site-packages/click/utils.py new file mode 100644 index 0000000..8283788 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/click/utils.py @@ -0,0 +1,580 @@ +import os +import re +import sys +import typing as t +from functools import update_wrapper +from types import ModuleType + +from ._compat import _default_text_stderr +from ._compat import _default_text_stdout +from ._compat import _find_binary_writer +from ._compat import auto_wrap_for_ansi +from ._compat import binary_streams +from ._compat import get_filesystem_encoding +from ._compat import open_stream +from ._compat import should_strip_ansi +from ._compat import strip_ansi +from ._compat import text_streams +from ._compat import WIN +from .globals import resolve_color_default + +if t.TYPE_CHECKING: + import typing_extensions as te + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) + + +def _posixify(name: str) -> str: + return "-".join(name.split()).lower() + + +def safecall(func: F) -> F: + """Wraps a function so that it swallows exceptions.""" + + def wrapper(*args, **kwargs): # type: ignore + try: + return func(*args, **kwargs) + except Exception: + pass + + return update_wrapper(t.cast(F, wrapper), func) + + +def make_str(value: t.Any) -> str: + """Converts a value into a valid string.""" + if isinstance(value, bytes): + try: + return value.decode(get_filesystem_encoding()) + except UnicodeError: + return value.decode("utf-8", "replace") + return str(value) + + +def make_default_short_help(help: str, max_length: int = 45) -> str: + """Returns a condensed version of help string.""" + # Consider only the first paragraph. + paragraph_end = help.find("\n\n") + + if paragraph_end != -1: + help = help[:paragraph_end] + + # Collapse newlines, tabs, and spaces. + words = help.split() + + if not words: + return "" + + # The first paragraph started with a "no rewrap" marker, ignore it. + if words[0] == "\b": + words = words[1:] + + total_length = 0 + last_index = len(words) - 1 + + for i, word in enumerate(words): + total_length += len(word) + (i > 0) + + if total_length > max_length: # too long, truncate + break + + if word[-1] == ".": # sentence end, truncate without "..." + return " ".join(words[: i + 1]) + + if total_length == max_length and i != last_index: + break # not at sentence end, truncate with "..." + else: + return " ".join(words) # no truncation needed + + # Account for the length of the suffix. + total_length += len("...") + + # remove words until the length is short enough + while i > 0: + total_length -= len(words[i]) + (i > 0) + + if total_length <= max_length: + break + + i -= 1 + + return " ".join(words[:i]) + "..." 
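Putting the vendored pieces together: the ``CliRunner`` and ``Result`` classes from ``click.testing`` above are typically driven as below. A minimal sketch; the ``hello`` command and its expected output are hypothetical, not taken from this patch:

    import click
    from click.testing import CliRunner


    @click.command()
    @click.argument("name")
    def hello(name):
        click.echo(f"Hello {name}!")


    def test_hello():
        runner = CliRunner()
        # invoke() runs the command inside isolation(), captures stdout,
        # and returns a Result with exit_code, output and any exception.
        result = runner.invoke(hello, ["World"])
        assert result.exit_code == 0
        assert result.output == "Hello World!\n"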
+
+
+class LazyFile:
+    """A lazy file works like a regular file, but it does not fully open
+    the file; it performs some basic checks early to see if the
+    filename parameter makes sense. This is useful for safely opening
+    files for writing.
+    """
+
+    def __init__(
+        self,
+        filename: str,
+        mode: str = "r",
+        encoding: t.Optional[str] = None,
+        errors: t.Optional[str] = "strict",
+        atomic: bool = False,
+    ):
+        self.name = filename
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.atomic = atomic
+        self._f: t.Optional[t.IO]
+
+        if filename == "-":
+            self._f, self.should_close = open_stream(filename, mode, encoding, errors)
+        else:
+            if "r" in mode:
+                # Open and close the file in case we're opening it for
+                # reading so that we can catch at least some errors in
+                # some cases early.
+                open(filename, mode).close()
+
+            self._f = None
+            self.should_close = True
+
+    def __getattr__(self, name: str) -> t.Any:
+        return getattr(self.open(), name)
+
+    def __repr__(self) -> str:
+        if self._f is not None:
+            return repr(self._f)
+        return f"<unopened file '{self.name}' {self.mode}>"
+
+    def open(self) -> t.IO:
+        """Opens the file if it's not yet open. This call might fail with
+        a :exc:`FileError`. Not handling this error will produce an error
+        that Click shows.
+        """
+        if self._f is not None:
+            return self._f
+        try:
+            rv, self.should_close = open_stream(
+                self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
+            )
+        except OSError as e:  # noqa: E402
+            from .exceptions import FileError
+
+            raise FileError(self.name, hint=e.strerror) from e
+        self._f = rv
+        return rv
+
+    def close(self) -> None:
+        """Closes the underlying file, no matter what."""
+        if self._f is not None:
+            self._f.close()
+
+    def close_intelligently(self) -> None:
+        """This function only closes the file if it was opened by the lazy
+        file wrapper. For instance this will never close stdin.
+        """
+        if self.should_close:
+            self.close()
+
+    def __enter__(self) -> "LazyFile":
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):  # type: ignore
+        self.close_intelligently()
+
+    def __iter__(self) -> t.Iterator[t.AnyStr]:
+        self.open()
+        return iter(self._f)  # type: ignore
+
+
+class KeepOpenFile:
+    def __init__(self, file: t.IO) -> None:
+        self._file = file
+
+    def __getattr__(self, name: str) -> t.Any:
+        return getattr(self._file, name)
+
+    def __enter__(self) -> "KeepOpenFile":
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):  # type: ignore
+        pass
+
+    def __repr__(self) -> str:
+        return repr(self._file)
+
+    def __iter__(self) -> t.Iterator[t.AnyStr]:
+        return iter(self._file)
+
+
+def echo(
+    message: t.Optional[t.Any] = None,
+    file: t.Optional[t.IO[t.Any]] = None,
+    nl: bool = True,
+    err: bool = False,
+    color: t.Optional[bool] = None,
+) -> None:
+    """Print a message and newline to stdout or a file. This should be
+    used instead of :func:`print` because it provides better support
+    for different data, files, and environments.
+
+    Compared to :func:`print`, this does the following:
+
+    - Ensures that the output encoding is not misconfigured on Linux.
+    - Supports Unicode in the Windows console.
+    - Supports writing to binary outputs, and supports writing bytes
+      to text outputs.
+    - Supports colors and styles on Windows.
+    - Removes ANSI color and style codes if the output does not look
+      like an interactive terminal.
+    - Always flushes the output.
+
+    :param message: The string or bytes to output. Other objects are
+        converted to strings.
+    :param file: The file to write to. Defaults to ``stdout``.
+ :param err: Write to ``stderr`` instead of ``stdout``. + :param nl: Print a newline after the message. Enabled by default. + :param color: Force showing or hiding colors and other styles. By + default Click will remove color if the output does not look like + an interactive terminal. + + .. versionchanged:: 6.0 + Support Unicode output on the Windows console. Click does not + modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()`` + will still not support Unicode. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionadded:: 3.0 + Added the ``err`` parameter. + + .. versionchanged:: 2.0 + Support colors on Windows if colorama is installed. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, (str, bytes, bytearray)): + out: t.Optional[t.Union[str, bytes]] = str(message) + else: + out = message + + if nl: + out = out or "" + if isinstance(out, str): + out += "\n" + else: + out += b"\n" + + if not out: + file.flush() + return + + # If there is a message and the value looks like bytes, we manually + # need to find the binary stream and write the message in there. + # This is done separately so that most stream types will work as you + # would expect. Eg: you can write to StringIO for other cases. + if isinstance(out, (bytes, bytearray)): + binary_file = _find_binary_writer(file) + + if binary_file is not None: + file.flush() + binary_file.write(out) + binary_file.flush() + return + + # ANSI style code support. For no message or bytes, nothing happens. + # When outputting to a file instead of a terminal, strip codes. + else: + color = resolve_color_default(color) + + if should_strip_ansi(file, color): + out = strip_ansi(out) + elif WIN: + if auto_wrap_for_ansi is not None: + file = auto_wrap_for_ansi(file) # type: ignore + elif not color: + out = strip_ansi(out) + + file.write(out) # type: ignore + file.flush() + + +def get_binary_stream(name: "te.Literal['stdin', 'stdout', 'stderr']") -> t.BinaryIO: + """Returns a system stream for byte processing. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + """ + opener = binary_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener() + + +def get_text_stream( + name: "te.Literal['stdin', 'stdout', 'stderr']", + encoding: t.Optional[str] = None, + errors: t.Optional[str] = "strict", +) -> t.TextIO: + """Returns a system stream for text processing. This usually returns + a wrapped stream around a binary stream returned from + :func:`get_binary_stream` but it also can take shortcuts for already + correctly configured streams. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + :param encoding: overrides the detected default encoding. + :param errors: overrides the default error mode. + """ + opener = text_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener(encoding, errors) + + +def open_file( + filename: str, + mode: str = "r", + encoding: t.Optional[str] = None, + errors: t.Optional[str] = "strict", + lazy: bool = False, + atomic: bool = False, +) -> t.IO: + """Open a file, with extra behavior to handle ``'-'`` to indicate + a standard stream, lazy open on write, and atomic write. 
Similar to
+    the behavior of the :class:`~click.File` param type.
+
+    If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is
+    wrapped so that using it in a context manager will not close it.
+    This makes it possible to use the function without accidentally
+    closing a standard stream:
+
+    .. code-block:: python
+
+        with open_file(filename) as f:
+            ...
+
+    :param filename: The name of the file to open, or ``'-'`` for
+        ``stdin``/``stdout``.
+    :param mode: The mode in which to open the file.
+    :param encoding: The encoding to decode or encode a file opened in
+        text mode.
+    :param errors: The error handling mode.
+    :param lazy: Wait to open the file until it is accessed. For read
+        mode, the file is temporarily opened to raise access errors
+        early, then closed until it is read again.
+    :param atomic: Write to a temporary file and replace the given file
+        on close.
+
+    .. versionadded:: 3.0
+    """
+    if lazy:
+        return t.cast(t.IO, LazyFile(filename, mode, encoding, errors, atomic=atomic))
+
+    f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
+
+    if not should_close:
+        f = t.cast(t.IO, KeepOpenFile(f))
+
+    return f
+
+
+def format_filename(
+    filename: t.Union[str, bytes, os.PathLike], shorten: bool = False
+) -> str:
+    """Formats a filename for user display. The main purpose of this
+    function is to ensure that the filename can be displayed at all. This
+    will decode the filename to unicode if necessary in a way that it will
+    not fail. Optionally, it can shorten the filename to not include the
+    full path to the filename.
+
+    :param filename: formats a filename for UI display. This will also convert
+        the filename into unicode without failing.
+    :param shorten: this optionally shortens the filename to strip off the
+        path that leads up to it.
+    """
+    if shorten:
+        filename = os.path.basename(filename)
+
+    return os.fsdecode(filename)
+
+
+def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
+    r"""Returns the config folder for the application. The default behavior
+    is to return whatever is most appropriate for the operating system.
+
+    To give you an idea, for an app called ``"Foo Bar"``, something like
+    the following folders could be returned:
+
+    Mac OS X:
+      ``~/Library/Application Support/Foo Bar``
+    Mac OS X (POSIX):
+      ``~/.foo-bar``
+    Unix:
+      ``~/.config/foo-bar``
+    Unix (POSIX):
+      ``~/.foo-bar``
+    Windows (roaming):
+      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+    Windows (not roaming):
+      ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+    .. versionadded:: 2.0
+
+    :param app_name: the application name. This should be properly capitalized
+        and can contain whitespace.
+    :param roaming: controls if the folder should be roaming or not on Windows.
+        Has no effect otherwise.
+    :param force_posix: if this is set to `True` then on any POSIX system the
+        folder will be stored in the home folder with a leading
+        dot instead of the XDG config home or darwin's
+        application support folder.
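+
+    A minimal sketch (``"Foo Bar"`` and ``config.toml`` are illustrative;
+    the returned directory is not created automatically):
+
+    .. code-block:: python
+
+        import os
+
+        cfg = os.path.join(get_app_dir("Foo Bar"), "config.toml")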
+ """ + if WIN: + key = "APPDATA" if roaming else "LOCALAPPDATA" + folder = os.environ.get(key) + if folder is None: + folder = os.path.expanduser("~") + return os.path.join(folder, app_name) + if force_posix: + return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}")) + if sys.platform == "darwin": + return os.path.join( + os.path.expanduser("~/Library/Application Support"), app_name + ) + return os.path.join( + os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")), + _posixify(app_name), + ) + + +class PacifyFlushWrapper: + """This wrapper is used to catch and suppress BrokenPipeErrors resulting + from ``.flush()`` being called on broken pipe during the shutdown/final-GC + of the Python interpreter. Notably ``.flush()`` is always called on + ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any + other cleanup code, and the case where the underlying file is not a broken + pipe, all calls and attributes are proxied. + """ + + def __init__(self, wrapped: t.IO) -> None: + self.wrapped = wrapped + + def flush(self) -> None: + try: + self.wrapped.flush() + except OSError as e: + import errno + + if e.errno != errno.EPIPE: + raise + + def __getattr__(self, attr: str) -> t.Any: + return getattr(self.wrapped, attr) + + +def _detect_program_name( + path: t.Optional[str] = None, _main: t.Optional[ModuleType] = None +) -> str: + """Determine the command used to run the program, for use in help + text. If a file or entry point was executed, the file name is + returned. If ``python -m`` was used to execute a module or package, + ``python -m name`` is returned. + + This doesn't try to be too precise, the goal is to give a concise + name for help text. Files are only shown as their name without the + path. ``python`` is only shown for modules, and the full path to + ``sys.executable`` is not shown. + + :param path: The Python file being executed. Python puts this in + ``sys.argv[0]``, which is used by default. + :param _main: The ``__main__`` module. This should only be passed + during internal testing. + + .. versionadded:: 8.0 + Based on command args detection in the Werkzeug reloader. + + :meta private: + """ + if _main is None: + _main = sys.modules["__main__"] + + if not path: + path = sys.argv[0] + + # The value of __package__ indicates how Python was called. It may + # not exist if a setuptools script is installed as an egg. It may be + # set incorrectly for entry points created with pip on Windows. + if getattr(_main, "__package__", None) is None or ( + os.name == "nt" + and _main.__package__ == "" + and not os.path.exists(path) + and os.path.exists(f"{path}.exe") + ): + # Executed a file, like "python app.py". + return os.path.basename(path) + + # Executed a module, like "python -m example". + # Rewritten by Python from "-m script" to "/path/to/script.py". + # Need to look at main module to determine how it was executed. + py_module = t.cast(str, _main.__package__) + name = os.path.splitext(os.path.basename(path))[0] + + # A submodule like "example.cli". + if name != "__main__": + py_module = f"{py_module}.{name}" + + return f"python -m {py_module.lstrip('.')}" + + +def _expand_args( + args: t.Iterable[str], + *, + user: bool = True, + env: bool = True, + glob_recursive: bool = True, +) -> t.List[str]: + """Simulate Unix shell expansion with Python functions. + + See :func:`glob.glob`, :func:`os.path.expanduser`, and + :func:`os.path.expandvars`. + + This is intended for use on Windows, where the shell does not do any + expansion. 
It may not exactly match what a Unix shell would do. + + :param args: List of command line arguments to expand. + :param user: Expand user home directory. + :param env: Expand environment variables. + :param glob_recursive: ``**`` matches directories recursively. + + .. versionchanged:: 8.1 + Invalid glob patterns are treated as empty expansions rather + than raising an error. + + .. versionadded:: 8.0 + + :meta private: + """ + from glob import glob + + out = [] + + for arg in args: + if user: + arg = os.path.expanduser(arg) + + if env: + arg = os.path.expandvars(arg) + + try: + matches = glob(arg, recursive=glob_recursive) + except re.error: + matches = [] + + if not matches: + out.append(arg) + else: + out.extend(matches) + + return out diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/LICENSE.txt b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/LICENSE.txt new file mode 100644 index 0000000..3105888 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/METADATA b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/METADATA new file mode 100644 index 0000000..cd93935 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/METADATA @@ -0,0 +1,411 @@ +Metadata-Version: 2.1 +Name: colorama +Version: 0.4.5 +Summary: Cross-platform colored terminal text. 
+Home-page: https://github.com/tartley/colorama +Author: Jonathan Hartley +Author-email: tartley@tartley.com +Maintainer: Arnon Yaari +License: BSD +Keywords: color colour terminal text ansi windows crossplatform xplatform +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Terminals +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* +License-File: LICENSE.txt + +.. image:: https://img.shields.io/pypi/v/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Supported Python versions + +.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg + :target: https://github.com/tartley/colorama/actions/workflows/test.yml + :alt: Build Status + +Colorama +======== + +Makes ANSI escape character sequences (for producing colored terminal text and +cursor positioning) work under MS Windows. + +.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif + :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama¤cy_code=USD + :alt: Donate with Paypal + +`PyPI for releases `_ | +`Github for source `_ | +`Colorama for enterprise on Tidelift `_ + +If you find Colorama useful, please |donate| to the authors. Thank you! + + +Installation +------------ + +Tested on CPython 2.7, 3.5, 3.6, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.6. + +No requirements other than the standard library. + +.. code-block:: bash + + pip install colorama + # or + conda install -c anaconda colorama + + +Description +----------- + +ANSI escape character sequences have long been used to produce colored terminal +text and cursor positioning on Unix and Macs. Colorama makes this work on +Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which +would appear as gobbledygook in the output), and converting them into the +appropriate win32 calls to modify the state of the terminal. On other platforms, +Colorama does nothing. + +This has the upshot of providing a simple cross-platform API for printing +colored terminal text from Python, and has the happy side-effect that existing +applications or libraries which use ANSI sequences to produce colored output on +Linux or Macs can now also work on Windows, simply by calling +``colorama.init()``. + +An alternative approach is to install ``ansi.sys`` on Windows machines, which +provides the same behaviour for all applications running in terminals. Colorama +is intended for situations where that isn't easy (e.g., maybe your app doesn't +have an installer.) 
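The wrapping described above can be checked directly: ``init()`` replaces ``sys.stdout`` with a proxy object only when stripping or conversion is actually required, which is why it is safe to call unconditionally (compare ``initialise.py`` and ``ansitowin32.py`` further down in this diff). A minimal sketch using only the public ``init``/``deinit`` API:

.. code-block:: python

    import sys
    import colorama

    original_stdout = sys.stdout
    colorama.init()

    # On Windows, or when output is redirected, stdout is now a proxy object;
    # on a Unix tty with default arguments it is usually left untouched.
    print("wrapped:", sys.stdout is not original_stdout)

    colorama.deinit()
    print("restored:", sys.stdout is original_stdout)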
+ +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +Applications should initialise Colorama using: + +.. code-block:: python + + from colorama import init + init() + +On Windows, calling ``init()`` will filter ANSI escape sequences out of any +text sent to ``stdout`` or ``stderr``, and replace them with equivalent Win32 +calls. + +On other platforms, calling ``init()`` has no effect (unless you request other +optional functionality, see "Init Keyword Args" below; or if output +is redirected). By design, this permits applications to call ``init()`` +unconditionally on all platforms, after which ANSI output should just work. + +On all platforms, if output is redirected, ANSI escape sequences are completely +stripped out. + +To stop using Colorama before your program exits, simply call ``deinit()``. +This will restore ``stdout`` and ``stderr`` to their original values, so that +Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is +cheaper than calling ``init()`` again (but does the same thing). + + +Colored Output +.............. + +Cross-platform printing of colored text can then be done using Colorama's +constant shorthand for ANSI escape sequences. These are deliberately +rudimentary, see below. + +.. code-block:: python + + from colorama import Fore, Back, Style + print(Fore.RED + 'some red text') + print(Back.GREEN + 'and with a green background') + print(Style.DIM + 'and in dim text') + print(Style.RESET_ALL) + print('back to normal now') + +...or simply by manually printing ANSI sequences from your own code: + +.. code-block:: python + + print('\033[31m' + 'some red text') + print('\033[39m') # and reset to default color + +...or, Colorama can be used in conjunction with existing ANSI libraries +such as the venerable `Termcolor `_ +the fabulous `Blessings `_, +or the incredible `_Rich `_. + +If you wish Colorama's Fore, Back and Style constants were more capable, +then consider using one of the above highly capable libraries to generate +colors, etc, and use Colorama just for its primary purpose: to convert +those ANSI sequences to also work on Windows: + +.. code-block:: python + + from colorama import init + from termcolor import colored + + # use Colorama to make Termcolor work on Windows too + init() + + # then use Termcolor for all colored text output + print(colored('Hello, World!', 'green', 'on_red')) + +Available formatting constants are:: + + Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Style: DIM, NORMAL, BRIGHT, RESET_ALL + +``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will +perform this reset automatically on program exit. 
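The automatic reset on program exit mentioned above is implemented in ``initialise.py`` (added later in this diff) by registering a handler with ``atexit``; roughly the following pattern, shown here only as a simplified sketch of that mechanism:

.. code-block:: python

    import atexit
    import sys

    def _reset_terminal_colors():
        # Style.RESET_ALL corresponds to the SGR reset sequence ESC[0m.
        sys.stdout.write("\033[0m")
        sys.stdout.flush()

    # colorama's init() registers an equivalent handler exactly once.
    atexit.register(_reset_terminal_colors)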
+ +These are fairly well supported, but not part of the standard:: + + Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + + +Cursor Positioning +.................. + +ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for +an example of how to generate them. + + +Init Keyword Args +................. + +``init()`` accepts some ``**kwargs`` to override default behaviour. + +init(autoreset=False): + If you find yourself repeatedly sending reset sequences to turn off color + changes at the end of every print, then ``init(autoreset=True)`` will + automate that: + + .. code-block:: python + + from colorama import init + init(autoreset=True) + print(Fore.RED + 'some red text') + print('automatically back to default color again') + +init(strip=None): + Pass ``True`` or ``False`` to override whether ANSI codes should be + stripped from the output. The default behaviour is to strip if on Windows + or if output is redirected (not a tty). + +init(convert=None): + Pass ``True`` or ``False`` to override whether to convert ANSI codes in the + output into win32 calls. The default behaviour is to convert if on Windows + and output is to a tty (terminal). + +init(wrap=True): + On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr`` + with proxy objects, which override the ``.write()`` method to do their work. + If this wrapping causes you problems, then this can be disabled by passing + ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or + ``strip`` or ``convert`` are True. + + When wrapping is disabled, colored printing on non-Windows platforms will + continue to work as normal. To do cross-platform colored output, you can + use Colorama's ``AnsiToWin32`` proxy directly: + + .. code-block:: python + + import sys + from colorama import init, AnsiToWin32 + init(wrap=False) + stream = AnsiToWin32(sys.stderr).stream + + # Python 2 + print >>stream, Fore.BLUE + 'blue text on stderr' + + # Python 3 + print(Fore.BLUE + 'blue text on stderr', file=stream) + + +Recognised ANSI Sequences +......................... + +ANSI sequences generally take the form:: + + ESC [ ; ... + +Where ```` is an integer, and ```` is a single letter. Zero or +more params are passed to a ````. If no params are passed, it is +generally synonymous with passing a single zero. No spaces exist in the +sequence; they have been inserted here simply to read more easily. 
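The general form described above is ``ESC [ <param> ; <param> ... <command>``. As a purely illustrative helper (not part of colorama's API), an SGR (``'m'``) sequence can be assembled like this:

.. code-block:: python

    CSI = "\033["  # Control Sequence Introducer, as defined in colorama/ansi.py

    def sgr(*params: int) -> str:
        """Build an 'ESC [ p1 ; p2 ... m' sequence from numeric parameters."""
        return CSI + ";".join(str(p) for p in params) + "m"

    # Bright cyan text on a magenta background, then reset everything.
    print(sgr(36, 45, 1) + "some highlighted text" + sgr(0))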
+ +The only ANSI sequences that Colorama converts into win32 calls are:: + + ESC [ 0 m # reset all (colors and brightness) + ESC [ 1 m # bright + ESC [ 2 m # dim (looks same as normal brightness) + ESC [ 22 m # normal brightness + + # FOREGROUND: + ESC [ 30 m # black + ESC [ 31 m # red + ESC [ 32 m # green + ESC [ 33 m # yellow + ESC [ 34 m # blue + ESC [ 35 m # magenta + ESC [ 36 m # cyan + ESC [ 37 m # white + ESC [ 39 m # reset + + # BACKGROUND + ESC [ 40 m # black + ESC [ 41 m # red + ESC [ 42 m # green + ESC [ 43 m # yellow + ESC [ 44 m # blue + ESC [ 45 m # magenta + ESC [ 46 m # cyan + ESC [ 47 m # white + ESC [ 49 m # reset + + # cursor positioning + ESC [ y;x H # position cursor at x across, y down + ESC [ y;x f # position cursor at x across, y down + ESC [ n A # move cursor n lines up + ESC [ n B # move cursor n lines down + ESC [ n C # move cursor n characters forward + ESC [ n D # move cursor n characters backward + + # clear the screen + ESC [ mode J # clear the screen + + # clear the line + ESC [ mode K # clear the line + +Multiple numeric params to the ``'m'`` command can be combined into a single +sequence:: + + ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background + +All other ANSI sequences of the form ``ESC [ ; ... `` +are silently stripped from the output on Windows. + +Any other form of ANSI sequence, such as single-character codes or alternative +initial characters, are not recognised or stripped. It would be cool to add +them though. Let me know if it would be useful for you, via the Issues on +GitHub. + + +Status & Known Problems +----------------------- + +I've personally only tested it on Windows XP (CMD, Console2), Ubuntu +(gnome-terminal, xterm), and OS X. + +Some presumably valid ANSI sequences aren't recognised (see details below), +but to my knowledge nobody has yet complained about this. Puzzling. + +See outstanding issues and wish-list: +https://github.com/tartley/colorama/issues + +If anything doesn't work for you, or doesn't do what you expected or hoped for, +I'd love to hear about it on that issues list, would be delighted by patches, +and would be happy to grant commit access to anyone who submits a working patch +or two. + +If you're hacking on the code, see `README-hacking.md`_. + +.. _README-hacking.md: README-hacking.md + + +License +------- + +Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see +LICENSE file. + + +Professional support +-------------------- + +.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for colorama is available as part of the + `Tidelift Subscription`_. + Tidelift gives software development teams a single source for purchasing + and maintaining their software, with professional grade assurances from + the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + + +Thanks +------ + +* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5. +* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. 
+* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/RECORD b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/RECORD new file mode 100644 index 0000000..02a3c5e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/RECORD @@ -0,0 +1,12 @@ +colorama/__init__.py,sha256=ihDoWQOkapwF7sqQ99AoDoEF3vGYm40OtmgW211cLZw,239 +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=gGrO7MVtwc-j1Sq3jKfZpERT1JWmYSOsTVDiTnFbZU4,10830 +colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915 +colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404 +colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438 +colorama-0.4.5.dist-info/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama-0.4.5.dist-info/METADATA,sha256=Kb6MoYzWBmkPhFCf0SW7a-5Eeyssj-szefJmxokQFSU,15128 +colorama-0.4.5.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +colorama-0.4.5.dist-info/top_level.txt,sha256=_Kx6-Cni2BT1PEATPhrSRxo0d7kSgfBbHf5o7IF1ABw,9 +colorama-0.4.5.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +colorama-0.4.5.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/WHEEL new file mode 100644 index 0000000..0b18a28 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/top_level.txt new file mode 100644 index 0000000..3fcfb51 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama-0.4.5.dist-info/top_level.txt @@ -0,0 +1 @@ +colorama diff --git a/myenv/lib/python3.9/site-packages/colorama/__init__.py b/myenv/lib/python3.9/site-packages/colorama/__init__.py new file 
mode 100644 index 0000000..9138a8c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/__init__.py @@ -0,0 +1,6 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.4.5' diff --git a/myenv/lib/python3.9/site-packages/colorama/ansi.py b/myenv/lib/python3.9/site-packages/colorama/ansi.py new file mode 100644 index 0000000..11ec695 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\a' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + +def set_title(title): + return OSC + '2;' + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + 'J' + +def clear_line(mode=2): + return CSI + str(mode) + 'K' + + +class AnsiCodes(object): + def __init__(self): + # the subclasses declare class attributes which are numbers. + # Upon instantiation we define instance attributes, which are the same + # as the class attributes but wrapped with the ANSI escape sequence + for name in dir(self): + if not name.startswith('_'): + value = getattr(self, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + 'A' + def DOWN(self, n=1): + return CSI + str(n) + 'B' + def FORWARD(self, n=1): + return CSI + str(n) + 'C' + def BACK(self, n=1): + return CSI + str(n) + 'D' + def POS(self, x=1, y=1): + return CSI + str(y) + ';' + str(x) + 'H' + + +class AnsiFore(AnsiCodes): + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack(AnsiCodes): + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/myenv/lib/python3.9/site-packages/colorama/ansitowin32.py b/myenv/lib/python3.9/site-packages/colorama/ansitowin32.py new file mode 100644 index 0000000..3db248b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/ansitowin32.py @@ -0,0 +1,266 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL +from .winterm import WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. + self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def __enter__(self, *args, **kwargs): + # special method lookup bypasses __getattr__/__getattribute__, see + # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit + # thus, contextlib magic methods are not proxied via __getattr__ + return self.__wrapped.__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + return self.__wrapped.__exit__(*args, **kwargs) + + def __setstate__(self, state): + self.__dict__ = state + + def __getstate__(self): + return self.__dict__ + + def write(self, text): + self.__convertor.write(text) + + def isatty(self): + stream = self.__wrapped + if 'PYCHARM_HOSTED' in os.environ: + if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): + return True + try: + stream_isatty = stream.isatty + except AttributeError: + return False + else: + return stream_isatty() + + @property + def closed(self): + stream = self.__wrapped + try: + return stream.closed + # AttributeError in the case that the stream doesn't support being closed + # ValueError for the case that the stream has already been detached when atexit runs + except (AttributeError, ValueError): + return True + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. + ''' + ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + # We test if the WinAPI works, because even if we are on Windows + # we may be using a terminal that doesn't support the WinAPI + # (e.g. Cygwin Terminal). In this case it's up to the terminal + # to support the ANSI codes. + conversion_supported = on_windows and winapi_test() + + # should we strip ANSI sequences from our output? + if strip is None: + strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = conversion_supported and not self.stream.closed and self.stream.isatty() + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? 
+ self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. 
+ ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text diff --git a/myenv/lib/python3.9/site-packages/colorama/initialise.py b/myenv/lib/python3.9/site-packages/colorama/initialise.py new file mode 100644 index 0000000..430d066 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/initialise.py @@ -0,0 +1,80 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +orig_stdout = None +orig_stderr = None + +wrapped_stdout = None +wrapped_stderr = None + +atexit_done = False + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream diff --git a/myenv/lib/python3.9/site-packages/colorama/win32.py b/myenv/lib/python3.9/site-packages/colorama/win32.py new file mode 100644 index 0000000..c2d8360 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/win32.py @@ -0,0 +1,152 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). 
+ adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) diff --git a/myenv/lib/python3.9/site-packages/colorama/winterm.py b/myenv/lib/python3.9/site-packages/colorama/winterm.py new file mode 100644 index 0000000..0fdb4ec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/colorama/winterm.py @@ -0,0 +1,169 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from . import win32 + + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + elif mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + elif mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE new file mode 100644 index 0000000..0707425 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE @@ -0,0 +1,6 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to cryptography are made +under the terms of *both* these licenses. + +The code used in the OS random engine is derived from CPython, and is licensed +under the terms of the PSF License Agreement. 
diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.APACHE b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.APACHE new file mode 100644 index 0000000..62589ed --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.APACHE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.BSD b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.BSD new file mode 100644 index 0000000..ec1a29d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) Individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of PyCA Cryptography nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.PSF b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.PSF new file mode 100644 index 0000000..4d3a4f5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/LICENSE.PSF @@ -0,0 +1,41 @@ +1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and + the Individual or Organization ("Licensee") accessing and otherwise using Python + 2.7.12 software in source or binary form and its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby + grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative works, + distribute, and otherwise use Python 2.7.12 alone or in any derivative + version, provided, however, that PSF's License Agreement and PSF's notice of + copyright, i.e., "Copyright © 2001-2016 Python Software Foundation; All Rights + Reserved" are retained in Python 2.7.12 alone or in any derivative version + prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on or + incorporates Python 2.7.12 or any part thereof, and wants to make the + derivative work available to others as provided herein, then Licensee hereby + agrees to include in any such work a brief summary of the changes made to Python + 2.7.12. + +4. PSF is making Python 2.7.12 available to Licensee on an "AS IS" basis. + PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF + EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR + WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE + USE OF PYTHON 2.7.12 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.12 + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF + MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.12, OR ANY DERIVATIVE + THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of + its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any relationship + of agency, partnership, or joint venture between PSF and Licensee. This License + Agreement does not grant permission to use PSF trademarks or trade name in a + trademark sense to endorse or promote products or services of Licensee, or any + third party. + +8. By copying, installing or otherwise using Python 2.7.12, Licensee agrees + to be bound by the terms and conditions of this License Agreement. diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/METADATA b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/METADATA new file mode 100644 index 0000000..9ed7d6c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/METADATA @@ -0,0 +1,138 @@ +Metadata-Version: 2.1 +Name: cryptography +Version: 37.0.4 +Summary: cryptography is a package which provides cryptographic recipes and primitives to Python developers. 
+Home-page: https://github.com/pyca/cryptography +Author: The Python Cryptographic Authority and individual contributors +Author-email: cryptography-dev@python.org +License: BSD-3-Clause OR Apache-2.0 +Project-URL: Documentation, https://cryptography.io/ +Project-URL: Source, https://github.com/pyca/cryptography/ +Project-URL: Issues, https://github.com/pyca/cryptography/issues +Project-URL: Changelog, https://cryptography.io/en/latest/changelog/ +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: License :: OSI Approved :: BSD License +Classifier: Natural Language :: English +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: BSD +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Security :: Cryptography +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.APACHE +License-File: LICENSE.BSD +License-File: LICENSE.PSF +Requires-Dist: cffi (>=1.12) +Provides-Extra: docs +Requires-Dist: sphinx (!=1.8.0,!=3.1.0,!=3.1.1,>=1.6.5) ; extra == 'docs' +Requires-Dist: sphinx-rtd-theme ; extra == 'docs' +Provides-Extra: docstest +Requires-Dist: pyenchant (>=1.6.11) ; extra == 'docstest' +Requires-Dist: twine (>=1.12.0) ; extra == 'docstest' +Requires-Dist: sphinxcontrib-spelling (>=4.0.1) ; extra == 'docstest' +Provides-Extra: pep8test +Requires-Dist: black ; extra == 'pep8test' +Requires-Dist: flake8 ; extra == 'pep8test' +Requires-Dist: flake8-import-order ; extra == 'pep8test' +Requires-Dist: pep8-naming ; extra == 'pep8test' +Provides-Extra: sdist +Requires-Dist: setuptools-rust (>=0.11.4) ; extra == 'sdist' +Provides-Extra: ssh +Requires-Dist: bcrypt (>=3.1.5) ; extra == 'ssh' +Provides-Extra: test +Requires-Dist: pytest (>=6.2.0) ; extra == 'test' +Requires-Dist: pytest-benchmark ; extra == 'test' +Requires-Dist: pytest-cov ; extra == 'test' +Requires-Dist: pytest-subtests ; extra == 'test' +Requires-Dist: pytest-xdist ; extra == 'test' +Requires-Dist: pretend ; extra == 'test' +Requires-Dist: iso8601 ; extra == 'test' +Requires-Dist: pytz ; extra == 'test' +Requires-Dist: hypothesis (!=3.79.2,>=1.11.4) ; extra == 'test' + +pyca/cryptography +================= + +.. image:: https://img.shields.io/pypi/v/cryptography.svg + :target: https://pypi.org/project/cryptography/ + :alt: Latest Version + +.. image:: https://readthedocs.org/projects/cryptography/badge/?version=latest + :target: https://cryptography.io + :alt: Latest Docs + +.. image:: https://github.com/pyca/cryptography/workflows/CI/badge.svg?branch=main + :target: https://github.com/pyca/cryptography/actions?query=workflow%3ACI+branch%3Amain + +.. 
image:: https://codecov.io/github/pyca/cryptography/coverage.svg?branch=main + :target: https://codecov.io/github/pyca/cryptography?branch=main + + +``cryptography`` is a package which provides cryptographic recipes and +primitives to Python developers. Our goal is for it to be your "cryptographic +standard library". It supports Python 3.6+ and PyPy3 7.2+. + +``cryptography`` includes both high level recipes and low level interfaces to +common cryptographic algorithms such as symmetric ciphers, message digests, and +key derivation functions. For example, to encrypt something with +``cryptography``'s high level symmetric encryption recipe: + +.. code-block:: pycon + + >>> from cryptography.fernet import Fernet + >>> # Put this somewhere safe! + >>> key = Fernet.generate_key() + >>> f = Fernet(key) + >>> token = f.encrypt(b"A really secret message. Not for prying eyes.") + >>> token + '...' + >>> f.decrypt(token) + 'A really secret message. Not for prying eyes.' + +You can find more information in the `documentation`_. + +You can install ``cryptography`` with: + +.. code-block:: console + + $ pip install cryptography + +For full details see `the installation documentation`_. + +Discussion +~~~~~~~~~~ + +If you run into bugs, you can file them in our `issue tracker`_. + +We maintain a `cryptography-dev`_ mailing list for development discussion. + +You can also join ``#pyca`` on ``irc.libera.chat`` to ask questions or get +involved. + +Security +~~~~~~~~ + +Need to report a security issue? Please consult our `security reporting`_ +documentation. + + +.. _`documentation`: https://cryptography.io/ +.. _`the installation documentation`: https://cryptography.io/en/latest/installation/ +.. _`issue tracker`: https://github.com/pyca/cryptography/issues +.. _`cryptography-dev`: https://mail.python.org/mailman/listinfo/cryptography-dev +.. 
_`security reporting`: https://cryptography.io/en/latest/security/ diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/RECORD b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/RECORD new file mode 100644 index 0000000..c09d990 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/RECORD @@ -0,0 +1,99 @@ +cryptography/__about__.py,sha256=--Ir4xTpXvJIAKXDX9hap3KQf2E9--cHRoaKB-fb_cY,417 +cryptography/__init__.py,sha256=j08JCN_u_m8eL-zxbXRxgsriW6Oe29oSSo_e2hyyasg,748 +cryptography/exceptions.py,sha256=sN_VVTF_LuKMM6R-lIASFFuzAmz1uZ2Qbcdko9WyS64,1471 +cryptography/fernet.py,sha256=W1NZKgmIxX-iMJr6GW-vm-j4BkYACKxHjrq4cbIZMaw,6604 +cryptography/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +cryptography/utils.py,sha256=v3zj8FS2bHYAmH__CECwfa0flmFCQnGrxZBRQcDys1A,5627 +cryptography/hazmat/__init__.py,sha256=OYlvgprzULzZlsf3yYTsd6VUVyQmpsbHjgJdNnsyRwE,418 +cryptography/hazmat/_oid.py,sha256=CrOgnohV6ckWZU7ZFUYvgf_Gj8npC7LhJfzBB6wLHYc,15440 +cryptography/hazmat/backends/__init__.py,sha256=bgrjB1SX2vXX-rmfG7A4PqGkq-isqQVXGaZtjWHAgj0,324 +cryptography/hazmat/backends/openssl/__init__.py,sha256=7rpz1Z3eV9vZy_d2iLrwC8Oz0vEruDFrjJlc6W2ZDXA,271 +cryptography/hazmat/backends/openssl/aead.py,sha256=1GASyrJPO8a-mDPTT7VJZXPb_0zEdrkW-Wu_rxV-6RQ,8442 +cryptography/hazmat/backends/openssl/backend.py,sha256=XBpLgVxVYvXwmkdcCwd0bVQWK0EBTHTbCFDsLGsBbBg,96584 +cryptography/hazmat/backends/openssl/ciphers.py,sha256=n3rrPQZi1blJBKqIWeMG6-U6YTvEb8rXGQKn8i-kFog,10342 +cryptography/hazmat/backends/openssl/cmac.py,sha256=K5-S0H72KHZH-WPAcHL5jCtcNyoBZSMe7VmxGn8_VWA,3005 +cryptography/hazmat/backends/openssl/decode_asn1.py,sha256=nSqtgO5MJVf_UUkvw9tez10zhGnsGHq24OP1X2GKOe4,1113 +cryptography/hazmat/backends/openssl/dh.py,sha256=9fwPordToELTkeJ-c7TuO9NiE1vfUBejk2QEUZbvo4s,12230 +cryptography/hazmat/backends/openssl/dsa.py,sha256=awfP80ykAfb4C_I-aOo-PnGU1DF6uf8bnEi-jld18ec,8888 +cryptography/hazmat/backends/openssl/ec.py,sha256=kgxwW508FTXDwGG-7pSywBLlICZKKfo4bcTHnNpsvJY,11103 +cryptography/hazmat/backends/openssl/ed25519.py,sha256=irHT-jSbpTNMMHqw5T885uzAi3Syf3kaaHuTnKgQPSg,5920 +cryptography/hazmat/backends/openssl/ed448.py,sha256=K8HDEiXl98QGJ-4llT4SVZf5-xe8aCuci00DkZf0lhw,5874 +cryptography/hazmat/backends/openssl/encode_asn1.py,sha256=4RUYVTpkYh6J1BnmYdr3G8xv4X1H-K2k2-fQoIDkpHI,570 +cryptography/hazmat/backends/openssl/hashes.py,sha256=3L5bkCOo2LbRSVNGLca_9rpCZ2zb8ISBrMLtts1BkEw,3241 +cryptography/hazmat/backends/openssl/hmac.py,sha256=9RX8bo9ywJievoodxjmqCXmD2iUWyH2jBmw78Hb-pOY,3095 +cryptography/hazmat/backends/openssl/poly1305.py,sha256=_qyGCXNaQVCFpa1qjb_9UtsI6lmnki_15Jbc5vihbeE,2514 +cryptography/hazmat/backends/openssl/rsa.py,sha256=KK97C_jBJEUGfKfJK7E8QZ-uZb6DTJjX6dLOuVbqTI8,20720 +cryptography/hazmat/backends/openssl/utils.py,sha256=7Ara81KkY0QCLPqW6kUG9dEsp52cZ3kOUJczwEpecJ0,1977 +cryptography/hazmat/backends/openssl/x25519.py,sha256=oA_ao4o27ki_OAx0UXNeI2ItZ84Xg_li7It1DxFlrZ0,4753 +cryptography/hazmat/backends/openssl/x448.py,sha256=a_zgqGUpGFvyKEoKRR1vgNdD_gk1gxGYpBp1a6x9HuE,4338 +cryptography/hazmat/backends/openssl/x509.py,sha256=WUoRC6UDM9FkOdn6xR5Mk-v_WCq7eJryenGN9ni8L-A,1452 +cryptography/hazmat/bindings/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180 +cryptography/hazmat/bindings/_openssl.abi3.so,sha256=8iHtYijm7UPnTTJ2C0n5tOrVDan81I-HEF9oR2SvVyM,6023128 +cryptography/hazmat/bindings/_rust.abi3.so,sha256=OmU1FnjADpbk4-CDvbk2G63VxB6MbrYeivWX_IBBNz0,2004592 
+cryptography/hazmat/bindings/_rust/__init__.pyi,sha256=ga5QLYp8MmumQB-Rp4TGHq_NAqONcXTrLm2712TZ9Ms,103 +cryptography/hazmat/bindings/_rust/asn1.pyi,sha256=Hovrt8dXZ9p8BKHaroPYmunu1VtDuirslJnE4jTG28s,411 +cryptography/hazmat/bindings/_rust/ocsp.pyi,sha256=jATWMh1yz5JpnnT7A10_sbY-ja5zARnOpZaToLqm43Y,768 +cryptography/hazmat/bindings/_rust/x509.pyi,sha256=YdnUA9uu60WhQP5QihTgo2pBrT49wxYtayCZ4trgZAg,1497 +cryptography/hazmat/bindings/openssl/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180 +cryptography/hazmat/bindings/openssl/_conditional.py,sha256=K0JIsYkDBifV-x5WDeq9M1Hofr6HW667rDFlhDEiIMQ,10078 +cryptography/hazmat/bindings/openssl/binding.py,sha256=hWWp-N_JMUuYaNuRHgpadEEdVJsL-wAhdQpTj-Pi0Vc,8044 +cryptography/hazmat/primitives/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180 +cryptography/hazmat/primitives/_asymmetric.py,sha256=nVJwmxkakirAXfFp410pC4kY_CinzN5FSJwhEn2IE34,485 +cryptography/hazmat/primitives/_cipheralgorithm.py,sha256=zd7N8rBYWaf8tPM7GDtZ9vUgarK_P0_PUNCFi3A0u0c,1016 +cryptography/hazmat/primitives/_serialization.py,sha256=OC_uXC5cNHucoOkHuTsZbfcQ9bvZs1cq7b18TcJu4Es,1341 +cryptography/hazmat/primitives/cmac.py,sha256=ODkc7EonY1cRxyJ0SYOuwtiYQv6B0ZPxJQm3rXxfXd4,2037 +cryptography/hazmat/primitives/constant_time.py,sha256=6bkW00QjhKusdgsQbexXhMlGX0XRN59XNmxWS2W38NA,387 +cryptography/hazmat/primitives/hashes.py,sha256=cpaYjgkazlq7Xw0MVoR3cp17mD0TgyEvhZQbyoAWHzU,5996 +cryptography/hazmat/primitives/hmac.py,sha256=M_sa4smPIkO8ra17Xl_cM0daRhGCozUu_8gnHePEIb0,2131 +cryptography/hazmat/primitives/keywrap.py,sha256=TWqyG9K7k-Ymq4kcIw7u3NIKUPVDtv6bimwxIJYTe20,5643 +cryptography/hazmat/primitives/padding.py,sha256=xruasOE5Cd8KEQ-yp9W6v9WKPvKH-GudHCPKQ7A8HfI,6207 +cryptography/hazmat/primitives/poly1305.py,sha256=QvxPMrqjgKJt0mOZSeZKk4NcxsNCd2kgfI-X1CmyUW4,1837 +cryptography/hazmat/primitives/asymmetric/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180 +cryptography/hazmat/primitives/asymmetric/dh.py,sha256=pjjgKFcn2bCAaL_5zr0ygwXM8pHJFyAO6koJFkzhQb8,6604 +cryptography/hazmat/primitives/asymmetric/dsa.py,sha256=dIo6lYiHWRWUCxwejAi01w1-3jjmzEuJovqaVqDO3_g,7870 +cryptography/hazmat/primitives/asymmetric/ec.py,sha256=wX8wH9bD7g7-YxmINam_s9cPc9RUPrui2QdIKg6Q3Nc,14468 +cryptography/hazmat/primitives/asymmetric/ed25519.py,sha256=1qOl1UWV_-cXKHhwlFSyPBdhpx2HMDRukkI6eI5i8vM,2728 +cryptography/hazmat/primitives/asymmetric/ed448.py,sha256=oR-j4jGcWUnGxWi1GygHxVZbgkSOKHsR6y1E3Lf6wYM,2647 +cryptography/hazmat/primitives/asymmetric/padding.py,sha256=EkKuY9e6UFqSuQ0LvyKYKl_L19tOfNCTlHWEiKgHeUc,2690 +cryptography/hazmat/primitives/asymmetric/rsa.py,sha256=GiCIuBuJdpeew-yJ7mnTF4KFH_FUJaut1r-d6TRs31s,11322 +cryptography/hazmat/primitives/asymmetric/types.py,sha256=VidxjUWPOMB8q8vGiaXMhY715Zw8U9Vd4_rkqjOagRE,1814 +cryptography/hazmat/primitives/asymmetric/utils.py,sha256=5hD4KjfMbmozeFq08PLVunHr4FgeVzV1NkKalECM26s,756 +cryptography/hazmat/primitives/asymmetric/x25519.py,sha256=-nbaGlgT1sufO9Ic-urwKDql8Da0U3GL6hZJIMqHgVc,2588 +cryptography/hazmat/primitives/asymmetric/x448.py,sha256=V3lxb1VOiRTa3bzVUC3uZat2ogfExUOdktCIGUUMZ2Y,2556 +cryptography/hazmat/primitives/ciphers/__init__.py,sha256=Qp78Y3PDSRfwp7DDa3pezlLrED_QFhic_LvDw4LM9ZQ,646 +cryptography/hazmat/primitives/ciphers/aead.py,sha256=QnJD2doZ8XdpCIrDwqJBNgaw2eG9Tx4FWirIP159MAg,11488 +cryptography/hazmat/primitives/ciphers/algorithms.py,sha256=GoM_c2LNonci43B06g0e9zXW6PfBh_uAFiZhIxrQ_x4,4677 +cryptography/hazmat/primitives/ciphers/base.py,sha256=AiCYCzXbSZ9wQbXWMYc60IKmzqz5619YdaaF0zVr4rY,8251 
+cryptography/hazmat/primitives/ciphers/modes.py,sha256=Pb2h2X0HUVfM6xKdSZjCgCsUcCQ32mDTSTRQQl4-1Gs,7988 +cryptography/hazmat/primitives/kdf/__init__.py,sha256=DcZhzfLG8d8IYBH771lGTVU5S87OQDpu3nrfOwZnsmA,715 +cryptography/hazmat/primitives/kdf/concatkdf.py,sha256=5YXw8cLZCBYT6rVDGS5URQEeFiPW-ZRBRcPdZQIxTMA,3772 +cryptography/hazmat/primitives/kdf/hkdf.py,sha256=LlDQbCvlNzuLa_UJXrkG5fXGjAjor5Wunv2378TBmms,3031 +cryptography/hazmat/primitives/kdf/kbkdf.py,sha256=QmgJw2_l0D21DEMMfuNQ6e1IaLV3bjwOzMJEAXhpOVs,7699 +cryptography/hazmat/primitives/kdf/pbkdf2.py,sha256=wEMH4CJfPccCg9apQLXyWUWBrZLTpYLLnoZEnzvaHQo,2032 +cryptography/hazmat/primitives/kdf/scrypt.py,sha256=JvX_cD0o0Op5EcFNeZhr-vI5sYv_LdnJ6kNEbW3u5ow,2228 +cryptography/hazmat/primitives/kdf/x963kdf.py,sha256=JsdrJhw2IJVYkl8JIWUN66h7DrKZM2RoQ_tw_iKAvdI,2018 +cryptography/hazmat/primitives/serialization/__init__.py,sha256=RALEthF7wRjlMyTvSq09XmKQey74tsSdDCCsDaD6yQU,1129 +cryptography/hazmat/primitives/serialization/base.py,sha256=yw8_yzIvruT6fmS-KrTmIXbAF00rItH48WXTPOSLdJ4,1761 +cryptography/hazmat/primitives/serialization/pkcs12.py,sha256=tHQlCKOY0EfOBBiqR_et4TgcDY_OAtRENC69arjvyLU,6481 +cryptography/hazmat/primitives/serialization/pkcs7.py,sha256=LnISP-1SEDXCpsoEbR0EfuIlWm8eJAgWupt0gvHyyIU,5870 +cryptography/hazmat/primitives/serialization/ssh.py,sha256=Bp4M8yFrLnw9Oj3jCbttewovymCX_OGzBg5GBw4RPpA,23923 +cryptography/hazmat/primitives/twofactor/__init__.py,sha256=ZHo4zwWidFP2RWFl8luiNuYkVMZPghzx54izPNSCtD4,222 +cryptography/hazmat/primitives/twofactor/hotp.py,sha256=v4wkTbdc1E53POx6pdNnEUBvANbmt4f6scQSsTgABeU,2989 +cryptography/hazmat/primitives/twofactor/totp.py,sha256=bIIxOI-LcLGNahB5kN7A_TwEyYMTsLjHd8eJc4b2cLg,1449 +cryptography/x509/__init__.py,sha256=yC0TbuvPmWL1U4rEY-0m46SayuxCfPVNFWjJJdi5lY0,7654 +cryptography/x509/base.py,sha256=FNBVG8ACCBZLF5TXlq1vjEPWtlR5YZ1Bs25AIxEHI34,33747 +cryptography/x509/certificate_transparency.py,sha256=Elm_-GGA6k9zrcm5KYVY5uTirDsvGc_BUuTLR7Hu-K4,1119 +cryptography/x509/extensions.py,sha256=LFy1jgd_0Z_P-lE2-07nZGJmsFh_1cOOTAl9ejyXFHQ,64999 +cryptography/x509/general_name.py,sha256=S_kJd4ZsNGrMfi2osfFJEWqPxy3oPCAWpLb91yhxzPs,7896 +cryptography/x509/name.py,sha256=EOtO9CscxrfsxhO6GTEfVhiZo3_EE7qGIFfv1eeI4-U,14200 +cryptography/x509/ocsp.py,sha256=OQKsqW_Y4mWY53UT_JG79RJR19xt53Q-iQSSw4m0kZM,16691 +cryptography/x509/oid.py,sha256=CLIlQwzE3PQXMvkKep4JbzVUaRDl_stwcX_U6-s2cNw,794 +cryptography-37.0.4.dist-info/LICENSE,sha256=Q9rSzHUqtyHNmp827OcPtTq3cTVR8tPYaU2OjFoG1uI,323 +cryptography-37.0.4.dist-info/LICENSE.APACHE,sha256=qsc7MUj20dcRHbyjIJn2jSbGRMaBOuHk8F9leaomY_4,11360 +cryptography-37.0.4.dist-info/LICENSE.BSD,sha256=YCxMdILeZHndLpeTzaJ15eY9dz2s0eymiSMqtwCPtPs,1532 +cryptography-37.0.4.dist-info/LICENSE.PSF,sha256=aT7ApmKzn5laTyUrA6YiKUVHDBtvEsoCkY5O_g32S58,2415 +cryptography-37.0.4.dist-info/METADATA,sha256=xTRAAMAb3cHqW1cXChdK9TXOWG2ghhz-sdfmRXvXrqU,5434 +cryptography-37.0.4.dist-info/WHEEL,sha256=MfjkjXwqeYlORnFhJNWi-eFW4jhozqV8XXeu9PzVv9Y,110 +cryptography-37.0.4.dist-info/top_level.txt,sha256=zYbdX67v4JFZPfsaNue7ZV4-mgoRqYCAhMsNgt22LqA,22 +cryptography-37.0.4.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +cryptography-37.0.4.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/WHEEL new file mode 100644 index 0000000..fd41c07 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 
+Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: false +Tag: cp36-abi3-macosx_10_10_x86_64 + diff --git a/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/top_level.txt new file mode 100644 index 0000000..f512e40 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography-37.0.4.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_openssl +cryptography diff --git a/myenv/lib/python3.9/site-packages/cryptography/__about__.py b/myenv/lib/python3.9/site-packages/cryptography/__about__.py new file mode 100644 index 0000000..399beb6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/__about__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +__all__ = [ + "__version__", + "__author__", + "__copyright__", +] + +__version__ = "37.0.4" + +__author__ = "The Python Cryptographic Authority and individual contributors" +__copyright__ = "Copyright 2013-2022 {}".format(__author__) diff --git a/myenv/lib/python3.9/site-packages/cryptography/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/__init__.py new file mode 100644 index 0000000..599bf51 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/__init__.py @@ -0,0 +1,29 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import sys +import warnings + +from cryptography.__about__ import ( + __author__, + __copyright__, + __version__, +) +from cryptography.utils import CryptographyDeprecationWarning + + +__all__ = [ + "__version__", + "__author__", + "__copyright__", +] + +if sys.version_info[:2] == (3, 6): + warnings.warn( + "Python 3.6 is no longer supported by the Python core team. " + "Therefore, support for it is deprecated in cryptography and will be" + " removed in a future release.", + CryptographyDeprecationWarning, + stacklevel=2, + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/exceptions.py b/myenv/lib/python3.9/site-packages/cryptography/exceptions.py new file mode 100644 index 0000000..a315703 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/exceptions.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ + +import typing + +from cryptography import utils + +if typing.TYPE_CHECKING: + from cryptography.hazmat.bindings.openssl.binding import ( + _OpenSSLErrorWithText, + ) + + +class _Reasons(utils.Enum): + BACKEND_MISSING_INTERFACE = 0 + UNSUPPORTED_HASH = 1 + UNSUPPORTED_CIPHER = 2 + UNSUPPORTED_PADDING = 3 + UNSUPPORTED_MGF = 4 + UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5 + UNSUPPORTED_ELLIPTIC_CURVE = 6 + UNSUPPORTED_SERIALIZATION = 7 + UNSUPPORTED_X509 = 8 + UNSUPPORTED_EXCHANGE_ALGORITHM = 9 + UNSUPPORTED_DIFFIE_HELLMAN = 10 + UNSUPPORTED_MAC = 11 + + +class UnsupportedAlgorithm(Exception): + def __init__( + self, message: str, reason: typing.Optional[_Reasons] = None + ) -> None: + super(UnsupportedAlgorithm, self).__init__(message) + self._reason = reason + + +class AlreadyFinalized(Exception): + pass + + +class AlreadyUpdated(Exception): + pass + + +class NotYetFinalized(Exception): + pass + + +class InvalidTag(Exception): + pass + + +class InvalidSignature(Exception): + pass + + +class InternalError(Exception): + def __init__( + self, msg: str, err_code: typing.List["_OpenSSLErrorWithText"] + ) -> None: + super(InternalError, self).__init__(msg) + self.err_code = err_code + + +class InvalidKey(Exception): + pass diff --git a/myenv/lib/python3.9/site-packages/cryptography/fernet.py b/myenv/lib/python3.9/site-packages/cryptography/fernet.py new file mode 100644 index 0000000..9c5a3d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/fernet.py @@ -0,0 +1,212 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import base64 +import binascii +import os +import time +import typing + +from cryptography import utils +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives import hashes, padding +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +from cryptography.hazmat.primitives.hmac import HMAC + + +class InvalidToken(Exception): + pass + + +_MAX_CLOCK_SKEW = 60 + + +class Fernet: + def __init__( + self, + key: typing.Union[bytes, str], + backend: typing.Any = None, + ): + try: + key = base64.urlsafe_b64decode(key) + except binascii.Error as exc: + raise ValueError( + "Fernet key must be 32 url-safe base64-encoded bytes." + ) from exc + if len(key) != 32: + raise ValueError( + "Fernet key must be 32 url-safe base64-encoded bytes." 
+ ) + + self._signing_key = key[:16] + self._encryption_key = key[16:] + + @classmethod + def generate_key(cls) -> bytes: + return base64.urlsafe_b64encode(os.urandom(32)) + + def encrypt(self, data: bytes) -> bytes: + return self.encrypt_at_time(data, int(time.time())) + + def encrypt_at_time(self, data: bytes, current_time: int) -> bytes: + iv = os.urandom(16) + return self._encrypt_from_parts(data, current_time, iv) + + def _encrypt_from_parts( + self, data: bytes, current_time: int, iv: bytes + ) -> bytes: + utils._check_bytes("data", data) + + padder = padding.PKCS7(algorithms.AES.block_size).padder() + padded_data = padder.update(data) + padder.finalize() + encryptor = Cipher( + algorithms.AES(self._encryption_key), + modes.CBC(iv), + ).encryptor() + ciphertext = encryptor.update(padded_data) + encryptor.finalize() + + basic_parts = ( + b"\x80" + + current_time.to_bytes(length=8, byteorder="big") + + iv + + ciphertext + ) + + h = HMAC(self._signing_key, hashes.SHA256()) + h.update(basic_parts) + hmac = h.finalize() + return base64.urlsafe_b64encode(basic_parts + hmac) + + def decrypt(self, token: bytes, ttl: typing.Optional[int] = None) -> bytes: + timestamp, data = Fernet._get_unverified_token_data(token) + if ttl is None: + time_info = None + else: + time_info = (ttl, int(time.time())) + return self._decrypt_data(data, timestamp, time_info) + + def decrypt_at_time( + self, token: bytes, ttl: int, current_time: int + ) -> bytes: + if ttl is None: + raise ValueError( + "decrypt_at_time() can only be used with a non-None ttl" + ) + timestamp, data = Fernet._get_unverified_token_data(token) + return self._decrypt_data(data, timestamp, (ttl, current_time)) + + def extract_timestamp(self, token: bytes) -> int: + timestamp, data = Fernet._get_unverified_token_data(token) + # Verify the token was not tampered with. 
+ self._verify_signature(data) + return timestamp + + @staticmethod + def _get_unverified_token_data(token: bytes) -> typing.Tuple[int, bytes]: + utils._check_bytes("token", token) + try: + data = base64.urlsafe_b64decode(token) + except (TypeError, binascii.Error): + raise InvalidToken + + if not data or data[0] != 0x80: + raise InvalidToken + + if len(data) < 9: + raise InvalidToken + + timestamp = int.from_bytes(data[1:9], byteorder="big") + return timestamp, data + + def _verify_signature(self, data: bytes) -> None: + h = HMAC(self._signing_key, hashes.SHA256()) + h.update(data[:-32]) + try: + h.verify(data[-32:]) + except InvalidSignature: + raise InvalidToken + + def _decrypt_data( + self, + data: bytes, + timestamp: int, + time_info: typing.Optional[typing.Tuple[int, int]], + ) -> bytes: + if time_info is not None: + ttl, current_time = time_info + if timestamp + ttl < current_time: + raise InvalidToken + + if current_time + _MAX_CLOCK_SKEW < timestamp: + raise InvalidToken + + self._verify_signature(data) + + iv = data[9:25] + ciphertext = data[25:-32] + decryptor = Cipher( + algorithms.AES(self._encryption_key), modes.CBC(iv) + ).decryptor() + plaintext_padded = decryptor.update(ciphertext) + try: + plaintext_padded += decryptor.finalize() + except ValueError: + raise InvalidToken + unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() + + unpadded = unpadder.update(plaintext_padded) + try: + unpadded += unpadder.finalize() + except ValueError: + raise InvalidToken + return unpadded + + +class MultiFernet: + def __init__(self, fernets: typing.Iterable[Fernet]): + fernets = list(fernets) + if not fernets: + raise ValueError( + "MultiFernet requires at least one Fernet instance" + ) + self._fernets = fernets + + def encrypt(self, msg: bytes) -> bytes: + return self.encrypt_at_time(msg, int(time.time())) + + def encrypt_at_time(self, msg: bytes, current_time: int) -> bytes: + return self._fernets[0].encrypt_at_time(msg, current_time) + + def rotate(self, msg: bytes) -> bytes: + timestamp, data = Fernet._get_unverified_token_data(msg) + for f in self._fernets: + try: + p = f._decrypt_data(data, timestamp, None) + break + except InvalidToken: + pass + else: + raise InvalidToken + + iv = os.urandom(16) + return self._fernets[0]._encrypt_from_parts(p, timestamp, iv) + + def decrypt(self, msg: bytes, ttl: typing.Optional[int] = None) -> bytes: + for f in self._fernets: + try: + return f.decrypt(msg, ttl) + except InvalidToken: + pass + raise InvalidToken + + def decrypt_at_time( + self, msg: bytes, ttl: int, current_time: int + ) -> bytes: + for f in self._fernets: + try: + return f.decrypt_at_time(msg, ttl, current_time) + except InvalidToken: + pass + raise InvalidToken diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/__init__.py new file mode 100644 index 0000000..007694b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/__init__.py @@ -0,0 +1,10 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +Hazardous Materials + +This is a "Hazardous Materials" module. You should ONLY use it if you're +100% absolutely sure that you know what you're doing because this module +is full of land mines, dragons, and dinosaurs with laser guns. 
+""" diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/_oid.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/_oid.py new file mode 100644 index 0000000..da3668b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/_oid.py @@ -0,0 +1,345 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import typing + +from cryptography.hazmat.primitives import hashes + + +class ObjectIdentifier: + def __init__(self, dotted_string: str) -> None: + self._dotted_string = dotted_string + + nodes = self._dotted_string.split(".") + intnodes = [] + + # There must be at least 2 nodes, the first node must be 0..2, and + # if less than 2, the second node cannot have a value outside the + # range 0..39. All nodes must be integers. + for node in nodes: + try: + node_value = int(node, 10) + except ValueError: + raise ValueError( + f"Malformed OID: {dotted_string} (non-integer nodes)" + ) + if node_value < 0: + raise ValueError( + f"Malformed OID: {dotted_string} (negative-integer nodes)" + ) + intnodes.append(node_value) + + if len(nodes) < 2: + raise ValueError( + f"Malformed OID: {dotted_string} " + "(insufficient number of nodes)" + ) + + if intnodes[0] > 2: + raise ValueError( + f"Malformed OID: {dotted_string} " + "(first node outside valid range)" + ) + + if intnodes[0] < 2 and intnodes[1] >= 40: + raise ValueError( + f"Malformed OID: {dotted_string} " + "(second node outside valid range)" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, ObjectIdentifier): + return NotImplemented + + return self.dotted_string == other.dotted_string + + def __repr__(self) -> str: + return "".format( + self.dotted_string, self._name + ) + + def __hash__(self) -> int: + return hash(self.dotted_string) + + @property + def _name(self) -> str: + return _OID_NAMES.get(self, "Unknown OID") + + @property + def dotted_string(self) -> str: + return self._dotted_string + + +class ExtensionOID: + SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9") + SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14") + KEY_USAGE = ObjectIdentifier("2.5.29.15") + SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17") + ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18") + BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19") + NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30") + CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31") + CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32") + POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33") + AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35") + POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36") + EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37") + FRESHEST_CRL = ObjectIdentifier("2.5.29.46") + INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54") + ISSUING_DISTRIBUTION_POINT = ObjectIdentifier("2.5.29.28") + AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1") + SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11") + OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5") + TLS_FEATURE = ObjectIdentifier("1.3.6.1.5.5.7.1.24") + CRL_NUMBER = ObjectIdentifier("2.5.29.20") + DELTA_CRL_INDICATOR = ObjectIdentifier("2.5.29.27") + PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier( + "1.3.6.1.4.1.11129.2.4.2" + ) + PRECERT_POISON = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3") + SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.5") + + 
+class OCSPExtensionOID: + NONCE = ObjectIdentifier("1.3.6.1.5.5.7.48.1.2") + + +class CRLEntryExtensionOID: + CERTIFICATE_ISSUER = ObjectIdentifier("2.5.29.29") + CRL_REASON = ObjectIdentifier("2.5.29.21") + INVALIDITY_DATE = ObjectIdentifier("2.5.29.24") + + +class NameOID: + COMMON_NAME = ObjectIdentifier("2.5.4.3") + COUNTRY_NAME = ObjectIdentifier("2.5.4.6") + LOCALITY_NAME = ObjectIdentifier("2.5.4.7") + STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8") + STREET_ADDRESS = ObjectIdentifier("2.5.4.9") + ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10") + ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11") + SERIAL_NUMBER = ObjectIdentifier("2.5.4.5") + SURNAME = ObjectIdentifier("2.5.4.4") + GIVEN_NAME = ObjectIdentifier("2.5.4.42") + TITLE = ObjectIdentifier("2.5.4.12") + GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44") + X500_UNIQUE_IDENTIFIER = ObjectIdentifier("2.5.4.45") + DN_QUALIFIER = ObjectIdentifier("2.5.4.46") + PSEUDONYM = ObjectIdentifier("2.5.4.65") + USER_ID = ObjectIdentifier("0.9.2342.19200300.100.1.1") + DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25") + EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1") + JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3") + JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1") + JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier( + "1.3.6.1.4.1.311.60.2.1.2" + ) + BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15") + POSTAL_ADDRESS = ObjectIdentifier("2.5.4.16") + POSTAL_CODE = ObjectIdentifier("2.5.4.17") + INN = ObjectIdentifier("1.2.643.3.131.1.1") + OGRN = ObjectIdentifier("1.2.643.100.1") + SNILS = ObjectIdentifier("1.2.643.100.3") + UNSTRUCTURED_NAME = ObjectIdentifier("1.2.840.113549.1.9.2") + + +class SignatureAlgorithmOID: + RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4") + RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5") + # This is an alternate OID for RSA with SHA1 that is occasionally seen + _RSA_WITH_SHA1 = ObjectIdentifier("1.3.14.3.2.29") + RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14") + RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11") + RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12") + RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13") + RSA_WITH_SHA3_224 = ObjectIdentifier("2.16.840.1.101.3.4.3.13") + RSA_WITH_SHA3_256 = ObjectIdentifier("2.16.840.1.101.3.4.3.14") + RSA_WITH_SHA3_384 = ObjectIdentifier("2.16.840.1.101.3.4.3.15") + RSA_WITH_SHA3_512 = ObjectIdentifier("2.16.840.1.101.3.4.3.16") + RSASSA_PSS = ObjectIdentifier("1.2.840.113549.1.1.10") + ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1") + ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1") + ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2") + ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3") + ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4") + ECDSA_WITH_SHA3_224 = ObjectIdentifier("2.16.840.1.101.3.4.3.9") + ECDSA_WITH_SHA3_256 = ObjectIdentifier("2.16.840.1.101.3.4.3.10") + ECDSA_WITH_SHA3_384 = ObjectIdentifier("2.16.840.1.101.3.4.3.11") + ECDSA_WITH_SHA3_512 = ObjectIdentifier("2.16.840.1.101.3.4.3.12") + DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3") + DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1") + DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2") + DSA_WITH_SHA384 = ObjectIdentifier("2.16.840.1.101.3.4.3.3") + DSA_WITH_SHA512 = ObjectIdentifier("2.16.840.1.101.3.4.3.4") + ED25519 = ObjectIdentifier("1.3.101.112") + ED448 = 
ObjectIdentifier("1.3.101.113") + GOSTR3411_94_WITH_3410_2001 = ObjectIdentifier("1.2.643.2.2.3") + GOSTR3410_2012_WITH_3411_2012_256 = ObjectIdentifier("1.2.643.7.1.1.3.2") + GOSTR3410_2012_WITH_3411_2012_512 = ObjectIdentifier("1.2.643.7.1.1.3.3") + + +_SIG_OIDS_TO_HASH: typing.Dict[ + ObjectIdentifier, typing.Optional[hashes.HashAlgorithm] +] = { + SignatureAlgorithmOID.RSA_WITH_MD5: hashes.MD5(), + SignatureAlgorithmOID.RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID._RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.RSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.RSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.RSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.RSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.ECDSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.ECDSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.ECDSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.ECDSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.ED25519: None, + SignatureAlgorithmOID.ED448: None, + SignatureAlgorithmOID.GOSTR3411_94_WITH_3410_2001: None, + SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_256: None, + SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_512: None, +} + + +class ExtendedKeyUsageOID: + SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1") + CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2") + CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3") + EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4") + TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8") + OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9") + ANY_EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37.0") + SMARTCARD_LOGON = ObjectIdentifier("1.3.6.1.4.1.311.20.2.2") + KERBEROS_PKINIT_KDC = ObjectIdentifier("1.3.6.1.5.2.3.5") + IPSEC_IKE = ObjectIdentifier("1.3.6.1.5.5.7.3.17") + + +class AuthorityInformationAccessOID: + CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2") + OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1") + + +class SubjectInformationAccessOID: + CA_REPOSITORY = ObjectIdentifier("1.3.6.1.5.5.7.48.5") + + +class CertificatePoliciesOID: + CPS_QUALIFIER = ObjectIdentifier("1.3.6.1.5.5.7.2.1") + CPS_USER_NOTICE = ObjectIdentifier("1.3.6.1.5.5.7.2.2") + ANY_POLICY = ObjectIdentifier("2.5.29.32.0") + + +class AttributeOID: + CHALLENGE_PASSWORD = ObjectIdentifier("1.2.840.113549.1.9.7") + UNSTRUCTURED_NAME = ObjectIdentifier("1.2.840.113549.1.9.2") + + +_OID_NAMES = { + NameOID.COMMON_NAME: "commonName", + NameOID.COUNTRY_NAME: "countryName", + NameOID.LOCALITY_NAME: "localityName", + NameOID.STATE_OR_PROVINCE_NAME: "stateOrProvinceName", + NameOID.STREET_ADDRESS: "streetAddress", + NameOID.ORGANIZATION_NAME: "organizationName", + NameOID.ORGANIZATIONAL_UNIT_NAME: "organizationalUnitName", + NameOID.SERIAL_NUMBER: "serialNumber", + NameOID.SURNAME: "surname", + NameOID.GIVEN_NAME: "givenName", + NameOID.TITLE: "title", + NameOID.GENERATION_QUALIFIER: "generationQualifier", + NameOID.X500_UNIQUE_IDENTIFIER: "x500UniqueIdentifier", + NameOID.DN_QUALIFIER: "dnQualifier", + NameOID.PSEUDONYM: "pseudonym", + NameOID.USER_ID: "userID", + NameOID.DOMAIN_COMPONENT: "domainComponent", + NameOID.EMAIL_ADDRESS: "emailAddress", + NameOID.JURISDICTION_COUNTRY_NAME: "jurisdictionCountryName", + NameOID.JURISDICTION_LOCALITY_NAME: 
"jurisdictionLocalityName", + NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME: ( + "jurisdictionStateOrProvinceName" + ), + NameOID.BUSINESS_CATEGORY: "businessCategory", + NameOID.POSTAL_ADDRESS: "postalAddress", + NameOID.POSTAL_CODE: "postalCode", + NameOID.INN: "INN", + NameOID.OGRN: "OGRN", + NameOID.SNILS: "SNILS", + NameOID.UNSTRUCTURED_NAME: "unstructuredName", + SignatureAlgorithmOID.RSA_WITH_MD5: "md5WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA1: "sha1WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA224: "sha224WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA256: "sha256WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA384: "sha384WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA512: "sha512WithRSAEncryption", + SignatureAlgorithmOID.RSASSA_PSS: "RSASSA-PSS", + SignatureAlgorithmOID.ECDSA_WITH_SHA1: "ecdsa-with-SHA1", + SignatureAlgorithmOID.ECDSA_WITH_SHA224: "ecdsa-with-SHA224", + SignatureAlgorithmOID.ECDSA_WITH_SHA256: "ecdsa-with-SHA256", + SignatureAlgorithmOID.ECDSA_WITH_SHA384: "ecdsa-with-SHA384", + SignatureAlgorithmOID.ECDSA_WITH_SHA512: "ecdsa-with-SHA512", + SignatureAlgorithmOID.DSA_WITH_SHA1: "dsa-with-sha1", + SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224", + SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256", + SignatureAlgorithmOID.ED25519: "ed25519", + SignatureAlgorithmOID.ED448: "ed448", + SignatureAlgorithmOID.GOSTR3411_94_WITH_3410_2001: ( + "GOST R 34.11-94 with GOST R 34.10-2001" + ), + SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_256: ( + "GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)" + ), + SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_512: ( + "GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)" + ), + ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth", + ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth", + ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning", + ExtendedKeyUsageOID.EMAIL_PROTECTION: "emailProtection", + ExtendedKeyUsageOID.TIME_STAMPING: "timeStamping", + ExtendedKeyUsageOID.OCSP_SIGNING: "OCSPSigning", + ExtendedKeyUsageOID.SMARTCARD_LOGON: "msSmartcardLogin", + ExtendedKeyUsageOID.KERBEROS_PKINIT_KDC: "pkInitKDC", + ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES: "subjectDirectoryAttributes", + ExtensionOID.SUBJECT_KEY_IDENTIFIER: "subjectKeyIdentifier", + ExtensionOID.KEY_USAGE: "keyUsage", + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: "subjectAltName", + ExtensionOID.ISSUER_ALTERNATIVE_NAME: "issuerAltName", + ExtensionOID.BASIC_CONSTRAINTS: "basicConstraints", + ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: ( + "signedCertificateTimestampList" + ), + ExtensionOID.SIGNED_CERTIFICATE_TIMESTAMPS: ( + "signedCertificateTimestampList" + ), + ExtensionOID.PRECERT_POISON: "ctPoison", + CRLEntryExtensionOID.CRL_REASON: "cRLReason", + CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate", + CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer", + ExtensionOID.NAME_CONSTRAINTS: "nameConstraints", + ExtensionOID.CRL_DISTRIBUTION_POINTS: "cRLDistributionPoints", + ExtensionOID.CERTIFICATE_POLICIES: "certificatePolicies", + ExtensionOID.POLICY_MAPPINGS: "policyMappings", + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: "authorityKeyIdentifier", + ExtensionOID.POLICY_CONSTRAINTS: "policyConstraints", + ExtensionOID.EXTENDED_KEY_USAGE: "extendedKeyUsage", + ExtensionOID.FRESHEST_CRL: "freshestCRL", + ExtensionOID.INHIBIT_ANY_POLICY: "inhibitAnyPolicy", + ExtensionOID.ISSUING_DISTRIBUTION_POINT: ("issuingDistributionPoint"), + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: "authorityInfoAccess", + 
ExtensionOID.SUBJECT_INFORMATION_ACCESS: "subjectInfoAccess", + ExtensionOID.OCSP_NO_CHECK: "OCSPNoCheck", + ExtensionOID.CRL_NUMBER: "cRLNumber", + ExtensionOID.DELTA_CRL_INDICATOR: "deltaCRLIndicator", + ExtensionOID.TLS_FEATURE: "TLSFeature", + AuthorityInformationAccessOID.OCSP: "OCSP", + AuthorityInformationAccessOID.CA_ISSUERS: "caIssuers", + SubjectInformationAccessOID.CA_REPOSITORY: "caRepository", + CertificatePoliciesOID.CPS_QUALIFIER: "id-qt-cps", + CertificatePoliciesOID.CPS_USER_NOTICE: "id-qt-unotice", + OCSPExtensionOID.NONCE: "OCSPNonce", + AttributeOID.CHALLENGE_PASSWORD: "challengePassword", +} diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/__init__.py new file mode 100644 index 0000000..3926f85 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/__init__.py @@ -0,0 +1,10 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from typing import Any + + +def default_backend() -> Any: + from cryptography.hazmat.backends.openssl.backend import backend + + return backend diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/__init__.py new file mode 100644 index 0000000..31fd17c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/__init__.py @@ -0,0 +1,9 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography.hazmat.backends.openssl.backend import backend + + +__all__ = ["backend"] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/aead.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/aead.py new file mode 100644 index 0000000..f7914af --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/aead.py @@ -0,0 +1,251 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +import typing + +from cryptography.exceptions import InvalidTag + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + from cryptography.hazmat.primitives.ciphers.aead import ( + AESCCM, + AESGCM, + AESOCB3, + AESSIV, + ChaCha20Poly1305, + ) + + _AEAD_TYPES = typing.Union[ + AESCCM, AESGCM, AESOCB3, AESSIV, ChaCha20Poly1305 + ] + +_ENCRYPT = 1 +_DECRYPT = 0 + + +def _aead_cipher_name(cipher: "_AEAD_TYPES") -> bytes: + from cryptography.hazmat.primitives.ciphers.aead import ( + AESCCM, + AESGCM, + AESOCB3, + AESSIV, + ChaCha20Poly1305, + ) + + if isinstance(cipher, ChaCha20Poly1305): + return b"chacha20-poly1305" + elif isinstance(cipher, AESCCM): + return f"aes-{len(cipher._key) * 8}-ccm".encode("ascii") + elif isinstance(cipher, AESOCB3): + return f"aes-{len(cipher._key) * 8}-ocb".encode("ascii") + elif isinstance(cipher, AESSIV): + return f"aes-{len(cipher._key) * 8 // 2}-siv".encode("ascii") + else: + assert isinstance(cipher, AESGCM) + return f"aes-{len(cipher._key) * 8}-gcm".encode("ascii") + + +def _evp_cipher(cipher_name: bytes, backend: "Backend"): + if cipher_name.endswith(b"-siv"): + evp_cipher = backend._lib.EVP_CIPHER_fetch( + backend._ffi.NULL, + cipher_name, + backend._ffi.NULL, + ) + backend.openssl_assert(evp_cipher != backend._ffi.NULL) + evp_cipher = backend._ffi.gc(evp_cipher, backend._lib.EVP_CIPHER_free) + else: + evp_cipher = backend._lib.EVP_get_cipherbyname(cipher_name) + backend.openssl_assert(evp_cipher != backend._ffi.NULL) + + return evp_cipher + + +def _aead_setup( + backend: "Backend", + cipher_name: bytes, + key: bytes, + nonce: bytes, + tag: typing.Optional[bytes], + tag_len: int, + operation: int, +): + evp_cipher = _evp_cipher(cipher_name, backend) + ctx = backend._lib.EVP_CIPHER_CTX_new() + ctx = backend._ffi.gc(ctx, backend._lib.EVP_CIPHER_CTX_free) + res = backend._lib.EVP_CipherInit_ex( + ctx, + evp_cipher, + backend._ffi.NULL, + backend._ffi.NULL, + backend._ffi.NULL, + int(operation == _ENCRYPT), + ) + backend.openssl_assert(res != 0) + res = backend._lib.EVP_CIPHER_CTX_set_key_length(ctx, len(key)) + backend.openssl_assert(res != 0) + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, + backend._lib.EVP_CTRL_AEAD_SET_IVLEN, + len(nonce), + backend._ffi.NULL, + ) + backend.openssl_assert(res != 0) + if operation == _DECRYPT: + assert tag is not None + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag + ) + backend.openssl_assert(res != 0) + elif cipher_name.endswith(b"-ccm"): + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, tag_len, backend._ffi.NULL + ) + backend.openssl_assert(res != 0) + + nonce_ptr = backend._ffi.from_buffer(nonce) + key_ptr = backend._ffi.from_buffer(key) + res = backend._lib.EVP_CipherInit_ex( + ctx, + backend._ffi.NULL, + backend._ffi.NULL, + key_ptr, + nonce_ptr, + int(operation == _ENCRYPT), + ) + backend.openssl_assert(res != 0) + return ctx + + +def _set_length(backend: "Backend", ctx, data_len: int) -> None: + intptr = backend._ffi.new("int *") + res = backend._lib.EVP_CipherUpdate( + ctx, backend._ffi.NULL, intptr, backend._ffi.NULL, data_len + ) + backend.openssl_assert(res != 0) + + +def _process_aad(backend: "Backend", ctx, associated_data: bytes) -> None: + outlen = backend._ffi.new("int *") + res = backend._lib.EVP_CipherUpdate( + ctx, backend._ffi.NULL, outlen, associated_data, len(associated_data) + ) + backend.openssl_assert(res != 0) + + +def _process_data(backend: "Backend", ctx, data: 
bytes) -> bytes: + outlen = backend._ffi.new("int *") + buf = backend._ffi.new("unsigned char[]", len(data)) + res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data)) + if res == 0: + # AES SIV can error here if the data is invalid on decrypt + backend._consume_errors() + raise InvalidTag + return backend._ffi.buffer(buf, outlen[0])[:] + + +def _encrypt( + backend: "Backend", + cipher: "_AEAD_TYPES", + nonce: bytes, + data: bytes, + associated_data: typing.List[bytes], + tag_length: int, +) -> bytes: + from cryptography.hazmat.primitives.ciphers.aead import AESCCM, AESSIV + + cipher_name = _aead_cipher_name(cipher) + ctx = _aead_setup( + backend, cipher_name, cipher._key, nonce, None, tag_length, _ENCRYPT + ) + # CCM requires us to pass the length of the data before processing anything + # However calling this with any other AEAD results in an error + if isinstance(cipher, AESCCM): + _set_length(backend, ctx, len(data)) + + for ad in associated_data: + _process_aad(backend, ctx, ad) + processed_data = _process_data(backend, ctx, data) + outlen = backend._ffi.new("int *") + # All AEADs we support besides OCB are streaming so they return nothing + # in finalization. OCB can return up to (16 byte block - 1) bytes so + # we need a buffer here too. + buf = backend._ffi.new("unsigned char[]", 16) + res = backend._lib.EVP_CipherFinal_ex(ctx, buf, outlen) + backend.openssl_assert(res != 0) + processed_data += backend._ffi.buffer(buf, outlen[0])[:] + tag_buf = backend._ffi.new("unsigned char[]", tag_length) + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_GET_TAG, tag_length, tag_buf + ) + backend.openssl_assert(res != 0) + tag = backend._ffi.buffer(tag_buf)[:] + + if isinstance(cipher, AESSIV): + # RFC 5297 defines the output as IV || C, where the tag we generate is + # the "IV" and C is the ciphertext. This is the opposite of our + # other AEADs, which are Ciphertext || Tag + backend.openssl_assert(len(tag) == 16) + return tag + processed_data + else: + return processed_data + tag + + +def _decrypt( + backend: "Backend", + cipher: "_AEAD_TYPES", + nonce: bytes, + data: bytes, + associated_data: typing.List[bytes], + tag_length: int, +) -> bytes: + from cryptography.hazmat.primitives.ciphers.aead import AESCCM, AESSIV + + if len(data) < tag_length: + raise InvalidTag + + if isinstance(cipher, AESSIV): + # RFC 5297 defines the output as IV || C, where the tag we generate is + # the "IV" and C is the ciphertext. This is the opposite of our + # other AEADs, which are Ciphertext || Tag + tag = data[:tag_length] + data = data[tag_length:] + else: + tag = data[-tag_length:] + data = data[:-tag_length] + cipher_name = _aead_cipher_name(cipher) + ctx = _aead_setup( + backend, cipher_name, cipher._key, nonce, tag, tag_length, _DECRYPT + ) + # CCM requires us to pass the length of the data before processing anything + # However calling this with any other AEAD results in an error + if isinstance(cipher, AESCCM): + _set_length(backend, ctx, len(data)) + + for ad in associated_data: + _process_aad(backend, ctx, ad) + # CCM has a different error path if the tag doesn't match. Errors are + # raised in Update and Final is irrelevant. 
+ if isinstance(cipher, AESCCM): + outlen = backend._ffi.new("int *") + buf = backend._ffi.new("unsigned char[]", len(data)) + res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data)) + if res != 1: + backend._consume_errors() + raise InvalidTag + + processed_data = backend._ffi.buffer(buf, outlen[0])[:] + else: + processed_data = _process_data(backend, ctx, data) + outlen = backend._ffi.new("int *") + # OCB can return up to 15 bytes (16 byte block - 1) in finalization + buf = backend._ffi.new("unsigned char[]", 16) + res = backend._lib.EVP_CipherFinal_ex(ctx, buf, outlen) + processed_data += backend._ffi.buffer(buf, outlen[0])[:] + if res == 0: + backend._consume_errors() + raise InvalidTag + + return processed_data diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/backend.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/backend.py new file mode 100644 index 0000000..42fb446 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/backend.py @@ -0,0 +1,2537 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import collections +import contextlib +import itertools +import typing +import warnings +from contextlib import contextmanager + +from cryptography import utils, x509 +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.ciphers import _CipherContext +from cryptography.hazmat.backends.openssl.cmac import _CMACContext +from cryptography.hazmat.backends.openssl.dh import ( + _DHParameters, + _DHPrivateKey, + _DHPublicKey, + _dh_params_dup, +) +from cryptography.hazmat.backends.openssl.dsa import ( + _DSAParameters, + _DSAPrivateKey, + _DSAPublicKey, +) +from cryptography.hazmat.backends.openssl.ec import ( + _EllipticCurvePrivateKey, + _EllipticCurvePublicKey, +) +from cryptography.hazmat.backends.openssl.ed25519 import ( + _Ed25519PrivateKey, + _Ed25519PublicKey, +) +from cryptography.hazmat.backends.openssl.ed448 import ( + _ED448_KEY_SIZE, + _Ed448PrivateKey, + _Ed448PublicKey, +) +from cryptography.hazmat.backends.openssl.hashes import _HashContext +from cryptography.hazmat.backends.openssl.hmac import _HMACContext +from cryptography.hazmat.backends.openssl.poly1305 import ( + _POLY1305_KEY_SIZE, + _Poly1305Context, +) +from cryptography.hazmat.backends.openssl.rsa import ( + _RSAPrivateKey, + _RSAPublicKey, +) +from cryptography.hazmat.backends.openssl.x25519 import ( + _X25519PrivateKey, + _X25519PublicKey, +) +from cryptography.hazmat.backends.openssl.x448 import ( + _X448PrivateKey, + _X448PublicKey, +) +from cryptography.hazmat.bindings._rust import ( + x509 as rust_x509, +) +from cryptography.hazmat.bindings.openssl import binding +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives._asymmetric import AsymmetricPadding +from cryptography.hazmat.primitives.asymmetric import ( + dh, + dsa, + ec, + ed25519, + ed448, + rsa, + x25519, + x448, +) +from cryptography.hazmat.primitives.asymmetric.padding import ( + MGF1, + OAEP, + PKCS1v15, + PSS, +) +from cryptography.hazmat.primitives.asymmetric.types import ( + CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES, + PRIVATE_KEY_TYPES, + PUBLIC_KEY_TYPES, +) +from cryptography.hazmat.primitives.ciphers import ( + BlockCipherAlgorithm, + 
CipherAlgorithm, +) +from cryptography.hazmat.primitives.ciphers.algorithms import ( + AES, + ARC4, + Camellia, + ChaCha20, + SM4, + TripleDES, + _BlowfishInternal, + _CAST5Internal, + _IDEAInternal, + _SEEDInternal, +) +from cryptography.hazmat.primitives.ciphers.modes import ( + CBC, + CFB, + CFB8, + CTR, + ECB, + GCM, + Mode, + OFB, + XTS, +) +from cryptography.hazmat.primitives.kdf import scrypt +from cryptography.hazmat.primitives.serialization import pkcs7, ssh +from cryptography.hazmat.primitives.serialization.pkcs12 import ( + PKCS12Certificate, + PKCS12KeyAndCertificates, + _ALLOWED_PKCS12_TYPES, + _PKCS12_CAS_TYPES, +) + + +_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"]) + + +# Not actually supported, just used as a marker for some serialization tests. +class _RC2: + pass + + +class Backend: + """ + OpenSSL API binding interfaces. + """ + + name = "openssl" + + # FIPS has opinions about acceptable algorithms and key sizes, but the + # disallowed algorithms are still present in OpenSSL. They just error if + # you try to use them. To avoid that we allowlist the algorithms in + # FIPS 140-3. This isn't ideal, but FIPS 140-3 is trash so here we are. + _fips_aead = { + b"aes-128-ccm", + b"aes-192-ccm", + b"aes-256-ccm", + b"aes-128-gcm", + b"aes-192-gcm", + b"aes-256-gcm", + } + # TripleDES encryption is disallowed/deprecated throughout 2023 in + # FIPS 140-3. To keep it simple we denylist any use of TripleDES (TDEA). + _fips_ciphers = (AES,) + # Sometimes SHA1 is still permissible. That logic is contained + # within the various *_supported methods. + _fips_hashes = ( + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, + hashes.SHA512_224, + hashes.SHA512_256, + hashes.SHA3_224, + hashes.SHA3_256, + hashes.SHA3_384, + hashes.SHA3_512, + hashes.SHAKE128, + hashes.SHAKE256, + ) + _fips_ecdh_curves = ( + ec.SECP224R1, + ec.SECP256R1, + ec.SECP384R1, + ec.SECP521R1, + ) + _fips_rsa_min_key_size = 2048 + _fips_rsa_min_public_exponent = 65537 + _fips_dsa_min_modulus = 1 << 2048 + _fips_dh_min_key_size = 2048 + _fips_dh_min_modulus = 1 << _fips_dh_min_key_size + + def __init__(self): + self._binding = binding.Binding() + self._ffi = self._binding.ffi + self._lib = self._binding.lib + self._rsa_skip_check_key = False + self._fips_enabled = self._is_fips_enabled() + + self._cipher_registry = {} + self._register_default_ciphers() + if self._fips_enabled and self._lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE: + warnings.warn( + "OpenSSL FIPS mode is enabled. 
Can't enable DRBG fork safety.", + UserWarning, + ) + else: + self.activate_osrandom_engine() + self._dh_types = [self._lib.EVP_PKEY_DH] + if self._lib.Cryptography_HAS_EVP_PKEY_DHX: + self._dh_types.append(self._lib.EVP_PKEY_DHX) + + def __repr__(self) -> str: + return "".format( + self.openssl_version_text(), self._fips_enabled + ) + + def openssl_assert( + self, + ok: bool, + errors: typing.Optional[typing.List[binding._OpenSSLError]] = None, + ) -> None: + return binding._openssl_assert(self._lib, ok, errors=errors) + + def _is_fips_enabled(self) -> bool: + if self._lib.Cryptography_HAS_300_FIPS: + mode = self._lib.EVP_default_properties_is_fips_enabled( + self._ffi.NULL + ) + else: + mode = getattr(self._lib, "FIPS_mode", lambda: 0)() + + if mode == 0: + # OpenSSL without FIPS pushes an error on the error stack + self._lib.ERR_clear_error() + return bool(mode) + + def _enable_fips(self) -> None: + # This function enables FIPS mode for OpenSSL 3.0.0 on installs that + # have the FIPS provider installed properly. + self._binding._enable_fips() + assert self._is_fips_enabled() + self._fips_enabled = self._is_fips_enabled() + + def activate_builtin_random(self) -> None: + if self._lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE: + # Obtain a new structural reference. + e = self._lib.ENGINE_get_default_RAND() + if e != self._ffi.NULL: + self._lib.ENGINE_unregister_RAND(e) + # Reset the RNG to use the built-in. + res = self._lib.RAND_set_rand_method(self._ffi.NULL) + self.openssl_assert(res == 1) + # decrement the structural reference from get_default_RAND + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + @contextlib.contextmanager + def _get_osurandom_engine(self): + # Fetches an engine by id and returns it. This creates a structural + # reference. + e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id) + self.openssl_assert(e != self._ffi.NULL) + # Initialize the engine for use. This adds a functional reference. + res = self._lib.ENGINE_init(e) + self.openssl_assert(res == 1) + + try: + yield e + finally: + # Decrement the structural ref incremented by ENGINE_by_id. + res = self._lib.ENGINE_free(e) + self.openssl_assert(res == 1) + # Decrement the functional ref incremented by ENGINE_init. + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + def activate_osrandom_engine(self) -> None: + if self._lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE: + # Unregister and free the current engine. + self.activate_builtin_random() + with self._get_osurandom_engine() as e: + # Set the engine as the default RAND provider. + res = self._lib.ENGINE_set_default_RAND(e) + self.openssl_assert(res == 1) + # Reset the RNG to use the engine + res = self._lib.RAND_set_rand_method(self._ffi.NULL) + self.openssl_assert(res == 1) + + def osrandom_engine_implementation(self) -> str: + buf = self._ffi.new("char[]", 64) + with self._get_osurandom_engine() as e: + res = self._lib.ENGINE_ctrl_cmd( + e, b"get_implementation", len(buf), buf, self._ffi.NULL, 0 + ) + self.openssl_assert(res > 0) + return self._ffi.string(buf).decode("ascii") + + def openssl_version_text(self) -> str: + """ + Friendly string name of the loaded OpenSSL library. This is not + necessarily the same version as it was compiled against. 
+ + Example: OpenSSL 1.1.1d 10 Sep 2019 + """ + return self._ffi.string( + self._lib.OpenSSL_version(self._lib.OPENSSL_VERSION) + ).decode("ascii") + + def openssl_version_number(self) -> int: + return self._lib.OpenSSL_version_num() + + def create_hmac_ctx( + self, key: bytes, algorithm: hashes.HashAlgorithm + ) -> _HMACContext: + return _HMACContext(self, key, algorithm) + + def _evp_md_from_algorithm(self, algorithm: hashes.HashAlgorithm): + if algorithm.name == "blake2b" or algorithm.name == "blake2s": + alg = "{}{}".format( + algorithm.name, algorithm.digest_size * 8 + ).encode("ascii") + else: + alg = algorithm.name.encode("ascii") + + evp_md = self._lib.EVP_get_digestbyname(alg) + return evp_md + + def _evp_md_non_null_from_algorithm(self, algorithm: hashes.HashAlgorithm): + evp_md = self._evp_md_from_algorithm(algorithm) + self.openssl_assert(evp_md != self._ffi.NULL) + return evp_md + + def hash_supported(self, algorithm: hashes.HashAlgorithm) -> bool: + if self._fips_enabled and not isinstance(algorithm, self._fips_hashes): + return False + + evp_md = self._evp_md_from_algorithm(algorithm) + return evp_md != self._ffi.NULL + + def signature_hash_supported( + self, algorithm: hashes.HashAlgorithm + ) -> bool: + # Dedicated check for hashing algorithm use in message digest for + # signatures, e.g. RSA PKCS#1 v1.5 SHA1 (sha1WithRSAEncryption). + if self._fips_enabled and isinstance(algorithm, hashes.SHA1): + return False + return self.hash_supported(algorithm) + + def scrypt_supported(self) -> bool: + if self._fips_enabled: + return False + else: + return self._lib.Cryptography_HAS_SCRYPT == 1 + + def hmac_supported(self, algorithm: hashes.HashAlgorithm) -> bool: + # FIPS mode still allows SHA1 for HMAC + if self._fips_enabled and isinstance(algorithm, hashes.SHA1): + return True + + return self.hash_supported(algorithm) + + def create_hash_ctx( + self, algorithm: hashes.HashAlgorithm + ) -> hashes.HashContext: + return _HashContext(self, algorithm) + + def cipher_supported(self, cipher: CipherAlgorithm, mode: Mode) -> bool: + if self._fips_enabled: + # FIPS mode requires AES. TripleDES is disallowed/deprecated in + # FIPS 140-3. 
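+            # A minimal usage sketch (not part of this module), assuming a
+            # FIPS-enabled Backend instance `backend`:
+            #
+            #     backend.cipher_supported(TripleDES(b"\x00" * 24), CBC(b"\x00" * 8))
+            #     # -> False: TripleDES is not in _fips_ciphers, so the
+            #     # registry lookup below is never reached.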
+ if not isinstance(cipher, self._fips_ciphers): + return False + + try: + adapter = self._cipher_registry[type(cipher), type(mode)] + except KeyError: + return False + evp_cipher = adapter(self, cipher, mode) + return self._ffi.NULL != evp_cipher + + def register_cipher_adapter(self, cipher_cls, mode_cls, adapter): + if (cipher_cls, mode_cls) in self._cipher_registry: + raise ValueError( + "Duplicate registration for: {} {}.".format( + cipher_cls, mode_cls + ) + ) + self._cipher_registry[cipher_cls, mode_cls] = adapter + + def _register_default_ciphers(self) -> None: + for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8, GCM]: + self.register_cipher_adapter( + AES, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}"), + ) + for mode_cls in [CBC, CTR, ECB, OFB, CFB]: + self.register_cipher_adapter( + Camellia, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}"), + ) + for mode_cls in [CBC, CFB, CFB8, OFB]: + self.register_cipher_adapter( + TripleDES, mode_cls, GetCipherByName("des-ede3-{mode.name}") + ) + self.register_cipher_adapter( + TripleDES, ECB, GetCipherByName("des-ede3") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + _BlowfishInternal, mode_cls, GetCipherByName("bf-{mode.name}") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + _SEEDInternal, mode_cls, GetCipherByName("seed-{mode.name}") + ) + for cipher_cls, mode_cls in itertools.product( + [_CAST5Internal, _IDEAInternal], + [CBC, OFB, CFB, ECB], + ): + self.register_cipher_adapter( + cipher_cls, + mode_cls, + GetCipherByName("{cipher.name}-{mode.name}"), + ) + self.register_cipher_adapter(ARC4, type(None), GetCipherByName("rc4")) + # We don't actually support RC2, this is just used by some tests. 
+ self.register_cipher_adapter(_RC2, type(None), GetCipherByName("rc2")) + self.register_cipher_adapter( + ChaCha20, type(None), GetCipherByName("chacha20") + ) + self.register_cipher_adapter(AES, XTS, _get_xts_cipher) + for mode_cls in [ECB, CBC, OFB, CFB, CTR]: + self.register_cipher_adapter( + SM4, mode_cls, GetCipherByName("sm4-{mode.name}") + ) + + def create_symmetric_encryption_ctx( + self, cipher: CipherAlgorithm, mode: Mode + ) -> _CipherContext: + return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT) + + def create_symmetric_decryption_ctx( + self, cipher: CipherAlgorithm, mode: Mode + ) -> _CipherContext: + return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT) + + def pbkdf2_hmac_supported(self, algorithm: hashes.HashAlgorithm) -> bool: + return self.hmac_supported(algorithm) + + def derive_pbkdf2_hmac( + self, + algorithm: hashes.HashAlgorithm, + length: int, + salt: bytes, + iterations: int, + key_material: bytes, + ) -> bytes: + buf = self._ffi.new("unsigned char[]", length) + evp_md = self._evp_md_non_null_from_algorithm(algorithm) + key_material_ptr = self._ffi.from_buffer(key_material) + res = self._lib.PKCS5_PBKDF2_HMAC( + key_material_ptr, + len(key_material), + salt, + len(salt), + iterations, + evp_md, + length, + buf, + ) + self.openssl_assert(res == 1) + return self._ffi.buffer(buf)[:] + + def _consume_errors(self) -> typing.List[binding._OpenSSLError]: + return binding._consume_errors(self._lib) + + def _consume_errors_with_text( + self, + ) -> typing.List[binding._OpenSSLErrorWithText]: + return binding._consume_errors_with_text(self._lib) + + def _bn_to_int(self, bn) -> int: + assert bn != self._ffi.NULL + self.openssl_assert(not self._lib.BN_is_negative(bn)) + + bn_num_bytes = self._lib.BN_num_bytes(bn) + bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes) + bin_len = self._lib.BN_bn2bin(bn, bin_ptr) + # A zero length means the BN has value 0 + self.openssl_assert(bin_len >= 0) + val = int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big") + return val + + def _int_to_bn(self, num: int, bn=None): + """ + Converts a python integer to a BIGNUM. The returned BIGNUM will not + be garbage collected (to support adding them to structs that take + ownership of the object). Be sure to register it for GC if it will + be discarded after use. 
+ """ + assert bn is None or bn != self._ffi.NULL + + if bn is None: + bn = self._ffi.NULL + + binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big") + bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn) + self.openssl_assert(bn_ptr != self._ffi.NULL) + return bn_ptr + + def generate_rsa_private_key( + self, public_exponent: int, key_size: int + ) -> rsa.RSAPrivateKey: + rsa._verify_rsa_parameters(public_exponent, key_size) + + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + + bn = self._int_to_bn(public_exponent) + bn = self._ffi.gc(bn, self._lib.BN_free) + + res = self._lib.RSA_generate_key_ex( + rsa_cdata, key_size, bn, self._ffi.NULL + ) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey( + self, rsa_cdata, evp_pkey, self._rsa_skip_check_key + ) + + def generate_rsa_parameters_supported( + self, public_exponent: int, key_size: int + ) -> bool: + return ( + public_exponent >= 3 + and public_exponent & 1 != 0 + and key_size >= 512 + ) + + def load_rsa_private_numbers( + self, numbers: rsa.RSAPrivateNumbers + ) -> rsa.RSAPrivateKey: + rsa._check_private_key_components( + numbers.p, + numbers.q, + numbers.d, + numbers.dmp1, + numbers.dmq1, + numbers.iqmp, + numbers.public_numbers.e, + numbers.public_numbers.n, + ) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + d = self._int_to_bn(numbers.d) + dmp1 = self._int_to_bn(numbers.dmp1) + dmq1 = self._int_to_bn(numbers.dmq1) + iqmp = self._int_to_bn(numbers.iqmp) + e = self._int_to_bn(numbers.public_numbers.e) + n = self._int_to_bn(numbers.public_numbers.n) + res = self._lib.RSA_set0_factors(rsa_cdata, p, q) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, d) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_crt_params(rsa_cdata, dmp1, dmq1, iqmp) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey( + self, rsa_cdata, evp_pkey, self._rsa_skip_check_key + ) + + def load_rsa_public_numbers( + self, numbers: rsa.RSAPublicNumbers + ) -> rsa.RSAPublicKey: + rsa._check_public_key_components(numbers.e, numbers.n) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + e = self._int_to_bn(numbers.e) + n = self._int_to_bn(numbers.n) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, self._ffi.NULL) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + + def _create_evp_pkey_gc(self): + evp_pkey = self._lib.EVP_PKEY_new() + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return evp_pkey + + def _rsa_cdata_to_evp_pkey(self, rsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_RSA(evp_pkey, rsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _bytes_to_bio(self, data: bytes): + """ + Return a _MemoryBIO namedtuple of (BIO, char*). + + The char* is the storage for the BIO and it must stay alive until the + BIO is finished with. 
+ """ + data_ptr = self._ffi.from_buffer(data) + bio = self._lib.BIO_new_mem_buf(data_ptr, len(data)) + self.openssl_assert(bio != self._ffi.NULL) + + return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_ptr) + + def _create_mem_bio_gc(self): + """ + Creates an empty memory BIO. + """ + bio_method = self._lib.BIO_s_mem() + self.openssl_assert(bio_method != self._ffi.NULL) + bio = self._lib.BIO_new(bio_method) + self.openssl_assert(bio != self._ffi.NULL) + bio = self._ffi.gc(bio, self._lib.BIO_free) + return bio + + def _read_mem_bio(self, bio) -> bytes: + """ + Reads a memory BIO. This only works on memory BIOs. + """ + buf = self._ffi.new("char **") + buf_len = self._lib.BIO_get_mem_data(bio, buf) + self.openssl_assert(buf_len > 0) + self.openssl_assert(buf[0] != self._ffi.NULL) + bio_data = self._ffi.buffer(buf[0], buf_len)[:] + return bio_data + + def _evp_pkey_to_private_key(self, evp_pkey) -> PRIVATE_KEY_TYPES: + """ + Return the appropriate type of PrivateKey given an evp_pkey cdata + pointer. + """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPrivateKey( + self, rsa_cdata, evp_pkey, self._rsa_skip_check_key + ) + elif ( + key_type == self._lib.EVP_PKEY_RSA_PSS + and not self._lib.CRYPTOGRAPHY_IS_LIBRESSL + and not self._lib.CRYPTOGRAPHY_IS_BORINGSSL + and not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E + ): + # At the moment the way we handle RSA PSS keys is to strip the + # PSS constraints from them and treat them as normal RSA keys + # Unfortunately the RSA * itself tracks this data so we need to + # extract, serialize, and reload it without the constraints. 
+ rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + bio = self._create_mem_bio_gc() + res = self._lib.i2d_RSAPrivateKey_bio(bio, rsa_cdata) + self.openssl_assert(res == 1) + return self.load_der_private_key( + self._read_mem_bio(bio), password=None + ) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPrivateKey(self, dh_cdata, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED25519", None): + # EVP_PKEY_ED25519 is not present in OpenSSL < 1.1.1 + return _Ed25519PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X448", None): + # EVP_PKEY_X448 is not present in OpenSSL < 1.1.1 + return _X448PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X25519", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.0 + return _X25519PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED448", None): + # EVP_PKEY_ED448 is not present in OpenSSL < 1.1.1 + return _Ed448PrivateKey(self, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _evp_pkey_to_public_key(self, evp_pkey) -> PUBLIC_KEY_TYPES: + """ + Return the appropriate type of PublicKey given an evp_pkey cdata + pointer. 
+ """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + if ec_cdata == self._ffi.NULL: + errors = self._consume_errors_with_text() + raise ValueError("Unable to load EC key", errors) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPublicKey(self, dh_cdata, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED25519", None): + # EVP_PKEY_ED25519 is not present in OpenSSL < 1.1.1 + return _Ed25519PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X448", None): + # EVP_PKEY_X448 is not present in OpenSSL < 1.1.1 + return _X448PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X25519", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.0 + return _X25519PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED448", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.1 + return _Ed448PublicKey(self, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _oaep_hash_supported(self, algorithm: hashes.HashAlgorithm) -> bool: + return isinstance( + algorithm, + ( + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, + ), + ) + + def rsa_padding_supported(self, padding: AsymmetricPadding) -> bool: + if isinstance(padding, PKCS1v15): + return True + elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1): + # SHA1 is permissible in MGF1 in FIPS even when SHA1 is blocked + # as signature algorithm. + if self._fips_enabled and isinstance( + padding._mgf._algorithm, hashes.SHA1 + ): + return True + else: + return self.hash_supported(padding._mgf._algorithm) + elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1): + return self._oaep_hash_supported( + padding._mgf._algorithm + ) and self._oaep_hash_supported(padding._algorithm) + else: + return False + + def generate_dsa_parameters(self, key_size: int) -> dsa.DSAParameters: + if key_size not in (1024, 2048, 3072, 4096): + raise ValueError( + "Key size must be 1024, 2048, 3072, or 4096 bits." 
+ ) + + ctx = self._lib.DSA_new() + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + + res = self._lib.DSA_generate_parameters_ex( + ctx, + key_size, + self._ffi.NULL, + 0, + self._ffi.NULL, + self._ffi.NULL, + self._ffi.NULL, + ) + + self.openssl_assert(res == 1) + + return _DSAParameters(self, ctx) + + def generate_dsa_private_key( + self, parameters: dsa.DSAParameters + ) -> dsa.DSAPrivateKey: + ctx = self._lib.DSAparams_dup( + parameters._dsa_cdata # type: ignore[attr-defined] + ) + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + self._lib.DSA_generate_key(ctx) + evp_pkey = self._dsa_cdata_to_evp_pkey(ctx) + + return _DSAPrivateKey(self, ctx, evp_pkey) + + def generate_dsa_private_key_and_parameters( + self, key_size: int + ) -> dsa.DSAPrivateKey: + parameters = self.generate_dsa_parameters(key_size) + return self.generate_dsa_private_key(parameters) + + def _dsa_cdata_set_values(self, dsa_cdata, p, q, g, pub_key, priv_key): + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + res = self._lib.DSA_set0_key(dsa_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + def load_dsa_private_numbers( + self, numbers: dsa.DSAPrivateNumbers + ) -> dsa.DSAPrivateKey: + dsa._check_dsa_private_numbers(numbers) + parameter_numbers = numbers.public_numbers.parameter_numbers + + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(parameter_numbers.p) + q = self._int_to_bn(parameter_numbers.q) + g = self._int_to_bn(parameter_numbers.g) + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + + def load_dsa_public_numbers( + self, numbers: dsa.DSAPublicNumbers + ) -> dsa.DSAPublicKey: + dsa._check_dsa_parameters(numbers.parameter_numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.parameter_numbers.p) + q = self._int_to_bn(numbers.parameter_numbers.q) + g = self._int_to_bn(numbers.parameter_numbers.g) + pub_key = self._int_to_bn(numbers.y) + priv_key = self._ffi.NULL + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + + def load_dsa_parameter_numbers( + self, numbers: dsa.DSAParameterNumbers + ) -> dsa.DSAParameters: + dsa._check_dsa_parameters(numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + g = self._int_to_bn(numbers.g) + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DSAParameters(self, dsa_cdata) + + def _dsa_cdata_to_evp_pkey(self, dsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DSA(evp_pkey, dsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def dsa_supported(self) -> bool: + return not self._fips_enabled + + def dsa_hash_supported(self, algorithm: hashes.HashAlgorithm) -> bool: + if not self.dsa_supported(): + return False + return 
self.signature_hash_supported(algorithm) + + def cmac_algorithm_supported(self, algorithm) -> bool: + return self.cipher_supported( + algorithm, CBC(b"\x00" * algorithm.block_size) + ) + + def create_cmac_ctx(self, algorithm: BlockCipherAlgorithm) -> _CMACContext: + return _CMACContext(self, algorithm) + + def load_pem_private_key( + self, data: bytes, password: typing.Optional[bytes] + ) -> PRIVATE_KEY_TYPES: + return self._load_key( + self._lib.PEM_read_bio_PrivateKey, + self._evp_pkey_to_private_key, + data, + password, + ) + + def load_pem_public_key(self, data: bytes) -> PUBLIC_KEY_TYPES: + mem_bio = self._bytes_to_bio(data) + # In OpenSSL 3.0.x the PEM_read_bio_PUBKEY function will invoke + # the default password callback if you pass an encrypted private + # key. This is very, very, very bad as the default callback can + # trigger an interactive console prompt, which will hang the + # Python process. We therefore provide our own callback to + # catch this and error out properly. + userdata = self._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *") + evp_pkey = self._lib.PEM_read_bio_PUBKEY( + mem_bio.bio, + self._ffi.NULL, + self._ffi.addressof( + self._lib._original_lib, "Cryptography_pem_password_cb" + ), + userdata, + ) + if evp_pkey != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return self._evp_pkey_to_public_key(evp_pkey) + else: + # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still + # need to check to see if it is a pure PKCS1 RSA public key (not + # embedded in a subjectPublicKeyInfo) + self._consume_errors() + res = self._lib.BIO_reset(mem_bio.bio) + self.openssl_assert(res == 1) + rsa_cdata = self._lib.PEM_read_bio_RSAPublicKey( + mem_bio.bio, + self._ffi.NULL, + self._ffi.addressof( + self._lib._original_lib, "Cryptography_pem_password_cb" + ), + userdata, + ) + if rsa_cdata != self._ffi.NULL: + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + else: + self._handle_key_loading_error() + + def load_pem_parameters(self, data: bytes) -> dh.DHParameters: + mem_bio = self._bytes_to_bio(data) + # only DH is supported currently + dh_cdata = self._lib.PEM_read_bio_DHparams( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + else: + self._handle_key_loading_error() + + def load_der_private_key( + self, data: bytes, password: typing.Optional[bytes] + ) -> PRIVATE_KEY_TYPES: + # OpenSSL has a function called d2i_AutoPrivateKey that in theory + # handles this automatically, however it doesn't handle encrypted + # private keys. Instead we try to load the key two different ways. + # First we'll try to load it as a traditional key. + bio_data = self._bytes_to_bio(data) + key = self._evp_pkey_from_der_traditional_key(bio_data, password) + if key: + return self._evp_pkey_to_private_key(key) + else: + # Finally we try to load it with the method that handles encrypted + # PKCS8 properly. + return self._load_key( + self._lib.d2i_PKCS8PrivateKey_bio, + self._evp_pkey_to_private_key, + data, + password, + ) + + def _evp_pkey_from_der_traditional_key(self, bio_data, password): + key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL) + if key != self._ffi.NULL: + # In OpenSSL 3.0.0-alpha15 there exist scenarios where the key will + # successfully load but errors are still put on the stack. 
Tracked
+            # as https://github.com/openssl/openssl/issues/14996
+            self._consume_errors()
+
+            key = self._ffi.gc(key, self._lib.EVP_PKEY_free)
+            if password is not None:
+                raise TypeError(
+                    "Password was given but private key is not encrypted."
+                )
+
+            return key
+        else:
+            self._consume_errors()
+            return None
+
+    def load_der_public_key(self, data: bytes) -> PUBLIC_KEY_TYPES:
+        mem_bio = self._bytes_to_bio(data)
+        evp_pkey = self._lib.d2i_PUBKEY_bio(mem_bio.bio, self._ffi.NULL)
+        if evp_pkey != self._ffi.NULL:
+            evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
+            return self._evp_pkey_to_public_key(evp_pkey)
+        else:
+            # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
+            # need to check to see if it is a pure PKCS1 RSA public key (not
+            # embedded in a subjectPublicKeyInfo)
+            self._consume_errors()
+            res = self._lib.BIO_reset(mem_bio.bio)
+            self.openssl_assert(res == 1)
+            rsa_cdata = self._lib.d2i_RSAPublicKey_bio(
+                mem_bio.bio, self._ffi.NULL
+            )
+            if rsa_cdata != self._ffi.NULL:
+                rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
+                evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
+                return _RSAPublicKey(self, rsa_cdata, evp_pkey)
+            else:
+                self._handle_key_loading_error()
+
+    def load_der_parameters(self, data: bytes) -> dh.DHParameters:
+        mem_bio = self._bytes_to_bio(data)
+        dh_cdata = self._lib.d2i_DHparams_bio(mem_bio.bio, self._ffi.NULL)
+        if dh_cdata != self._ffi.NULL:
+            dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free)
+            return _DHParameters(self, dh_cdata)
+        elif self._lib.Cryptography_HAS_EVP_PKEY_DHX:
+            # We check to see if the data is dhx.
+            self._consume_errors()
+            res = self._lib.BIO_reset(mem_bio.bio)
+            self.openssl_assert(res == 1)
+            dh_cdata = self._lib.Cryptography_d2i_DHxparams_bio(
+                mem_bio.bio, self._ffi.NULL
+            )
+            if dh_cdata != self._ffi.NULL:
+                dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free)
+                return _DHParameters(self, dh_cdata)
+
+        self._handle_key_loading_error()
+
+    def _cert2ossl(self, cert: x509.Certificate) -> typing.Any:
+        data = cert.public_bytes(serialization.Encoding.DER)
+        mem_bio = self._bytes_to_bio(data)
+        x509 = self._lib.d2i_X509_bio(mem_bio.bio, self._ffi.NULL)
+        self.openssl_assert(x509 != self._ffi.NULL)
+        x509 = self._ffi.gc(x509, self._lib.X509_free)
+        return x509
+
+    def _ossl2cert(self, x509: typing.Any) -> x509.Certificate:
+        bio = self._create_mem_bio_gc()
+        res = self._lib.i2d_X509_bio(bio, x509)
+        self.openssl_assert(res == 1)
+        return rust_x509.load_der_x509_certificate(self._read_mem_bio(bio))
+
+    def _csr2ossl(self, csr: x509.CertificateSigningRequest) -> typing.Any:
+        data = csr.public_bytes(serialization.Encoding.DER)
+        mem_bio = self._bytes_to_bio(data)
+        x509_req = self._lib.d2i_X509_REQ_bio(mem_bio.bio, self._ffi.NULL)
+        self.openssl_assert(x509_req != self._ffi.NULL)
+        x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
+        return x509_req
+
+    def _ossl2csr(
+        self, x509_req: typing.Any
+    ) -> x509.CertificateSigningRequest:
+        bio = self._create_mem_bio_gc()
+        res = self._lib.i2d_X509_REQ_bio(bio, x509_req)
+        self.openssl_assert(res == 1)
+        return rust_x509.load_der_x509_csr(self._read_mem_bio(bio))
+
+    def _crl2ossl(self, crl: x509.CertificateRevocationList) -> typing.Any:
+        data = crl.public_bytes(serialization.Encoding.DER)
+        mem_bio = self._bytes_to_bio(data)
+        x509_crl = self._lib.d2i_X509_CRL_bio(mem_bio.bio, self._ffi.NULL)
+        self.openssl_assert(x509_crl != self._ffi.NULL)
+        x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
+        return x509_crl
+
+    def _ossl2crl(
+        self, x509_crl: 
typing.Any + ) -> x509.CertificateRevocationList: + bio = self._create_mem_bio_gc() + res = self._lib.i2d_X509_CRL_bio(bio, x509_crl) + self.openssl_assert(res == 1) + return rust_x509.load_der_x509_crl(self._read_mem_bio(bio)) + + def _crl_is_signature_valid( + self, + crl: x509.CertificateRevocationList, + public_key: CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES, + ) -> bool: + if not isinstance( + public_key, + ( + _DSAPublicKey, + _RSAPublicKey, + _EllipticCurvePublicKey, + ), + ): + raise TypeError( + "Expecting one of DSAPublicKey, RSAPublicKey," + " or EllipticCurvePublicKey." + ) + x509_crl = self._crl2ossl(crl) + res = self._lib.X509_CRL_verify(x509_crl, public_key._evp_pkey) + + if res != 1: + self._consume_errors() + return False + + return True + + def _csr_is_signature_valid( + self, csr: x509.CertificateSigningRequest + ) -> bool: + x509_req = self._csr2ossl(csr) + pkey = self._lib.X509_REQ_get_pubkey(x509_req) + self.openssl_assert(pkey != self._ffi.NULL) + pkey = self._ffi.gc(pkey, self._lib.EVP_PKEY_free) + res = self._lib.X509_REQ_verify(x509_req, pkey) + + if res != 1: + self._consume_errors() + return False + + return True + + def _check_keys_correspond(self, key1, key2): + if self._lib.EVP_PKEY_cmp(key1._evp_pkey, key2._evp_pkey) != 1: + raise ValueError("Keys do not correspond") + + def _load_key(self, openssl_read_func, convert_func, data, password): + mem_bio = self._bytes_to_bio(data) + + userdata = self._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *") + if password is not None: + utils._check_byteslike("password", password) + password_ptr = self._ffi.from_buffer(password) + userdata.password = password_ptr + userdata.length = len(password) + + evp_pkey = openssl_read_func( + mem_bio.bio, + self._ffi.NULL, + self._ffi.addressof( + self._lib._original_lib, "Cryptography_pem_password_cb" + ), + userdata, + ) + + if evp_pkey == self._ffi.NULL: + if userdata.error != 0: + self._consume_errors() + if userdata.error == -1: + raise TypeError( + "Password was not given but private key is encrypted" + ) + else: + assert userdata.error == -2 + raise ValueError( + "Passwords longer than {} bytes are not supported " + "by this backend.".format(userdata.maxsize - 1) + ) + else: + self._handle_key_loading_error() + + # In OpenSSL 3.0.0-alpha15 there exist scenarios where the key will + # successfully load but errors are still put on the stack. Tracked + # as https://github.com/openssl/openssl/issues/14996 + self._consume_errors() + + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + if password is not None and userdata.called == 0: + raise TypeError( + "Password was given but private key is not encrypted." + ) + + assert ( + password is not None and userdata.called == 1 + ) or password is None + + return convert_func(evp_pkey) + + def _handle_key_loading_error(self) -> typing.NoReturn: + errors = self._consume_errors() + + if not errors: + raise ValueError( + "Could not deserialize key data. The data may be in an " + "incorrect format or it may be encrypted with an unsupported " + "algorithm." + ) + + elif ( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, self._lib.EVP_R_BAD_DECRYPT + ) + or errors[0]._lib_reason_match( + self._lib.ERR_LIB_PKCS12, + self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR, + ) + or ( + self._lib.Cryptography_HAS_PROVIDERS + and errors[0]._lib_reason_match( + self._lib.ERR_LIB_PROV, + self._lib.PROV_R_BAD_DECRYPT, + ) + ) + ): + raise ValueError("Bad decrypt. 
Incorrect password?") + + elif any( + error._lib_reason_match( + self._lib.ERR_LIB_EVP, + self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM, + ) + for error in errors + ): + raise ValueError("Unsupported public key algorithm.") + + else: + errors_with_text = binding._errors_with_text(errors) + raise ValueError( + "Could not deserialize key data. The data may be in an " + "incorrect format, it may be encrypted with an unsupported " + "algorithm, or it may be an unsupported key type (e.g. EC " + "curves with explicit parameters).", + errors_with_text, + ) + + def elliptic_curve_supported(self, curve: ec.EllipticCurve) -> bool: + try: + curve_nid = self._elliptic_curve_to_nid(curve) + except UnsupportedAlgorithm: + curve_nid = self._lib.NID_undef + + group = self._lib.EC_GROUP_new_by_curve_name(curve_nid) + + if group == self._ffi.NULL: + self._consume_errors() + return False + else: + self.openssl_assert(curve_nid != self._lib.NID_undef) + self._lib.EC_GROUP_free(group) + return True + + def elliptic_curve_signature_algorithm_supported( + self, + signature_algorithm: ec.EllipticCurveSignatureAlgorithm, + curve: ec.EllipticCurve, + ) -> bool: + # We only support ECDSA right now. + if not isinstance(signature_algorithm, ec.ECDSA): + return False + + return self.elliptic_curve_supported(curve) + + def generate_elliptic_curve_private_key( + self, curve: ec.EllipticCurve + ) -> ec.EllipticCurvePrivateKey: + """ + Generate a new private key on the named curve. + """ + + if self.elliptic_curve_supported(curve): + ec_cdata = self._ec_key_new_by_curve(curve) + + res = self._lib.EC_KEY_generate_key(ec_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm( + "Backend object does not support {}.".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE, + ) + + def load_elliptic_curve_private_numbers( + self, numbers: ec.EllipticCurvePrivateNumbers + ) -> ec.EllipticCurvePrivateKey: + public = numbers.public_numbers + + ec_cdata = self._ec_key_new_by_curve(public.curve) + + private_value = self._ffi.gc( + self._int_to_bn(numbers.private_value), self._lib.BN_clear_free + ) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private_value) + if res != 1: + self._consume_errors() + raise ValueError("Invalid EC key.") + + self._ec_key_set_public_key_affine_coordinates( + ec_cdata, public.x, public.y + ) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def load_elliptic_curve_public_numbers( + self, numbers: ec.EllipticCurvePublicNumbers + ) -> ec.EllipticCurvePublicKey: + ec_cdata = self._ec_key_new_by_curve(numbers.curve) + self._ec_key_set_public_key_affine_coordinates( + ec_cdata, numbers.x, numbers.y + ) + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + + def load_elliptic_curve_public_bytes( + self, curve: ec.EllipticCurve, point_bytes: bytes + ) -> ec.EllipticCurvePublicKey: + ec_cdata = self._ec_key_new_by_curve(curve) + group = self._lib.EC_KEY_get0_group(ec_cdata) + self.openssl_assert(group != self._ffi.NULL) + point = self._lib.EC_POINT_new(group) + self.openssl_assert(point != self._ffi.NULL) + point = self._ffi.gc(point, self._lib.EC_POINT_free) + with self._tmp_bn_ctx() as bn_ctx: + res = self._lib.EC_POINT_oct2point( + group, point, point_bytes, len(point_bytes), bn_ctx + ) + if res != 1: + self._consume_errors() + raise 
ValueError("Invalid public bytes for the given curve") + + res = self._lib.EC_KEY_set_public_key(ec_cdata, point) + self.openssl_assert(res == 1) + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + + def derive_elliptic_curve_private_key( + self, private_value: int, curve: ec.EllipticCurve + ) -> ec.EllipticCurvePrivateKey: + ec_cdata = self._ec_key_new_by_curve(curve) + + get_func, group = self._ec_key_determine_group_get_func(ec_cdata) + + point = self._lib.EC_POINT_new(group) + self.openssl_assert(point != self._ffi.NULL) + point = self._ffi.gc(point, self._lib.EC_POINT_free) + + value = self._int_to_bn(private_value) + value = self._ffi.gc(value, self._lib.BN_clear_free) + + with self._tmp_bn_ctx() as bn_ctx: + res = self._lib.EC_POINT_mul( + group, point, value, self._ffi.NULL, self._ffi.NULL, bn_ctx + ) + self.openssl_assert(res == 1) + + bn_x = self._lib.BN_CTX_get(bn_ctx) + bn_y = self._lib.BN_CTX_get(bn_ctx) + + res = get_func(group, point, bn_x, bn_y, bn_ctx) + if res != 1: + self._consume_errors() + raise ValueError("Unable to derive key from private_value") + + res = self._lib.EC_KEY_set_public_key(ec_cdata, point) + self.openssl_assert(res == 1) + private = self._int_to_bn(private_value) + private = self._ffi.gc(private, self._lib.BN_clear_free) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def _ec_key_new_by_curve(self, curve: ec.EllipticCurve): + curve_nid = self._elliptic_curve_to_nid(curve) + return self._ec_key_new_by_curve_nid(curve_nid) + + def _ec_key_new_by_curve_nid(self, curve_nid: int): + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + return self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + def elliptic_curve_exchange_algorithm_supported( + self, algorithm: ec.ECDH, curve: ec.EllipticCurve + ) -> bool: + if self._fips_enabled and not isinstance( + curve, self._fips_ecdh_curves + ): + return False + + return self.elliptic_curve_supported(curve) and isinstance( + algorithm, ec.ECDH + ) + + def _ec_cdata_to_evp_pkey(self, ec_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, ec_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _elliptic_curve_to_nid(self, curve: ec.EllipticCurve) -> int: + """ + Get the NID for a curve name. + """ + + curve_aliases = {"secp192r1": "prime192v1", "secp256r1": "prime256v1"} + + curve_name = curve_aliases.get(curve.name, curve.name) + + curve_nid = self._lib.OBJ_sn2nid(curve_name.encode()) + if curve_nid == self._lib.NID_undef: + raise UnsupportedAlgorithm( + "{} is not a supported elliptic curve".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE, + ) + return curve_nid + + @contextmanager + def _tmp_bn_ctx(self): + bn_ctx = self._lib.BN_CTX_new() + self.openssl_assert(bn_ctx != self._ffi.NULL) + bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free) + self._lib.BN_CTX_start(bn_ctx) + try: + yield bn_ctx + finally: + self._lib.BN_CTX_end(bn_ctx) + + def _ec_key_determine_group_get_func(self, ctx): + """ + Given an EC_KEY determine the group and what function is required to + get point coordinates. 
+ """ + self.openssl_assert(ctx != self._ffi.NULL) + + nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field") + self.openssl_assert(nid_two_field != self._lib.NID_undef) + + group = self._lib.EC_KEY_get0_group(ctx) + self.openssl_assert(group != self._ffi.NULL) + + method = self._lib.EC_GROUP_method_of(group) + self.openssl_assert(method != self._ffi.NULL) + + nid = self._lib.EC_METHOD_get_field_type(method) + self.openssl_assert(nid != self._lib.NID_undef) + + if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M: + get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m + else: + get_func = self._lib.EC_POINT_get_affine_coordinates_GFp + + assert get_func + + return get_func, group + + def _ec_key_set_public_key_affine_coordinates(self, ctx, x: int, y: int): + """ + Sets the public key point in the EC_KEY context to the affine x and y + values. + """ + + if x < 0 or y < 0: + raise ValueError( + "Invalid EC key. Both x and y must be non-negative." + ) + + x = self._ffi.gc(self._int_to_bn(x), self._lib.BN_free) + y = self._ffi.gc(self._int_to_bn(y), self._lib.BN_free) + res = self._lib.EC_KEY_set_public_key_affine_coordinates(ctx, x, y) + if res != 1: + self._consume_errors() + raise ValueError("Invalid EC key.") + + def _private_key_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + key, + evp_pkey, + cdata, + ) -> bytes: + # validate argument types + if not isinstance(encoding, serialization.Encoding): + raise TypeError("encoding must be an item from the Encoding enum") + if not isinstance(format, serialization.PrivateFormat): + raise TypeError( + "format must be an item from the PrivateFormat enum" + ) + if not isinstance( + encryption_algorithm, serialization.KeySerializationEncryption + ): + raise TypeError( + "Encryption algorithm must be a KeySerializationEncryption " + "instance" + ) + + # validate password + if isinstance(encryption_algorithm, serialization.NoEncryption): + password = b"" + elif isinstance( + encryption_algorithm, serialization.BestAvailableEncryption + ): + password = encryption_algorithm.password + if len(password) > 1023: + raise ValueError( + "Passwords longer than 1023 bytes are not supported by " + "this backend" + ) + else: + raise ValueError("Unsupported encryption type") + + # PKCS8 + PEM/DER + if format is serialization.PrivateFormat.PKCS8: + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_PKCS8PrivateKey + elif encoding is serialization.Encoding.DER: + write_bio = self._lib.i2d_PKCS8PrivateKey_bio + else: + raise ValueError("Unsupported encoding for PKCS8") + return self._private_key_bytes_via_bio( + write_bio, evp_pkey, password + ) + + # TraditionalOpenSSL + PEM/DER + if format is serialization.PrivateFormat.TraditionalOpenSSL: + if self._fips_enabled and not isinstance( + encryption_algorithm, serialization.NoEncryption + ): + raise ValueError( + "Encrypted traditional OpenSSL format is not " + "supported in FIPS mode." 
+ ) + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if encoding is serialization.Encoding.PEM: + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.PEM_write_bio_RSAPrivateKey + elif key_type == self._lib.EVP_PKEY_DSA: + write_bio = self._lib.PEM_write_bio_DSAPrivateKey + elif key_type == self._lib.EVP_PKEY_EC: + write_bio = self._lib.PEM_write_bio_ECPrivateKey + else: + raise ValueError( + "Unsupported key type for TraditionalOpenSSL" + ) + return self._private_key_bytes_via_bio( + write_bio, cdata, password + ) + + if encoding is serialization.Encoding.DER: + if password: + raise ValueError( + "Encryption is not supported for DER encoded " + "traditional OpenSSL keys" + ) + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.i2d_RSAPrivateKey_bio + elif key_type == self._lib.EVP_PKEY_EC: + write_bio = self._lib.i2d_ECPrivateKey_bio + elif key_type == self._lib.EVP_PKEY_DSA: + write_bio = self._lib.i2d_DSAPrivateKey_bio + else: + raise ValueError( + "Unsupported key type for TraditionalOpenSSL" + ) + return self._bio_func_output(write_bio, cdata) + + raise ValueError("Unsupported encoding for TraditionalOpenSSL") + + # OpenSSH + PEM + if format is serialization.PrivateFormat.OpenSSH: + if encoding is serialization.Encoding.PEM: + return ssh.serialize_ssh_private_key(key, password) + + raise ValueError( + "OpenSSH private key format can only be used" + " with PEM encoding" + ) + + # Anything that key-specific code was supposed to handle earlier, + # like Raw. + raise ValueError("format is invalid with this key") + + def _private_key_bytes_via_bio(self, write_bio, evp_pkey, password): + if not password: + evp_cipher = self._ffi.NULL + else: + # This is a curated value that we will update over time. + evp_cipher = self._lib.EVP_get_cipherbyname(b"aes-256-cbc") + + return self._bio_func_output( + write_bio, + evp_pkey, + evp_cipher, + password, + len(password), + self._ffi.NULL, + self._ffi.NULL, + ) + + def _bio_func_output(self, write_bio, *args): + bio = self._create_mem_bio_gc() + res = write_bio(bio, *args) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _public_key_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + key, + evp_pkey, + cdata, + ) -> bytes: + if not isinstance(encoding, serialization.Encoding): + raise TypeError("encoding must be an item from the Encoding enum") + if not isinstance(format, serialization.PublicFormat): + raise TypeError( + "format must be an item from the PublicFormat enum" + ) + + # SubjectPublicKeyInfo + PEM/DER + if format is serialization.PublicFormat.SubjectPublicKeyInfo: + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_PUBKEY + elif encoding is serialization.Encoding.DER: + write_bio = self._lib.i2d_PUBKEY_bio + else: + raise ValueError( + "SubjectPublicKeyInfo works only with PEM or DER encoding" + ) + return self._bio_func_output(write_bio, evp_pkey) + + # PKCS1 + PEM/DER + if format is serialization.PublicFormat.PKCS1: + # Only RSA is supported here. 
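+            # A minimal usage sketch (not part of this module), assuming an
+            # RSA public key object backed by this backend:
+            #
+            #     public_key.public_bytes(
+            #         serialization.Encoding.PEM,
+            #         serialization.PublicFormat.PKCS1,
+            #     )  # bare RSAPublicKey, "-----BEGIN RSA PUBLIC KEY-----",
+            #        # vs. "-----BEGIN PUBLIC KEY-----" for the
+            #        # SubjectPublicKeyInfo branch above.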
+ key_type = self._lib.EVP_PKEY_id(evp_pkey) + if key_type != self._lib.EVP_PKEY_RSA: + raise ValueError("PKCS1 format is supported only for RSA keys") + + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_RSAPublicKey + elif encoding is serialization.Encoding.DER: + write_bio = self._lib.i2d_RSAPublicKey_bio + else: + raise ValueError("PKCS1 works only with PEM or DER encoding") + return self._bio_func_output(write_bio, cdata) + + # OpenSSH + OpenSSH + if format is serialization.PublicFormat.OpenSSH: + if encoding is serialization.Encoding.OpenSSH: + return ssh.serialize_ssh_public_key(key) + + raise ValueError( + "OpenSSH format must be used with OpenSSH encoding" + ) + + # Anything that key-specific code was supposed to handle earlier, + # like Raw, CompressedPoint, UncompressedPoint + raise ValueError("format is invalid with this key") + + def dh_supported(self) -> bool: + return not self._lib.CRYPTOGRAPHY_IS_BORINGSSL + + def generate_dh_parameters( + self, generator: int, key_size: int + ) -> dh.DHParameters: + if key_size < dh._MIN_MODULUS_SIZE: + raise ValueError( + "DH key_size must be at least {} bits".format( + dh._MIN_MODULUS_SIZE + ) + ) + + if generator not in (2, 5): + raise ValueError("DH generator must be 2 or 5") + + dh_param_cdata = self._lib.DH_new() + self.openssl_assert(dh_param_cdata != self._ffi.NULL) + dh_param_cdata = self._ffi.gc(dh_param_cdata, self._lib.DH_free) + + res = self._lib.DH_generate_parameters_ex( + dh_param_cdata, key_size, generator, self._ffi.NULL + ) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_param_cdata) + + def _dh_cdata_to_evp_pkey(self, dh_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DH(evp_pkey, dh_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def generate_dh_private_key( + self, parameters: dh.DHParameters + ) -> dh.DHPrivateKey: + dh_key_cdata = _dh_params_dup( + parameters._dh_cdata, self # type: ignore[attr-defined] + ) + + res = self._lib.DH_generate_key(dh_key_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_key_cdata) + + return _DHPrivateKey(self, dh_key_cdata, evp_pkey) + + def generate_dh_private_key_and_parameters( + self, generator: int, key_size: int + ) -> dh.DHPrivateKey: + return self.generate_dh_private_key( + self.generate_dh_parameters(generator, key_size) + ) + + def load_dh_private_numbers( + self, numbers: dh.DHPrivateNumbers + ) -> dh.DHPrivateKey: + parameter_numbers = numbers.public_numbers.parameter_numbers + + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + # DH_check will return DH_NOT_SUITABLE_GENERATOR if p % 24 does not + # equal 11 when the generator is 2 (a quadratic nonresidue). + # We want to ignore that error because p % 24 == 23 is also fine. + # Specifically, g is then a quadratic residue. 
Within the context of + # Diffie-Hellman this means it can only generate half the possible + # values. That sounds bad, but quadratic nonresidues leak a bit of + # the key to the attacker in exchange for having the full key space + # available. See: https://crypto.stackexchange.com/questions/12961 + if codes[0] != 0 and not ( + parameter_numbers.g == 2 + and codes[0] ^ self._lib.DH_NOT_SUITABLE_GENERATOR == 0 + ): + raise ValueError("DH private numbers did not pass safety checks.") + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPrivateKey(self, dh_cdata, evp_pkey) + + def load_dh_public_numbers( + self, numbers: dh.DHPublicNumbers + ) -> dh.DHPublicKey: + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + parameter_numbers = numbers.parameter_numbers + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.y) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, self._ffi.NULL) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPublicKey(self, dh_cdata, evp_pkey) + + def load_dh_parameter_numbers( + self, numbers: dh.DHParameterNumbers + ) -> dh.DHParameters: + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(numbers.p) + g = self._int_to_bn(numbers.g) + + if numbers.q is not None: + q = self._int_to_bn(numbers.q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_cdata) + + def dh_parameters_supported( + self, p: int, g: int, q: typing.Optional[int] = None + ) -> bool: + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(p) + g = self._int_to_bn(g) + + if q is not None: + q = self._int_to_bn(q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + return codes[0] == 0 + + def dh_x942_serialization_supported(self) -> bool: + return self._lib.Cryptography_HAS_EVP_PKEY_DHX == 1 + + def x25519_load_public_bytes(self, data: bytes) -> x25519.X25519PublicKey: + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_public_key + if len(data) != 32: + raise ValueError("An X25519 public key is 32 bytes long") + + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set_type(evp_pkey, self._lib.NID_X25519) + self.openssl_assert(res == 1) + res = self._lib.EVP_PKEY_set1_tls_encodedpoint( + evp_pkey, data, len(data) + ) + self.openssl_assert(res == 1) + return _X25519PublicKey(self, evp_pkey) + + def x25519_load_private_bytes( + self, data: bytes + ) -> x25519.X25519PrivateKey: + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_private_key and drop the + # zeroed_bytearray garbage. 
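+        # A sketch of that one-shot alternative for OpenSSL >= 1.1.1 (not the
+        # code path used here), mirroring x448_load_private_bytes below:
+        #
+        #     data_ptr = self._ffi.from_buffer(data)
+        #     evp_pkey = self._lib.EVP_PKEY_new_raw_private_key(
+        #         self._lib.NID_X25519, self._ffi.NULL, data_ptr, len(data)
+        #     )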
+ # OpenSSL only has facilities for loading PKCS8 formatted private + # keys using the algorithm identifiers specified in + # https://tools.ietf.org/html/draft-ietf-curdle-pkix-09. + # This is the standard PKCS8 prefix for a 32 byte X25519 key. + # The form is: + # 0:d=0 hl=2 l= 46 cons: SEQUENCE + # 2:d=1 hl=2 l= 1 prim: INTEGER :00 + # 5:d=1 hl=2 l= 5 cons: SEQUENCE + # 7:d=2 hl=2 l= 3 prim: OBJECT :1.3.101.110 + # 12:d=1 hl=2 l= 34 prim: OCTET STRING (the key) + # Of course there's a bit more complexity. In reality OCTET STRING + # contains an OCTET STRING of length 32! So the last two bytes here + # are \x04\x20, which is an OCTET STRING of length 32. + if len(data) != 32: + raise ValueError("An X25519 private key is 32 bytes long") + + pkcs8_prefix = b'0.\x02\x01\x000\x05\x06\x03+en\x04"\x04 ' + with self._zeroed_bytearray(48) as ba: + ba[0:16] = pkcs8_prefix + ba[16:] = data + bio = self._bytes_to_bio(ba) + evp_pkey = self._lib.d2i_PrivateKey_bio(bio.bio, self._ffi.NULL) + + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + self.openssl_assert( + self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_X25519 + ) + return _X25519PrivateKey(self, evp_pkey) + + def _evp_pkey_keygen_gc(self, nid): + evp_pkey_ctx = self._lib.EVP_PKEY_CTX_new_id(nid, self._ffi.NULL) + self.openssl_assert(evp_pkey_ctx != self._ffi.NULL) + evp_pkey_ctx = self._ffi.gc(evp_pkey_ctx, self._lib.EVP_PKEY_CTX_free) + res = self._lib.EVP_PKEY_keygen_init(evp_pkey_ctx) + self.openssl_assert(res == 1) + evp_ppkey = self._ffi.new("EVP_PKEY **") + res = self._lib.EVP_PKEY_keygen(evp_pkey_ctx, evp_ppkey) + self.openssl_assert(res == 1) + self.openssl_assert(evp_ppkey[0] != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_ppkey[0], self._lib.EVP_PKEY_free) + return evp_pkey + + def x25519_generate_key(self) -> x25519.X25519PrivateKey: + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_X25519) + return _X25519PrivateKey(self, evp_pkey) + + def x25519_supported(self) -> bool: + if self._fips_enabled: + return False + return not self._lib.CRYPTOGRAPHY_IS_LIBRESSL + + def x448_load_public_bytes(self, data: bytes) -> x448.X448PublicKey: + if len(data) != 56: + raise ValueError("An X448 public key is 56 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_X448, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return _X448PublicKey(self, evp_pkey) + + def x448_load_private_bytes(self, data: bytes) -> x448.X448PrivateKey: + if len(data) != 56: + raise ValueError("An X448 private key is 56 bytes long") + + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_X448, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return _X448PrivateKey(self, evp_pkey) + + def x448_generate_key(self) -> x448.X448PrivateKey: + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_X448) + return _X448PrivateKey(self, evp_pkey) + + def x448_supported(self) -> bool: + if self._fips_enabled: + return False + return ( + not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 + and not self._lib.CRYPTOGRAPHY_IS_BORINGSSL + ) + + def ed25519_supported(self) -> bool: + if self._fips_enabled: + return False + return not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B + + def ed25519_load_public_bytes( + self, data: bytes + ) -> 
ed25519.Ed25519PublicKey: + utils._check_bytes("data", data) + + if len(data) != ed25519._ED25519_KEY_SIZE: + raise ValueError("An Ed25519 public key is 32 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_ED25519, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed25519PublicKey(self, evp_pkey) + + def ed25519_load_private_bytes( + self, data: bytes + ) -> ed25519.Ed25519PrivateKey: + if len(data) != ed25519._ED25519_KEY_SIZE: + raise ValueError("An Ed25519 private key is 32 bytes long") + + utils._check_byteslike("data", data) + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_ED25519, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed25519PrivateKey(self, evp_pkey) + + def ed25519_generate_key(self) -> ed25519.Ed25519PrivateKey: + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_ED25519) + return _Ed25519PrivateKey(self, evp_pkey) + + def ed448_supported(self) -> bool: + if self._fips_enabled: + return False + return ( + not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B + and not self._lib.CRYPTOGRAPHY_IS_BORINGSSL + ) + + def ed448_load_public_bytes(self, data: bytes) -> ed448.Ed448PublicKey: + utils._check_bytes("data", data) + if len(data) != _ED448_KEY_SIZE: + raise ValueError("An Ed448 public key is 57 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_ED448, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed448PublicKey(self, evp_pkey) + + def ed448_load_private_bytes(self, data: bytes) -> ed448.Ed448PrivateKey: + utils._check_byteslike("data", data) + if len(data) != _ED448_KEY_SIZE: + raise ValueError("An Ed448 private key is 57 bytes long") + + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_ED448, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed448PrivateKey(self, evp_pkey) + + def ed448_generate_key(self) -> ed448.Ed448PrivateKey: + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_ED448) + return _Ed448PrivateKey(self, evp_pkey) + + def derive_scrypt( + self, + key_material: bytes, + salt: bytes, + length: int, + n: int, + r: int, + p: int, + ) -> bytes: + buf = self._ffi.new("unsigned char[]", length) + key_material_ptr = self._ffi.from_buffer(key_material) + res = self._lib.EVP_PBE_scrypt( + key_material_ptr, + len(key_material), + salt, + len(salt), + n, + r, + p, + scrypt._MEM_LIMIT, + buf, + length, + ) + if res != 1: + errors = self._consume_errors_with_text() + # memory required formula explained here: + # https://blog.filippo.io/the-scrypt-parameters/ + min_memory = 128 * n * r // (1024**2) + raise MemoryError( + "Not enough memory to derive key. These parameters require" + " {} MB of memory.".format(min_memory), + errors, + ) + return self._ffi.buffer(buf)[:] + + def aead_cipher_supported(self, cipher) -> bool: + cipher_name = aead._aead_cipher_name(cipher) + if self._fips_enabled and cipher_name not in self._fips_aead: + return False + # SIV isn't loaded through get_cipherbyname but instead a new fetch API + # only available in 3.0+. 
But if we know we're on 3.0+ then we know + # it's supported. + if cipher_name.endswith(b"-siv"): + return self._lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER == 1 + else: + return ( + self._lib.EVP_get_cipherbyname(cipher_name) != self._ffi.NULL + ) + + @contextlib.contextmanager + def _zeroed_bytearray(self, length: int) -> typing.Iterator[bytearray]: + """ + This method creates a bytearray, which we copy data into (hopefully + also from a mutable buffer that can be dynamically erased!), and then + zero when we're done. + """ + ba = bytearray(length) + try: + yield ba + finally: + self._zero_data(ba, length) + + def _zero_data(self, data, length: int) -> None: + # We clear things this way because at the moment we're not + # sure of a better way that can guarantee it overwrites the + # memory of a bytearray and doesn't just replace the underlying char *. + for i in range(length): + data[i] = 0 + + @contextlib.contextmanager + def _zeroed_null_terminated_buf(self, data): + """ + This method takes bytes, which can be a bytestring or a mutable + buffer like a bytearray, and yields a null-terminated version of that + data. This is required because PKCS12_parse doesn't take a length with + its password char * and ffi.from_buffer doesn't provide null + termination. So, to support zeroing the data via bytearray we + need to build this ridiculous construct that copies the memory, but + zeroes it after use. + """ + if data is None: + yield self._ffi.NULL + else: + data_len = len(data) + buf = self._ffi.new("char[]", data_len + 1) + self._ffi.memmove(buf, data, data_len) + try: + yield buf + finally: + # Cast to a uint8_t * so we can assign by integer + self._zero_data(self._ffi.cast("uint8_t *", buf), data_len) + + def load_key_and_certificates_from_pkcs12( + self, data: bytes, password: typing.Optional[bytes] + ) -> typing.Tuple[ + typing.Optional[PRIVATE_KEY_TYPES], + typing.Optional[x509.Certificate], + typing.List[x509.Certificate], + ]: + pkcs12 = self.load_pkcs12(data, password) + return ( + pkcs12.key, + pkcs12.cert.certificate if pkcs12.cert else None, + [cert.certificate for cert in pkcs12.additional_certs], + ) + + def load_pkcs12( + self, data: bytes, password: typing.Optional[bytes] + ) -> PKCS12KeyAndCertificates: + if password is not None: + utils._check_byteslike("password", password) + + bio = self._bytes_to_bio(data) + p12 = self._lib.d2i_PKCS12_bio(bio.bio, self._ffi.NULL) + if p12 == self._ffi.NULL: + self._consume_errors() + raise ValueError("Could not deserialize PKCS12 data") + + p12 = self._ffi.gc(p12, self._lib.PKCS12_free) + evp_pkey_ptr = self._ffi.new("EVP_PKEY **") + x509_ptr = self._ffi.new("X509 **") + sk_x509_ptr = self._ffi.new("Cryptography_STACK_OF_X509 **") + with self._zeroed_null_terminated_buf(password) as password_buf: + res = self._lib.PKCS12_parse( + p12, password_buf, evp_pkey_ptr, x509_ptr, sk_x509_ptr + ) + + # Workaround for + # https://github.com/libressl-portable/portable/issues/659 + if self._lib.CRYPTOGRAPHY_LIBRESSL_LESS_THAN_340: + self._consume_errors() + + if res == 0: + self._consume_errors() + raise ValueError("Invalid password or PKCS12 data") + + cert = None + key = None + additional_certificates = [] + + if evp_pkey_ptr[0] != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey_ptr[0], self._lib.EVP_PKEY_free) + key = self._evp_pkey_to_private_key(evp_pkey) + + if x509_ptr[0] != self._ffi.NULL: + x509 = self._ffi.gc(x509_ptr[0], self._lib.X509_free) + cert_obj = self._ossl2cert(x509) + name = None + maybe_name = self._lib.X509_alias_get0(x509, 
self._ffi.NULL) + if maybe_name != self._ffi.NULL: + name = self._ffi.string(maybe_name) + cert = PKCS12Certificate(cert_obj, name) + + if sk_x509_ptr[0] != self._ffi.NULL: + sk_x509 = self._ffi.gc(sk_x509_ptr[0], self._lib.sk_X509_free) + num = self._lib.sk_X509_num(sk_x509_ptr[0]) + + # In OpenSSL < 3.0.0 PKCS12 parsing reverses the order of the + # certificates. + indices: typing.Iterable[int] + if ( + self._lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER + or self._lib.CRYPTOGRAPHY_IS_BORINGSSL + ): + indices = range(num) + else: + indices = reversed(range(num)) + + for i in indices: + x509 = self._lib.sk_X509_value(sk_x509, i) + self.openssl_assert(x509 != self._ffi.NULL) + x509 = self._ffi.gc(x509, self._lib.X509_free) + addl_cert = self._ossl2cert(x509) + addl_name = None + maybe_name = self._lib.X509_alias_get0(x509, self._ffi.NULL) + if maybe_name != self._ffi.NULL: + addl_name = self._ffi.string(maybe_name) + additional_certificates.append( + PKCS12Certificate(addl_cert, addl_name) + ) + + return PKCS12KeyAndCertificates(key, cert, additional_certificates) + + def serialize_key_and_certificates_to_pkcs12( + self, + name: typing.Optional[bytes], + key: typing.Optional[_ALLOWED_PKCS12_TYPES], + cert: typing.Optional[x509.Certificate], + cas: typing.Optional[typing.List[_PKCS12_CAS_TYPES]], + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + password = None + if name is not None: + utils._check_bytes("name", name) + + if isinstance(encryption_algorithm, serialization.NoEncryption): + nid_cert = -1 + nid_key = -1 + pkcs12_iter = 0 + mac_iter = 0 + elif isinstance( + encryption_algorithm, serialization.BestAvailableEncryption + ): + # PKCS12 encryption is hopeless trash and can never be fixed. + # This is the least terrible option. + nid_cert = self._lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC + nid_key = self._lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC + # At least we can set this higher than OpenSSL's default + pkcs12_iter = 20000 + # mac_iter chosen for compatibility reasons, see: + # https://www.openssl.org/docs/man1.1.1/man3/PKCS12_create.html + # Did we mention how lousy PKCS12 encryption is? 
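+            # (Per the man page linked above, a MAC iteration count of 1 is
+            # kept for compatibility with old importers that predate
+            # configurable MAC iteration counts.)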
+ mac_iter = 1 + password = encryption_algorithm.password + else: + raise ValueError("Unsupported key encryption type") + + if cas is None or len(cas) == 0: + sk_x509 = self._ffi.NULL + else: + sk_x509 = self._lib.sk_X509_new_null() + sk_x509 = self._ffi.gc(sk_x509, self._lib.sk_X509_free) + + # This list is to keep the x509 values alive until end of function + ossl_cas = [] + for ca in cas: + if isinstance(ca, PKCS12Certificate): + ca_alias = ca.friendly_name + ossl_ca = self._cert2ossl(ca.certificate) + with self._zeroed_null_terminated_buf( + ca_alias + ) as ca_name_buf: + res = self._lib.X509_alias_set1( + ossl_ca, ca_name_buf, -1 + ) + self.openssl_assert(res == 1) + else: + ossl_ca = self._cert2ossl(ca) + ossl_cas.append(ossl_ca) + res = self._lib.sk_X509_push(sk_x509, ossl_ca) + backend.openssl_assert(res >= 1) + + with self._zeroed_null_terminated_buf(password) as password_buf: + with self._zeroed_null_terminated_buf(name) as name_buf: + ossl_cert = self._cert2ossl(cert) if cert else self._ffi.NULL + if key is not None: + evp_pkey = key._evp_pkey # type: ignore[union-attr] + else: + evp_pkey = self._ffi.NULL + + p12 = self._lib.PKCS12_create( + password_buf, + name_buf, + evp_pkey, + ossl_cert, + sk_x509, + nid_key, + nid_cert, + pkcs12_iter, + mac_iter, + 0, + ) + + self.openssl_assert(p12 != self._ffi.NULL) + p12 = self._ffi.gc(p12, self._lib.PKCS12_free) + + bio = self._create_mem_bio_gc() + res = self._lib.i2d_PKCS12_bio(bio, p12) + self.openssl_assert(res > 0) + return self._read_mem_bio(bio) + + def poly1305_supported(self) -> bool: + if self._fips_enabled: + return False + return self._lib.Cryptography_HAS_POLY1305 == 1 + + def create_poly1305_ctx(self, key: bytes) -> _Poly1305Context: + utils._check_byteslike("key", key) + if len(key) != _POLY1305_KEY_SIZE: + raise ValueError("A poly1305 key is 32 bytes long") + + return _Poly1305Context(self, key) + + def pkcs7_supported(self) -> bool: + return not self._lib.CRYPTOGRAPHY_IS_BORINGSSL + + def load_pem_pkcs7_certificates( + self, data: bytes + ) -> typing.List[x509.Certificate]: + utils._check_bytes("data", data) + bio = self._bytes_to_bio(data) + p7 = self._lib.PEM_read_bio_PKCS7( + bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if p7 == self._ffi.NULL: + self._consume_errors() + raise ValueError("Unable to parse PKCS7 data") + + p7 = self._ffi.gc(p7, self._lib.PKCS7_free) + return self._load_pkcs7_certificates(p7) + + def load_der_pkcs7_certificates( + self, data: bytes + ) -> typing.List[x509.Certificate]: + utils._check_bytes("data", data) + bio = self._bytes_to_bio(data) + p7 = self._lib.d2i_PKCS7_bio(bio.bio, self._ffi.NULL) + if p7 == self._ffi.NULL: + self._consume_errors() + raise ValueError("Unable to parse PKCS7 data") + + p7 = self._ffi.gc(p7, self._lib.PKCS7_free) + return self._load_pkcs7_certificates(p7) + + def _load_pkcs7_certificates(self, p7): + nid = self._lib.OBJ_obj2nid(p7.type) + self.openssl_assert(nid != self._lib.NID_undef) + if nid != self._lib.NID_pkcs7_signed: + raise UnsupportedAlgorithm( + "Only basic signed structures are currently supported. NID" + " for this data was {}".format(nid), + _Reasons.UNSUPPORTED_SERIALIZATION, + ) + + sk_x509 = p7.d.sign.cert + num = self._lib.sk_X509_num(sk_x509) + certs = [] + for i in range(num): + x509 = self._lib.sk_X509_value(sk_x509, i) + self.openssl_assert(x509 != self._ffi.NULL) + res = self._lib.X509_up_ref(x509) + # When OpenSSL is less than 1.1.0 up_ref returns the current + # refcount. On 1.1.0+ it returns 1 for success. 
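+            # Either way, a successful call returns >= 1, which is what the
+            # assertion below checks.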
+            self.openssl_assert(res >= 1)
+            x509 = self._ffi.gc(x509, self._lib.X509_free)
+            cert = self._ossl2cert(x509)
+            certs.append(cert)
+
+        return certs
+
+    def pkcs7_serialize_certificates(
+        self,
+        certs: typing.List[x509.Certificate],
+        encoding: serialization.Encoding,
+    ):
+        certs = list(certs)
+        if not certs or not all(
+            isinstance(cert, x509.Certificate) for cert in certs
+        ):
+            raise TypeError("certs must be a list of certs with length >= 1")
+
+        if encoding not in (
+            serialization.Encoding.PEM,
+            serialization.Encoding.DER,
+        ):
+            raise TypeError(
+                "encoding must be DER or PEM from the Encoding enum"
+            )
+
+        certs_sk = self._lib.sk_X509_new_null()
+        certs_sk = self._ffi.gc(certs_sk, self._lib.sk_X509_free)
+        # This list is to keep the x509 values alive until end of function
+        ossl_certs = []
+        for cert in certs:
+            ossl_cert = self._cert2ossl(cert)
+            ossl_certs.append(ossl_cert)
+            res = self._lib.sk_X509_push(certs_sk, ossl_cert)
+            self.openssl_assert(res >= 1)
+        # We use PKCS7_sign here because it creates the PKCS7 and PKCS7_SIGNED
+        # structures for us rather than requiring manual assignment.
+        p7 = self._lib.PKCS7_sign(
+            self._ffi.NULL,
+            self._ffi.NULL,
+            certs_sk,
+            self._ffi.NULL,
+            self._lib.PKCS7_PARTIAL,
+        )
+        bio_out = self._create_mem_bio_gc()
+        if encoding is serialization.Encoding.PEM:
+            res = self._lib.PEM_write_bio_PKCS7_stream(
+                bio_out, p7, self._ffi.NULL, 0
+            )
+        else:
+            assert encoding is serialization.Encoding.DER
+            res = self._lib.i2d_PKCS7_bio(bio_out, p7)
+
+        self.openssl_assert(res == 1)
+        return self._read_mem_bio(bio_out)
+
+    def pkcs7_sign(
+        self,
+        builder: pkcs7.PKCS7SignatureBuilder,
+        encoding: serialization.Encoding,
+        options: typing.List[pkcs7.PKCS7Options],
+    ) -> bytes:
+        assert builder._data is not None
+        bio = self._bytes_to_bio(builder._data)
+        init_flags = self._lib.PKCS7_PARTIAL
+        final_flags = 0
+
+        if len(builder._additional_certs) == 0:
+            certs = self._ffi.NULL
+        else:
+            certs = self._lib.sk_X509_new_null()
+            certs = self._ffi.gc(certs, self._lib.sk_X509_free)
+            # This list is to keep the x509 values alive until end of function
+            ossl_certs = []
+            for cert in builder._additional_certs:
+                ossl_cert = self._cert2ossl(cert)
+                ossl_certs.append(ossl_cert)
+                res = self._lib.sk_X509_push(certs, ossl_cert)
+                self.openssl_assert(res >= 1)
+
+        if pkcs7.PKCS7Options.DetachedSignature in options:
+            # Don't embed the data in the PKCS7 structure
+            init_flags |= self._lib.PKCS7_DETACHED
+            final_flags |= self._lib.PKCS7_DETACHED
+
+        # This just inits a structure for us. However, there
+        # are flags we need to set, joy.
+        p7 = self._lib.PKCS7_sign(
+            self._ffi.NULL,
+            self._ffi.NULL,
+            certs,
+            self._ffi.NULL,
+            init_flags,
+        )
+        self.openssl_assert(p7 != self._ffi.NULL)
+        p7 = self._ffi.gc(p7, self._lib.PKCS7_free)
+        signer_flags = 0
+        # These flags are configurable on a per-signature basis
+        # but we've deliberately chosen to make the API only allow
+        # setting them across all signatures for now.
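+        # (PKCS7_NOSMIMECAP only omits the S/MIME capabilities attribute,
+        # while PKCS7_NOATTR omits all authenticated attributes.)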
+        if pkcs7.PKCS7Options.NoCapabilities in options:
+            signer_flags |= self._lib.PKCS7_NOSMIMECAP
+        elif pkcs7.PKCS7Options.NoAttributes in options:
+            signer_flags |= self._lib.PKCS7_NOATTR
+
+        if pkcs7.PKCS7Options.NoCerts in options:
+            signer_flags |= self._lib.PKCS7_NOCERTS
+
+        for certificate, private_key, hash_algorithm in builder._signers:
+            ossl_cert = self._cert2ossl(certificate)
+            md = self._evp_md_non_null_from_algorithm(hash_algorithm)
+            p7signerinfo = self._lib.PKCS7_sign_add_signer(
+                p7,
+                ossl_cert,
+                private_key._evp_pkey,  # type: ignore[union-attr]
+                md,
+                signer_flags,
+            )
+            self.openssl_assert(p7signerinfo != self._ffi.NULL)
+
+        for option in options:
+            # DetachedSignature, NoCapabilities, and NoAttributes are already
+            # handled so we just need to check these last two options.
+            if option is pkcs7.PKCS7Options.Text:
+                final_flags |= self._lib.PKCS7_TEXT
+            elif option is pkcs7.PKCS7Options.Binary:
+                final_flags |= self._lib.PKCS7_BINARY
+
+        bio_out = self._create_mem_bio_gc()
+        if encoding is serialization.Encoding.SMIME:
+            # This finalizes the structure
+            res = self._lib.SMIME_write_PKCS7(
+                bio_out, p7, bio.bio, final_flags
+            )
+        elif encoding is serialization.Encoding.PEM:
+            res = self._lib.PKCS7_final(p7, bio.bio, final_flags)
+            self.openssl_assert(res == 1)
+            res = self._lib.PEM_write_bio_PKCS7_stream(
+                bio_out, p7, bio.bio, final_flags
+            )
+        else:
+            assert encoding is serialization.Encoding.DER
+            # We need to call finalize here because i2d_PKCS7_bio does not
+            # finalize.
+            res = self._lib.PKCS7_final(p7, bio.bio, final_flags)
+            self.openssl_assert(res == 1)
+            # OpenSSL 3.0 leaves a random bio error on the stack:
+            # https://github.com/openssl/openssl/issues/16681
+            if self._lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER:
+                self._consume_errors()
+            res = self._lib.i2d_PKCS7_bio(bio_out, p7)
+        self.openssl_assert(res == 1)
+        return self._read_mem_bio(bio_out)
+
+
+class GetCipherByName:
+    def __init__(self, fmt: str):
+        self._fmt = fmt
+
+    def __call__(self, backend: Backend, cipher: CipherAlgorithm, mode: Mode):
+        cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()
+        return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii"))
+
+
+def _get_xts_cipher(backend: Backend, cipher: AES, mode):
+    cipher_name = "aes-{}-xts".format(cipher.key_size // 2)
+    return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii"))
+
+
+backend = Backend()
diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ciphers.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ciphers.py
new file mode 100644
index 0000000..1058de9
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ciphers.py
@@ -0,0 +1,282 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
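+
+# Usage sketch (illustrative only; assumes `key` and `nonce` are
+# caller-supplied bytes of suitable length). The _CipherContext below is
+# normally reached through the public hazmat ciphers API rather than
+# instantiated directly:
+#
+#     from cryptography.hazmat.primitives.ciphers import (
+#         Cipher, algorithms, modes,
+#     )
+#     enc = Cipher(algorithms.AES(key), modes.GCM(nonce)).encryptor()
+#     ct = enc.update(b"secret") + enc.finalize()
+#     tag = enc.tag  # fetched via the EVP_CTRL_AEAD_GET_TAG call in finalize()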
+ +import typing + +from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import ciphers +from cryptography.hazmat.primitives.ciphers import algorithms, modes + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _CipherContext: + _ENCRYPT = 1 + _DECRYPT = 0 + _MAX_CHUNK_SIZE = 2**30 - 1 + + def __init__( + self, backend: "Backend", cipher, mode, operation: int + ) -> None: + self._backend = backend + self._cipher = cipher + self._mode = mode + self._operation = operation + self._tag: typing.Optional[bytes] = None + + if isinstance(self._cipher, ciphers.BlockCipherAlgorithm): + self._block_size_bytes = self._cipher.block_size // 8 + else: + self._block_size_bytes = 1 + + ctx = self._backend._lib.EVP_CIPHER_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.EVP_CIPHER_CTX_free + ) + + registry = self._backend._cipher_registry + try: + adapter = registry[type(cipher), type(mode)] + except KeyError: + raise UnsupportedAlgorithm( + "cipher {} in {} mode is not supported " + "by this backend.".format( + cipher.name, mode.name if mode else mode + ), + _Reasons.UNSUPPORTED_CIPHER, + ) + + evp_cipher = adapter(self._backend, cipher, mode) + if evp_cipher == self._backend._ffi.NULL: + msg = "cipher {0.name} ".format(cipher) + if mode is not None: + msg += "in {0.name} mode ".format(mode) + msg += ( + "is not supported by this backend (Your version of OpenSSL " + "may be too old. Current version: {}.)" + ).format(self._backend.openssl_version_text()) + raise UnsupportedAlgorithm(msg, _Reasons.UNSUPPORTED_CIPHER) + + if isinstance(mode, modes.ModeWithInitializationVector): + iv_nonce = self._backend._ffi.from_buffer( + mode.initialization_vector + ) + elif isinstance(mode, modes.ModeWithTweak): + iv_nonce = self._backend._ffi.from_buffer(mode.tweak) + elif isinstance(mode, modes.ModeWithNonce): + iv_nonce = self._backend._ffi.from_buffer(mode.nonce) + elif isinstance(cipher, algorithms.ChaCha20): + iv_nonce = self._backend._ffi.from_buffer(cipher.nonce) + else: + iv_nonce = self._backend._ffi.NULL + # begin init with cipher and operation type + res = self._backend._lib.EVP_CipherInit_ex( + ctx, + evp_cipher, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + operation, + ) + self._backend.openssl_assert(res != 0) + # set the key length to handle variable key ciphers + res = self._backend._lib.EVP_CIPHER_CTX_set_key_length( + ctx, len(cipher.key) + ) + self._backend.openssl_assert(res != 0) + if isinstance(mode, modes.GCM): + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, + self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN, + len(iv_nonce), + self._backend._ffi.NULL, + ) + self._backend.openssl_assert(res != 0) + if mode.tag is not None: + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, + self._backend._lib.EVP_CTRL_AEAD_SET_TAG, + len(mode.tag), + mode.tag, + ) + self._backend.openssl_assert(res != 0) + self._tag = mode.tag + + # pass key/iv + res = self._backend._lib.EVP_CipherInit_ex( + ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.from_buffer(cipher.key), + iv_nonce, + operation, + ) + + # Check for XTS mode duplicate keys error + errors = self._backend._consume_errors() + lib = self._backend._lib + if res == 0 and ( + ( + lib.CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER + and errors[0]._lib_reason_match( + lib.ERR_LIB_EVP, lib.EVP_R_XTS_DUPLICATED_KEYS + ) + ) + or ( + lib.Cryptography_HAS_PROVIDERS + and 
errors[0]._lib_reason_match( + lib.ERR_LIB_PROV, lib.PROV_R_XTS_DUPLICATED_KEYS + ) + ) + ): + raise ValueError("In XTS mode duplicated keys are not allowed") + + self._backend.openssl_assert(res != 0, errors=errors) + + # We purposely disable padding here as it's handled higher up in the + # API. + self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0) + self._ctx = ctx + + def update(self, data: bytes) -> bytes: + buf = bytearray(len(data) + self._block_size_bytes - 1) + n = self.update_into(data, buf) + return bytes(buf[:n]) + + def update_into(self, data: bytes, buf: bytes) -> int: + total_data_len = len(data) + if len(buf) < (total_data_len + self._block_size_bytes - 1): + raise ValueError( + "buffer must be at least {} bytes for this " + "payload".format(len(data) + self._block_size_bytes - 1) + ) + + data_processed = 0 + total_out = 0 + outlen = self._backend._ffi.new("int *") + baseoutbuf = self._backend._ffi.from_buffer(buf) + baseinbuf = self._backend._ffi.from_buffer(data) + + while data_processed != total_data_len: + outbuf = baseoutbuf + total_out + inbuf = baseinbuf + data_processed + inlen = min(self._MAX_CHUNK_SIZE, total_data_len - data_processed) + + res = self._backend._lib.EVP_CipherUpdate( + self._ctx, outbuf, outlen, inbuf, inlen + ) + if res == 0 and isinstance(self._mode, modes.XTS): + self._backend._consume_errors() + raise ValueError( + "In XTS mode you must supply at least a full block in the " + "first update call. For AES this is 16 bytes." + ) + else: + self._backend.openssl_assert(res != 0) + data_processed += inlen + total_out += outlen[0] + + return total_out + + def finalize(self) -> bytes: + if ( + self._operation == self._DECRYPT + and isinstance(self._mode, modes.ModeWithAuthenticationTag) + and self.tag is None + ): + raise ValueError( + "Authentication tag must be provided when decrypting." + ) + + buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes) + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen) + if res == 0: + errors = self._backend._consume_errors() + + if not errors and isinstance(self._mode, modes.GCM): + raise InvalidTag + + lib = self._backend._lib + self._backend.openssl_assert( + errors[0]._lib_reason_match( + lib.ERR_LIB_EVP, + lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH, + ) + or ( + lib.Cryptography_HAS_PROVIDERS + and errors[0]._lib_reason_match( + lib.ERR_LIB_PROV, + lib.PROV_R_WRONG_FINAL_BLOCK_LENGTH, + ) + ) + or ( + lib.CRYPTOGRAPHY_IS_BORINGSSL + and errors[0].reason + == lib.CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH + ), + errors=errors, + ) + raise ValueError( + "The length of the provided data is not a multiple of " + "the block length." 
+ ) + + if ( + isinstance(self._mode, modes.GCM) + and self._operation == self._ENCRYPT + ): + tag_buf = self._backend._ffi.new( + "unsigned char[]", self._block_size_bytes + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, + self._backend._lib.EVP_CTRL_AEAD_GET_TAG, + self._block_size_bytes, + tag_buf, + ) + self._backend.openssl_assert(res != 0) + self._tag = self._backend._ffi.buffer(tag_buf)[:] + + res = self._backend._lib.EVP_CIPHER_CTX_reset(self._ctx) + self._backend.openssl_assert(res == 1) + return self._backend._ffi.buffer(buf)[: outlen[0]] + + def finalize_with_tag(self, tag: bytes) -> bytes: + tag_len = len(tag) + if tag_len < self._mode._min_tag_length: + raise ValueError( + "Authentication tag must be {} bytes or longer.".format( + self._mode._min_tag_length + ) + ) + elif tag_len > self._block_size_bytes: + raise ValueError( + "Authentication tag cannot be more than {} bytes.".format( + self._block_size_bytes + ) + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag + ) + self._backend.openssl_assert(res != 0) + self._tag = tag + return self.finalize() + + def authenticate_additional_data(self, data: bytes) -> None: + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherUpdate( + self._ctx, + self._backend._ffi.NULL, + outlen, + self._backend._ffi.from_buffer(data), + len(data), + ) + self._backend.openssl_assert(res != 0) + + @property + def tag(self) -> typing.Optional[bytes]: + return self._tag diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/cmac.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/cmac.py new file mode 100644 index 0000000..35f50c5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/cmac.py @@ -0,0 +1,87 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
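+
+# Usage sketch (illustrative only; assumes `key` is a 16/24/32 byte AES key).
+# The context below is the backend-side implementation that
+# cryptography.hazmat.primitives.cmac.CMAC delegates to:
+#
+#     from cryptography.hazmat.primitives import cmac
+#     from cryptography.hazmat.primitives.ciphers import algorithms
+#     c = cmac.CMAC(algorithms.AES(key))
+#     c.update(b"message")
+#     tag = c.finalize()  # or c.verify(tag) -> InvalidSignature on mismatch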
+ +import typing + +from cryptography.exceptions import ( + InvalidSignature, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.ciphers.modes import CBC + +if typing.TYPE_CHECKING: + from cryptography.hazmat.primitives import ciphers + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _CMACContext: + def __init__( + self, + backend: "Backend", + algorithm: "ciphers.BlockCipherAlgorithm", + ctx=None, + ) -> None: + if not backend.cmac_algorithm_supported(algorithm): + raise UnsupportedAlgorithm( + "This backend does not support CMAC.", + _Reasons.UNSUPPORTED_CIPHER, + ) + + self._backend = backend + self._key = algorithm.key + self._algorithm = algorithm + self._output_length = algorithm.block_size // 8 + + if ctx is None: + registry = self._backend._cipher_registry + adapter = registry[type(algorithm), CBC] + + evp_cipher = adapter(self._backend, algorithm, CBC) + + ctx = self._backend._lib.CMAC_CTX_new() + + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.CMAC_CTX_free) + + key_ptr = self._backend._ffi.from_buffer(self._key) + res = self._backend._lib.CMAC_Init( + ctx, + key_ptr, + len(self._key), + evp_cipher, + self._backend._ffi.NULL, + ) + self._backend.openssl_assert(res == 1) + + self._ctx = ctx + + def update(self, data: bytes) -> None: + res = self._backend._lib.CMAC_Update(self._ctx, data, len(data)) + self._backend.openssl_assert(res == 1) + + def finalize(self) -> bytes: + buf = self._backend._ffi.new("unsigned char[]", self._output_length) + length = self._backend._ffi.new("size_t *", self._output_length) + res = self._backend._lib.CMAC_Final(self._ctx, buf, length) + self._backend.openssl_assert(res == 1) + + self._ctx = None + + return self._backend._ffi.buffer(buf)[:] + + def copy(self) -> "_CMACContext": + copied_ctx = self._backend._lib.CMAC_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.CMAC_CTX_free + ) + res = self._backend._lib.CMAC_CTX_copy(copied_ctx, self._ctx) + self._backend.openssl_assert(res == 1) + return _CMACContext(self._backend, self._algorithm, ctx=copied_ctx) + + def verify(self, signature: bytes) -> None: + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/decode_asn1.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/decode_asn1.py new file mode 100644 index 0000000..df91d6d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/decode_asn1.py @@ -0,0 +1,31 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
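+
+# The table below maps x509.ReasonFlags members to the integer codes of
+# RFC 5280's CRLReason ENUMERATED type, e.g. (illustrative):
+#     _CRL_ENTRY_REASON_ENUM_TO_CODE[x509.ReasonFlags.key_compromise]  # -> 1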
+
+
+from cryptography import x509
+
+# CRLReason ::= ENUMERATED {
+#     unspecified             (0),
+#     keyCompromise           (1),
+#     cACompromise            (2),
+#     affiliationChanged      (3),
+#     superseded              (4),
+#     cessationOfOperation    (5),
+#     certificateHold         (6),
+#     -- value 7 is not used
+#     removeFromCRL           (8),
+#     privilegeWithdrawn      (9),
+#     aACompromise           (10) }
+_CRL_ENTRY_REASON_ENUM_TO_CODE = {
+    x509.ReasonFlags.unspecified: 0,
+    x509.ReasonFlags.key_compromise: 1,
+    x509.ReasonFlags.ca_compromise: 2,
+    x509.ReasonFlags.affiliation_changed: 3,
+    x509.ReasonFlags.superseded: 4,
+    x509.ReasonFlags.cessation_of_operation: 5,
+    x509.ReasonFlags.certificate_hold: 6,
+    x509.ReasonFlags.remove_from_crl: 8,
+    x509.ReasonFlags.privilege_withdrawn: 9,
+    x509.ReasonFlags.aa_compromise: 10,
+}
diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dh.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dh.py
new file mode 100644
index 0000000..70364a3
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dh.py
@@ -0,0 +1,318 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import typing
+
+from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import dh
+
+
+if typing.TYPE_CHECKING:
+    from cryptography.hazmat.backends.openssl.backend import Backend
+
+
+def _dh_params_dup(dh_cdata, backend: "Backend"):
+    lib = backend._lib
+    ffi = backend._ffi
+
+    param_cdata = lib.DHparams_dup(dh_cdata)
+    backend.openssl_assert(param_cdata != ffi.NULL)
+    param_cdata = ffi.gc(param_cdata, lib.DH_free)
+    if lib.CRYPTOGRAPHY_IS_LIBRESSL:
+        # In LibreSSL DHparams_dup doesn't copy q
+        q = ffi.new("BIGNUM **")
+        lib.DH_get0_pqg(dh_cdata, ffi.NULL, q, ffi.NULL)
+        q_dup = lib.BN_dup(q[0])
+        res = lib.DH_set0_pqg(param_cdata, ffi.NULL, q_dup, ffi.NULL)
+        backend.openssl_assert(res == 1)
+
+    return param_cdata
+
+
+def _dh_cdata_to_parameters(dh_cdata, backend: "Backend") -> "_DHParameters":
+    param_cdata = _dh_params_dup(dh_cdata, backend)
+    return _DHParameters(backend, param_cdata)
+
+
+class _DHParameters(dh.DHParameters):
+    def __init__(self, backend: "Backend", dh_cdata):
+        self._backend = backend
+        self._dh_cdata = dh_cdata
+
+    def parameter_numbers(self) -> dh.DHParameterNumbers:
+        p = self._backend._ffi.new("BIGNUM **")
+        g = self._backend._ffi.new("BIGNUM **")
+        q = self._backend._ffi.new("BIGNUM **")
+        self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g)
+        self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
+        self._backend.openssl_assert(g[0] != self._backend._ffi.NULL)
+        q_val: typing.Optional[int]
+        if q[0] == self._backend._ffi.NULL:
+            q_val = None
+        else:
+            q_val = self._backend._bn_to_int(q[0])
+        return dh.DHParameterNumbers(
+            p=self._backend._bn_to_int(p[0]),
+            g=self._backend._bn_to_int(g[0]),
+            q=q_val,
+        )
+
+    def generate_private_key(self) -> dh.DHPrivateKey:
+        return self._backend.generate_dh_private_key(self)
+
+    def parameter_bytes(
+        self,
+        encoding: serialization.Encoding,
+        format: serialization.ParameterFormat,
+    ) -> bytes:
+        if encoding is serialization.Encoding.OpenSSH:
+            raise TypeError("OpenSSH encoding is not supported")
+
+        if format is not serialization.ParameterFormat.PKCS3:
+            raise ValueError("Only PKCS3 serialization is supported")
+
+        q =
self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg( + self._dh_cdata, self._backend._ffi.NULL, q, self._backend._ffi.NULL + ) + if ( + q[0] != self._backend._ffi.NULL + and not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX + ): + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION, + ) + + if encoding is serialization.Encoding.PEM: + if q[0] != self._backend._ffi.NULL: + write_bio = self._backend._lib.PEM_write_bio_DHxparams + else: + write_bio = self._backend._lib.PEM_write_bio_DHparams + elif encoding is serialization.Encoding.DER: + if q[0] != self._backend._ffi.NULL: + write_bio = self._backend._lib.Cryptography_i2d_DHxparams_bio + else: + write_bio = self._backend._lib.i2d_DHparams_bio + else: + raise TypeError("encoding must be an item from the Encoding enum") + + bio = self._backend._create_mem_bio_gc() + res = write_bio(bio, self._dh_cdata) + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + +def _get_dh_num_bits(backend, dh_cdata) -> int: + p = backend._ffi.new("BIGNUM **") + backend._lib.DH_get0_pqg(dh_cdata, p, backend._ffi.NULL, backend._ffi.NULL) + backend.openssl_assert(p[0] != backend._ffi.NULL) + return backend._lib.BN_num_bits(p[0]) + + +class _DHPrivateKey(dh.DHPrivateKey): + def __init__(self, backend: "Backend", dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bytes = self._backend._lib.DH_size(dh_cdata) + + @property + def key_size(self) -> int: + return _get_dh_num_bits(self._backend, self._dh_cdata) + + def private_numbers(self) -> dh.DHPrivateNumbers: + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dh.DHPrivateNumbers( + public_numbers=dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val, + ), + y=self._backend._bn_to_int(pub_key[0]), + ), + x=self._backend._bn_to_int(priv_key[0]), + ) + + def exchange(self, peer_public_key: dh.DHPublicKey) -> bytes: + if not isinstance(peer_public_key, _DHPublicKey): + raise TypeError("peer_public_key must be a DHPublicKey") + + ctx = self._backend._lib.EVP_PKEY_CTX_new( + self._evp_pkey, self._backend._ffi.NULL + ) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.EVP_PKEY_CTX_free) + res = self._backend._lib.EVP_PKEY_derive_init(ctx) + self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_PKEY_derive_set_peer( + ctx, peer_public_key._evp_pkey + ) + # Invalid kex errors here in OpenSSL 3.0 because checks were moved + # to EVP_PKEY_derive_set_peer + self._exchange_assert(res == 1) + keylen = self._backend._ffi.new("size_t *") + res = self._backend._lib.EVP_PKEY_derive( + ctx, self._backend._ffi.NULL, keylen + ) 
+ # Invalid kex errors here in OpenSSL < 3 + self._exchange_assert(res == 1) + self._backend.openssl_assert(keylen[0] > 0) + buf = self._backend._ffi.new("unsigned char[]", keylen[0]) + res = self._backend._lib.EVP_PKEY_derive(ctx, buf, keylen) + self._backend.openssl_assert(res == 1) + + key = self._backend._ffi.buffer(buf, keylen[0])[:] + pad = self._key_size_bytes - len(key) + + if pad > 0: + key = (b"\x00" * pad) + key + + return key + + def _exchange_assert(self, ok: bool) -> None: + if not ok: + errors_with_text = self._backend._consume_errors_with_text() + raise ValueError( + "Error computing shared key.", + errors_with_text, + ) + + def public_key(self) -> dh.DHPublicKey: + dh_cdata = _dh_params_dup(self._dh_cdata, self._backend) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key( + self._dh_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + self._backend.openssl_assert(pub_key_dup != self._backend._ffi.NULL) + + res = self._backend._lib.DH_set0_key( + dh_cdata, pub_key_dup, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dh_cdata_to_evp_pkey(dh_cdata) + return _DHPublicKey(self._backend, dh_cdata, evp_pkey) + + def parameters(self) -> dh.DHParameters: + return _dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + if format is not serialization.PrivateFormat.PKCS8: + raise ValueError( + "DH private keys support only PKCS8 serialization" + ) + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg( + self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL, + ) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION, + ) + + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self, + self._evp_pkey, + self._dh_cdata, + ) + + +class _DHPublicKey(dh.DHPublicKey): + def __init__(self, backend: "Backend", dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bits = _get_dh_num_bits(self._backend, self._dh_cdata) + + @property + def key_size(self) -> int: + return self._key_size_bits + + def public_numbers(self) -> dh.DHPublicNumbers: + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key( + self._dh_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val, + ), + y=self._backend._bn_to_int(pub_key[0]), + ) + + def parameters(self) -> dh.DHParameters: + return 
_dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if format is not serialization.PublicFormat.SubjectPublicKeyInfo: + raise ValueError( + "DH public keys support only " + "SubjectPublicKeyInfo serialization" + ) + + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg( + self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL, + ) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION, + ) + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dsa.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dsa.py new file mode 100644 index 0000000..8634b72 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/dsa.py @@ -0,0 +1,239 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import typing + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + dsa, + utils as asym_utils, +) + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +def _dsa_sig_sign( + backend: "Backend", private_key: "_DSAPrivateKey", data: bytes +) -> bytes: + sig_buf_len = backend._lib.DSA_size(private_key._dsa_cdata) + sig_buf = backend._ffi.new("unsigned char[]", sig_buf_len) + buflen = backend._ffi.new("unsigned int *") + + # The first parameter passed to DSA_sign is unused by OpenSSL but + # must be an integer. + res = backend._lib.DSA_sign( + 0, data, len(data), sig_buf, buflen, private_key._dsa_cdata + ) + backend.openssl_assert(res == 1) + backend.openssl_assert(buflen[0]) + + return backend._ffi.buffer(sig_buf)[: buflen[0]] + + +def _dsa_sig_verify( + backend: "Backend", + public_key: "_DSAPublicKey", + signature: bytes, + data: bytes, +) -> None: + # The first parameter passed to DSA_verify is unused by OpenSSL but + # must be an integer. 
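+    # DSA_verify returns 1 for a valid signature, 0 for an invalid one and a
+    # negative value on error, so anything other than 1 is treated as failure.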
+ res = backend._lib.DSA_verify( + 0, data, len(data), signature, len(signature), public_key._dsa_cdata + ) + + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +class _DSAParameters(dsa.DSAParameters): + def __init__(self, backend: "Backend", dsa_cdata): + self._backend = backend + self._dsa_cdata = dsa_cdata + + def parameter_numbers(self) -> dsa.DSAParameterNumbers: + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + return dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]), + ) + + def generate_private_key(self) -> dsa.DSAPrivateKey: + return self._backend.generate_dsa_private_key(self) + + +class _DSAPrivateKey(dsa.DSAPrivateKey): + _key_size: int + + def __init__(self, backend: "Backend", dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + + p = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + @property + def key_size(self) -> int: + return self._key_size + + def private_numbers(self) -> dsa.DSAPrivateNumbers: + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key(self._dsa_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dsa.DSAPrivateNumbers( + public_numbers=dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]), + ), + y=self._backend._bn_to_int(pub_key[0]), + ), + x=self._backend._bn_to_int(priv_key[0]), + ) + + def public_key(self) -> dsa.DSAPublicKey: + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + res = self._backend._lib.DSA_set0_key( + dsa_cdata, pub_key_dup, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dsa_cdata_to_evp_pkey(dsa_cdata) + return _DSAPublicKey(self._backend, dsa_cdata, evp_pkey) + + def parameters(self) -> dsa.DSAParameters: + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + 
self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self, + self._evp_pkey, + self._dsa_cdata, + ) + + def sign( + self, + data: bytes, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> bytes: + data, _ = _calculate_digest_and_algorithm(data, algorithm) + return _dsa_sig_sign(self._backend, self, data) + + +class _DSAPublicKey(dsa.DSAPublicKey): + _key_size: int + + def __init__(self, backend: "Backend", dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + p = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + @property + def key_size(self) -> int: + return self._key_size + + def public_numbers(self) -> dsa.DSAPublicNumbers: + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]), + ), + y=self._backend._bn_to_int(pub_key[0]), + ) + + def parameters(self) -> dsa.DSAParameters: + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def verify( + self, + signature: bytes, + data: bytes, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> None: + data, _ = _calculate_digest_and_algorithm(data, algorithm) + return _dsa_sig_verify(self._backend, self, signature, data) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ec.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ec.py new file mode 100644 index 0000000..9bc6dd3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ec.py @@ -0,0 +1,315 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
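+
+# Usage sketch (illustrative only): these classes back the key objects the
+# public asymmetric API hands out, e.g.:
+#
+#     from cryptography.hazmat.primitives import hashes
+#     from cryptography.hazmat.primitives.asymmetric import ec
+#     priv = ec.generate_private_key(ec.SECP256R1())
+#     sig = priv.sign(b"data", ec.ECDSA(hashes.SHA256()))
+#     priv.public_key().verify(sig, b"data", ec.ECDSA(hashes.SHA256()))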
+ +import typing + +from cryptography.exceptions import ( + InvalidSignature, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, + _evp_pkey_derive, +) +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import ec + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +def _check_signature_algorithm( + signature_algorithm: ec.EllipticCurveSignatureAlgorithm, +) -> None: + if not isinstance(signature_algorithm, ec.ECDSA): + raise UnsupportedAlgorithm( + "Unsupported elliptic curve signature algorithm.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + +def _ec_key_curve_sn(backend: "Backend", ec_key) -> str: + group = backend._lib.EC_KEY_get0_group(ec_key) + backend.openssl_assert(group != backend._ffi.NULL) + + nid = backend._lib.EC_GROUP_get_curve_name(group) + # The following check is to find EC keys with unnamed curves and raise + # an error for now. + if nid == backend._lib.NID_undef: + raise ValueError( + "ECDSA keys with explicit parameters are unsupported at this time" + ) + + # This is like the above check, but it also catches the case where you + # explicitly encoded a curve with the same parameters as a named curve. + # Don't do that. + if ( + not backend._lib.CRYPTOGRAPHY_IS_LIBRESSL + and backend._lib.EC_GROUP_get_asn1_flag(group) == 0 + ): + raise ValueError( + "ECDSA keys with explicit parameters are unsupported at this time" + ) + + curve_name = backend._lib.OBJ_nid2sn(nid) + backend.openssl_assert(curve_name != backend._ffi.NULL) + + sn = backend._ffi.string(curve_name).decode("ascii") + return sn + + +def _mark_asn1_named_ec_curve(backend: "Backend", ec_cdata): + """ + Set the named curve flag on the EC_KEY. This causes OpenSSL to + serialize EC keys along with their curve OID which makes + deserialization easier. 
+ """ + + backend._lib.EC_KEY_set_asn1_flag( + ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE + ) + + +def _check_key_infinity(backend: "Backend", ec_cdata) -> None: + point = backend._lib.EC_KEY_get0_public_key(ec_cdata) + backend.openssl_assert(point != backend._ffi.NULL) + group = backend._lib.EC_KEY_get0_group(ec_cdata) + backend.openssl_assert(group != backend._ffi.NULL) + if backend._lib.EC_POINT_is_at_infinity(group, point): + raise ValueError( + "Cannot load an EC public key where the point is at infinity" + ) + + +def _sn_to_elliptic_curve(backend: "Backend", sn: str) -> ec.EllipticCurve: + try: + return ec._CURVE_TYPES[sn]() + except KeyError: + raise UnsupportedAlgorithm( + "{} is not a supported elliptic curve".format(sn), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE, + ) + + +def _ecdsa_sig_sign( + backend: "Backend", private_key: "_EllipticCurvePrivateKey", data: bytes +) -> bytes: + max_size = backend._lib.ECDSA_size(private_key._ec_key) + backend.openssl_assert(max_size > 0) + + sigbuf = backend._ffi.new("unsigned char[]", max_size) + siglen_ptr = backend._ffi.new("unsigned int[]", 1) + res = backend._lib.ECDSA_sign( + 0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key + ) + backend.openssl_assert(res == 1) + return backend._ffi.buffer(sigbuf)[: siglen_ptr[0]] + + +def _ecdsa_sig_verify( + backend: "Backend", + public_key: "_EllipticCurvePublicKey", + signature: bytes, + data: bytes, +) -> None: + res = backend._lib.ECDSA_verify( + 0, data, len(data), signature, len(signature), public_key._ec_key + ) + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +class _EllipticCurvePrivateKey(ec.EllipticCurvePrivateKey): + def __init__(self, backend: "Backend", ec_key_cdata, evp_pkey): + self._backend = backend + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + _check_key_infinity(backend, ec_key_cdata) + + @property + def curve(self) -> ec.EllipticCurve: + return self._curve + + @property + def key_size(self) -> int: + return self.curve.key_size + + def exchange( + self, algorithm: ec.ECDH, peer_public_key: ec.EllipticCurvePublicKey + ) -> bytes: + if not ( + self._backend.elliptic_curve_exchange_algorithm_supported( + algorithm, self.curve + ) + ): + raise UnsupportedAlgorithm( + "This backend does not support the ECDH algorithm.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + + if peer_public_key.curve.name != self.curve.name: + raise ValueError( + "peer_public_key and self are not on the same curve" + ) + + return _evp_pkey_derive(self._backend, self._evp_pkey, peer_public_key) + + def public_key(self) -> ec.EllipticCurvePublicKey: + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + self._backend.openssl_assert(group != self._backend._ffi.NULL) + + curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group) + public_ec_key = self._backend._ec_key_new_by_curve_nid(curve_nid) + + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point) + self._backend.openssl_assert(res == 1) + + evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key) + + return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey) + + def private_numbers(self) -> ec.EllipticCurvePrivateNumbers: + bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key) + 
private_value = self._backend._bn_to_int(bn) + return ec.EllipticCurvePrivateNumbers( + private_value=private_value, + public_numbers=self.public_key().public_numbers(), + ) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self, + self._evp_pkey, + self._ec_key, + ) + + def sign( + self, + data: bytes, + signature_algorithm: ec.EllipticCurveSignatureAlgorithm, + ) -> bytes: + _check_signature_algorithm(signature_algorithm) + data, _ = _calculate_digest_and_algorithm( + data, + signature_algorithm.algorithm, + ) + return _ecdsa_sig_sign(self._backend, self, data) + + +class _EllipticCurvePublicKey(ec.EllipticCurvePublicKey): + def __init__(self, backend: "Backend", ec_key_cdata, evp_pkey): + self._backend = backend + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + _check_key_infinity(backend, ec_key_cdata) + + @property + def curve(self) -> ec.EllipticCurve: + return self._curve + + @property + def key_size(self) -> int: + return self.curve.key_size + + def public_numbers(self) -> ec.EllipticCurvePublicNumbers: + get_func, group = self._backend._ec_key_determine_group_get_func( + self._ec_key + ) + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + with self._backend._tmp_bn_ctx() as bn_ctx: + bn_x = self._backend._lib.BN_CTX_get(bn_ctx) + bn_y = self._backend._lib.BN_CTX_get(bn_ctx) + + res = get_func(group, point, bn_x, bn_y, bn_ctx) + self._backend.openssl_assert(res == 1) + + x = self._backend._bn_to_int(bn_x) + y = self._backend._bn_to_int(bn_y) + + return ec.EllipticCurvePublicNumbers(x=x, y=y, curve=self._curve) + + def _encode_point(self, format: serialization.PublicFormat) -> bytes: + if format is serialization.PublicFormat.CompressedPoint: + conversion = self._backend._lib.POINT_CONVERSION_COMPRESSED + else: + assert format is serialization.PublicFormat.UncompressedPoint + conversion = self._backend._lib.POINT_CONVERSION_UNCOMPRESSED + + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + self._backend.openssl_assert(group != self._backend._ffi.NULL) + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + with self._backend._tmp_bn_ctx() as bn_ctx: + buflen = self._backend._lib.EC_POINT_point2oct( + group, point, conversion, self._backend._ffi.NULL, 0, bn_ctx + ) + self._backend.openssl_assert(buflen > 0) + buf = self._backend._ffi.new("char[]", buflen) + res = self._backend._lib.EC_POINT_point2oct( + group, point, conversion, buf, buflen, bn_ctx + ) + self._backend.openssl_assert(buflen == res) + + return self._backend._ffi.buffer(buf)[:] + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if ( + encoding is serialization.Encoding.X962 + or format is serialization.PublicFormat.CompressedPoint + or format is serialization.PublicFormat.UncompressedPoint + ): + if encoding is not serialization.Encoding.X962 or format not in ( + serialization.PublicFormat.CompressedPoint, + serialization.PublicFormat.UncompressedPoint, + ): + raise ValueError( + "X962 encoding must be 
used with CompressedPoint or " + "UncompressedPoint format" + ) + + return self._encode_point(format) + else: + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def verify( + self, + signature: bytes, + data: bytes, + signature_algorithm: ec.EllipticCurveSignatureAlgorithm, + ) -> None: + _check_signature_algorithm(signature_algorithm) + data, _ = _calculate_digest_and_algorithm( + data, + signature_algorithm.algorithm, + ) + _ecdsa_sig_verify(self._backend, self, signature, data) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed25519.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed25519.py new file mode 100644 index 0000000..5cfdffb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed25519.py @@ -0,0 +1,155 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import typing + +from cryptography import exceptions +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.ed25519 import ( + Ed25519PrivateKey, + Ed25519PublicKey, + _ED25519_KEY_SIZE, + _ED25519_SIG_SIZE, +) + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _Ed25519PublicKey(Ed25519PublicKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw + or format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED25519_KEY_SIZE)[:] + + def verify(self, signature: bytes, data: bytes) -> None: + evp_md_ctx = self._backend._lib.EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestVerifyInit( + evp_md_ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._evp_pkey, + ) + self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_DigestVerify( + evp_md_ctx, signature, len(signature), data, len(data) + ) + if res != 1: + self._backend._consume_errors() + raise exceptions.InvalidSignature + + +class _Ed25519PrivateKey(Ed25519PrivateKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self) -> Ed25519PublicKey: + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res 
= self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + public_bytes = self._backend._ffi.buffer(buf)[:] + return self._backend.ed25519_load_public_bytes(public_bytes) + + def sign(self, data: bytes) -> bytes: + evp_md_ctx = self._backend._lib.EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + evp_md_ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._evp_pkey, + ) + self._backend.openssl_assert(res == 1) + buf = self._backend._ffi.new("unsigned char[]", _ED25519_SIG_SIZE) + buflen = self._backend._ffi.new("size_t *", len(buf)) + res = self._backend._lib.EVP_DigestSign( + evp_md_ctx, buf, buflen, data, len(data) + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_SIG_SIZE) + return self._backend._ffi.buffer(buf, buflen[0])[:] + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw + or encoding is not serialization.Encoding.Raw + or not isinstance( + encryption_algorithm, serialization.NoEncryption + ) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self, self._evp_pkey, None + ) + + def _raw_private_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED25519_KEY_SIZE)[:] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed448.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed448.py new file mode 100644 index 0000000..dad93c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/ed448.py @@ -0,0 +1,156 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
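The _Ed25519PrivateKey/_Ed25519PublicKey wrappers above are what the public ed25519 module dispatches to through the backend. A minimal round-trip sketch using only documented public API names (nothing backend-specific is assumed):

    from cryptography.hazmat.primitives.asymmetric import ed25519

    private_key = ed25519.Ed25519PrivateKey.generate()
    public_key = private_key.public_key()

    signature = private_key.sign(b"message")  # 64 bytes (_ED25519_SIG_SIZE)
    # EVP_DigestVerify above raises InvalidSignature on mismatch
    public_key.verify(signature, b"message")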
+ +import typing + +from cryptography import exceptions +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.ed448 import ( + Ed448PrivateKey, + Ed448PublicKey, +) + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + +_ED448_KEY_SIZE = 57 +_ED448_SIG_SIZE = 114 + + +class _Ed448PublicKey(Ed448PublicKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw + or format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED448_KEY_SIZE)[:] + + def verify(self, signature: bytes, data: bytes) -> None: + evp_md_ctx = self._backend._lib.EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestVerifyInit( + evp_md_ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._evp_pkey, + ) + self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_DigestVerify( + evp_md_ctx, signature, len(signature), data, len(data) + ) + if res != 1: + self._backend._consume_errors() + raise exceptions.InvalidSignature + + +class _Ed448PrivateKey(Ed448PrivateKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self) -> Ed448PublicKey: + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + public_bytes = self._backend._ffi.buffer(buf)[:] + return self._backend.ed448_load_public_bytes(public_bytes) + + def sign(self, data: bytes) -> bytes: + evp_md_ctx = self._backend._lib.EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + evp_md_ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._evp_pkey, + ) + self._backend.openssl_assert(res == 1) + buf = self._backend._ffi.new("unsigned char[]", _ED448_SIG_SIZE) + buflen = self._backend._ffi.new("size_t *", len(buf)) + res = self._backend._lib.EVP_DigestSign( + evp_md_ctx, buf, buflen, data, len(data) + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_SIG_SIZE) + return 
self._backend._ffi.buffer(buf, buflen[0])[:] + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw + or encoding is not serialization.Encoding.Raw + or not isinstance( + encryption_algorithm, serialization.NoEncryption + ) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self, self._evp_pkey, None + ) + + def _raw_private_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED448_KEY_SIZE)[:] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/encode_asn1.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/encode_asn1.py new file mode 100644 index 0000000..2f29d71 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/encode_asn1.py @@ -0,0 +1,18 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography import x509 + + +_CRLREASONFLAGS = { + x509.ReasonFlags.key_compromise: 1, + x509.ReasonFlags.ca_compromise: 2, + x509.ReasonFlags.affiliation_changed: 3, + x509.ReasonFlags.superseded: 4, + x509.ReasonFlags.cessation_of_operation: 5, + x509.ReasonFlags.certificate_hold: 6, + x509.ReasonFlags.privilege_withdrawn: 7, + x509.ReasonFlags.aa_compromise: 8, +} diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hashes.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hashes.py new file mode 100644 index 0000000..278b381 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hashes.py @@ -0,0 +1,87 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
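The Raw-serialization guard in the ed448 private_bytes/public_bytes above (encoding and format must both be Raw, with NoEncryption) mirrors the public API contract. A short sketch, assuming only the documented ed448 module:

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ed448

    key = ed448.Ed448PrivateKey.generate()
    raw = key.private_bytes(
        serialization.Encoding.Raw,
        serialization.PrivateFormat.Raw,
        serialization.NoEncryption(),
    )
    assert len(raw) == 57  # _ED448_KEY_SIZE
    restored = ed448.Ed448PrivateKey.from_private_bytes(raw)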
+ +import typing + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import hashes + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _HashContext(hashes.HashContext): + def __init__( + self, backend: "Backend", algorithm: hashes.HashAlgorithm, ctx=None + ) -> None: + self._algorithm = algorithm + + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.EVP_MD_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.EVP_MD_CTX_free + ) + evp_md = self._backend._evp_md_from_algorithm(algorithm) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{} is not a supported hash on this backend.".format( + algorithm.name + ), + _Reasons.UNSUPPORTED_HASH, + ) + res = self._backend._lib.EVP_DigestInit_ex( + ctx, evp_md, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + + @property + def algorithm(self) -> hashes.HashAlgorithm: + return self._algorithm + + def copy(self) -> "_HashContext": + copied_ctx = self._backend._lib.EVP_MD_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_MD_CTX_copy_ex(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HashContext(self._backend, self.algorithm, ctx=copied_ctx) + + def update(self, data: bytes) -> None: + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.EVP_DigestUpdate( + self._ctx, data_ptr, len(data) + ) + self._backend.openssl_assert(res != 0) + + def finalize(self) -> bytes: + if isinstance(self.algorithm, hashes.ExtendableOutputFunction): + # extendable output functions use a different finalize + return self._finalize_xof() + else: + buf = self._backend._ffi.new( + "unsigned char[]", self._backend._lib.EVP_MAX_MD_SIZE + ) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert( + outlen[0] == self.algorithm.digest_size + ) + return self._backend._ffi.buffer(buf)[: outlen[0]] + + def _finalize_xof(self) -> bytes: + buf = self._backend._ffi.new( + "unsigned char[]", self.algorithm.digest_size + ) + res = self._backend._lib.EVP_DigestFinalXOF( + self._ctx, buf, self.algorithm.digest_size + ) + self._backend.openssl_assert(res != 0) + return self._backend._ffi.buffer(buf)[: self.algorithm.digest_size] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hmac.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hmac.py new file mode 100644 index 0000000..5fd5407 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/hmac.py @@ -0,0 +1,85 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
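_HashContext above backs hashes.Hash: copy() duplicates the EVP_MD_CTX so a partially fed hash can be forked, and ExtendableOutputFunction algorithms take the _finalize_xof() branch. A sketch against the public hashes module:

    from cryptography.hazmat.primitives import hashes

    ctx = hashes.Hash(hashes.SHA256())
    ctx.update(b"hello ")
    branch = ctx.copy()        # EVP_MD_CTX_copy_ex under the hood
    ctx.update(b"world")
    branch.update(b"there")
    assert ctx.finalize() != branch.finalize()

    # XOFs carry an explicit digest size and use EVP_DigestFinalXOF
    xof = hashes.Hash(hashes.SHAKE256(digest_size=64))
    xof.update(b"data")
    assert len(xof.finalize()) == 64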
+ +import typing + +from cryptography.exceptions import ( + InvalidSignature, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.primitives import constant_time, hashes + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _HMACContext(hashes.HashContext): + def __init__( + self, + backend: "Backend", + key: bytes, + algorithm: hashes.HashAlgorithm, + ctx=None, + ): + self._algorithm = algorithm + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.HMAC_CTX_new() + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.HMAC_CTX_free) + evp_md = self._backend._evp_md_from_algorithm(algorithm) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{} is not a supported hash on this backend".format( + algorithm.name + ), + _Reasons.UNSUPPORTED_HASH, + ) + key_ptr = self._backend._ffi.from_buffer(key) + res = self._backend._lib.HMAC_Init_ex( + ctx, key_ptr, len(key), evp_md, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + self._key = key + + @property + def algorithm(self) -> hashes.HashAlgorithm: + return self._algorithm + + def copy(self) -> "_HMACContext": + copied_ctx = self._backend._lib.HMAC_CTX_new() + self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL) + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.HMAC_CTX_free + ) + res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HMACContext( + self._backend, self._key, self.algorithm, ctx=copied_ctx + ) + + def update(self, data: bytes) -> None: + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.HMAC_Update(self._ctx, data_ptr, len(data)) + self._backend.openssl_assert(res != 0) + + def finalize(self) -> bytes: + buf = self._backend._ffi.new( + "unsigned char[]", self._backend._lib.EVP_MAX_MD_SIZE + ) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) + return self._backend._ffi.buffer(buf)[: outlen[0]] + + def verify(self, signature: bytes) -> None: + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/poly1305.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/poly1305.py new file mode 100644 index 0000000..dd6d376 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/poly1305.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
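_HMACContext above powers hmac.HMAC; note that verify() compares with constant_time.bytes_eq rather than ==. A sketch with a fixed demo key (use a random secret in practice):

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.primitives import hashes, hmac

    key = b"\x00" * 32  # demo only
    h = hmac.HMAC(key, hashes.SHA256())
    h.update(b"payload")
    tag = h.finalize()

    check = hmac.HMAC(key, hashes.SHA256())
    check.update(b"tampered payload")
    try:
        check.verify(tag)  # constant-time compare, then raise
    except InvalidSignature:
        print("tag mismatch detected")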
+ +import typing + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives import constant_time + + +_POLY1305_TAG_SIZE = 16 +_POLY1305_KEY_SIZE = 32 + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +class _Poly1305Context: + def __init__(self, backend: "Backend", key: bytes) -> None: + self._backend = backend + + key_ptr = self._backend._ffi.from_buffer(key) + # This function copies the key into OpenSSL-owned memory so we don't + # need to retain it ourselves + evp_pkey = self._backend._lib.EVP_PKEY_new_raw_private_key( + self._backend._lib.NID_poly1305, + self._backend._ffi.NULL, + key_ptr, + len(key), + ) + self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL) + self._evp_pkey = self._backend._ffi.gc( + evp_pkey, self._backend._lib.EVP_PKEY_free + ) + ctx = self._backend._lib.EVP_MD_CTX_new() + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + self._ctx = self._backend._ffi.gc( + ctx, self._backend._lib.EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + self._ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._evp_pkey, + ) + self._backend.openssl_assert(res == 1) + + def update(self, data: bytes) -> None: + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.EVP_DigestSignUpdate( + self._ctx, data_ptr, len(data) + ) + self._backend.openssl_assert(res != 0) + + def finalize(self) -> bytes: + buf = self._backend._ffi.new("unsigned char[]", _POLY1305_TAG_SIZE) + outlen = self._backend._ffi.new("size_t *", _POLY1305_TAG_SIZE) + res = self._backend._lib.EVP_DigestSignFinal(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == _POLY1305_TAG_SIZE) + return self._backend._ffi.buffer(buf)[: outlen[0]] + + def verify(self, tag: bytes) -> None: + mac = self.finalize() + if not constant_time.bytes_eq(mac, tag): + raise InvalidSignature("Value did not match computed tag.") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/rsa.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/rsa.py new file mode 100644 index 0000000..20b643c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/rsa.py @@ -0,0 +1,567 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
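_Poly1305Context above is surfaced as cryptography.hazmat.primitives.poly1305.Poly1305. Because Poly1305 is a one-time authenticator, the 32-byte key must never be reused across messages; a sketch using the documented one-shot class methods:

    import os
    from cryptography.hazmat.primitives import poly1305

    key = os.urandom(32)  # _POLY1305_KEY_SIZE; must be single-use
    tag = poly1305.Poly1305.generate_tag(key, b"message")
    assert len(tag) == 16  # _POLY1305_TAG_SIZE
    # verify_tag raises InvalidSignature on mismatch
    poly1305.Poly1305.verify_tag(key, b"message", tag)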
+ + +import typing + +from cryptography.exceptions import ( + InvalidSignature, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + utils as asym_utils, +) +from cryptography.hazmat.primitives.asymmetric.padding import ( + AsymmetricPadding, + MGF1, + OAEP, + PKCS1v15, + PSS, + _Auto, + _DigestLength, + _MaxLength, + calculate_max_pss_salt_length, +) +from cryptography.hazmat.primitives.asymmetric.rsa import ( + RSAPrivateKey, + RSAPrivateNumbers, + RSAPublicKey, + RSAPublicNumbers, +) + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +def _get_rsa_pss_salt_length( + backend: "Backend", + pss: PSS, + key: typing.Union[RSAPrivateKey, RSAPublicKey], + hash_algorithm: hashes.HashAlgorithm, +) -> int: + salt = pss._salt_length + + if isinstance(salt, _MaxLength): + return calculate_max_pss_salt_length(key, hash_algorithm) + elif isinstance(salt, _DigestLength): + return hash_algorithm.digest_size + elif isinstance(salt, _Auto): + if isinstance(key, RSAPrivateKey): + raise ValueError( + "PSS salt length can only be set to AUTO when verifying" + ) + return backend._lib.RSA_PSS_SALTLEN_AUTO + else: + return salt + + +def _enc_dec_rsa( + backend: "Backend", + key: typing.Union["_RSAPrivateKey", "_RSAPublicKey"], + data: bytes, + padding: AsymmetricPadding, +) -> bytes: + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Padding must be an instance of AsymmetricPadding.") + + if isinstance(padding, PKCS1v15): + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, OAEP): + padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING + + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF, + ) + + if not backend.rsa_padding_supported(padding): + raise UnsupportedAlgorithm( + "This combination of padding and hash algorithm is not " + "supported by this backend.", + _Reasons.UNSUPPORTED_PADDING, + ) + + else: + raise UnsupportedAlgorithm( + "{} is not supported by this backend.".format(padding.name), + _Reasons.UNSUPPORTED_PADDING, + ) + + return _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding) + + +def _enc_dec_rsa_pkey_ctx( + backend: "Backend", + key: typing.Union["_RSAPrivateKey", "_RSAPublicKey"], + data: bytes, + padding_enum: int, + padding: AsymmetricPadding, +) -> bytes: + init: typing.Callable[[typing.Any], int] + crypt: typing.Callable[[typing.Any, typing.Any, int, bytes, int], int] + if isinstance(key, _RSAPublicKey): + init = backend._lib.EVP_PKEY_encrypt_init + crypt = backend._lib.EVP_PKEY_encrypt + else: + init = backend._lib.EVP_PKEY_decrypt_init + crypt = backend._lib.EVP_PKEY_decrypt + + pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init(pkey_ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) + backend.openssl_assert(res > 0) + buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(buf_size > 0) + if isinstance(padding, OAEP): + mgf1_md = backend._evp_md_non_null_from_algorithm( + padding._mgf._algorithm + ) + res = 
backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + oaep_md = backend._evp_md_non_null_from_algorithm(padding._algorithm) + res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md) + backend.openssl_assert(res > 0) + + if ( + isinstance(padding, OAEP) + and padding._label is not None + and len(padding._label) > 0 + ): + # set0_rsa_oaep_label takes ownership of the char * so we need to + # copy it into some new memory + labelptr = backend._lib.OPENSSL_malloc(len(padding._label)) + backend.openssl_assert(labelptr != backend._ffi.NULL) + backend._ffi.memmove(labelptr, padding._label, len(padding._label)) + res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label( + pkey_ctx, labelptr, len(padding._label) + ) + backend.openssl_assert(res == 1) + + outlen = backend._ffi.new("size_t *", buf_size) + buf = backend._ffi.new("unsigned char[]", buf_size) + # Everything from this line onwards is written with the goal of being as + # constant-time as is practical given the constraints of Python and our + # API. See Bleichenbacher's '98 attack on RSA, and its many many variants. + # As such, you should not attempt to change this (particularly to "clean it + # up") without understanding why it was written this way (see + # Chesterton's Fence), and without measuring to verify you have not + # introduced observable time differences. + res = crypt(pkey_ctx, buf, outlen, data, len(data)) + resbuf = backend._ffi.buffer(buf)[: outlen[0]] + backend._lib.ERR_clear_error() + if res <= 0: + raise ValueError("Encryption/decryption failed.") + return resbuf + + +def _rsa_sig_determine_padding( + backend: "Backend", + key: typing.Union["_RSAPrivateKey", "_RSAPublicKey"], + padding: AsymmetricPadding, + algorithm: typing.Optional[hashes.HashAlgorithm], +) -> int: + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Expected provider of AsymmetricPadding.") + + pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(pkey_size > 0) + + if isinstance(padding, PKCS1v15): + # Hash algorithm is ignored for PKCS1v15-padding, may be None. + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, PSS): + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF, + ) + + # PSS padding requires a hash algorithm + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + # Size of key in bytes - 2 is the maximum + # PSS signature length (salt length is checked later) + if pkey_size - algorithm.digest_size - 2 < 0: + raise ValueError( + "Digest too large for key size. Use a larger " + "key or different digest." + ) + + padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING + else: + raise UnsupportedAlgorithm( + "{} is not supported by this backend.".format(padding.name), + _Reasons.UNSUPPORTED_PADDING, + ) + + return padding_enum + + +# Hash algorithm can be absent (None) to initialize the context without setting +# any message digest algorithm. This is currently only valid for the PKCS1v15 +# padding type, where it means that the signature data is encoded/decoded +# as provided, without being wrapped in a DigestInfo structure. 
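+# For illustration, at the public-API level: the digest-set path is what
+# key.sign(digest, PKCS1v15(), Prehashed(SHA256())) ends up using, while the
+# None path is reachable via recover_data_from_signature(..., algorithm=None),
+# which hands back the encoded DigestInfo untouched.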
+def _rsa_sig_setup( + backend: "Backend", + padding: AsymmetricPadding, + algorithm: typing.Optional[hashes.HashAlgorithm], + key: typing.Union["_RSAPublicKey", "_RSAPrivateKey"], + init_func: typing.Callable[[typing.Any], int], +): + padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm) + pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init_func(pkey_ctx) + if res != 1: + errors = backend._consume_errors() + raise ValueError("Unable to sign/verify with this key", errors) + + if algorithm is not None: + evp_md = backend._evp_md_non_null_from_algorithm(algorithm) + res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md) + if res <= 0: + backend._consume_errors() + raise UnsupportedAlgorithm( + "{} is not supported by this backend for RSA signing.".format( + algorithm.name + ), + _Reasons.UNSUPPORTED_HASH, + ) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) + if res <= 0: + backend._consume_errors() + raise UnsupportedAlgorithm( + "{} is not supported for the RSA signature operation.".format( + padding.name + ), + _Reasons.UNSUPPORTED_PADDING, + ) + if isinstance(padding, PSS): + assert isinstance(algorithm, hashes.HashAlgorithm) + res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen( + pkey_ctx, + _get_rsa_pss_salt_length(backend, padding, key, algorithm), + ) + backend.openssl_assert(res > 0) + + mgf1_md = backend._evp_md_non_null_from_algorithm( + padding._mgf._algorithm + ) + res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + + return pkey_ctx + + +def _rsa_sig_sign( + backend: "Backend", + padding: AsymmetricPadding, + algorithm: hashes.HashAlgorithm, + private_key: "_RSAPrivateKey", + data: bytes, +) -> bytes: + pkey_ctx = _rsa_sig_setup( + backend, + padding, + algorithm, + private_key, + backend._lib.EVP_PKEY_sign_init, + ) + buflen = backend._ffi.new("size_t *") + res = backend._lib.EVP_PKEY_sign( + pkey_ctx, backend._ffi.NULL, buflen, data, len(data) + ) + backend.openssl_assert(res == 1) + buf = backend._ffi.new("unsigned char[]", buflen[0]) + res = backend._lib.EVP_PKEY_sign(pkey_ctx, buf, buflen, data, len(data)) + if res != 1: + errors = backend._consume_errors_with_text() + raise ValueError( + "Digest or salt length too long for key size. Use a larger key " + "or shorter salt length if you are specifying a PSS salt", + errors, + ) + + return backend._ffi.buffer(buf)[:] + + +def _rsa_sig_verify( + backend: "Backend", + padding: AsymmetricPadding, + algorithm: hashes.HashAlgorithm, + public_key: "_RSAPublicKey", + signature: bytes, + data: bytes, +) -> None: + pkey_ctx = _rsa_sig_setup( + backend, + padding, + algorithm, + public_key, + backend._lib.EVP_PKEY_verify_init, + ) + res = backend._lib.EVP_PKEY_verify( + pkey_ctx, signature, len(signature), data, len(data) + ) + # The previous call can return negative numbers in the event of an + # error. This is not a signature failure but we need to fail if it + # occurs. 
+ backend.openssl_assert(res >= 0) + if res == 0: + backend._consume_errors() + raise InvalidSignature + + +def _rsa_sig_recover( + backend: "Backend", + padding: AsymmetricPadding, + algorithm: typing.Optional[hashes.HashAlgorithm], + public_key: "_RSAPublicKey", + signature: bytes, +) -> bytes: + pkey_ctx = _rsa_sig_setup( + backend, + padding, + algorithm, + public_key, + backend._lib.EVP_PKEY_verify_recover_init, + ) + + # Attempt to keep the rest of the code in this function as constant/time + # as possible. See the comment in _enc_dec_rsa_pkey_ctx. Note that the + # buflen parameter is used even though its value may be undefined in the + # error case. Due to the tolerant nature of Python slicing this does not + # trigger any exceptions. + maxlen = backend._lib.EVP_PKEY_size(public_key._evp_pkey) + backend.openssl_assert(maxlen > 0) + buf = backend._ffi.new("unsigned char[]", maxlen) + buflen = backend._ffi.new("size_t *", maxlen) + res = backend._lib.EVP_PKEY_verify_recover( + pkey_ctx, buf, buflen, signature, len(signature) + ) + resbuf = backend._ffi.buffer(buf)[: buflen[0]] + backend._lib.ERR_clear_error() + # Assume that all parameter errors are handled during the setup phase and + # any error here is due to invalid signature. + if res != 1: + raise InvalidSignature + return resbuf + + +class _RSAPrivateKey(RSAPrivateKey): + _evp_pkey: object + _rsa_cdata: object + _key_size: int + + def __init__( + self, backend: "Backend", rsa_cdata, evp_pkey, _skip_check_key: bool + ): + res: int + # RSA_check_key is slower in OpenSSL 3.0.0 due to improved + # primality checking. In normal use this is unlikely to be a problem + # since users don't load new keys constantly, but for TESTING we've + # added an init arg that allows skipping the checks. You should not + # use this in production code unless you understand the consequences. + if not _skip_check_key: + res = backend._lib.RSA_check_key(rsa_cdata) + if res != 1: + errors = backend._consume_errors_with_text() + raise ValueError("Invalid private key", errors) + # 2 is prime and passes an RSA key check, so we also check + # if p and q are odd just to be safe. + p = backend._ffi.new("BIGNUM **") + q = backend._ffi.new("BIGNUM **") + backend._lib.RSA_get0_factors(rsa_cdata, p, q) + backend.openssl_assert(p[0] != backend._ffi.NULL) + backend.openssl_assert(q[0] != backend._ffi.NULL) + p_odd = backend._lib.BN_is_odd(p[0]) + q_odd = backend._lib.BN_is_odd(q[0]) + if p_odd != 1 or q_odd != 1: + errors = backend._consume_errors_with_text() + raise ValueError("Invalid private key", errors) + + # Blinding is on by default in many versions of OpenSSL, but let's + # just be conservative here. 
+ res = backend._lib.RSA_blinding_on(rsa_cdata, backend._ffi.NULL) + backend.openssl_assert(res == 1) + + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, + n, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + @property + def key_size(self) -> int: + return self._key_size + + def decrypt(self, ciphertext: bytes, padding: AsymmetricPadding) -> bytes: + key_size_bytes = (self.key_size + 7) // 8 + if key_size_bytes != len(ciphertext): + raise ValueError("Ciphertext length must be equal to key size.") + + return _enc_dec_rsa(self._backend, self, ciphertext, padding) + + def public_key(self) -> RSAPublicKey: + ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free) + evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx) + return _RSAPublicKey(self._backend, ctx, evp_pkey) + + def private_numbers(self) -> RSAPrivateNumbers: + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + d = self._backend._ffi.new("BIGNUM **") + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + dmp1 = self._backend._ffi.new("BIGNUM **") + dmq1 = self._backend._ffi.new("BIGNUM **") + iqmp = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(d[0] != self._backend._ffi.NULL) + self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend._lib.RSA_get0_crt_params( + self._rsa_cdata, dmp1, dmq1, iqmp + ) + self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL) + return RSAPrivateNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + d=self._backend._bn_to_int(d[0]), + dmp1=self._backend._bn_to_int(dmp1[0]), + dmq1=self._backend._bn_to_int(dmq1[0]), + iqmp=self._backend._bn_to_int(iqmp[0]), + public_numbers=RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ), + ) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self, + self._evp_pkey, + self._rsa_cdata, + ) + + def sign( + self, + data: bytes, + padding: AsymmetricPadding, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> bytes: + data, algorithm = _calculate_digest_and_algorithm(data, algorithm) + return _rsa_sig_sign(self._backend, padding, algorithm, self, data) + + +class _RSAPublicKey(RSAPublicKey): + _evp_pkey: object + _rsa_cdata: object + _key_size: int + + def __init__(self, backend: "Backend", rsa_cdata, evp_pkey): + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = 
self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, + n, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + @property + def key_size(self) -> int: + return self._key_size + + def encrypt(self, plaintext: bytes, padding: AsymmetricPadding) -> bytes: + return _enc_dec_rsa(self._backend, self, plaintext, padding) + + def public_numbers(self) -> RSAPublicNumbers: + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, e, self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + return RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ) + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, self._rsa_cdata + ) + + def verify( + self, + signature: bytes, + data: bytes, + padding: AsymmetricPadding, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> None: + data, algorithm = _calculate_digest_and_algorithm(data, algorithm) + _rsa_sig_verify( + self._backend, padding, algorithm, self, signature, data + ) + + def recover_data_from_signature( + self, + signature: bytes, + padding: AsymmetricPadding, + algorithm: typing.Optional[hashes.HashAlgorithm], + ) -> bytes: + if isinstance(algorithm, asym_utils.Prehashed): + raise TypeError( + "Prehashed is only supported in the sign and verify methods. " + "It cannot be used with recover_data_from_signature." + ) + return _rsa_sig_recover( + self._backend, padding, algorithm, self, signature + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/utils.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/utils.py new file mode 100644 index 0000000..3a70a58 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/utils.py @@ -0,0 +1,52 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +import typing + +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +def _evp_pkey_derive(backend: "Backend", evp_pkey, peer_public_key) -> bytes: + ctx = backend._lib.EVP_PKEY_CTX_new(evp_pkey, backend._ffi.NULL) + backend.openssl_assert(ctx != backend._ffi.NULL) + ctx = backend._ffi.gc(ctx, backend._lib.EVP_PKEY_CTX_free) + res = backend._lib.EVP_PKEY_derive_init(ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_derive_set_peer(ctx, peer_public_key._evp_pkey) + backend.openssl_assert(res == 1) + keylen = backend._ffi.new("size_t *") + res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen) + backend.openssl_assert(res == 1) + backend.openssl_assert(keylen[0] > 0) + buf = backend._ffi.new("unsigned char[]", keylen[0]) + res = backend._lib.EVP_PKEY_derive(ctx, buf, keylen) + if res != 1: + errors_with_text = backend._consume_errors_with_text() + raise ValueError("Error computing shared key.", errors_with_text) + + return backend._ffi.buffer(buf, keylen[0])[:] + + +def _calculate_digest_and_algorithm( + data: bytes, + algorithm: typing.Union[Prehashed, hashes.HashAlgorithm], +) -> typing.Tuple[bytes, hashes.HashAlgorithm]: + if not isinstance(algorithm, Prehashed): + hash_ctx = hashes.Hash(algorithm) + hash_ctx.update(data) + data = hash_ctx.finalize() + else: + algorithm = algorithm._algorithm + + if len(data) != algorithm.digest_size: + raise ValueError( + "The provided data must be the same length as the hash " + "algorithm's digest size." + ) + + return (data, algorithm) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x25519.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x25519.py new file mode 100644 index 0000000..f68501a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x25519.py @@ -0,0 +1,132 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
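_evp_pkey_derive above is the shared engine for every Diffie-Hellman-style exchange in this backend (ECDH, X25519, X448). A sketch via the public ec module:

    from cryptography.hazmat.primitives.asymmetric import ec

    alice = ec.generate_private_key(ec.SECP256R1())
    bob = ec.generate_private_key(ec.SECP256R1())

    # Both calls land in _evp_pkey_derive() and must agree.
    shared_ab = alice.exchange(ec.ECDH(), bob.public_key())
    shared_ba = bob.exchange(ec.ECDH(), alice.public_key())
    assert shared_ab == shared_ba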
+ +import typing + +from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PrivateKey, + X25519PublicKey, +) + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + + +_X25519_KEY_SIZE = 32 + + +class _X25519PublicKey(X25519PublicKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw + or format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self) -> bytes: + ucharpp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint( + self._evp_pkey, ucharpp + ) + self._backend.openssl_assert(res == 32) + self._backend.openssl_assert(ucharpp[0] != self._backend._ffi.NULL) + data = self._backend._ffi.gc( + ucharpp[0], self._backend._lib.OPENSSL_free + ) + return self._backend._ffi.buffer(data, res)[:] + + +class _X25519PrivateKey(X25519PrivateKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self) -> X25519PublicKey: + bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_PUBKEY_bio(bio, self._evp_pkey) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._lib.d2i_PUBKEY_bio( + bio, self._backend._ffi.NULL + ) + self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL) + evp_pkey = self._backend._ffi.gc( + evp_pkey, self._backend._lib.EVP_PKEY_free + ) + return _X25519PublicKey(self._backend, evp_pkey) + + def exchange(self, peer_public_key: X25519PublicKey) -> bytes: + if not isinstance(peer_public_key, X25519PublicKey): + raise TypeError("peer_public_key must be X25519PublicKey.") + + return _evp_pkey_derive(self._backend, self._evp_pkey, peer_public_key) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw + or encoding is not serialization.Encoding.Raw + or not isinstance( + encryption_algorithm, serialization.NoEncryption + ) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self, self._evp_pkey, None + ) + + def _raw_private_bytes(self) -> bytes: + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_private_key + # The trick we use here is serializing to a PKCS8 key and just + # using the last 32 bytes, which is the key itself. 
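+ # Concretely, the PKCS#8 blob is 48 bytes: a fixed 16-byte DER prefix
+ # (version INTEGER, X25519 AlgorithmIdentifier, OCTET STRING headers)
+ # followed by the 32-byte raw key, which is why the slice below works.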
+ bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_PKCS8PrivateKey_bio( + bio, + self._evp_pkey, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + 0, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + ) + self._backend.openssl_assert(res == 1) + pkcs8 = self._backend._read_mem_bio(bio) + self._backend.openssl_assert(len(pkcs8) == 48) + return pkcs8[-_X25519_KEY_SIZE:] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x448.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x448.py new file mode 100644 index 0000000..f45db56 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x448.py @@ -0,0 +1,117 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import typing + +from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.x448 import ( + X448PrivateKey, + X448PublicKey, +) + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.backend import Backend + +_X448_KEY_SIZE = 56 + + +class _X448PublicKey(X448PublicKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PublicFormat, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw + or format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _X448_KEY_SIZE)[:] + + +class _X448PrivateKey(X448PrivateKey): + def __init__(self, backend: "Backend", evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self) -> X448PublicKey: + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + public_bytes = self._backend._ffi.buffer(buf)[:] + return self._backend.x448_load_public_bytes(public_bytes) + + def exchange(self, peer_public_key: X448PublicKey) -> bytes: + if not isinstance(peer_public_key, X448PublicKey): + raise TypeError("peer_public_key must be X448PublicKey.") + + return _evp_pkey_derive(self._backend, self._evp_pkey, peer_public_key) + + def private_bytes( + self, + encoding: serialization.Encoding, + format: serialization.PrivateFormat, + encryption_algorithm: serialization.KeySerializationEncryption, + ) -> bytes: + if ( + encoding is serialization.Encoding.Raw + or format is serialization.PublicFormat.Raw + 
): + if ( + format is not serialization.PrivateFormat.Raw + or encoding is not serialization.Encoding.Raw + or not isinstance( + encryption_algorithm, serialization.NoEncryption + ) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self, self._evp_pkey, None + ) + + def _raw_private_bytes(self) -> bytes: + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _X448_KEY_SIZE)[:] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x509.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x509.py new file mode 100644 index 0000000..aa4ed10 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/backends/openssl/x509.py @@ -0,0 +1,45 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import warnings + +from cryptography import utils, x509 + + +# This exists for pyOpenSSL compatibility and SHOULD NOT BE USED +# WE WILL REMOVE THIS VERY SOON. +def _Certificate(backend, x509) -> x509.Certificate: # noqa: N802 + warnings.warn( + "This version of cryptography contains a temporary pyOpenSSL " + "fallback path. Upgrade pyOpenSSL now.", + utils.DeprecatedIn35, + ) + return backend._ossl2cert(x509) + + +# This exists for pyOpenSSL compatibility and SHOULD NOT BE USED +# WE WILL REMOVE THIS VERY SOON. +def _CertificateSigningRequest( # noqa: N802 + backend, x509_req +) -> x509.CertificateSigningRequest: + warnings.warn( + "This version of cryptography contains a temporary pyOpenSSL " + "fallback path. Upgrade pyOpenSSL now.", + utils.DeprecatedIn35, + ) + return backend._ossl2csr(x509_req) + + +# This exists for pyOpenSSL compatibility and SHOULD NOT BE USED +# WE WILL REMOVE THIS VERY SOON. +def _CertificateRevocationList( # noqa: N802 + backend, x509_crl +) -> x509.CertificateRevocationList: + warnings.warn( + "This version of cryptography contains a temporary pyOpenSSL " + "fallback path. Upgrade pyOpenSSL now.", + utils.DeprecatedIn35, + ) + return backend._ossl2crl(x509_crl) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/__init__.py new file mode 100644 index 0000000..b509336 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/__init__.py @@ -0,0 +1,3 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
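The X448 key above (like its X25519 sibling) exists almost entirely for exchange(). A round-trip sketch with the documented classes:

    from cryptography.hazmat.primitives.asymmetric import x25519, x448

    a = x25519.X25519PrivateKey.generate()
    b = x25519.X25519PrivateKey.generate()
    assert a.exchange(b.public_key()) == b.exchange(a.public_key())  # 32 bytes

    c = x448.X448PrivateKey.generate()
    d = x448.X448PrivateKey.generate()
    assert len(c.exchange(d.public_key())) == 56  # _X448_KEY_SIZE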
diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so new file mode 100755 index 0000000..dcb10a0 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so differ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so new file mode 100755 index 0000000..279aa7a Binary files /dev/null and b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust.abi3.so differ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/__init__.pyi b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/__init__.pyi new file mode 100644 index 0000000..393ee7a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/__init__.pyi @@ -0,0 +1,2 @@ +def check_pkcs7_padding(data: bytes) -> bool: ... +def check_ansix923_padding(data: bytes) -> bool: ... diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/asn1.pyi b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/asn1.pyi new file mode 100644 index 0000000..5579c4f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/asn1.pyi @@ -0,0 +1,12 @@ +import typing + +class TestCertificate: + not_after_tag: int + not_before_tag: int + issuer_value_tags: typing.List[int] + subject_value_tags: typing.List[int] + +def decode_dss_signature(signature: bytes) -> typing.Tuple[int, int]: ... +def encode_dss_signature(r: int, s: int) -> bytes: ... +def parse_spki_for_data(data: bytes) -> bytes: ... +def test_parse_certificate(data: bytes) -> TestCertificate: ... diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/ocsp.pyi b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/ocsp.pyi new file mode 100644 index 0000000..91b8495 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/ocsp.pyi @@ -0,0 +1,22 @@ +import typing + +from cryptography.hazmat.primitives.asymmetric.types import PRIVATE_KEY_TYPES +from cryptography.hazmat.primitives import hashes +from cryptography.x509 import Extension +from cryptography.x509.ocsp import ( + OCSPRequest, + OCSPRequestBuilder, + OCSPResponse, + OCSPResponseStatus, + OCSPResponseBuilder, +) + +def load_der_ocsp_request(data: bytes) -> OCSPRequest: ... +def load_der_ocsp_response(data: bytes) -> OCSPResponse: ... +def create_ocsp_request(builder: OCSPRequestBuilder) -> OCSPRequest: ... +def create_ocsp_response( + status: OCSPResponseStatus, + builder: typing.Optional[OCSPResponseBuilder], + private_key: typing.Optional[PRIVATE_KEY_TYPES], + hash_algorithm: typing.Optional[hashes.HashAlgorithm], +) -> OCSPResponse: ... diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/x509.pyi b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/x509.pyi new file mode 100644 index 0000000..79aeb63 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/_rust/x509.pyi @@ -0,0 +1,36 @@ +import datetime +import typing + +from cryptography import x509 +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric.types import PRIVATE_KEY_TYPES + +def load_pem_x509_certificate(data: bytes) -> x509.Certificate: ... 
+def load_der_x509_certificate(data: bytes) -> x509.Certificate: ... +def load_pem_x509_crl(data: bytes) -> x509.CertificateRevocationList: ... +def load_der_x509_crl(data: bytes) -> x509.CertificateRevocationList: ... +def load_pem_x509_csr(data: bytes) -> x509.CertificateSigningRequest: ... +def load_der_x509_csr(data: bytes) -> x509.CertificateSigningRequest: ... +def encode_name_bytes(name: x509.Name) -> bytes: ... +def encode_extension_value(extension: x509.ExtensionType) -> bytes: ... +def create_x509_certificate( + builder: x509.CertificateBuilder, + private_key: PRIVATE_KEY_TYPES, + hash_algorithm: typing.Optional[hashes.HashAlgorithm], +) -> x509.Certificate: ... +def create_x509_csr( + builder: x509.CertificateSigningRequestBuilder, + private_key: PRIVATE_KEY_TYPES, + hash_algorithm: typing.Optional[hashes.HashAlgorithm], +) -> x509.CertificateSigningRequest: ... +def create_x509_crl( + builder: x509.CertificateRevocationListBuilder, + private_key: PRIVATE_KEY_TYPES, + hash_algorithm: typing.Optional[hashes.HashAlgorithm], +) -> x509.CertificateRevocationList: ... + +class Sct: ... +class Certificate: ... +class RevokedCertificate: ... +class CertificateRevocationList: ... +class CertificateSigningRequest: ... diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/__init__.py new file mode 100644 index 0000000..b509336 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/__init__.py @@ -0,0 +1,3 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/_conditional.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/_conditional.py new file mode 100644 index 0000000..ed81b84 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/_conditional.py @@ -0,0 +1,367 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
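The .pyi stubs above describe the Rust-backed x509 parsers. A usage sketch, assuming a PEM-encoded certificate at the hypothetical path cert.pem:

    from cryptography import x509

    with open("cert.pem", "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())

    print(cert.subject.rfc4514_string())
    print(cert.not_valid_after)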
+ +import typing + + +def cryptography_has_ec2m() -> typing.List[str]: + return [ + "EC_POINT_get_affine_coordinates_GF2m", + ] + + +def cryptography_has_ssl3_method() -> typing.List[str]: + return [ + "SSLv3_method", + "SSLv3_client_method", + "SSLv3_server_method", + ] + + +def cryptography_has_110_verification_params() -> typing.List[str]: + return ["X509_CHECK_FLAG_NEVER_CHECK_SUBJECT"] + + +def cryptography_has_set_cert_cb() -> typing.List[str]: + return [ + "SSL_CTX_set_cert_cb", + "SSL_set_cert_cb", + ] + + +def cryptography_has_ssl_st() -> typing.List[str]: + return [ + "SSL_ST_BEFORE", + "SSL_ST_OK", + "SSL_ST_INIT", + "SSL_ST_RENEGOTIATE", + ] + + +def cryptography_has_tls_st() -> typing.List[str]: + return [ + "TLS_ST_BEFORE", + "TLS_ST_OK", + ] + + +def cryptography_has_scrypt() -> typing.List[str]: + return [ + "EVP_PBE_scrypt", + ] + + +def cryptography_has_evp_pkey_dhx() -> typing.List[str]: + return [ + "EVP_PKEY_DHX", + ] + + +def cryptography_has_mem_functions() -> typing.List[str]: + return [ + "Cryptography_CRYPTO_set_mem_functions", + ] + + +def cryptography_has_x509_store_ctx_get_issuer() -> typing.List[str]: + return [ + "X509_STORE_get_get_issuer", + "X509_STORE_set_get_issuer", + ] + + +def cryptography_has_ed448() -> typing.List[str]: + return [ + "EVP_PKEY_ED448", + "NID_ED448", + ] + + +def cryptography_has_ed25519() -> typing.List[str]: + return [ + "NID_ED25519", + "EVP_PKEY_ED25519", + ] + + +def cryptography_has_poly1305() -> typing.List[str]: + return [ + "NID_poly1305", + "EVP_PKEY_POLY1305", + ] + + +def cryptography_has_oneshot_evp_digest_sign_verify() -> typing.List[str]: + return [ + "EVP_DigestSign", + "EVP_DigestVerify", + ] + + +def cryptography_has_evp_digestfinal_xof() -> typing.List[str]: + return [ + "EVP_DigestFinalXOF", + ] + + +def cryptography_has_evp_pkey_get_set_tls_encodedpoint() -> typing.List[str]: + return [ + "EVP_PKEY_get1_tls_encodedpoint", + "EVP_PKEY_set1_tls_encodedpoint", + ] + + +def cryptography_has_fips() -> typing.List[str]: + return [ + "FIPS_mode_set", + "FIPS_mode", + ] + + +def cryptography_has_psk() -> typing.List[str]: + return [ + "SSL_CTX_use_psk_identity_hint", + "SSL_CTX_set_psk_server_callback", + "SSL_CTX_set_psk_client_callback", + ] + + +def cryptography_has_psk_tlsv13() -> typing.List[str]: + return [ + "SSL_CTX_set_psk_find_session_callback", + "SSL_CTX_set_psk_use_session_callback", + "Cryptography_SSL_SESSION_new", + "SSL_CIPHER_find", + "SSL_SESSION_set1_master_key", + "SSL_SESSION_set_cipher", + "SSL_SESSION_set_protocol_version", + ] + + +def cryptography_has_custom_ext() -> typing.List[str]: + return [ + "SSL_CTX_add_client_custom_ext", + "SSL_CTX_add_server_custom_ext", + "SSL_extension_supported", + ] + + +def cryptography_has_openssl_cleanup() -> typing.List[str]: + return [ + "OPENSSL_cleanup", + ] + + +def cryptography_has_tlsv13() -> typing.List[str]: + return [ + "TLS1_3_VERSION", + "SSL_OP_NO_TLSv1_3", + ] + + +def cryptography_has_tlsv13_functions() -> typing.List[str]: + return [ + "SSL_VERIFY_POST_HANDSHAKE", + "SSL_CTX_set_ciphersuites", + "SSL_verify_client_post_handshake", + "SSL_CTX_set_post_handshake_auth", + "SSL_set_post_handshake_auth", + "SSL_SESSION_get_max_early_data", + "SSL_write_early_data", + "SSL_read_early_data", + "SSL_CTX_set_max_early_data", + ] + + +def cryptography_has_keylog() -> typing.List[str]: + return [ + "SSL_CTX_set_keylog_callback", + "SSL_CTX_get_keylog_callback", + ] + + +def cryptography_has_raw_key() -> typing.List[str]: + return [ + 
"EVP_PKEY_new_raw_private_key", + "EVP_PKEY_new_raw_public_key", + "EVP_PKEY_get_raw_private_key", + "EVP_PKEY_get_raw_public_key", + ] + + +def cryptography_has_engine() -> typing.List[str]: + return [ + "ENGINE_by_id", + "ENGINE_init", + "ENGINE_finish", + "ENGINE_get_default_RAND", + "ENGINE_set_default_RAND", + "ENGINE_unregister_RAND", + "ENGINE_ctrl_cmd", + "ENGINE_free", + "ENGINE_get_name", + "Cryptography_add_osrandom_engine", + "ENGINE_ctrl_cmd_string", + "ENGINE_load_builtin_engines", + "ENGINE_load_private_key", + "ENGINE_load_public_key", + "SSL_CTX_set_client_cert_engine", + ] + + +def cryptography_has_verified_chain() -> typing.List[str]: + return [ + "SSL_get0_verified_chain", + ] + + +def cryptography_has_srtp() -> typing.List[str]: + return [ + "SSL_CTX_set_tlsext_use_srtp", + "SSL_set_tlsext_use_srtp", + "SSL_get_selected_srtp_profile", + ] + + +def cryptography_has_get_proto_version() -> typing.List[str]: + return [ + "SSL_CTX_get_min_proto_version", + "SSL_CTX_get_max_proto_version", + "SSL_get_min_proto_version", + "SSL_get_max_proto_version", + ] + + +def cryptography_has_providers() -> typing.List[str]: + return [ + "OSSL_PROVIDER_load", + "OSSL_PROVIDER_unload", + "ERR_LIB_PROV", + "PROV_R_WRONG_FINAL_BLOCK_LENGTH", + "PROV_R_BAD_DECRYPT", + ] + + +def cryptography_has_op_no_renegotiation() -> typing.List[str]: + return [ + "SSL_OP_NO_RENEGOTIATION", + ] + + +def cryptography_has_dtls_get_data_mtu() -> typing.List[str]: + return [ + "DTLS_get_data_mtu", + ] + + +def cryptography_has_300_fips() -> typing.List[str]: + return [ + "EVP_default_properties_is_fips_enabled", + "EVP_default_properties_enable_fips", + ] + + +def cryptography_has_ssl_cookie() -> typing.List[str]: + return [ + "SSL_OP_COOKIE_EXCHANGE", + "DTLSv1_listen", + "SSL_CTX_set_cookie_generate_cb", + "SSL_CTX_set_cookie_verify_cb", + ] + + +def cryptography_has_pkcs7_funcs() -> typing.List[str]: + return [ + "SMIME_write_PKCS7", + "PEM_write_bio_PKCS7_stream", + "PKCS7_sign_add_signer", + "PKCS7_final", + "PKCS7_verify", + "SMIME_read_PKCS7", + "PKCS7_get0_signers", + ] + + +def cryptography_has_bn_flags() -> typing.List[str]: + return [ + "BN_FLG_CONSTTIME", + "BN_set_flags", + "BN_prime_checks_for_size", + ] + + +def cryptography_has_evp_pkey_dh() -> typing.List[str]: + return [ + "EVP_PKEY_set1_DH", + ] + + +def cryptography_has_300_evp_cipher() -> typing.List[str]: + return ["EVP_CIPHER_fetch", "EVP_CIPHER_free"] + + +def cryptography_has_unexpected_eof_while_reading() -> typing.List[str]: + return ["SSL_R_UNEXPECTED_EOF_WHILE_READING"] + + +# This is a mapping of +# {condition: function-returning-names-dependent-on-that-condition} so we can +# loop over them and delete unsupported names at runtime. It will be removed +# when cffi supports #if in cdef. We use functions instead of just a dict of +# lists so we can use coverage to measure which are used. 
+CONDITIONAL_NAMES = { + "Cryptography_HAS_EC2M": cryptography_has_ec2m, + "Cryptography_HAS_SSL3_METHOD": cryptography_has_ssl3_method, + "Cryptography_HAS_110_VERIFICATION_PARAMS": ( + cryptography_has_110_verification_params + ), + "Cryptography_HAS_SET_CERT_CB": cryptography_has_set_cert_cb, + "Cryptography_HAS_SSL_ST": cryptography_has_ssl_st, + "Cryptography_HAS_TLS_ST": cryptography_has_tls_st, + "Cryptography_HAS_SCRYPT": cryptography_has_scrypt, + "Cryptography_HAS_EVP_PKEY_DHX": cryptography_has_evp_pkey_dhx, + "Cryptography_HAS_MEM_FUNCTIONS": cryptography_has_mem_functions, + "Cryptography_HAS_X509_STORE_CTX_GET_ISSUER": ( + cryptography_has_x509_store_ctx_get_issuer + ), + "Cryptography_HAS_ED448": cryptography_has_ed448, + "Cryptography_HAS_ED25519": cryptography_has_ed25519, + "Cryptography_HAS_POLY1305": cryptography_has_poly1305, + "Cryptography_HAS_ONESHOT_EVP_DIGEST_SIGN_VERIFY": ( + cryptography_has_oneshot_evp_digest_sign_verify + ), + "Cryptography_HAS_EVP_PKEY_get_set_tls_encodedpoint": ( + cryptography_has_evp_pkey_get_set_tls_encodedpoint + ), + "Cryptography_HAS_FIPS": cryptography_has_fips, + "Cryptography_HAS_PSK": cryptography_has_psk, + "Cryptography_HAS_PSK_TLSv1_3": cryptography_has_psk_tlsv13, + "Cryptography_HAS_CUSTOM_EXT": cryptography_has_custom_ext, + "Cryptography_HAS_OPENSSL_CLEANUP": cryptography_has_openssl_cleanup, + "Cryptography_HAS_TLSv1_3": cryptography_has_tlsv13, + "Cryptography_HAS_TLSv1_3_FUNCTIONS": cryptography_has_tlsv13_functions, + "Cryptography_HAS_KEYLOG": cryptography_has_keylog, + "Cryptography_HAS_RAW_KEY": cryptography_has_raw_key, + "Cryptography_HAS_EVP_DIGESTFINAL_XOF": ( + cryptography_has_evp_digestfinal_xof + ), + "Cryptography_HAS_ENGINE": cryptography_has_engine, + "Cryptography_HAS_VERIFIED_CHAIN": cryptography_has_verified_chain, + "Cryptography_HAS_SRTP": cryptography_has_srtp, + "Cryptography_HAS_GET_PROTO_VERSION": cryptography_has_get_proto_version, + "Cryptography_HAS_PROVIDERS": cryptography_has_providers, + "Cryptography_HAS_OP_NO_RENEGOTIATION": ( + cryptography_has_op_no_renegotiation + ), + "Cryptography_HAS_DTLS_GET_DATA_MTU": cryptography_has_dtls_get_data_mtu, + "Cryptography_HAS_300_FIPS": cryptography_has_300_fips, + "Cryptography_HAS_SSL_COOKIE": cryptography_has_ssl_cookie, + "Cryptography_HAS_PKCS7_FUNCS": cryptography_has_pkcs7_funcs, + "Cryptography_HAS_BN_FLAGS": cryptography_has_bn_flags, + "Cryptography_HAS_EVP_PKEY_DH": cryptography_has_evp_pkey_dh, + "Cryptography_HAS_300_EVP_CIPHER": cryptography_has_300_evp_cipher, + "Cryptography_HAS_UNEXPECTED_EOF_WHILE_READING": ( + cryptography_has_unexpected_eof_while_reading + ), +} diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/binding.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/binding.py new file mode 100644 index 0000000..a6fbc94 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/bindings/openssl/binding.py @@ -0,0 +1,230 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
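`CONDITIONAL_NAMES` drives `build_conditional_library` (defined below in `binding.py`): any name whose `Cryptography_HAS_*` flag is false for the linked OpenSSL is simply absent from `Binding.lib`. A sketch of probing that at runtime, under the assumption the package imports cleanly:

```python
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()
# The flag itself is always present as an int; the guarded constants and
# functions exist only when the flag is truthy for this OpenSSL build.
if getattr(binding.lib, "Cryptography_HAS_TLSv1_3", 0):
    print(binding.lib.SSL_OP_NO_TLSv1_3)
else:
    print(hasattr(binding.lib, "SSL_OP_NO_TLSv1_3"))  # False on such builds
```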
+ + +import threading +import types +import typing +import warnings + +import cryptography +from cryptography import utils +from cryptography.exceptions import InternalError +from cryptography.hazmat.bindings._openssl import ffi, lib +from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES + +_OpenSSLErrorWithText = typing.NamedTuple( + "_OpenSSLErrorWithText", + [("code", int), ("lib", int), ("reason", int), ("reason_text", bytes)], +) + + +class _OpenSSLError: + def __init__(self, code: int, lib: int, reason: int): + self._code = code + self._lib = lib + self._reason = reason + + def _lib_reason_match(self, lib: int, reason: int) -> bool: + return lib == self.lib and reason == self.reason + + @property + def code(self) -> int: + return self._code + + @property + def lib(self) -> int: + return self._lib + + @property + def reason(self) -> int: + return self._reason + + +def _consume_errors(lib) -> typing.List[_OpenSSLError]: + errors = [] + while True: + code: int = lib.ERR_get_error() + if code == 0: + break + + err_lib: int = lib.ERR_GET_LIB(code) + err_reason: int = lib.ERR_GET_REASON(code) + + errors.append(_OpenSSLError(code, err_lib, err_reason)) + + return errors + + +def _errors_with_text( + errors: typing.List[_OpenSSLError], +) -> typing.List[_OpenSSLErrorWithText]: + errors_with_text = [] + for err in errors: + buf = ffi.new("char[]", 256) + lib.ERR_error_string_n(err.code, buf, len(buf)) + err_text_reason: bytes = ffi.string(buf) + + errors_with_text.append( + _OpenSSLErrorWithText( + err.code, err.lib, err.reason, err_text_reason + ) + ) + + return errors_with_text + + +def _consume_errors_with_text(lib): + return _errors_with_text(_consume_errors(lib)) + + +def _openssl_assert( + lib, ok: bool, errors: typing.Optional[typing.List[_OpenSSLError]] = None +) -> None: + if not ok: + if errors is None: + errors = _consume_errors(lib) + errors_with_text = _errors_with_text(errors) + + raise InternalError( + "Unknown OpenSSL error. This error is commonly encountered when " + "another library is not cleaning up the OpenSSL error stack. If " + "you are using cryptography with another library that uses " + "OpenSSL try disabling it before reporting a bug. Otherwise " + "please file an issue at https://github.com/pyca/cryptography/" + "issues with information on how to reproduce " + "this. ({0!r})".format(errors_with_text), + errors_with_text, + ) + + +def build_conditional_library(lib, conditional_names): + conditional_lib = types.ModuleType("lib") + conditional_lib._original_lib = lib # type: ignore[attr-defined] + excluded_names = set() + for condition, names_cb in conditional_names.items(): + if not getattr(lib, condition): + excluded_names.update(names_cb()) + + for attr in dir(lib): + if attr not in excluded_names: + setattr(conditional_lib, attr, getattr(lib, attr)) + + return conditional_lib + + +class Binding: + """ + OpenSSL API wrapper. + """ + + lib: typing.ClassVar = None + ffi = ffi + _lib_loaded = False + _init_lock = threading.Lock() + _legacy_provider: typing.Any = None + _default_provider: typing.Any = None + + def __init__(self): + self._ensure_ffi_initialized() + + def _enable_fips(self) -> None: + # This function enables FIPS mode for OpenSSL 3.0.0 on installs that + # have the FIPS provider installed properly. 
+ _openssl_assert(self.lib, self.lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER) + self._base_provider = self.lib.OSSL_PROVIDER_load( + self.ffi.NULL, b"base" + ) + _openssl_assert(self.lib, self._base_provider != self.ffi.NULL) + self.lib._fips_provider = self.lib.OSSL_PROVIDER_load( + self.ffi.NULL, b"fips" + ) + _openssl_assert(self.lib, self.lib._fips_provider != self.ffi.NULL) + + res = self.lib.EVP_default_properties_enable_fips(self.ffi.NULL, 1) + _openssl_assert(self.lib, res == 1) + + @classmethod + def _register_osrandom_engine(cls): + # Clear any errors extant in the queue before we start. In many + # scenarios other things may be interacting with OpenSSL in the same + # process space and it has proven untenable to assume that they will + # reliably clear the error queue. Once we clear it here we will + # error on any subsequent unexpected item in the stack. + cls.lib.ERR_clear_error() + if cls.lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE: + result = cls.lib.Cryptography_add_osrandom_engine() + _openssl_assert(cls.lib, result in (1, 2)) + + @classmethod + def _ensure_ffi_initialized(cls): + with cls._init_lock: + if not cls._lib_loaded: + cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES) + cls._lib_loaded = True + cls._register_osrandom_engine() + # As of OpenSSL 3.0.0 we must register a legacy cipher provider + # to get RC2 (needed for junk asymmetric private key + # serialization), RC4, Blowfish, IDEA, SEED, etc. These things + # are ugly legacy, but we aren't going to get rid of them + # any time soon. + if cls.lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER: + cls._legacy_provider = cls.lib.OSSL_PROVIDER_load( + cls.ffi.NULL, b"legacy" + ) + _openssl_assert( + cls.lib, cls._legacy_provider != cls.ffi.NULL + ) + cls._default_provider = cls.lib.OSSL_PROVIDER_load( + cls.ffi.NULL, b"default" + ) + _openssl_assert( + cls.lib, cls._default_provider != cls.ffi.NULL + ) + + @classmethod + def init_static_locks(cls): + cls._ensure_ffi_initialized() + + +def _verify_openssl_version(lib): + if ( + lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 + and not lib.CRYPTOGRAPHY_IS_LIBRESSL + and not lib.CRYPTOGRAPHY_IS_BORINGSSL + ): + warnings.warn( + "OpenSSL version 1.1.0 is no longer supported by the OpenSSL " + "project, please upgrade. The next release of cryptography will " + "be the last to support compiling with OpenSSL 1.1.0.", + utils.DeprecatedIn37, + ) + + +def _verify_package_version(version): + # Occasionally we run into situations where the version of the Python + # package does not match the version of the shared object that is loaded. + # This may occur in environments where multiple versions of cryptography + # are installed and available in the python path. To avoid errors cropping + # up later this code checks that the currently imported package and the + # shared object that were loaded have the same version and raise an + # ImportError if they do not + so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION) + if version.encode("ascii") != so_package_version: + raise ImportError( + "The version of cryptography does not match the loaded " + "shared object. This can happen if you have multiple copies of " + "cryptography installed in your Python path. Please try creating " + "a new virtual environment to resolve this issue. 
" + "Loaded python version: {}, shared object version: {}".format( + version, so_package_version + ) + ) + + +_verify_package_version(cryptography.__version__) + +Binding.init_static_locks() + +_verify_openssl_version(Binding.lib) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/__init__.py new file mode 100644 index 0000000..b509336 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/__init__.py @@ -0,0 +1,3 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_asymmetric.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_asymmetric.py new file mode 100644 index 0000000..cdadbde --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_asymmetric.py @@ -0,0 +1,17 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc + + +# This exists to break an import cycle. It is normally accessible from the +# asymmetric padding module. + + +class AsymmetricPadding(metaclass=abc.ABCMeta): + @abc.abstractproperty + def name(self) -> str: + """ + A string naming this padding (e.g. "PSS", "PKCS1"). + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_cipheralgorithm.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_cipheralgorithm.py new file mode 100644 index 0000000..7f32204 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_cipheralgorithm.py @@ -0,0 +1,40 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import typing + + +# This exists to break an import cycle. It is normally accessible from the +# ciphers module. + + +class CipherAlgorithm(metaclass=abc.ABCMeta): + @abc.abstractproperty + def name(self) -> str: + """ + A string naming this mode (e.g. "AES", "Camellia"). + """ + + @abc.abstractproperty + def key_sizes(self) -> typing.FrozenSet[int]: + """ + Valid key sizes for this algorithm in bits + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + The size of the key being used as an integer in bits (e.g. 128, 256). + """ + + +class BlockCipherAlgorithm(metaclass=abc.ABCMeta): + key: bytes + + @abc.abstractproperty + def block_size(self) -> int: + """ + The size of a block as an integer in bits (e.g. 64, 128). + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_serialization.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_serialization.py new file mode 100644 index 0000000..160a6b8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/_serialization.py @@ -0,0 +1,55 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc + +from cryptography import utils + +# This exists to break an import cycle. These classes are normally accessible +# from the serialization module. 
+ + +class Encoding(utils.Enum): + PEM = "PEM" + DER = "DER" + OpenSSH = "OpenSSH" + Raw = "Raw" + X962 = "ANSI X9.62" + SMIME = "S/MIME" + + +class PrivateFormat(utils.Enum): + PKCS8 = "PKCS8" + TraditionalOpenSSL = "TraditionalOpenSSL" + Raw = "Raw" + OpenSSH = "OpenSSH" + + +class PublicFormat(utils.Enum): + SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1" + PKCS1 = "Raw PKCS#1" + OpenSSH = "OpenSSH" + Raw = "Raw" + CompressedPoint = "X9.62 Compressed Point" + UncompressedPoint = "X9.62 Uncompressed Point" + + +class ParameterFormat(utils.Enum): + PKCS3 = "PKCS3" + + +class KeySerializationEncryption(metaclass=abc.ABCMeta): + pass + + +class BestAvailableEncryption(KeySerializationEncryption): + def __init__(self, password: bytes): + if not isinstance(password, bytes) or len(password) == 0: + raise ValueError("Password must be 1 or more bytes.") + + self.password = password + + +class NoEncryption(KeySerializationEncryption): + pass diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/__init__.py new file mode 100644 index 0000000..b509336 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/__init__.py @@ -0,0 +1,3 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py new file mode 100644 index 0000000..2093ad4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py @@ -0,0 +1,250 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
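For reference, the `Encoding`/`PrivateFormat`/`KeySerializationEncryption` types defined above are re-exported from `cryptography.hazmat.primitives.serialization`. A minimal sketch of serializing a key with them; the passphrase is a placeholder:

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ed25519

key = ed25519.Ed25519PrivateKey.generate()
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    # BestAvailableEncryption rejects empty passwords, per the check above.
    encryption_algorithm=serialization.BestAvailableEncryption(b"passphrase"),
)
# serialization.NoEncryption() would emit the same key as unencrypted PKCS#8.
```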
+ + +import abc +import typing + +from cryptography.hazmat.primitives import _serialization + + +_MIN_MODULUS_SIZE = 512 + + +def generate_parameters( + generator: int, key_size: int, backend: typing.Any = None +) -> "DHParameters": + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.generate_dh_parameters(generator, key_size) + + +class DHParameterNumbers: + def __init__(self, p: int, g: int, q: typing.Optional[int] = None) -> None: + if not isinstance(p, int) or not isinstance(g, int): + raise TypeError("p and g must be integers") + if q is not None and not isinstance(q, int): + raise TypeError("q must be integer or None") + + if g < 2: + raise ValueError("DH generator must be 2 or greater") + + if p.bit_length() < _MIN_MODULUS_SIZE: + raise ValueError( + "p (modulus) must be at least {}-bit".format(_MIN_MODULUS_SIZE) + ) + + self._p = p + self._g = g + self._q = q + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DHParameterNumbers): + return NotImplemented + + return ( + self._p == other._p and self._g == other._g and self._q == other._q + ) + + def parameters(self, backend: typing.Any = None) -> "DHParameters": + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dh_parameter_numbers(self) + + @property + def p(self) -> int: + return self._p + + @property + def g(self) -> int: + return self._g + + @property + def q(self) -> typing.Optional[int]: + return self._q + + +class DHPublicNumbers: + def __init__(self, y: int, parameter_numbers: DHParameterNumbers) -> None: + if not isinstance(y, int): + raise TypeError("y must be an integer.") + + if not isinstance(parameter_numbers, DHParameterNumbers): + raise TypeError( + "parameters must be an instance of DHParameterNumbers." + ) + + self._y = y + self._parameter_numbers = parameter_numbers + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DHPublicNumbers): + return NotImplemented + + return ( + self._y == other._y + and self._parameter_numbers == other._parameter_numbers + ) + + def public_key(self, backend: typing.Any = None) -> "DHPublicKey": + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dh_public_numbers(self) + + @property + def y(self) -> int: + return self._y + + @property + def parameter_numbers(self) -> DHParameterNumbers: + return self._parameter_numbers + + +class DHPrivateNumbers: + def __init__(self, x: int, public_numbers: DHPublicNumbers) -> None: + if not isinstance(x, int): + raise TypeError("x must be an integer.") + + if not isinstance(public_numbers, DHPublicNumbers): + raise TypeError( + "public_numbers must be an instance of " "DHPublicNumbers." + ) + + self._x = x + self._public_numbers = public_numbers + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DHPrivateNumbers): + return NotImplemented + + return ( + self._x == other._x + and self._public_numbers == other._public_numbers + ) + + def private_key(self, backend: typing.Any = None) -> "DHPrivateKey": + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dh_private_numbers(self) + + @property + def public_numbers(self) -> DHPublicNumbers: + return self._public_numbers + + @property + def x(self) -> int: + return self._x + + +class DHParameters(metaclass=abc.ABCMeta): + @abc.abstractmethod + def generate_private_key(self) -> "DHPrivateKey": + """ + Generates and returns a DHPrivateKey. 
+ """ + + @abc.abstractmethod + def parameter_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.ParameterFormat, + ) -> bytes: + """ + Returns the parameters serialized as bytes. + """ + + @abc.abstractmethod + def parameter_numbers(self) -> DHParameterNumbers: + """ + Returns a DHParameterNumbers. + """ + + +DHParametersWithSerialization = DHParameters + + +class DHPublicKey(metaclass=abc.ABCMeta): + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self) -> DHParameters: + """ + The DHParameters object associated with this public key. + """ + + @abc.abstractmethod + def public_numbers(self) -> DHPublicNumbers: + """ + Returns a DHPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + +DHPublicKeyWithSerialization = DHPublicKey + + +class DHPrivateKey(metaclass=abc.ABCMeta): + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self) -> DHPublicKey: + """ + The DHPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self) -> DHParameters: + """ + The DHParameters object associated with this private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key: DHPublicKey) -> bytes: + """ + Given peer's DHPublicKey, carry out the key exchange and + return shared key as bytes. + """ + + @abc.abstractmethod + def private_numbers(self) -> DHPrivateNumbers: + """ + Returns a DHPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + +DHPrivateKeyWithSerialization = DHPrivateKey diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dsa.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dsa.py new file mode 100644 index 0000000..5e58709 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/dsa.py @@ -0,0 +1,288 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing + +from cryptography.hazmat.primitives import _serialization, hashes +from cryptography.hazmat.primitives.asymmetric import ( + utils as asym_utils, +) + + +class DSAParameters(metaclass=abc.ABCMeta): + @abc.abstractmethod + def generate_private_key(self) -> "DSAPrivateKey": + """ + Generates and returns a DSAPrivateKey. + """ + + @abc.abstractmethod + def parameter_numbers(self) -> "DSAParameterNumbers": + """ + Returns a DSAParameterNumbers. + """ + + +DSAParametersWithNumbers = DSAParameters + + +class DSAPrivateKey(metaclass=abc.ABCMeta): + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self) -> "DSAPublicKey": + """ + The DSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self) -> DSAParameters: + """ + The DSAParameters object associated with this private key. 
+ """ + + @abc.abstractmethod + def sign( + self, + data: bytes, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> bytes: + """ + Signs the data + """ + + @abc.abstractmethod + def private_numbers(self) -> "DSAPrivateNumbers": + """ + Returns a DSAPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + +DSAPrivateKeyWithSerialization = DSAPrivateKey + + +class DSAPublicKey(metaclass=abc.ABCMeta): + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self) -> DSAParameters: + """ + The DSAParameters object associated with this public key. + """ + + @abc.abstractmethod + def public_numbers(self) -> "DSAPublicNumbers": + """ + Returns a DSAPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify( + self, + signature: bytes, + data: bytes, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> None: + """ + Verifies the signature of the data. + """ + + +DSAPublicKeyWithSerialization = DSAPublicKey + + +class DSAParameterNumbers: + def __init__(self, p: int, q: int, g: int): + if ( + not isinstance(p, int) + or not isinstance(q, int) + or not isinstance(g, int) + ): + raise TypeError( + "DSAParameterNumbers p, q, and g arguments must be integers." + ) + + self._p = p + self._q = q + self._g = g + + @property + def p(self) -> int: + return self._p + + @property + def q(self) -> int: + return self._q + + @property + def g(self) -> int: + return self._g + + def parameters(self, backend: typing.Any = None) -> DSAParameters: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dsa_parameter_numbers(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DSAParameterNumbers): + return NotImplemented + + return self.p == other.p and self.q == other.q and self.g == other.g + + def __repr__(self) -> str: + return ( + "".format(self=self) + ) + + +class DSAPublicNumbers: + def __init__(self, y: int, parameter_numbers: DSAParameterNumbers): + if not isinstance(y, int): + raise TypeError("DSAPublicNumbers y argument must be an integer.") + + if not isinstance(parameter_numbers, DSAParameterNumbers): + raise TypeError( + "parameter_numbers must be a DSAParameterNumbers instance." 
+ ) + + self._y = y + self._parameter_numbers = parameter_numbers + + @property + def y(self) -> int: + return self._y + + @property + def parameter_numbers(self) -> DSAParameterNumbers: + return self._parameter_numbers + + def public_key(self, backend: typing.Any = None) -> DSAPublicKey: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dsa_public_numbers(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DSAPublicNumbers): + return NotImplemented + + return ( + self.y == other.y + and self.parameter_numbers == other.parameter_numbers + ) + + def __repr__(self) -> str: + return ( + "<DSAPublicNumbers(y={self.y}, " + "parameter_numbers={self.parameter_numbers})>".format(self=self) + ) + + +class DSAPrivateNumbers: + def __init__(self, x: int, public_numbers: DSAPublicNumbers): + if not isinstance(x, int): + raise TypeError("DSAPrivateNumbers x argument must be an integer.") + + if not isinstance(public_numbers, DSAPublicNumbers): + raise TypeError( + "public_numbers must be a DSAPublicNumbers instance." + ) + self._public_numbers = public_numbers + self._x = x + + @property + def x(self) -> int: + return self._x + + @property + def public_numbers(self) -> DSAPublicNumbers: + return self._public_numbers + + def private_key(self, backend: typing.Any = None) -> DSAPrivateKey: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_dsa_private_numbers(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DSAPrivateNumbers): + return NotImplemented + + return ( + self.x == other.x and self.public_numbers == other.public_numbers + ) + + +def generate_parameters( + key_size: int, backend: typing.Any = None +) -> DSAParameters: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.generate_dsa_parameters(key_size) + + +def generate_private_key( + key_size: int, backend: typing.Any = None +) -> DSAPrivateKey: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.generate_dsa_private_key_and_parameters(key_size) + + +def _check_dsa_parameters(parameters: DSAParameterNumbers) -> None: + if parameters.p.bit_length() not in [1024, 2048, 3072, 4096]: + raise ValueError( + "p must be exactly 1024, 2048, 3072, or 4096 bits long" + ) + if parameters.q.bit_length() not in [160, 224, 256]: + raise ValueError("q must be exactly 160, 224, or 256 bits long") + + if not (1 < parameters.g < parameters.p): + raise ValueError("g, p don't satisfy 1 < g < p.") + + +def _check_dsa_private_numbers(numbers: DSAPrivateNumbers) -> None: + parameters = numbers.public_numbers.parameter_numbers + _check_dsa_parameters(parameters) + if numbers.x <= 0 or numbers.x >= parameters.q: + raise ValueError("x must be > 0 and < q.") + + if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p): + raise ValueError("y must be equal to (g ** x % p).") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ec.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ec.py new file mode 100644 index 0000000..3aaa382 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ec.py @@ -0,0 +1,523 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details.
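A short end-to-end sketch of the DSA interfaces declared above, using only the public module-level helpers:

```python
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa

private_key = dsa.generate_private_key(key_size=2048)
signature = private_key.sign(b"data to sign", hashes.SHA256())

# verify() returns None on success and raises InvalidSignature otherwise.
try:
    private_key.public_key().verify(signature, b"data to sign", hashes.SHA256())
except InvalidSignature:
    print("bad signature")
```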
+ + +import abc +import typing +import warnings + +from cryptography import utils +from cryptography.hazmat._oid import ObjectIdentifier +from cryptography.hazmat.primitives import _serialization, hashes +from cryptography.hazmat.primitives.asymmetric import ( + utils as asym_utils, +) + + +class EllipticCurveOID: + SECP192R1 = ObjectIdentifier("1.2.840.10045.3.1.1") + SECP224R1 = ObjectIdentifier("1.3.132.0.33") + SECP256K1 = ObjectIdentifier("1.3.132.0.10") + SECP256R1 = ObjectIdentifier("1.2.840.10045.3.1.7") + SECP384R1 = ObjectIdentifier("1.3.132.0.34") + SECP521R1 = ObjectIdentifier("1.3.132.0.35") + BRAINPOOLP256R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.7") + BRAINPOOLP384R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.11") + BRAINPOOLP512R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.13") + SECT163K1 = ObjectIdentifier("1.3.132.0.1") + SECT163R2 = ObjectIdentifier("1.3.132.0.15") + SECT233K1 = ObjectIdentifier("1.3.132.0.26") + SECT233R1 = ObjectIdentifier("1.3.132.0.27") + SECT283K1 = ObjectIdentifier("1.3.132.0.16") + SECT283R1 = ObjectIdentifier("1.3.132.0.17") + SECT409K1 = ObjectIdentifier("1.3.132.0.36") + SECT409R1 = ObjectIdentifier("1.3.132.0.37") + SECT571K1 = ObjectIdentifier("1.3.132.0.38") + SECT571R1 = ObjectIdentifier("1.3.132.0.39") + + +class EllipticCurve(metaclass=abc.ABCMeta): + @abc.abstractproperty + def name(self) -> str: + """ + The name of the curve. e.g. secp256r1. + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + Bit size of a secret scalar for the curve. + """ + + +class EllipticCurveSignatureAlgorithm(metaclass=abc.ABCMeta): + @abc.abstractproperty + def algorithm( + self, + ) -> typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm]: + """ + The digest algorithm used with this signature. + """ + + +class EllipticCurvePrivateKey(metaclass=abc.ABCMeta): + @abc.abstractmethod + def exchange( + self, algorithm: "ECDH", peer_public_key: "EllipticCurvePublicKey" + ) -> bytes: + """ + Performs a key exchange operation using the provided algorithm with the + provided peer's public key. + """ + + @abc.abstractmethod + def public_key(self) -> "EllipticCurvePublicKey": + """ + The EllipticCurvePublicKey for this private key. + """ + + @abc.abstractproperty + def curve(self) -> EllipticCurve: + """ + The EllipticCurve that this key is on. + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + Bit size of a secret scalar for the curve. + """ + + @abc.abstractmethod + def sign( + self, + data: bytes, + signature_algorithm: EllipticCurveSignatureAlgorithm, + ) -> bytes: + """ + Signs the data + """ + + @abc.abstractmethod + def private_numbers(self) -> "EllipticCurvePrivateNumbers": + """ + Returns an EllipticCurvePrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + +EllipticCurvePrivateKeyWithSerialization = EllipticCurvePrivateKey + + +class EllipticCurvePublicKey(metaclass=abc.ABCMeta): + @abc.abstractproperty + def curve(self) -> EllipticCurve: + """ + The EllipticCurve that this key is on. + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + Bit size of a secret scalar for the curve. + """ + + @abc.abstractmethod + def public_numbers(self) -> "EllipticCurvePublicNumbers": + """ + Returns an EllipticCurvePublicNumbers. 
+ """ + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify( + self, + signature: bytes, + data: bytes, + signature_algorithm: EllipticCurveSignatureAlgorithm, + ) -> None: + """ + Verifies the signature of the data. + """ + + @classmethod + def from_encoded_point( + cls, curve: EllipticCurve, data: bytes + ) -> "EllipticCurvePublicKey": + utils._check_bytes("data", data) + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must be an EllipticCurve instance") + + if len(data) == 0: + raise ValueError("data must not be an empty byte string") + + if data[0] not in [0x02, 0x03, 0x04]: + raise ValueError("Unsupported elliptic curve point type") + + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.load_elliptic_curve_public_bytes(curve, data) + + +EllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey + + +class SECT571R1(EllipticCurve): + name = "sect571r1" + key_size = 570 + + +class SECT409R1(EllipticCurve): + name = "sect409r1" + key_size = 409 + + +class SECT283R1(EllipticCurve): + name = "sect283r1" + key_size = 283 + + +class SECT233R1(EllipticCurve): + name = "sect233r1" + key_size = 233 + + +class SECT163R2(EllipticCurve): + name = "sect163r2" + key_size = 163 + + +class SECT571K1(EllipticCurve): + name = "sect571k1" + key_size = 571 + + +class SECT409K1(EllipticCurve): + name = "sect409k1" + key_size = 409 + + +class SECT283K1(EllipticCurve): + name = "sect283k1" + key_size = 283 + + +class SECT233K1(EllipticCurve): + name = "sect233k1" + key_size = 233 + + +class SECT163K1(EllipticCurve): + name = "sect163k1" + key_size = 163 + + +class SECP521R1(EllipticCurve): + name = "secp521r1" + key_size = 521 + + +class SECP384R1(EllipticCurve): + name = "secp384r1" + key_size = 384 + + +class SECP256R1(EllipticCurve): + name = "secp256r1" + key_size = 256 + + +class SECP256K1(EllipticCurve): + name = "secp256k1" + key_size = 256 + + +class SECP224R1(EllipticCurve): + name = "secp224r1" + key_size = 224 + + +class SECP192R1(EllipticCurve): + name = "secp192r1" + key_size = 192 + + +class BrainpoolP256R1(EllipticCurve): + name = "brainpoolP256r1" + key_size = 256 + + +class BrainpoolP384R1(EllipticCurve): + name = "brainpoolP384r1" + key_size = 384 + + +class BrainpoolP512R1(EllipticCurve): + name = "brainpoolP512r1" + key_size = 512 + + +_CURVE_TYPES: typing.Dict[str, typing.Type[EllipticCurve]] = { + "prime192v1": SECP192R1, + "prime256v1": SECP256R1, + "secp192r1": SECP192R1, + "secp224r1": SECP224R1, + "secp256r1": SECP256R1, + "secp384r1": SECP384R1, + "secp521r1": SECP521R1, + "secp256k1": SECP256K1, + "sect163k1": SECT163K1, + "sect233k1": SECT233K1, + "sect283k1": SECT283K1, + "sect409k1": SECT409K1, + "sect571k1": SECT571K1, + "sect163r2": SECT163R2, + "sect233r1": SECT233R1, + "sect283r1": SECT283R1, + "sect409r1": SECT409R1, + "sect571r1": SECT571R1, + "brainpoolP256r1": BrainpoolP256R1, + "brainpoolP384r1": BrainpoolP384R1, + "brainpoolP512r1": BrainpoolP512R1, +} + + +class ECDSA(EllipticCurveSignatureAlgorithm): + def __init__( + self, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ): + self._algorithm = algorithm + + @property + def algorithm( + self, + ) -> typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm]: + return self._algorithm + + +def generate_private_key( + curve: EllipticCurve, backend: typing.Any = None 
+) -> EllipticCurvePrivateKey: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.generate_elliptic_curve_private_key(curve) + + +def derive_private_key( + private_value: int, + curve: EllipticCurve, + backend: typing.Any = None, +) -> EllipticCurvePrivateKey: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + if not isinstance(private_value, int): + raise TypeError("private_value must be an integer type.") + + if private_value <= 0: + raise ValueError("private_value must be a positive integer.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + return ossl.derive_elliptic_curve_private_key(private_value, curve) + + +class EllipticCurvePublicNumbers: + def __init__(self, x: int, y: int, curve: EllipticCurve): + if not isinstance(x, int) or not isinstance(y, int): + raise TypeError("x and y must be integers.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + self._y = y + self._x = x + self._curve = curve + + def public_key(self, backend: typing.Any = None) -> EllipticCurvePublicKey: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_elliptic_curve_public_numbers(self) + + def encode_point(self) -> bytes: + warnings.warn( + "encode_point has been deprecated on EllipticCurvePublicNumbers" + " and will be removed in a future version. Please use " + "EllipticCurvePublicKey.public_bytes to obtain both " + "compressed and uncompressed point encoding.", + utils.PersistentlyDeprecated2019, + stacklevel=2, + ) + # key_size is in bits. Convert to bytes and round up + byte_length = (self.curve.key_size + 7) // 8 + return ( + b"\x04" + + utils.int_to_bytes(self.x, byte_length) + + utils.int_to_bytes(self.y, byte_length) + ) + + @classmethod + def from_encoded_point( + cls, curve: EllipticCurve, data: bytes + ) -> "EllipticCurvePublicNumbers": + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must be an EllipticCurve instance") + + warnings.warn( + "Support for unsafe construction of public numbers from " + "encoded data will be removed in a future version. " + "Please use EllipticCurvePublicKey.from_encoded_point", + utils.PersistentlyDeprecated2019, + stacklevel=2, + ) + + if data.startswith(b"\x04"): + # key_size is in bits. 
Convert to bytes and round up + byte_length = (curve.key_size + 7) // 8 + if len(data) == 2 * byte_length + 1: + x = int.from_bytes(data[1 : byte_length + 1], "big") + y = int.from_bytes(data[byte_length + 1 :], "big") + return cls(x, y, curve) + else: + raise ValueError("Invalid elliptic curve point data length") + else: + raise ValueError("Unsupported elliptic curve point type") + + @property + def curve(self) -> EllipticCurve: + return self._curve + + @property + def x(self) -> int: + return self._x + + @property + def y(self) -> int: + return self._y + + def __eq__(self, other: object) -> bool: + if not isinstance(other, EllipticCurvePublicNumbers): + return NotImplemented + + return ( + self.x == other.x + and self.y == other.y + and self.curve.name == other.curve.name + and self.curve.key_size == other.curve.key_size + ) + + def __hash__(self) -> int: + return hash((self.x, self.y, self.curve.name, self.curve.key_size)) + + def __repr__(self) -> str: + return ( + "<EllipticCurvePublicNumbers(curve={0.curve.name}, x={0.x}, " + "y={0.y})>".format(self) + ) + + +class EllipticCurvePrivateNumbers: + def __init__( + self, private_value: int, public_numbers: EllipticCurvePublicNumbers + ): + if not isinstance(private_value, int): + raise TypeError("private_value must be an integer.") + + if not isinstance(public_numbers, EllipticCurvePublicNumbers): + raise TypeError( + "public_numbers must be an EllipticCurvePublicNumbers " + "instance." + ) + + self._private_value = private_value + self._public_numbers = public_numbers + + def private_key( + self, backend: typing.Any = None + ) -> EllipticCurvePrivateKey: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.load_elliptic_curve_private_numbers(self) + + @property + def private_value(self) -> int: + return self._private_value + + @property + def public_numbers(self) -> EllipticCurvePublicNumbers: + return self._public_numbers + + def __eq__(self, other: object) -> bool: + if not isinstance(other, EllipticCurvePrivateNumbers): + return NotImplemented + + return ( + self.private_value == other.private_value + and self.public_numbers == other.public_numbers + ) + + def __hash__(self) -> int: + return hash((self.private_value, self.public_numbers)) + + +class ECDH: + pass + + +_OID_TO_CURVE = { + EllipticCurveOID.SECP192R1: SECP192R1, + EllipticCurveOID.SECP224R1: SECP224R1, + EllipticCurveOID.SECP256K1: SECP256K1, + EllipticCurveOID.SECP256R1: SECP256R1, + EllipticCurveOID.SECP384R1: SECP384R1, + EllipticCurveOID.SECP521R1: SECP521R1, + EllipticCurveOID.BRAINPOOLP256R1: BrainpoolP256R1, + EllipticCurveOID.BRAINPOOLP384R1: BrainpoolP384R1, + EllipticCurveOID.BRAINPOOLP512R1: BrainpoolP512R1, + EllipticCurveOID.SECT163K1: SECT163K1, + EllipticCurveOID.SECT163R2: SECT163R2, + EllipticCurveOID.SECT233K1: SECT233K1, + EllipticCurveOID.SECT233R1: SECT233R1, + EllipticCurveOID.SECT283K1: SECT283K1, + EllipticCurveOID.SECT283R1: SECT283R1, + EllipticCurveOID.SECT409K1: SECT409K1, + EllipticCurveOID.SECT409R1: SECT409R1, + EllipticCurveOID.SECT571K1: SECT571K1, + EllipticCurveOID.SECT571R1: SECT571R1, +} + + +def get_curve_for_oid(oid: ObjectIdentifier) -> typing.Type[EllipticCurve]: + try: + return _OID_TO_CURVE[oid] + except KeyError: + raise LookupError( + "The provided object identifier has no matching elliptic " + "curve class" + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed25519.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed25519.py new file mode 100644 index 0000000..4327702 ---
/dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed25519.py @@ -0,0 +1,92 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import _serialization + + +_ED25519_KEY_SIZE = 32 +_ED25519_SIG_SIZE = 64 + + +class Ed25519PublicKey(metaclass=abc.ABCMeta): + @classmethod + def from_public_bytes(cls, data: bytes) -> "Ed25519PublicKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + return backend.ed25519_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def verify(self, signature: bytes, data: bytes) -> None: + """ + Verify the signature. + """ + + +class Ed25519PrivateKey(metaclass=abc.ABCMeta): + @classmethod + def generate(cls) -> "Ed25519PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + return backend.ed25519_generate_key() + + @classmethod + def from_private_bytes(cls, data: bytes) -> "Ed25519PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + return backend.ed25519_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self) -> Ed25519PublicKey: + """ + The Ed25519PublicKey derived from the private key. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + The serialized bytes of the private key. + """ + + @abc.abstractmethod + def sign(self, data: bytes) -> bytes: + """ + Signs the data. + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py new file mode 100644 index 0000000..27bc27c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/ed448.py @@ -0,0 +1,87 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
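A minimal Ed25519 round trip through the interfaces above; raw public keys are `_ED25519_KEY_SIZE` (32) bytes and signatures `_ED25519_SIG_SIZE` (64) bytes:

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
    Ed25519PrivateKey,
    Ed25519PublicKey,
)

key = Ed25519PrivateKey.generate()
sig = key.sign(b"message")  # 64-byte signature
raw = key.public_key().public_bytes(
    serialization.Encoding.Raw, serialization.PublicFormat.Raw
)  # 32-byte raw public key
Ed25519PublicKey.from_public_bytes(raw).verify(sig, b"message")
```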
+ + +import abc + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import _serialization + + +class Ed448PublicKey(metaclass=abc.ABCMeta): + @classmethod + def from_public_bytes(cls, data: bytes) -> "Ed448PublicKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + return backend.ed448_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def verify(self, signature: bytes, data: bytes) -> None: + """ + Verify the signature. + """ + + +class Ed448PrivateKey(metaclass=abc.ABCMeta): + @classmethod + def generate(cls) -> "Ed448PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + return backend.ed448_generate_key() + + @classmethod + def from_private_bytes(cls, data: bytes) -> "Ed448PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM, + ) + + return backend.ed448_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self) -> Ed448PublicKey: + """ + The Ed448PublicKey derived from the private key. + """ + + @abc.abstractmethod + def sign(self, data: bytes) -> bytes: + """ + Signs the data. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + The serialized bytes of the private key. + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/padding.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/padding.py new file mode 100644 index 0000000..dd3c648 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/padding.py @@ -0,0 +1,101 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing + +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives._asymmetric import ( + AsymmetricPadding as AsymmetricPadding, +) +from cryptography.hazmat.primitives.asymmetric import rsa + + +class PKCS1v15(AsymmetricPadding): + name = "EMSA-PKCS1-v1_5" + + +class _MaxLength: + "Sentinel value for `MAX_LENGTH`." + + +class _Auto: + "Sentinel value for `AUTO`." + + +class _DigestLength: + "Sentinel value for `DIGEST_LENGTH`." 
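The Ed448 interfaces above mirror the Ed25519 surface exactly; only the sizes change (57-byte raw keys, 114-byte signatures). A one-line sanity sketch:

```python
from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey

key = Ed448PrivateKey.generate()
assert len(key.sign(b"message")) == 114  # Ed448 signatures are 114 bytes
```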
+ + +class PSS(AsymmetricPadding): + MAX_LENGTH = _MaxLength() + AUTO = _Auto() + DIGEST_LENGTH = _DigestLength() + name = "EMSA-PSS" + _salt_length: typing.Union[int, _MaxLength, _Auto, _DigestLength] + + def __init__( + self, + mgf: "MGF", + salt_length: typing.Union[int, _MaxLength, _Auto, _DigestLength], + ) -> None: + self._mgf = mgf + + if not isinstance( + salt_length, (int, _MaxLength, _Auto, _DigestLength) + ): + raise TypeError( + "salt_length must be an integer, MAX_LENGTH, " + "DIGEST_LENGTH, or AUTO" + ) + + if isinstance(salt_length, int) and salt_length < 0: + raise ValueError("salt_length must be zero or greater.") + + self._salt_length = salt_length + + +class OAEP(AsymmetricPadding): + name = "EME-OAEP" + + def __init__( + self, + mgf: "MGF", + algorithm: hashes.HashAlgorithm, + label: typing.Optional[bytes], + ): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._mgf = mgf + self._algorithm = algorithm + self._label = label + + +class MGF(metaclass=abc.ABCMeta): + _algorithm: hashes.HashAlgorithm + + +class MGF1(MGF): + MAX_LENGTH = _MaxLength() + + def __init__(self, algorithm: hashes.HashAlgorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._algorithm = algorithm + + +def calculate_max_pss_salt_length( + key: typing.Union["rsa.RSAPrivateKey", "rsa.RSAPublicKey"], + hash_algorithm: hashes.HashAlgorithm, +) -> int: + if not isinstance(key, (rsa.RSAPrivateKey, rsa.RSAPublicKey)): + raise TypeError("key must be an RSA public or private key") + # bit length - 1 per RFC 3447 + emlen = (key.key_size + 6) // 8 + salt_length = emlen - hash_algorithm.digest_size - 2 + assert salt_length >= 0 + return salt_length diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py new file mode 100644 index 0000000..5ffe767 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py @@ -0,0 +1,425 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing +from math import gcd + +from cryptography.hazmat.primitives import _serialization, hashes +from cryptography.hazmat.primitives._asymmetric import AsymmetricPadding +from cryptography.hazmat.primitives.asymmetric import ( + utils as asym_utils, +) + + +class RSAPrivateKey(metaclass=abc.ABCMeta): + @abc.abstractmethod + def decrypt(self, ciphertext: bytes, padding: AsymmetricPadding) -> bytes: + """ + Decrypts the provided ciphertext. + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_key(self) -> "RSAPublicKey": + """ + The RSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def sign( + self, + data: bytes, + padding: AsymmetricPadding, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> bytes: + """ + Signs the data. + """ + + @abc.abstractmethod + def private_numbers(self) -> "RSAPrivateNumbers": + """ + Returns an RSAPrivateNumbers. 
+ """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + +RSAPrivateKeyWithSerialization = RSAPrivateKey + + +class RSAPublicKey(metaclass=abc.ABCMeta): + @abc.abstractmethod + def encrypt(self, plaintext: bytes, padding: AsymmetricPadding) -> bytes: + """ + Encrypts the given plaintext. + """ + + @abc.abstractproperty + def key_size(self) -> int: + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_numbers(self) -> "RSAPublicNumbers": + """ + Returns an RSAPublicNumbers + """ + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify( + self, + signature: bytes, + data: bytes, + padding: AsymmetricPadding, + algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm], + ) -> None: + """ + Verifies the signature of the data. + """ + + @abc.abstractmethod + def recover_data_from_signature( + self, + signature: bytes, + padding: AsymmetricPadding, + algorithm: typing.Optional[hashes.HashAlgorithm], + ) -> bytes: + """ + Recovers the original data from the signature. + """ + + +RSAPublicKeyWithSerialization = RSAPublicKey + + +def generate_private_key( + public_exponent: int, + key_size: int, + backend: typing.Any = None, +) -> RSAPrivateKey: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + _verify_rsa_parameters(public_exponent, key_size) + return ossl.generate_rsa_private_key(public_exponent, key_size) + + +def _verify_rsa_parameters(public_exponent: int, key_size: int) -> None: + if public_exponent not in (3, 65537): + raise ValueError( + "public_exponent must be either 3 (for legacy compatibility) or " + "65537. Almost everyone should choose 65537 here!" + ) + + if key_size < 512: + raise ValueError("key_size must be at least 512-bits.") + + +def _check_private_key_components( + p: int, + q: int, + private_exponent: int, + dmp1: int, + dmq1: int, + iqmp: int, + public_exponent: int, + modulus: int, +) -> None: + if modulus < 3: + raise ValueError("modulus must be >= 3.") + + if p >= modulus: + raise ValueError("p must be < modulus.") + + if q >= modulus: + raise ValueError("q must be < modulus.") + + if dmp1 >= modulus: + raise ValueError("dmp1 must be < modulus.") + + if dmq1 >= modulus: + raise ValueError("dmq1 must be < modulus.") + + if iqmp >= modulus: + raise ValueError("iqmp must be < modulus.") + + if private_exponent >= modulus: + raise ValueError("private_exponent must be < modulus.") + + if public_exponent < 3 or public_exponent >= modulus: + raise ValueError("public_exponent must be >= 3 and < modulus.") + + if public_exponent & 1 == 0: + raise ValueError("public_exponent must be odd.") + + if dmp1 & 1 == 0: + raise ValueError("dmp1 must be odd.") + + if dmq1 & 1 == 0: + raise ValueError("dmq1 must be odd.") + + if p * q != modulus: + raise ValueError("p*q must equal modulus.") + + +def _check_public_key_components(e: int, n: int) -> None: + if n < 3: + raise ValueError("n must be >= 3.") + + if e < 3 or e >= n: + raise ValueError("e must be >= 3 and < n.") + + if e & 1 == 0: + raise ValueError("e must be odd.") + + +def _modinv(e: int, m: int) -> int: + """ + Modular Multiplicative Inverse. 
Returns x such that: (x*e) mod m == 1 + """ + x1, x2 = 1, 0 + a, b = e, m + while b > 0: + q, r = divmod(a, b) + xn = x1 - q * x2 + a, b, x1, x2 = b, r, x2, xn + return x1 % m + + +def rsa_crt_iqmp(p: int, q: int) -> int: + """ + Compute the CRT (q ** -1) % p value from RSA primes p and q. + """ + return _modinv(q, p) + + +def rsa_crt_dmp1(private_exponent: int, p: int) -> int: + """ + Compute the CRT private_exponent % (p - 1) value from the RSA + private_exponent (d) and p. + """ + return private_exponent % (p - 1) + + +def rsa_crt_dmq1(private_exponent: int, q: int) -> int: + """ + Compute the CRT private_exponent % (q - 1) value from the RSA + private_exponent (d) and q. + """ + return private_exponent % (q - 1) + + +# Controls the number of iterations rsa_recover_prime_factors will perform +# to obtain the prime factors. Each iteration increments by 2 so the actual +# maximum attempts is half this number. +_MAX_RECOVERY_ATTEMPTS = 1000 + + +def rsa_recover_prime_factors( + n: int, e: int, d: int +) -> typing.Tuple[int, int]: + """ + Compute factors p and q from the private exponent d. We assume that n has + no more than two factors. This function is adapted from code in PyCrypto. + """ + # See 8.2.2(i) in Handbook of Applied Cryptography. + ktot = d * e - 1 + # The quantity d*e-1 is a multiple of phi(n), even, + # and can be represented as t*2^s. + t = ktot + while t % 2 == 0: + t = t // 2 + # Cycle through all multiplicative inverses in Zn. + # The algorithm is non-deterministic, but there is a 50% chance + # any candidate a leads to successful factoring. + # See "Digitalized Signatures and Public Key Functions as Intractable + # as Factorization", M. Rabin, 1979 + spotted = False + a = 2 + while not spotted and a < _MAX_RECOVERY_ATTEMPTS: + k = t + # Cycle through all values a^{t*2^i}=a^k + while k < ktot: + cand = pow(a, k, n) + # Check if a^k is a non-trivial root of unity (mod n) + if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1: + # We have found a number such that (cand-1)(cand+1)=0 (mod n). + # Either of the terms divides n. + p = gcd(cand + 1, n) + spotted = True + break + k *= 2 + # This value was not any good... let's try another! + a += 2 + if not spotted: + raise ValueError("Unable to compute factors p and q from exponent d.") + # Found ! + q, r = divmod(n, p) + assert r == 0 + p, q = sorted((p, q), reverse=True) + return (p, q) + + +class RSAPrivateNumbers: + def __init__( + self, + p: int, + q: int, + d: int, + dmp1: int, + dmq1: int, + iqmp: int, + public_numbers: "RSAPublicNumbers", + ): + if ( + not isinstance(p, int) + or not isinstance(q, int) + or not isinstance(d, int) + or not isinstance(dmp1, int) + or not isinstance(dmq1, int) + or not isinstance(iqmp, int) + ): + raise TypeError( + "RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must" + " all be an integers." + ) + + if not isinstance(public_numbers, RSAPublicNumbers): + raise TypeError( + "RSAPrivateNumbers public_numbers must be an RSAPublicNumbers" + " instance." 
+            )
+
+        self._p = p
+        self._q = q
+        self._d = d
+        self._dmp1 = dmp1
+        self._dmq1 = dmq1
+        self._iqmp = iqmp
+        self._public_numbers = public_numbers
+
+    @property
+    def p(self) -> int:
+        return self._p
+
+    @property
+    def q(self) -> int:
+        return self._q
+
+    @property
+    def d(self) -> int:
+        return self._d
+
+    @property
+    def dmp1(self) -> int:
+        return self._dmp1
+
+    @property
+    def dmq1(self) -> int:
+        return self._dmq1
+
+    @property
+    def iqmp(self) -> int:
+        return self._iqmp
+
+    @property
+    def public_numbers(self) -> "RSAPublicNumbers":
+        return self._public_numbers
+
+    def private_key(self, backend: typing.Any = None) -> RSAPrivateKey:
+        from cryptography.hazmat.backends.openssl.backend import (
+            backend as ossl,
+        )
+
+        return ossl.load_rsa_private_numbers(self)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, RSAPrivateNumbers):
+            return NotImplemented
+
+        return (
+            self.p == other.p
+            and self.q == other.q
+            and self.d == other.d
+            and self.dmp1 == other.dmp1
+            and self.dmq1 == other.dmq1
+            and self.iqmp == other.iqmp
+            and self.public_numbers == other.public_numbers
+        )
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.p,
+                self.q,
+                self.d,
+                self.dmp1,
+                self.dmq1,
+                self.iqmp,
+                self.public_numbers,
+            )
+        )
+
+
+class RSAPublicNumbers:
+    def __init__(self, e: int, n: int):
+        if not isinstance(e, int) or not isinstance(n, int):
+            raise TypeError("RSAPublicNumbers arguments must be integers.")
+
+        self._e = e
+        self._n = n
+
+    @property
+    def e(self) -> int:
+        return self._e
+
+    @property
+    def n(self) -> int:
+        return self._n
+
+    def public_key(self, backend: typing.Any = None) -> RSAPublicKey:
+        from cryptography.hazmat.backends.openssl.backend import (
+            backend as ossl,
+        )
+
+        return ossl.load_rsa_public_numbers(self)
+
+    def __repr__(self) -> str:
+        return "<RSAPublicNumbers(e={0.e}, n={0.n})>".format(self)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, RSAPublicNumbers):
+            return NotImplemented
+
+        return self.e == other.e and self.n == other.n
+
+    def __hash__(self) -> int:
+        return hash((self.e, self.n))
diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/types.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/types.py
new file mode 100644
index 0000000..d497815
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/types.py
@@ -0,0 +1,69 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import typing
+
+from cryptography.hazmat.primitives.asymmetric import (
+    dh,
+    dsa,
+    ec,
+    ed25519,
+    ed448,
+    rsa,
+    x25519,
+    x448,
+)
+
+
+# Every asymmetric key type
+PUBLIC_KEY_TYPES = typing.Union[
+    dh.DHPublicKey,
+    dsa.DSAPublicKey,
+    rsa.RSAPublicKey,
+    ec.EllipticCurvePublicKey,
+    ed25519.Ed25519PublicKey,
+    ed448.Ed448PublicKey,
+    x25519.X25519PublicKey,
+    x448.X448PublicKey,
+]
+# Every asymmetric key type
+PRIVATE_KEY_TYPES = typing.Union[
+    dh.DHPrivateKey,
+    ed25519.Ed25519PrivateKey,
+    ed448.Ed448PrivateKey,
+    rsa.RSAPrivateKey,
+    dsa.DSAPrivateKey,
+    ec.EllipticCurvePrivateKey,
+    x25519.X25519PrivateKey,
+    x448.X448PrivateKey,
+]
+# Just the key types we allow to be used for x509 signing. 
This mirrors +# the certificate public key types +CERTIFICATE_PRIVATE_KEY_TYPES = typing.Union[ + ed25519.Ed25519PrivateKey, + ed448.Ed448PrivateKey, + rsa.RSAPrivateKey, + dsa.DSAPrivateKey, + ec.EllipticCurvePrivateKey, +] +# Just the key types we allow to be used for x509 signing. This mirrors +# the certificate private key types +CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES = typing.Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, +] +# This type removes DHPublicKey. x448/x25519 can be a public key +# but cannot be used in signing so they are allowed here. +CERTIFICATE_PUBLIC_KEY_TYPES = typing.Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + x25519.X25519PublicKey, + x448.X448PublicKey, +] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py new file mode 100644 index 0000000..638ecb3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py @@ -0,0 +1,24 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography.hazmat.bindings._rust import asn1 +from cryptography.hazmat.primitives import hashes + + +decode_dss_signature = asn1.decode_dss_signature +encode_dss_signature = asn1.encode_dss_signature + + +class Prehashed: + def __init__(self, algorithm: hashes.HashAlgorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of HashAlgorithm.") + + self._algorithm = algorithm + self._digest_size = algorithm.digest_size + + @property + def digest_size(self) -> int: + return self._digest_size diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x25519.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x25519.py new file mode 100644 index 0000000..690af78 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x25519.py @@ -0,0 +1,81 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import _serialization + + +class X25519PublicKey(metaclass=abc.ABCMeta): + @classmethod + def from_public_bytes(cls, data: bytes) -> "X25519PublicKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + + return backend.x25519_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + The serialized bytes of the public key. 
+ """ + + +class X25519PrivateKey(metaclass=abc.ABCMeta): + @classmethod + def generate(cls) -> "X25519PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + return backend.x25519_generate_key() + + @classmethod + def from_private_bytes(cls, data: bytes) -> "X25519PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + + return backend.x25519_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self) -> X25519PublicKey: + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + The serialized bytes of the private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key: X25519PublicKey) -> bytes: + """ + Performs a key exchange operation using the provided peer's public key. + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x448.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x448.py new file mode 100644 index 0000000..7f71c27 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/asymmetric/x448.py @@ -0,0 +1,81 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import _serialization + + +class X448PublicKey(metaclass=abc.ABCMeta): + @classmethod + def from_public_bytes(cls, data: bytes) -> "X448PublicKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + + return backend.x448_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PublicFormat, + ) -> bytes: + """ + The serialized bytes of the public key. + """ + + +class X448PrivateKey(metaclass=abc.ABCMeta): + @classmethod + def generate(cls) -> "X448PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + return backend.x448_generate_key() + + @classmethod + def from_private_bytes(cls, data: bytes) -> "X448PrivateKey": + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM, + ) + + return backend.x448_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self) -> X448PublicKey: + """ + The serialized bytes of the public key. 
+ """ + + @abc.abstractmethod + def private_bytes( + self, + encoding: _serialization.Encoding, + format: _serialization.PrivateFormat, + encryption_algorithm: _serialization.KeySerializationEncryption, + ) -> bytes: + """ + The serialized bytes of the private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key: X448PublicKey) -> bytes: + """ + Performs a key exchange operation using the provided peer's public key. + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/__init__.py new file mode 100644 index 0000000..874dbd4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/__init__.py @@ -0,0 +1,27 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography.hazmat.primitives._cipheralgorithm import ( + BlockCipherAlgorithm, + CipherAlgorithm, +) +from cryptography.hazmat.primitives.ciphers.base import ( + AEADCipherContext, + AEADDecryptionContext, + AEADEncryptionContext, + Cipher, + CipherContext, +) + + +__all__ = [ + "Cipher", + "CipherAlgorithm", + "BlockCipherAlgorithm", + "CipherContext", + "AEADCipherContext", + "AEADDecryptionContext", + "AEADEncryptionContext", +] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/aead.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/aead.py new file mode 100644 index 0000000..3cdb3eb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/aead.py @@ -0,0 +1,361 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import os +import typing + +from cryptography import exceptions, utils +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.backend import backend + + +class ChaCha20Poly1305: + _MAX_SIZE = 2**31 - 1 + + def __init__(self, key: bytes): + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "ChaCha20Poly1305 is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER, + ) + utils._check_byteslike("key", key) + + if len(key) != 32: + raise ValueError("ChaCha20Poly1305 key must be 32 bytes.") + + self._key = key + + @classmethod + def generate_key(cls) -> bytes: + return os.urandom(32) + + def encrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**31 - 1 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt(backend, self, nonce, data, [associated_data], 16) + + def decrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt(backend, self, nonce, data, [associated_data], 16) + + def _check_params( + self, + nonce: bytes, + data: bytes, + associated_data: bytes, + ) -> None: + utils._check_byteslike("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) != 12: + raise ValueError("Nonce must be 12 bytes") + + +class AESCCM: + _MAX_SIZE = 2**31 - 1 + + def __init__(self, key: bytes, tag_length: int = 16): + utils._check_byteslike("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESCCM key must be 128, 192, or 256 bits.") + + self._key = key + if not isinstance(tag_length, int): + raise TypeError("tag_length must be an integer") + + if tag_length not in (4, 6, 8, 10, 12, 14, 16): + raise ValueError("Invalid tag_length") + + self._tag_length = tag_length + + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "AESCCM is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER, + ) + + @classmethod + def generate_key(cls, bit_length: int) -> bytes: + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**31 - 1 bytes" + ) + + self._check_params(nonce, data, associated_data) + self._validate_lengths(nonce, len(data)) + return aead._encrypt( + backend, self, nonce, data, [associated_data], self._tag_length + ) + + def decrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, [associated_data], self._tag_length + ) + + def _validate_lengths(self, nonce: bytes, data_len: int) -> None: + # For information about computing this, see + # https://tools.ietf.org/html/rfc3610#section-2.1 + l_val = 15 - len(nonce) + if 2 ** (8 * l_val) < data_len: + raise ValueError("Data too long for nonce") + + def _check_params( + self, nonce: bytes, data: bytes, associated_data: bytes + ) -> None: + utils._check_byteslike("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if not 7 <= len(nonce) <= 13: + raise ValueError("Nonce must be between 7 and 13 bytes") + + +class AESGCM: + _MAX_SIZE = 2**31 - 1 + + def __init__(self, key: bytes): + utils._check_byteslike("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESGCM key must be 128, 192, or 256 bits.") + + self._key = key + + @classmethod + def generate_key(cls, bit_length: int) -> bytes: + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**31 - 1 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt(backend, self, nonce, data, [associated_data], 16) + + def decrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt(backend, self, nonce, data, [associated_data], 16) + + def _check_params( + self, + nonce: bytes, + data: bytes, + associated_data: bytes, + ) -> None: + utils._check_byteslike("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) < 8 or len(nonce) > 128: + raise ValueError("Nonce must be between 8 and 128 bytes") + + +class AESOCB3: + _MAX_SIZE = 2**31 - 1 + + def __init__(self, key: bytes): + utils._check_byteslike("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESOCB3 key must be 128, 192, or 256 bits.") + + self._key = key + + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "OCB3 is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER, + ) + + @classmethod + def generate_key(cls, bit_length: int) -> bytes: + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**31 - 1 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt(backend, self, nonce, data, [associated_data], 16) + + def decrypt( + self, + nonce: bytes, + data: bytes, + associated_data: typing.Optional[bytes], + ) -> bytes: + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt(backend, self, nonce, data, [associated_data], 16) + + def _check_params( + self, + nonce: bytes, + data: bytes, + associated_data: bytes, + ) -> None: + utils._check_byteslike("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) < 12 or len(nonce) > 15: + raise ValueError("Nonce must be between 12 and 15 bytes") + + +class AESSIV(object): + _MAX_SIZE = 2**31 - 1 + + def __init__(self, key: bytes): + utils._check_byteslike("key", key) + if len(key) not in (32, 48, 64): + raise ValueError("AESSIV key must be 256, 384, or 512 bits.") + + self._key = key + + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "AES-SIV is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER, + ) + + @classmethod + def generate_key(cls, bit_length: int) -> bytes: + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (256, 384, 512): + raise ValueError("bit_length must be 256, 384, or 512") + + return os.urandom(bit_length // 8) + + def encrypt( + self, + data: bytes, + associated_data: typing.Optional[typing.List[bytes]], + ) -> bytes: + if associated_data is None: + associated_data = [] + + self._check_params(data, associated_data) + + if len(data) > self._MAX_SIZE or any( + len(ad) > self._MAX_SIZE for ad in associated_data + ): + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. Max 2**31 - 1 bytes" + ) + + return aead._encrypt(backend, self, b"", data, associated_data, 16) + + def decrypt( + self, + data: bytes, + associated_data: typing.Optional[typing.List[bytes]], + ) -> bytes: + if associated_data is None: + associated_data = [] + + self._check_params(data, associated_data) + + return aead._decrypt(backend, self, b"", data, associated_data, 16) + + def _check_params( + self, + data: bytes, + associated_data: typing.List, + ) -> None: + utils._check_bytes("data", data) + if not isinstance(associated_data, list) or not all( + isinstance(x, bytes) for x in associated_data + ): + raise TypeError("associated_data must be a list of bytes or None") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/algorithms.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/algorithms.py new file mode 100644 index 0000000..e327e76 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/algorithms.py @@ -0,0 +1,207 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
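The one-shot AEAD classes above (ChaCha20Poly1305, AESGCM, AESCCM, AESOCB3, AESSIV) all share the same generate_key/encrypt/decrypt shape. A round-trip sketch with AESGCM, assuming a standard cryptography install and not part of the patch; tampering with the ciphertext, nonce, or associated data makes decrypt() raise InvalidTag:

import os

from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)
aesgcm = AESGCM(key)
nonce = os.urandom(12)  # 96-bit nonces are conventional for GCM; never reuse one under the same key

ciphertext = aesgcm.encrypt(nonce, b"secret payload", b"public header")
assert aesgcm.decrypt(nonce, ciphertext, b"public header") == b"secret payload"

try:
    aesgcm.decrypt(nonce, ciphertext, b"tampered header")
except InvalidTag:
    print("authentication failed")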
+ + +from cryptography import utils +from cryptography.hazmat.primitives.ciphers import ( + BlockCipherAlgorithm, + CipherAlgorithm, +) + + +def _verify_key_size(algorithm: CipherAlgorithm, key: bytes) -> bytes: + # Verify that the key is instance of bytes + utils._check_byteslike("key", key) + + # Verify that the key size matches the expected key size + if len(key) * 8 not in algorithm.key_sizes: + raise ValueError( + "Invalid key size ({}) for {}.".format( + len(key) * 8, algorithm.name + ) + ) + return key + + +class AES(CipherAlgorithm, BlockCipherAlgorithm): + name = "AES" + block_size = 128 + # 512 added to support AES-256-XTS, which uses 512-bit keys + key_sizes = frozenset([128, 192, 256, 512]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +class Camellia(CipherAlgorithm, BlockCipherAlgorithm): + name = "camellia" + block_size = 128 + key_sizes = frozenset([128, 192, 256]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +class TripleDES(CipherAlgorithm, BlockCipherAlgorithm): + name = "3DES" + block_size = 64 + key_sizes = frozenset([64, 128, 192]) + + def __init__(self, key: bytes): + if len(key) == 8: + key += key + key + elif len(key) == 16: + key += key[:8] + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +class Blowfish(CipherAlgorithm, BlockCipherAlgorithm): + name = "Blowfish" + block_size = 64 + key_sizes = frozenset(range(32, 449, 8)) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +_BlowfishInternal = Blowfish +utils.deprecated( + Blowfish, + __name__, + "Blowfish has been deprecated", + utils.DeprecatedIn37, + name="Blowfish", +) + + +class CAST5(CipherAlgorithm, BlockCipherAlgorithm): + name = "CAST5" + block_size = 64 + key_sizes = frozenset(range(40, 129, 8)) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +_CAST5Internal = CAST5 +utils.deprecated( + CAST5, + __name__, + "CAST5 has been deprecated", + utils.DeprecatedIn37, + name="CAST5", +) + + +class ARC4(CipherAlgorithm): + name = "RC4" + key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +class IDEA(CipherAlgorithm, BlockCipherAlgorithm): + name = "IDEA" + block_size = 64 + key_sizes = frozenset([128]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +_IDEAInternal = IDEA +utils.deprecated( + IDEA, + __name__, + "IDEA has been deprecated", + utils.DeprecatedIn37, + name="IDEA", +) + + +class SEED(CipherAlgorithm, BlockCipherAlgorithm): + name = "SEED" + block_size = 128 + key_sizes = frozenset([128]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +_SEEDInternal = SEED +utils.deprecated( + SEED, + __name__, + "SEED has been deprecated", + utils.DeprecatedIn37, + name="SEED", +) + + +class ChaCha20(CipherAlgorithm): + name = "ChaCha20" + key_sizes = frozenset([256]) + + def 
__init__(self, key: bytes, nonce: bytes): + self.key = _verify_key_size(self, key) + utils._check_byteslike("nonce", nonce) + + if len(nonce) != 16: + raise ValueError("nonce must be 128-bits (16 bytes)") + + self._nonce = nonce + + @property + def nonce(self) -> bytes: + return self._nonce + + @property + def key_size(self) -> int: + return len(self.key) * 8 + + +class SM4(CipherAlgorithm, BlockCipherAlgorithm): + name = "SM4" + block_size = 128 + key_sizes = frozenset([128]) + + def __init__(self, key: bytes): + self.key = _verify_key_size(self, key) + + @property + def key_size(self) -> int: + return len(self.key) * 8 diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/base.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/base.py new file mode 100644 index 0000000..2ea7fc6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/base.py @@ -0,0 +1,269 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing + +from cryptography.exceptions import ( + AlreadyFinalized, + AlreadyUpdated, + NotYetFinalized, +) +from cryptography.hazmat.primitives._cipheralgorithm import CipherAlgorithm +from cryptography.hazmat.primitives.ciphers import modes + + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.ciphers import ( + _CipherContext as _BackendCipherContext, + ) + + +class CipherContext(metaclass=abc.ABCMeta): + @abc.abstractmethod + def update(self, data: bytes) -> bytes: + """ + Processes the provided bytes through the cipher and returns the results + as bytes. + """ + + @abc.abstractmethod + def update_into(self, data: bytes, buf: bytes) -> int: + """ + Processes the provided bytes and writes the resulting data into the + provided buffer. Returns the number of bytes written. + """ + + @abc.abstractmethod + def finalize(self) -> bytes: + """ + Returns the results of processing the final block as bytes. + """ + + +class AEADCipherContext(CipherContext, metaclass=abc.ABCMeta): + @abc.abstractmethod + def authenticate_additional_data(self, data: bytes) -> None: + """ + Authenticates the provided bytes. + """ + + +class AEADDecryptionContext(AEADCipherContext, metaclass=abc.ABCMeta): + @abc.abstractmethod + def finalize_with_tag(self, tag: bytes) -> bytes: + """ + Returns the results of processing the final block as bytes and allows + delayed passing of the authentication tag. + """ + + +class AEADEncryptionContext(AEADCipherContext, metaclass=abc.ABCMeta): + @abc.abstractproperty + def tag(self) -> bytes: + """ + Returns tag bytes. This is only available after encryption is + finalized. + """ + + +Mode = typing.TypeVar( + "Mode", bound=typing.Optional[modes.Mode], covariant=True +) + + +class Cipher(typing.Generic[Mode]): + def __init__( + self, + algorithm: CipherAlgorithm, + mode: Mode, + backend: typing.Any = None, + ): + + if not isinstance(algorithm, CipherAlgorithm): + raise TypeError("Expected interface of CipherAlgorithm.") + + if mode is not None: + # mypy needs this assert to narrow the type from our generic + # type. Maybe it won't some time in the future. + assert isinstance(mode, modes.Mode) + mode.validate_for_algorithm(algorithm) + + self.algorithm = algorithm + self.mode = mode + + @typing.overload + def encryptor( + self: "Cipher[modes.ModeWithAuthenticationTag]", + ) -> AEADEncryptionContext: + ... 
+ + @typing.overload + def encryptor( + self: "_CIPHER_TYPE", + ) -> CipherContext: + ... + + def encryptor(self): + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if self.mode.tag is not None: + raise ValueError( + "Authentication tag must be None when encrypting." + ) + from cryptography.hazmat.backends.openssl.backend import backend + + ctx = backend.create_symmetric_encryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=True) + + @typing.overload + def decryptor( + self: "Cipher[modes.ModeWithAuthenticationTag]", + ) -> AEADDecryptionContext: + ... + + @typing.overload + def decryptor( + self: "_CIPHER_TYPE", + ) -> CipherContext: + ... + + def decryptor(self): + from cryptography.hazmat.backends.openssl.backend import backend + + ctx = backend.create_symmetric_decryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=False) + + def _wrap_ctx( + self, ctx: "_BackendCipherContext", encrypt: bool + ) -> typing.Union[ + AEADEncryptionContext, AEADDecryptionContext, CipherContext + ]: + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if encrypt: + return _AEADEncryptionContext(ctx) + else: + return _AEADDecryptionContext(ctx) + else: + return _CipherContext(ctx) + + +_CIPHER_TYPE = Cipher[ + typing.Union[ + modes.ModeWithNonce, + modes.ModeWithTweak, + None, + modes.ECB, + modes.ModeWithInitializationVector, + ] +] + + +class _CipherContext(CipherContext): + _ctx: typing.Optional["_BackendCipherContext"] + + def __init__(self, ctx: "_BackendCipherContext") -> None: + self._ctx = ctx + + def update(self, data: bytes) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update(data) + + def update_into(self, data: bytes, buf: bytes) -> int: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update_into(data, buf) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._ctx = None + return data + + +class _AEADCipherContext(AEADCipherContext): + _ctx: typing.Optional["_BackendCipherContext"] + _tag: typing.Optional[bytes] + + def __init__(self, ctx: "_BackendCipherContext") -> None: + self._ctx = ctx + self._bytes_processed = 0 + self._aad_bytes_processed = 0 + self._tag = None + self._updated = False + + def _check_limit(self, data_size: int) -> None: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + self._updated = True + self._bytes_processed += data_size + if self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES: + raise ValueError( + "{} has a maximum encrypted byte limit of {}".format( + self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES + ) + ) + + def update(self, data: bytes) -> bytes: + self._check_limit(len(data)) + # mypy needs this assert even though _check_limit already checked + assert self._ctx is not None + return self._ctx.update(data) + + def update_into(self, data: bytes, buf: bytes) -> int: + self._check_limit(len(data)) + # mypy needs this assert even though _check_limit already checked + assert self._ctx is not None + return self._ctx.update_into(data, buf) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._tag = self._ctx.tag + self._ctx = None + return data + + def authenticate_additional_data(self, data: bytes) -> None: + if 
self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if self._updated: + raise AlreadyUpdated("Update has been called on this context.") + + self._aad_bytes_processed += len(data) + if self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES: + raise ValueError( + "{} has a maximum AAD byte limit of {}".format( + self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES + ) + ) + + self._ctx.authenticate_additional_data(data) + + +class _AEADDecryptionContext(_AEADCipherContext, AEADDecryptionContext): + def finalize_with_tag(self, tag: bytes) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize_with_tag(tag) + self._tag = self._ctx.tag + self._ctx = None + return data + + +class _AEADEncryptionContext(_AEADCipherContext, AEADEncryptionContext): + @property + def tag(self) -> bytes: + if self._ctx is not None: + raise NotYetFinalized( + "You must finalize encryption before " "getting the tag." + ) + assert self._tag is not None + return self._tag diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/modes.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/modes.py new file mode 100644 index 0000000..6911742 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/ciphers/modes.py @@ -0,0 +1,263 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives._cipheralgorithm import ( + BlockCipherAlgorithm, + CipherAlgorithm, +) + + +class Mode(metaclass=abc.ABCMeta): + @abc.abstractproperty + def name(self) -> str: + """ + A string naming this mode (e.g. "ECB", "CBC"). + """ + + @abc.abstractmethod + def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: + """ + Checks that all the necessary invariants of this (mode, algorithm) + combination are met. + """ + + +class ModeWithInitializationVector(Mode, metaclass=abc.ABCMeta): + @abc.abstractproperty + def initialization_vector(self) -> bytes: + """ + The value of the initialization vector for this mode as bytes. + """ + + +class ModeWithTweak(Mode, metaclass=abc.ABCMeta): + @abc.abstractproperty + def tweak(self) -> bytes: + """ + The value of the tweak for this mode as bytes. + """ + + +class ModeWithNonce(Mode, metaclass=abc.ABCMeta): + @abc.abstractproperty + def nonce(self) -> bytes: + """ + The value of the nonce for this mode as bytes. + """ + + +class ModeWithAuthenticationTag(Mode, metaclass=abc.ABCMeta): + @abc.abstractproperty + def tag(self) -> typing.Optional[bytes]: + """ + The value of the tag supplied to the constructor of this mode. 
+ """ + + +def _check_aes_key_length(self: Mode, algorithm: CipherAlgorithm) -> None: + if algorithm.key_size > 256 and algorithm.name == "AES": + raise ValueError( + "Only 128, 192, and 256 bit keys are allowed for this AES mode" + ) + + +def _check_iv_length( + self: ModeWithInitializationVector, algorithm: BlockCipherAlgorithm +) -> None: + if len(self.initialization_vector) * 8 != algorithm.block_size: + raise ValueError( + "Invalid IV size ({}) for {}.".format( + len(self.initialization_vector), self.name + ) + ) + + +def _check_nonce_length( + nonce: bytes, name: str, algorithm: CipherAlgorithm +) -> None: + if not isinstance(algorithm, BlockCipherAlgorithm): + raise UnsupportedAlgorithm( + f"{name} requires a block cipher algorithm", + _Reasons.UNSUPPORTED_CIPHER, + ) + if len(nonce) * 8 != algorithm.block_size: + raise ValueError( + "Invalid nonce size ({}) for {}.".format(len(nonce), name) + ) + + +def _check_iv_and_key_length( + self: ModeWithInitializationVector, algorithm: CipherAlgorithm +) -> None: + if not isinstance(algorithm, BlockCipherAlgorithm): + raise UnsupportedAlgorithm( + f"{self} requires a block cipher algorithm", + _Reasons.UNSUPPORTED_CIPHER, + ) + _check_aes_key_length(self, algorithm) + _check_iv_length(self, algorithm) + + +class CBC(ModeWithInitializationVector): + name = "CBC" + + def __init__(self, initialization_vector: bytes): + utils._check_byteslike("initialization_vector", initialization_vector) + self._initialization_vector = initialization_vector + + @property + def initialization_vector(self) -> bytes: + return self._initialization_vector + + validate_for_algorithm = _check_iv_and_key_length + + +class XTS(ModeWithTweak): + name = "XTS" + + def __init__(self, tweak: bytes): + utils._check_byteslike("tweak", tweak) + + if len(tweak) != 16: + raise ValueError("tweak must be 128-bits (16 bytes)") + + self._tweak = tweak + + @property + def tweak(self) -> bytes: + return self._tweak + + def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: + if algorithm.key_size not in (256, 512): + raise ValueError( + "The XTS specification requires a 256-bit key for AES-128-XTS" + " and 512-bit key for AES-256-XTS" + ) + + +class ECB(Mode): + name = "ECB" + + validate_for_algorithm = _check_aes_key_length + + +class OFB(ModeWithInitializationVector): + name = "OFB" + + def __init__(self, initialization_vector: bytes): + utils._check_byteslike("initialization_vector", initialization_vector) + self._initialization_vector = initialization_vector + + @property + def initialization_vector(self) -> bytes: + return self._initialization_vector + + validate_for_algorithm = _check_iv_and_key_length + + +class CFB(ModeWithInitializationVector): + name = "CFB" + + def __init__(self, initialization_vector: bytes): + utils._check_byteslike("initialization_vector", initialization_vector) + self._initialization_vector = initialization_vector + + @property + def initialization_vector(self) -> bytes: + return self._initialization_vector + + validate_for_algorithm = _check_iv_and_key_length + + +class CFB8(ModeWithInitializationVector): + name = "CFB8" + + def __init__(self, initialization_vector: bytes): + utils._check_byteslike("initialization_vector", initialization_vector) + self._initialization_vector = initialization_vector + + @property + def initialization_vector(self) -> bytes: + return self._initialization_vector + + validate_for_algorithm = _check_iv_and_key_length + + +class CTR(ModeWithNonce): + name = "CTR" + + def __init__(self, nonce: bytes): + 
utils._check_byteslike("nonce", nonce) + self._nonce = nonce + + @property + def nonce(self) -> bytes: + return self._nonce + + def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: + _check_aes_key_length(self, algorithm) + _check_nonce_length(self.nonce, self.name, algorithm) + + +class GCM(ModeWithInitializationVector, ModeWithAuthenticationTag): + name = "GCM" + _MAX_ENCRYPTED_BYTES = (2**39 - 256) // 8 + _MAX_AAD_BYTES = (2**64) // 8 + + def __init__( + self, + initialization_vector: bytes, + tag: typing.Optional[bytes] = None, + min_tag_length: int = 16, + ): + # OpenSSL 3.0.0 constrains GCM IVs to [64, 1024] bits inclusive + # This is a sane limit anyway so we'll enforce it here. + utils._check_byteslike("initialization_vector", initialization_vector) + if len(initialization_vector) < 8 or len(initialization_vector) > 128: + raise ValueError( + "initialization_vector must be between 8 and 128 bytes (64 " + "and 1024 bits)." + ) + self._initialization_vector = initialization_vector + if tag is not None: + utils._check_bytes("tag", tag) + if min_tag_length < 4: + raise ValueError("min_tag_length must be >= 4") + if len(tag) < min_tag_length: + raise ValueError( + "Authentication tag must be {} bytes or longer.".format( + min_tag_length + ) + ) + self._tag = tag + self._min_tag_length = min_tag_length + + @property + def tag(self) -> typing.Optional[bytes]: + return self._tag + + @property + def initialization_vector(self) -> bytes: + return self._initialization_vector + + def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: + _check_aes_key_length(self, algorithm) + if not isinstance(algorithm, BlockCipherAlgorithm): + raise UnsupportedAlgorithm( + "GCM requires a block cipher algorithm", + _Reasons.UNSUPPORTED_CIPHER, + ) + block_size_bytes = algorithm.block_size // 8 + if self._tag is not None and len(self._tag) > block_size_bytes: + raise ValueError( + "Authentication tag cannot be more than {} bytes.".format( + block_size_bytes + ) + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/cmac.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/cmac.py new file mode 100644 index 0000000..e08d65e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/cmac.py @@ -0,0 +1,66 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
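A short sketch of the CMAC context this file defines, assuming the vendored AES algorithm and a standard cryptography install (not part of the patch); verify() consumes the context and raises InvalidSignature when the tag does not match:

import os

from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.cmac import CMAC

key = os.urandom(16)

c = CMAC(AES(key))
c.update(b"message to authenticate")
tag = c.finalize()  # context is now finalized; further update() raises AlreadyFinalized

verifier = CMAC(AES(key))
verifier.update(b"message to authenticate")
verifier.verify(tag)  # raises InvalidSignature on mismatch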
+ + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, +) +from cryptography.hazmat.primitives import ciphers + +if typing.TYPE_CHECKING: + from cryptography.hazmat.backends.openssl.cmac import _CMACContext + + +class CMAC: + _ctx: typing.Optional["_CMACContext"] + _algorithm: ciphers.BlockCipherAlgorithm + + def __init__( + self, + algorithm: ciphers.BlockCipherAlgorithm, + backend: typing.Any = None, + ctx: typing.Optional["_CMACContext"] = None, + ): + if not isinstance(algorithm, ciphers.BlockCipherAlgorithm): + raise TypeError("Expected instance of BlockCipherAlgorithm.") + self._algorithm = algorithm + + if ctx is None: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + self._ctx = ossl.create_cmac_ctx(self._algorithm) + else: + self._ctx = ctx + + def update(self, data: bytes) -> None: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + utils._check_bytes("data", data) + self._ctx.update(data) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + def verify(self, signature: bytes) -> None: + utils._check_bytes("signature", signature) + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(signature) + + def copy(self) -> "CMAC": + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return CMAC(self._algorithm, ctx=self._ctx.copy()) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/constant_time.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/constant_time.py new file mode 100644 index 0000000..a02fa9c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/constant_time.py @@ -0,0 +1,13 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import hmac + + +def bytes_eq(a: bytes, b: bytes) -> bool: + if not isinstance(a, bytes) or not isinstance(b, bytes): + raise TypeError("a and b must be bytes.") + + return hmac.compare_digest(a, b) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hashes.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hashes.py new file mode 100644 index 0000000..cc0771d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hashes.py @@ -0,0 +1,259 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, +) + + +class HashAlgorithm(metaclass=abc.ABCMeta): + @abc.abstractproperty + def name(self) -> str: + """ + A string naming this algorithm (e.g. "sha256", "md5"). + """ + + @abc.abstractproperty + def digest_size(self) -> int: + """ + The size of the resulting digest in bytes. + """ + + @abc.abstractproperty + def block_size(self) -> typing.Optional[int]: + """ + The internal block size of the hash function, or None if the hash + function does not use blocks internally (e.g. SHA3). 
+ """ + + +class HashContext(metaclass=abc.ABCMeta): + @abc.abstractproperty + def algorithm(self) -> HashAlgorithm: + """ + A HashAlgorithm that will be used by this context. + """ + + @abc.abstractmethod + def update(self, data: bytes) -> None: + """ + Processes the provided bytes through the hash. + """ + + @abc.abstractmethod + def finalize(self) -> bytes: + """ + Finalizes the hash context and returns the hash digest as bytes. + """ + + @abc.abstractmethod + def copy(self) -> "HashContext": + """ + Return a HashContext that is a copy of the current context. + """ + + +class ExtendableOutputFunction(metaclass=abc.ABCMeta): + """ + An interface for extendable output functions. + """ + + +class Hash(HashContext): + _ctx: typing.Optional[HashContext] + + def __init__( + self, + algorithm: HashAlgorithm, + backend: typing.Any = None, + ctx: typing.Optional["HashContext"] = None, + ): + if not isinstance(algorithm, HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + if ctx is None: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + self._ctx = ossl.create_hash_ctx(self.algorithm) + else: + self._ctx = ctx + + @property + def algorithm(self) -> HashAlgorithm: + return self._algorithm + + def update(self, data: bytes) -> None: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + utils._check_byteslike("data", data) + self._ctx.update(data) + + def copy(self) -> "Hash": + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return Hash(self.algorithm, ctx=self._ctx.copy()) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + +class SHA1(HashAlgorithm): + name = "sha1" + digest_size = 20 + block_size = 64 + + +class SHA512_224(HashAlgorithm): # noqa: N801 + name = "sha512-224" + digest_size = 28 + block_size = 128 + + +class SHA512_256(HashAlgorithm): # noqa: N801 + name = "sha512-256" + digest_size = 32 + block_size = 128 + + +class SHA224(HashAlgorithm): + name = "sha224" + digest_size = 28 + block_size = 64 + + +class SHA256(HashAlgorithm): + name = "sha256" + digest_size = 32 + block_size = 64 + + +class SHA384(HashAlgorithm): + name = "sha384" + digest_size = 48 + block_size = 128 + + +class SHA512(HashAlgorithm): + name = "sha512" + digest_size = 64 + block_size = 128 + + +class SHA3_224(HashAlgorithm): # noqa: N801 + name = "sha3-224" + digest_size = 28 + block_size = None + + +class SHA3_256(HashAlgorithm): # noqa: N801 + name = "sha3-256" + digest_size = 32 + block_size = None + + +class SHA3_384(HashAlgorithm): # noqa: N801 + name = "sha3-384" + digest_size = 48 + block_size = None + + +class SHA3_512(HashAlgorithm): # noqa: N801 + name = "sha3-512" + digest_size = 64 + block_size = None + + +class SHAKE128(HashAlgorithm, ExtendableOutputFunction): + name = "shake128" + block_size = None + + def __init__(self, digest_size: int): + if not isinstance(digest_size, int): + raise TypeError("digest_size must be an integer") + + if digest_size < 1: + raise ValueError("digest_size must be a positive integer") + + self._digest_size = digest_size + + @property + def digest_size(self) -> int: + return self._digest_size + + +class SHAKE256(HashAlgorithm, ExtendableOutputFunction): + name = "shake256" + block_size = None + + def __init__(self, digest_size: int): + if not isinstance(digest_size, int): + raise 
TypeError("digest_size must be an integer") + + if digest_size < 1: + raise ValueError("digest_size must be a positive integer") + + self._digest_size = digest_size + + @property + def digest_size(self) -> int: + return self._digest_size + + +class MD5(HashAlgorithm): + name = "md5" + digest_size = 16 + block_size = 64 + + +class BLAKE2b(HashAlgorithm): + name = "blake2b" + _max_digest_size = 64 + _min_digest_size = 1 + block_size = 128 + + def __init__(self, digest_size: int): + + if digest_size != 64: + raise ValueError("Digest size must be 64") + + self._digest_size = digest_size + + @property + def digest_size(self) -> int: + return self._digest_size + + +class BLAKE2s(HashAlgorithm): + name = "blake2s" + block_size = 64 + _max_digest_size = 32 + _min_digest_size = 1 + + def __init__(self, digest_size: int): + + if digest_size != 32: + raise ValueError("Digest size must be 32") + + self._digest_size = digest_size + + @property + def digest_size(self) -> int: + return self._digest_size + + +class SM3(HashAlgorithm): + name = "sm3" + digest_size = 32 + block_size = 64 diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hmac.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hmac.py new file mode 100644 index 0000000..1577326 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/hmac.py @@ -0,0 +1,72 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, +) +from cryptography.hazmat.backends.openssl.hmac import _HMACContext +from cryptography.hazmat.primitives import hashes + + +class HMAC(hashes.HashContext): + _ctx: typing.Optional[_HMACContext] + + def __init__( + self, + key: bytes, + algorithm: hashes.HashAlgorithm, + backend: typing.Any = None, + ctx=None, + ): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + self._key = key + if ctx is None: + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + self._ctx = ossl.create_hmac_ctx(key, self.algorithm) + else: + self._ctx = ctx + + @property + def algorithm(self) -> hashes.HashAlgorithm: + return self._algorithm + + def update(self, data: bytes) -> None: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + utils._check_byteslike("data", data) + self._ctx.update(data) + + def copy(self) -> "HMAC": + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return HMAC( + self._key, + self.algorithm, + ctx=self._ctx.copy(), + ) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + def verify(self, signature: bytes) -> None: + utils._check_bytes("signature", signature) + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(signature) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/__init__.py new file mode 100644 index 0000000..38e2f8b --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/__init__.py @@ -0,0 +1,22 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc + + +class KeyDerivationFunction(metaclass=abc.ABCMeta): + @abc.abstractmethod + def derive(self, key_material: bytes) -> bytes: + """ + Deterministically generates and returns a new key based on the existing + key material. + """ + + @abc.abstractmethod + def verify(self, key_material: bytes, expected_key: bytes) -> None: + """ + Checks whether the key generated by the key material matches the + expected derived key. Raises an exception if they do not match. + """ diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py new file mode 100644 index 0000000..0b0262e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py @@ -0,0 +1,130 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, +) +from cryptography.hazmat.primitives import constant_time, hashes, hmac +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +def _int_to_u32be(n: int) -> bytes: + return n.to_bytes(length=4, byteorder="big") + + +def _common_args_checks( + algorithm: hashes.HashAlgorithm, + length: int, + otherinfo: typing.Optional[bytes], +) -> None: + max_length = algorithm.digest_size * (2**32 - 1) + if length > max_length: + raise ValueError( + "Cannot derive keys larger than {} bits.".format(max_length) + ) + if otherinfo is not None: + utils._check_bytes("otherinfo", otherinfo) + + +def _concatkdf_derive( + key_material: bytes, + length: int, + auxfn: typing.Callable[[], hashes.HashContext], + otherinfo: bytes, +) -> bytes: + utils._check_byteslike("key_material", key_material) + output = [b""] + outlen = 0 + counter = 1 + + while length > outlen: + h = auxfn() + h.update(_int_to_u32be(counter)) + h.update(key_material) + h.update(otherinfo) + output.append(h.finalize()) + outlen += len(output[-1]) + counter += 1 + + return b"".join(output)[:length] + + +class ConcatKDFHash(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + otherinfo: typing.Optional[bytes], + backend: typing.Any = None, + ): + _common_args_checks(algorithm, length, otherinfo) + self._algorithm = algorithm + self._length = length + self._otherinfo: bytes = otherinfo if otherinfo is not None else b"" + + self._used = False + + def _hash(self) -> hashes.Hash: + return hashes.Hash(self._algorithm) + + def derive(self, key_material: bytes) -> bytes: + if self._used: + raise AlreadyFinalized + self._used = True + return _concatkdf_derive( + key_material, self._length, self._hash, self._otherinfo + ) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey + + +class ConcatKDFHMAC(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + salt: typing.Optional[bytes], + otherinfo: typing.Optional[bytes], + backend: typing.Any = 
None, + ): + _common_args_checks(algorithm, length, otherinfo) + self._algorithm = algorithm + self._length = length + self._otherinfo: bytes = otherinfo if otherinfo is not None else b"" + + if algorithm.block_size is None: + raise TypeError( + "{} is unsupported for ConcatKDF".format(algorithm.name) + ) + + if salt is None: + salt = b"\x00" * algorithm.block_size + else: + utils._check_bytes("salt", salt) + + self._salt = salt + + self._used = False + + def _hmac(self) -> hmac.HMAC: + return hmac.HMAC(self._salt, self._algorithm) + + def derive(self, key_material: bytes) -> bytes: + if self._used: + raise AlreadyFinalized + self._used = True + return _concatkdf_derive( + key_material, self._length, self._hmac, self._otherinfo + ) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/hkdf.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/hkdf.py new file mode 100644 index 0000000..44889b6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/hkdf.py @@ -0,0 +1,103 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, +) +from cryptography.hazmat.primitives import constant_time, hashes, hmac +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +class HKDF(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + salt: typing.Optional[bytes], + info: typing.Optional[bytes], + backend: typing.Any = None, + ): + self._algorithm = algorithm + + if salt is None: + salt = b"\x00" * self._algorithm.digest_size + else: + utils._check_bytes("salt", salt) + + self._salt = salt + + self._hkdf_expand = HKDFExpand(self._algorithm, length, info) + + def _extract(self, key_material: bytes) -> bytes: + h = hmac.HMAC(self._salt, self._algorithm) + h.update(key_material) + return h.finalize() + + def derive(self, key_material: bytes) -> bytes: + utils._check_byteslike("key_material", key_material) + return self._hkdf_expand.derive(self._extract(key_material)) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey + + +class HKDFExpand(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + info: typing.Optional[bytes], + backend: typing.Any = None, + ): + self._algorithm = algorithm + + max_length = 255 * algorithm.digest_size + + if length > max_length: + raise ValueError( + "Cannot derive keys larger than {} octets.".format(max_length) + ) + + self._length = length + + if info is None: + info = b"" + else: + utils._check_bytes("info", info) + + self._info = info + + self._used = False + + def _expand(self, key_material: bytes) -> bytes: + output = [b""] + counter = 1 + + while self._algorithm.digest_size * (len(output) - 1) < self._length: + h = hmac.HMAC(key_material, self._algorithm) + h.update(output[-1]) + h.update(self._info) + h.update(bytes([counter])) + output.append(h.finalize()) + counter += 1 + + return b"".join(output)[: self._length] + + def derive(self, key_material: bytes) -> 
bytes: + utils._check_byteslike("key_material", key_material) + if self._used: + raise AlreadyFinalized + + self._used = True + return self._expand(key_material) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/kbkdf.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/kbkdf.py new file mode 100644 index 0000000..d365130 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/kbkdf.py @@ -0,0 +1,258 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.primitives import ( + ciphers, + cmac, + constant_time, + hashes, + hmac, +) +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +class Mode(utils.Enum): + CounterMode = "ctr" + + +class CounterLocation(utils.Enum): + BeforeFixed = "before_fixed" + AfterFixed = "after_fixed" + + +class _KBKDFDeriver: + def __init__( + self, + prf: typing.Callable, + mode: Mode, + length: int, + rlen: int, + llen: typing.Optional[int], + location: CounterLocation, + label: typing.Optional[bytes], + context: typing.Optional[bytes], + fixed: typing.Optional[bytes], + ): + assert callable(prf) + + if not isinstance(mode, Mode): + raise TypeError("mode must be of type Mode") + + if not isinstance(location, CounterLocation): + raise TypeError("location must be of type CounterLocation") + + if (label or context) and fixed: + raise ValueError( + "When supplying fixed data, " "label and context are ignored." + ) + + if rlen is None or not self._valid_byte_length(rlen): + raise ValueError("rlen must be between 1 and 4") + + if llen is None and fixed is None: + raise ValueError("Please specify an llen") + + if llen is not None and not isinstance(llen, int): + raise TypeError("llen must be an integer") + + if label is None: + label = b"" + + if context is None: + context = b"" + + utils._check_bytes("label", label) + utils._check_bytes("context", context) + self._prf = prf + self._mode = mode + self._length = length + self._rlen = rlen + self._llen = llen + self._location = location + self._label = label + self._context = context + self._used = False + self._fixed_data = fixed + + @staticmethod + def _valid_byte_length(value: int) -> bool: + if not isinstance(value, int): + raise TypeError("value must be of type int") + + value_bin = utils.int_to_bytes(1, value) + if not 1 <= len(value_bin) <= 4: + return False + return True + + def derive(self, key_material: bytes, prf_output_size: int) -> bytes: + if self._used: + raise AlreadyFinalized + + utils._check_byteslike("key_material", key_material) + self._used = True + + # inverse floor division (equivalent to ceiling) + rounds = -(-self._length // prf_output_size) + + output = [b""] + + # For counter mode, the number of iterations shall not be + # larger than 2^r-1, where r <= 32 is the binary length of the counter + # This ensures that the counter values used as an input to the + # PRF will not repeat during a particular call to the KDF function. 
+ r_bin = utils.int_to_bytes(1, self._rlen) + if rounds > pow(2, len(r_bin) * 8) - 1: + raise ValueError("There are too many iterations.") + + for i in range(1, rounds + 1): + h = self._prf(key_material) + + counter = utils.int_to_bytes(i, self._rlen) + if self._location == CounterLocation.BeforeFixed: + h.update(counter) + + h.update(self._generate_fixed_input()) + + if self._location == CounterLocation.AfterFixed: + h.update(counter) + + output.append(h.finalize()) + + return b"".join(output)[: self._length] + + def _generate_fixed_input(self) -> bytes: + if self._fixed_data and isinstance(self._fixed_data, bytes): + return self._fixed_data + + l_val = utils.int_to_bytes(self._length * 8, self._llen) + + return b"".join([self._label, b"\x00", self._context, l_val]) + + +class KBKDFHMAC(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + mode: Mode, + length: int, + rlen: int, + llen: typing.Optional[int], + location: CounterLocation, + label: typing.Optional[bytes], + context: typing.Optional[bytes], + fixed: typing.Optional[bytes], + backend: typing.Any = None, + ): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported hash algorithm.", + _Reasons.UNSUPPORTED_HASH, + ) + + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + if not ossl.hmac_supported(algorithm): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported hmac algorithm.", + _Reasons.UNSUPPORTED_HASH, + ) + + self._algorithm = algorithm + + self._deriver = _KBKDFDeriver( + self._prf, + mode, + length, + rlen, + llen, + location, + label, + context, + fixed, + ) + + def _prf(self, key_material: bytes) -> hmac.HMAC: + return hmac.HMAC(key_material, self._algorithm) + + def derive(self, key_material: bytes) -> bytes: + return self._deriver.derive(key_material, self._algorithm.digest_size) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey + + +class KBKDFCMAC(KeyDerivationFunction): + def __init__( + self, + algorithm, + mode: Mode, + length: int, + rlen: int, + llen: typing.Optional[int], + location: CounterLocation, + label: typing.Optional[bytes], + context: typing.Optional[bytes], + fixed: typing.Optional[bytes], + backend: typing.Any = None, + ): + if not issubclass( + algorithm, ciphers.BlockCipherAlgorithm + ) or not issubclass(algorithm, ciphers.CipherAlgorithm): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported cipher algorithm.", + _Reasons.UNSUPPORTED_CIPHER, + ) + + self._algorithm = algorithm + self._cipher: typing.Optional[ciphers.BlockCipherAlgorithm] = None + + self._deriver = _KBKDFDeriver( + self._prf, + mode, + length, + rlen, + llen, + location, + label, + context, + fixed, + ) + + def _prf(self, _: bytes) -> cmac.CMAC: + assert self._cipher is not None + + return cmac.CMAC(self._cipher) + + def derive(self, key_material: bytes) -> bytes: + self._cipher = self._algorithm(key_material) + + assert self._cipher is not None + + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + if not ossl.cmac_algorithm_supported(self._cipher): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported cipher algorithm.", + _Reasons.UNSUPPORTED_CIPHER, + ) + + return self._deriver.derive(key_material, self._cipher.block_size // 8) + + def verify(self, key_material: bytes, expected_key: bytes) -> 
None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/pbkdf2.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/pbkdf2.py new file mode 100644 index 0000000..8d23f8c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/pbkdf2.py @@ -0,0 +1,65 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.primitives import constant_time, hashes +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +class PBKDF2HMAC(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + salt: bytes, + iterations: int, + backend: typing.Any = None, + ): + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + if not ossl.pbkdf2_hmac_supported(algorithm): + raise UnsupportedAlgorithm( + "{} is not supported for PBKDF2 by this backend.".format( + algorithm.name + ), + _Reasons.UNSUPPORTED_HASH, + ) + self._used = False + self._algorithm = algorithm + self._length = length + utils._check_bytes("salt", salt) + self._salt = salt + self._iterations = iterations + + def derive(self, key_material: bytes) -> bytes: + if self._used: + raise AlreadyFinalized("PBKDF2 instances can only be used once.") + self._used = True + + utils._check_byteslike("key_material", key_material) + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.derive_pbkdf2_hmac( + self._algorithm, + self._length, + self._salt, + self._iterations, + key_material, + ) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + derived_key = self.derive(key_material) + if not constant_time.bytes_eq(derived_key, expected_key): + raise InvalidKey("Keys do not match.") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/scrypt.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/scrypt.py new file mode 100644 index 0000000..ff81bbb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/scrypt.py @@ -0,0 +1,74 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
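As a quick reference, a minimal usage sketch for the PBKDF2HMAC class above; the salt and iteration count below are illustrative placeholders, not recommendations:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

salt = b"\x00" * 16  # illustrative only; use os.urandom(16) in practice
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=390000)
key = kdf.derive(b"my great password")

# Instances are single-use (AlreadyFinalized on reuse), so build a new one to verify.
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=390000)
kdf.verify(b"my great password", key)  # raises InvalidKey on mismatch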
+ + +import sys +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, + UnsupportedAlgorithm, +) +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +# This is used by the scrypt tests to skip tests that require more memory +# than the MEM_LIMIT +_MEM_LIMIT = sys.maxsize // 2 + + +class Scrypt(KeyDerivationFunction): + def __init__( + self, + salt: bytes, + length: int, + n: int, + r: int, + p: int, + backend: typing.Any = None, + ): + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + if not ossl.scrypt_supported(): + raise UnsupportedAlgorithm( + "This version of OpenSSL does not support scrypt" + ) + self._length = length + utils._check_bytes("salt", salt) + if n < 2 or (n & (n - 1)) != 0: + raise ValueError("n must be greater than 1 and be a power of 2.") + + if r < 1: + raise ValueError("r must be greater than or equal to 1.") + + if p < 1: + raise ValueError("p must be greater than or equal to 1.") + + self._used = False + self._salt = salt + self._n = n + self._r = r + self._p = p + + def derive(self, key_material: bytes) -> bytes: + if self._used: + raise AlreadyFinalized("Scrypt instances can only be used once.") + self._used = True + + utils._check_byteslike("key_material", key_material) + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.derive_scrypt( + key_material, self._salt, self._length, self._n, self._r, self._p + ) + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + derived_key = self.derive(key_material) + if not constant_time.bytes_eq(derived_key, expected_key): + raise InvalidKey("Keys do not match.") diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/x963kdf.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/x963kdf.py new file mode 100644 index 0000000..aa6bcc1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/kdf/x963kdf.py @@ -0,0 +1,65 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
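Similarly, a minimal sketch for the Scrypt class above; the cost parameters n, r and p are illustrative:

import os
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt

salt = os.urandom(16)
kdf = Scrypt(salt=salt, length=32, n=2**14, r=8, p=1)  # n must be a power of 2 greater than 1
key = kdf.derive(b"my great password")

kdf = Scrypt(salt=salt, length=32, n=2**14, r=8, p=1)  # single-use, like the other KDFs
kdf.verify(b"my great password", key)  # raises InvalidKey on mismatch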
+ + +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + InvalidKey, +) +from cryptography.hazmat.primitives import constant_time, hashes +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +def _int_to_u32be(n: int) -> bytes: + return n.to_bytes(length=4, byteorder="big") + + +class X963KDF(KeyDerivationFunction): + def __init__( + self, + algorithm: hashes.HashAlgorithm, + length: int, + sharedinfo: typing.Optional[bytes], + backend: typing.Any = None, + ): + max_len = algorithm.digest_size * (2**32 - 1) + if length > max_len: + raise ValueError( + "Cannot derive keys larger than {} bits.".format(max_len) + ) + if sharedinfo is not None: + utils._check_bytes("sharedinfo", sharedinfo) + + self._algorithm = algorithm + self._length = length + self._sharedinfo = sharedinfo + self._used = False + + def derive(self, key_material: bytes) -> bytes: + if self._used: + raise AlreadyFinalized + self._used = True + utils._check_byteslike("key_material", key_material) + output = [b""] + outlen = 0 + counter = 1 + + while self._length > outlen: + h = hashes.Hash(self._algorithm) + h.update(key_material) + h.update(_int_to_u32be(counter)) + if self._sharedinfo is not None: + h.update(self._sharedinfo) + output.append(h.finalize()) + outlen += len(output[-1]) + counter += 1 + + return b"".join(output)[: self._length] + + def verify(self, key_material: bytes, expected_key: bytes) -> None: + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/keywrap.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/keywrap.py new file mode 100644 index 0000000..64771ca --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/keywrap.py @@ -0,0 +1,176 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
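A minimal sketch for the X963KDF class above; the sharedinfo value is illustrative context data that gets hashed into every counter block:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.x963kdf import X963KDF

sharedinfo = b"ANSI X9.63 Example"  # illustrative
xkdf = X963KDF(algorithm=hashes.SHA256(), length=32, sharedinfo=sharedinfo)
key = xkdf.derive(b"input shared secret")

xkdf = X963KDF(algorithm=hashes.SHA256(), length=32, sharedinfo=sharedinfo)
xkdf.verify(b"input shared secret", key)  # raises InvalidKey on mismatch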
+ + +import typing + +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import ECB +from cryptography.hazmat.primitives.constant_time import bytes_eq + + +def _wrap_core( + wrapping_key: bytes, + a: bytes, + r: typing.List[bytes], +) -> bytes: + # RFC 3394 Key Wrap - 2.2.1 (index method) + encryptor = Cipher(AES(wrapping_key), ECB()).encryptor() + n = len(r) + for j in range(6): + for i in range(n): + # every encryption operation is a discrete 16 byte chunk (because + # AES has a 128-bit block size) and since we're using ECB it is + # safe to reuse the encryptor for the entire operation + b = encryptor.update(a + r[i]) + a = ( + int.from_bytes(b[:8], byteorder="big") ^ ((n * j) + i + 1) + ).to_bytes(length=8, byteorder="big") + r[i] = b[-8:] + + assert encryptor.finalize() == b"" + + return a + b"".join(r) + + +def aes_key_wrap( + wrapping_key: bytes, + key_to_wrap: bytes, + backend: typing.Any = None, +) -> bytes: + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(key_to_wrap) < 16: + raise ValueError("The key to wrap must be at least 16 bytes") + + if len(key_to_wrap) % 8 != 0: + raise ValueError("The key to wrap must be a multiple of 8 bytes") + + a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [key_to_wrap[i : i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, a, r) + + +def _unwrap_core( + wrapping_key: bytes, + a: bytes, + r: typing.List[bytes], +) -> typing.Tuple[bytes, typing.List[bytes]]: + # Implement RFC 3394 Key Unwrap - 2.2.2 (index method) + decryptor = Cipher(AES(wrapping_key), ECB()).decryptor() + n = len(r) + for j in reversed(range(6)): + for i in reversed(range(n)): + atr = ( + int.from_bytes(a, byteorder="big") ^ ((n * j) + i + 1) + ).to_bytes(length=8, byteorder="big") + r[i] + # every decryption operation is a discrete 16 byte chunk so + # it is safe to reuse the decryptor for the entire operation + b = decryptor.update(atr) + a = b[:8] + r[i] = b[-8:] + + assert decryptor.finalize() == b"" + return a, r + + +def aes_key_wrap_with_padding( + wrapping_key: bytes, + key_to_wrap: bytes, + backend: typing.Any = None, +) -> bytes: + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xA6\x59\x59\xA6" + len(key_to_wrap).to_bytes( + length=4, byteorder="big" + ) + # pad the key to wrap if necessary + pad = (8 - (len(key_to_wrap) % 8)) % 8 + key_to_wrap = key_to_wrap + b"\x00" * pad + if len(key_to_wrap) == 8: + # RFC 5649 - 4.1 - exactly 8 octets after padding + encryptor = Cipher(AES(wrapping_key), ECB()).encryptor() + b = encryptor.update(aiv + key_to_wrap) + assert encryptor.finalize() == b"" + return b + else: + r = [key_to_wrap[i : i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, aiv, r) + + +def aes_key_unwrap_with_padding( + wrapping_key: bytes, + wrapped_key: bytes, + backend: typing.Any = None, +) -> bytes: + if len(wrapped_key) < 16: + raise InvalidUnwrap("Must be at least 16 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(wrapped_key) == 16: + # RFC 5649 - 4.2 - exactly two 64-bit blocks + decryptor = Cipher(AES(wrapping_key), ECB()).decryptor() + out = decryptor.update(wrapped_key) + assert decryptor.finalize() == b"" + a = out[:8] + data = out[8:] + 
n = 1 + else: + r = [wrapped_key[i : i + 8] for i in range(0, len(wrapped_key), 8)] + encrypted_aiv = r.pop(0) + n = len(r) + a, r = _unwrap_core(wrapping_key, encrypted_aiv, r) + data = b"".join(r) + + # 1) Check that MSB(32,A) = A65959A6. + # 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let + # MLI = LSB(32,A). + # 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of + # the output data are zero. + mli = int.from_bytes(a[4:], byteorder="big") + b = (8 * n) - mli + if ( + not bytes_eq(a[:4], b"\xa6\x59\x59\xa6") + or not 8 * (n - 1) < mli <= 8 * n + or (b != 0 and not bytes_eq(data[-b:], b"\x00" * b)) + ): + raise InvalidUnwrap() + + if b == 0: + return data + else: + return data[:-b] + + +def aes_key_unwrap( + wrapping_key: bytes, + wrapped_key: bytes, + backend: typing.Any = None, +) -> bytes: + if len(wrapped_key) < 24: + raise InvalidUnwrap("Must be at least 24 bytes") + + if len(wrapped_key) % 8 != 0: + raise InvalidUnwrap("The wrapped key must be a multiple of 8 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [wrapped_key[i : i + 8] for i in range(0, len(wrapped_key), 8)] + a = r.pop(0) + a, r = _unwrap_core(wrapping_key, a, r) + if not bytes_eq(a, aiv): + raise InvalidUnwrap() + + return b"".join(r) + + +class InvalidUnwrap(Exception): + pass diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/padding.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/padding.py new file mode 100644 index 0000000..d6c1d91 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/padding.py @@ -0,0 +1,224 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import typing + +from cryptography import utils +from cryptography.exceptions import AlreadyFinalized +from cryptography.hazmat.bindings._rust import ( + check_ansix923_padding, + check_pkcs7_padding, +) + + +class PaddingContext(metaclass=abc.ABCMeta): + @abc.abstractmethod + def update(self, data: bytes) -> bytes: + """ + Pads the provided bytes and returns any available data as bytes. + """ + + @abc.abstractmethod + def finalize(self) -> bytes: + """ + Finalize the padding, returns bytes. 
+ """ + + +def _byte_padding_check(block_size: int) -> None: + if not (0 <= block_size <= 2040): + raise ValueError("block_size must be in range(0, 2041).") + + if block_size % 8 != 0: + raise ValueError("block_size must be a multiple of 8.") + + +def _byte_padding_update( + buffer_: typing.Optional[bytes], data: bytes, block_size: int +) -> typing.Tuple[bytes, bytes]: + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + utils._check_byteslike("data", data) + + buffer_ += bytes(data) + + finished_blocks = len(buffer_) // (block_size // 8) + + result = buffer_[: finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8) :] + + return buffer_, result + + +def _byte_padding_pad( + buffer_: typing.Optional[bytes], + block_size: int, + paddingfn: typing.Callable[[int], bytes], +) -> bytes: + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + pad_size = block_size // 8 - len(buffer_) + return buffer_ + paddingfn(pad_size) + + +def _byte_unpadding_update( + buffer_: typing.Optional[bytes], data: bytes, block_size: int +) -> typing.Tuple[bytes, bytes]: + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + utils._check_byteslike("data", data) + + buffer_ += bytes(data) + + finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0) + + result = buffer_[: finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8) :] + + return buffer_, result + + +def _byte_unpadding_check( + buffer_: typing.Optional[bytes], + block_size: int, + checkfn: typing.Callable[[bytes], int], +) -> bytes: + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if len(buffer_) != block_size // 8: + raise ValueError("Invalid padding bytes.") + + valid = checkfn(buffer_) + + if not valid: + raise ValueError("Invalid padding bytes.") + + pad_size = buffer_[-1] + return buffer_[:-pad_size] + + +class PKCS7: + def __init__(self, block_size: int): + _byte_padding_check(block_size) + self.block_size = block_size + + def padder(self) -> PaddingContext: + return _PKCS7PaddingContext(self.block_size) + + def unpadder(self) -> PaddingContext: + return _PKCS7UnpaddingContext(self.block_size) + + +class _PKCS7PaddingContext(PaddingContext): + _buffer: typing.Optional[bytes] + + def __init__(self, block_size: int): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data: bytes) -> bytes: + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size + ) + return result + + def _padding(self, size: int) -> bytes: + return bytes([size]) * size + + def finalize(self) -> bytes: + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding + ) + self._buffer = None + return result + + +class _PKCS7UnpaddingContext(PaddingContext): + _buffer: typing.Optional[bytes] + + def __init__(self, block_size: int): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data: bytes) -> bytes: + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size + ) + return result + + def finalize(self) -> bytes: + result = _byte_unpadding_check( + self._buffer, self.block_size, check_pkcs7_padding + ) + self._buffer = None + return result + + +class ANSIX923: + def __init__(self, block_size: int): + _byte_padding_check(block_size) 
+ self.block_size = block_size + + def padder(self) -> PaddingContext: + return _ANSIX923PaddingContext(self.block_size) + + def unpadder(self) -> PaddingContext: + return _ANSIX923UnpaddingContext(self.block_size) + + +class _ANSIX923PaddingContext(PaddingContext): + _buffer: typing.Optional[bytes] + + def __init__(self, block_size: int): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data: bytes) -> bytes: + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size + ) + return result + + def _padding(self, size: int) -> bytes: + return bytes([0]) * (size - 1) + bytes([size]) + + def finalize(self) -> bytes: + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding + ) + self._buffer = None + return result + + +class _ANSIX923UnpaddingContext(PaddingContext): + _buffer: typing.Optional[bytes] + + def __init__(self, block_size: int): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data: bytes) -> bytes: + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size + ) + return result + + def finalize(self) -> bytes: + result = _byte_unpadding_check( + self._buffer, + self.block_size, + check_ansix923_padding, + ) + self._buffer = None + return result diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/poly1305.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/poly1305.py new file mode 100644 index 0000000..7fcf4a5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/poly1305.py @@ -0,0 +1,60 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
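A minimal sketch for the PKCS7 padder/unpadder pair above; note that block_size is given in bits and that update/finalize can be called incrementally:

from cryptography.hazmat.primitives import padding

padder = padding.PKCS7(128).padder()  # 128-bit blocks, i.e. 16 bytes
padded = padder.update(b"incomplete block") + padder.finalize()
assert len(padded) % 16 == 0

unpadder = padding.PKCS7(128).unpadder()
data = unpadder.update(padded) + unpadder.finalize()
assert data == b"incomplete block"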
+ +import typing + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, + UnsupportedAlgorithm, + _Reasons, +) +from cryptography.hazmat.backends.openssl.poly1305 import _Poly1305Context + + +class Poly1305: + _ctx: typing.Optional[_Poly1305Context] + + def __init__(self, key: bytes): + from cryptography.hazmat.backends.openssl.backend import backend + + if not backend.poly1305_supported(): + raise UnsupportedAlgorithm( + "poly1305 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_MAC, + ) + self._ctx = backend.create_poly1305_ctx(key) + + def update(self, data: bytes) -> None: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + utils._check_byteslike("data", data) + self._ctx.update(data) + + def finalize(self) -> bytes: + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + mac = self._ctx.finalize() + self._ctx = None + return mac + + def verify(self, tag: bytes) -> None: + utils._check_bytes("tag", tag) + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(tag) + + @classmethod + def generate_tag(cls, key: bytes, data: bytes) -> bytes: + p = Poly1305(key) + p.update(data) + return p.finalize() + + @classmethod + def verify_tag(cls, key: bytes, data: bytes, tag: bytes) -> None: + p = Poly1305(key) + p.update(data) + p.verify(tag) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/__init__.py new file mode 100644 index 0000000..1e0174b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/__init__.py @@ -0,0 +1,45 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography.hazmat.primitives._serialization import ( + BestAvailableEncryption, + Encoding, + KeySerializationEncryption, + NoEncryption, + ParameterFormat, + PrivateFormat, + PublicFormat, +) +from cryptography.hazmat.primitives.serialization.base import ( + load_der_parameters, + load_der_private_key, + load_der_public_key, + load_pem_parameters, + load_pem_private_key, + load_pem_public_key, +) +from cryptography.hazmat.primitives.serialization.ssh import ( + load_ssh_private_key, + load_ssh_public_key, +) + + +__all__ = [ + "load_der_parameters", + "load_der_private_key", + "load_der_public_key", + "load_pem_parameters", + "load_pem_private_key", + "load_pem_public_key", + "load_ssh_private_key", + "load_ssh_public_key", + "Encoding", + "PrivateFormat", + "PublicFormat", + "ParameterFormat", + "KeySerializationEncryption", + "BestAvailableEncryption", + "NoEncryption", +] diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/base.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/base.py new file mode 100644 index 0000000..059b6e4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/base.py @@ -0,0 +1,64 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
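A minimal sketch for the Poly1305 class above; the constant key is for illustration only, since Poly1305 keys must be random and used for at most one message:

from cryptography.hazmat.primitives import poly1305

key = b"\x01" * 32  # illustration only: real keys must be random and single-use
tag = poly1305.Poly1305.generate_tag(key, b"message to authenticate")
poly1305.Poly1305.verify_tag(key, b"message to authenticate", tag)
# verify_tag raises cryptography.exceptions.InvalidSignature on mismatch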
+ + +import typing + +from cryptography.hazmat.primitives.asymmetric import dh +from cryptography.hazmat.primitives.asymmetric.types import ( + PRIVATE_KEY_TYPES, + PUBLIC_KEY_TYPES, +) + + +def load_pem_private_key( + data: bytes, + password: typing.Optional[bytes], + backend: typing.Any = None, +) -> PRIVATE_KEY_TYPES: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_pem_private_key(data, password) + + +def load_pem_public_key( + data: bytes, backend: typing.Any = None +) -> PUBLIC_KEY_TYPES: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_pem_public_key(data) + + +def load_pem_parameters( + data: bytes, backend: typing.Any = None +) -> "dh.DHParameters": + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_pem_parameters(data) + + +def load_der_private_key( + data: bytes, + password: typing.Optional[bytes], + backend: typing.Any = None, +) -> PRIVATE_KEY_TYPES: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_der_private_key(data, password) + + +def load_der_public_key( + data: bytes, backend: typing.Any = None +) -> PUBLIC_KEY_TYPES: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_der_public_key(data) + + +def load_der_parameters( + data: bytes, backend: typing.Any = None +) -> "dh.DHParameters": + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_der_parameters(data) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs12.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs12.py new file mode 100644 index 0000000..791befd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs12.py @@ -0,0 +1,219 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
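A minimal sketch for the load_pem_private_key helper above; the file path is hypothetical, and password=None assumes an unencrypted key:

from cryptography.hazmat.primitives import serialization

with open("key.pem", "rb") as f:  # hypothetical path
    pem_bytes = f.read()

private_key = serialization.load_pem_private_key(pem_bytes, password=None)
public_pem = private_key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
)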
+ +import typing + +from cryptography import x509 +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import ( + dsa, + ec, + ed25519, + ed448, + rsa, +) +from cryptography.hazmat.primitives.asymmetric.types import ( + PRIVATE_KEY_TYPES, +) + + +_ALLOWED_PKCS12_TYPES = typing.Union[ + rsa.RSAPrivateKey, + dsa.DSAPrivateKey, + ec.EllipticCurvePrivateKey, + ed25519.Ed25519PrivateKey, + ed448.Ed448PrivateKey, +] + + +class PKCS12Certificate: + def __init__( + self, + cert: x509.Certificate, + friendly_name: typing.Optional[bytes], + ): + if not isinstance(cert, x509.Certificate): + raise TypeError("Expecting x509.Certificate object") + if friendly_name is not None and not isinstance(friendly_name, bytes): + raise TypeError("friendly_name must be bytes or None") + self._cert = cert + self._friendly_name = friendly_name + + @property + def friendly_name(self) -> typing.Optional[bytes]: + return self._friendly_name + + @property + def certificate(self) -> x509.Certificate: + return self._cert + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PKCS12Certificate): + return NotImplemented + + return ( + self.certificate == other.certificate + and self.friendly_name == other.friendly_name + ) + + def __hash__(self) -> int: + return hash((self.certificate, self.friendly_name)) + + def __repr__(self) -> str: + return "<PKCS12Certificate({}, friendly_name={!r})>".format( + self.certificate, self.friendly_name + ) + + +class PKCS12KeyAndCertificates: + def __init__( + self, + key: typing.Optional[PRIVATE_KEY_TYPES], + cert: typing.Optional[PKCS12Certificate], + additional_certs: typing.List[PKCS12Certificate], + ): + if key is not None and not isinstance( + key, + ( + rsa.RSAPrivateKey, + dsa.DSAPrivateKey, + ec.EllipticCurvePrivateKey, + ed25519.Ed25519PrivateKey, + ed448.Ed448PrivateKey, + ), + ): + raise TypeError( + "Key must be RSA, DSA, EllipticCurve, ED25519, or ED448" + " private key, or None." 
+ ) + if cert is not None and not isinstance(cert, PKCS12Certificate): + raise TypeError("cert must be a PKCS12Certificate object or None") + if not all( + isinstance(add_cert, PKCS12Certificate) + for add_cert in additional_certs + ): + raise TypeError( + "all values in additional_certs must be PKCS12Certificate" + " objects" + ) + self._key = key + self._cert = cert + self._additional_certs = additional_certs + + @property + def key(self) -> typing.Optional[PRIVATE_KEY_TYPES]: + return self._key + + @property + def cert(self) -> typing.Optional[PKCS12Certificate]: + return self._cert + + @property + def additional_certs(self) -> typing.List[PKCS12Certificate]: + return self._additional_certs + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PKCS12KeyAndCertificates): + return NotImplemented + + return ( + self.key == other.key + and self.cert == other.cert + and self.additional_certs == other.additional_certs + ) + + def __hash__(self) -> int: + return hash((self.key, self.cert, tuple(self.additional_certs))) + + def __repr__(self) -> str: + fmt = ( + "<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>" + ) + return fmt.format(self.key, self.cert, self.additional_certs) + + +def load_key_and_certificates( + data: bytes, + password: typing.Optional[bytes], + backend: typing.Any = None, +) -> typing.Tuple[ + typing.Optional[PRIVATE_KEY_TYPES], + typing.Optional[x509.Certificate], + typing.List[x509.Certificate], +]: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_key_and_certificates_from_pkcs12(data, password) + + +def load_pkcs12( + data: bytes, + password: typing.Optional[bytes], + backend: typing.Any = None, +) -> PKCS12KeyAndCertificates: + from cryptography.hazmat.backends.openssl.backend import backend as ossl + + return ossl.load_pkcs12(data, password) + + +_PKCS12_CAS_TYPES = typing.Union[ + x509.Certificate, + PKCS12Certificate, +] + + +def serialize_key_and_certificates( + name: typing.Optional[bytes], + key: typing.Optional[_ALLOWED_PKCS12_TYPES], + cert: typing.Optional[x509.Certificate], + cas: typing.Optional[typing.Iterable[_PKCS12_CAS_TYPES]], + encryption_algorithm: serialization.KeySerializationEncryption, +) -> bytes: + if key is not None and not isinstance( + key, + ( + rsa.RSAPrivateKey, + dsa.DSAPrivateKey, + ec.EllipticCurvePrivateKey, + ed25519.Ed25519PrivateKey, + ed448.Ed448PrivateKey, + ), + ): + raise TypeError( + "Key must be RSA, DSA, EllipticCurve, ED25519, or ED448" + " private key, or None." 
+ ) + if cert is not None and not isinstance(cert, x509.Certificate): + raise TypeError("cert must be a certificate or None") + + if cas is not None: + cas = list(cas) + if not all( + isinstance( + val, + ( + x509.Certificate, + PKCS12Certificate, + ), + ) + for val in cas + ): + raise TypeError("all values in cas must be certificates") + + if not isinstance( + encryption_algorithm, serialization.KeySerializationEncryption + ): + raise TypeError( + "Key encryption algorithm must be a " + "KeySerializationEncryption instance" + ) + + if key is None and cert is None and not cas: + raise ValueError("You must supply at least one of key, cert, or cas") + + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.serialize_key_and_certificates_to_pkcs12( + name, key, cert, cas, encryption_algorithm + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py new file mode 100644 index 0000000..6641416 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py @@ -0,0 +1,180 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import typing + +from cryptography import utils +from cryptography import x509 +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ec, rsa +from cryptography.utils import _check_byteslike + + +def load_pem_pkcs7_certificates(data: bytes) -> typing.List[x509.Certificate]: + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.load_pem_pkcs7_certificates(data) + + +def load_der_pkcs7_certificates(data: bytes) -> typing.List[x509.Certificate]: + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.load_der_pkcs7_certificates(data) + + +def serialize_certificates( + certs: typing.List[x509.Certificate], + encoding: serialization.Encoding, +) -> bytes: + from cryptography.hazmat.backends.openssl.backend import backend + + return backend.pkcs7_serialize_certificates(certs, encoding) + + +_ALLOWED_PKCS7_HASH_TYPES = typing.Union[ + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, +] + +_ALLOWED_PRIVATE_KEY_TYPES = typing.Union[ + rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey +] + + +class PKCS7Options(utils.Enum): + Text = "Add text/plain MIME type" + Binary = "Don't translate input data into canonical MIME format" + DetachedSignature = "Don't embed data in the PKCS7 structure" + NoCapabilities = "Don't embed SMIME capabilities" + NoAttributes = "Don't embed authenticatedAttributes" + NoCerts = "Don't embed signer certificate" + + +class PKCS7SignatureBuilder: + def __init__( + self, + data: typing.Optional[bytes] = None, + signers: typing.List[ + typing.Tuple[ + x509.Certificate, + _ALLOWED_PRIVATE_KEY_TYPES, + _ALLOWED_PKCS7_HASH_TYPES, + ] + ] = [], + additional_certs: typing.List[x509.Certificate] = [], + ): + self._data = data + self._signers = signers + self._additional_certs = additional_certs + + def set_data(self, data: bytes) -> "PKCS7SignatureBuilder": + _check_byteslike("data", data) + if self._data is not None: + raise ValueError("data may only be set once") + + return PKCS7SignatureBuilder(data, self._signers) + + def add_signer( + self, + certificate: 
x509.Certificate, + private_key: _ALLOWED_PRIVATE_KEY_TYPES, + hash_algorithm: _ALLOWED_PKCS7_HASH_TYPES, + ) -> "PKCS7SignatureBuilder": + if not isinstance( + hash_algorithm, + ( + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, + ), + ): + raise TypeError( + "hash_algorithm must be one of hashes.SHA1, SHA224, " + "SHA256, SHA384, or SHA512" + ) + if not isinstance(certificate, x509.Certificate): + raise TypeError("certificate must be a x509.Certificate") + + if not isinstance( + private_key, (rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey) + ): + raise TypeError("Only RSA & EC keys are supported at this time.") + + return PKCS7SignatureBuilder( + self._data, + self._signers + [(certificate, private_key, hash_algorithm)], + ) + + def add_certificate( + self, certificate: x509.Certificate + ) -> "PKCS7SignatureBuilder": + if not isinstance(certificate, x509.Certificate): + raise TypeError("certificate must be a x509.Certificate") + + return PKCS7SignatureBuilder( + self._data, self._signers, self._additional_certs + [certificate] + ) + + def sign( + self, + encoding: serialization.Encoding, + options: typing.Iterable[PKCS7Options], + backend: typing.Any = None, + ) -> bytes: + if len(self._signers) == 0: + raise ValueError("Must have at least one signer") + if self._data is None: + raise ValueError("You must add data to sign") + options = list(options) + if not all(isinstance(x, PKCS7Options) for x in options): + raise ValueError("options must be from the PKCS7Options enum") + if encoding not in ( + serialization.Encoding.PEM, + serialization.Encoding.DER, + serialization.Encoding.SMIME, + ): + raise ValueError( + "Must be PEM, DER, or SMIME from the Encoding enum" + ) + + # Text is a meaningless option unless it is accompanied by + # DetachedSignature + if ( + PKCS7Options.Text in options + and PKCS7Options.DetachedSignature not in options + ): + raise ValueError( + "When passing the Text option you must also pass " + "DetachedSignature" + ) + + if PKCS7Options.Text in options and encoding in ( + serialization.Encoding.DER, + serialization.Encoding.PEM, + ): + raise ValueError( + "The Text option is only available for SMIME serialization" + ) + + # No attributes implies no capabilities so we'll error if you try to + # pass both. + if ( + PKCS7Options.NoAttributes in options + and PKCS7Options.NoCapabilities in options + ): + raise ValueError( + "NoAttributes is a superset of NoCapabilities. Do not pass " + "both values." + ) + + from cryptography.hazmat.backends.openssl.backend import ( + backend as ossl, + ) + + return ossl.pkcs7_sign(self, encoding, options) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/ssh.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/ssh.py new file mode 100644 index 0000000..6649db5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/serialization/ssh.py @@ -0,0 +1,757 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
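A minimal end-to-end sketch for PKCS7SignatureBuilder above; it builds a throwaway self-signed certificate so the snippet is self-contained, and every name and value is illustrative:

import datetime
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import pkcs7

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.utcnow())
    .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
    .sign(key, hashes.SHA256())
)

# Per the validation in sign() above, the Text option would also require
# DetachedSignature; DetachedSignature alone is fine.
signature = (
    pkcs7.PKCS7SignatureBuilder()
    .set_data(b"data to sign")
    .add_signer(cert, key, hashes.SHA256())
    .sign(serialization.Encoding.SMIME, [pkcs7.PKCS7Options.DetachedSignature])
)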
+ + +import binascii +import os +import re +import typing +from base64 import encodebytes as _base64_encode + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm +from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, rsa +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, +) + +try: + from bcrypt import kdf as _bcrypt_kdf + + _bcrypt_supported = True +except ImportError: + _bcrypt_supported = False + + def _bcrypt_kdf( + password: bytes, + salt: bytes, + desired_key_bytes: int, + rounds: int, + ignore_few_rounds: bool = False, + ) -> bytes: + raise UnsupportedAlgorithm("Need bcrypt module") + + +_SSH_ED25519 = b"ssh-ed25519" +_SSH_RSA = b"ssh-rsa" +_SSH_DSA = b"ssh-dss" +_ECDSA_NISTP256 = b"ecdsa-sha2-nistp256" +_ECDSA_NISTP384 = b"ecdsa-sha2-nistp384" +_ECDSA_NISTP521 = b"ecdsa-sha2-nistp521" +_CERT_SUFFIX = b"-cert-v01@openssh.com" + +_SSH_PUBKEY_RC = re.compile(rb"\A(\S+)[ \t]+(\S+)") +_SK_MAGIC = b"openssh-key-v1\0" +_SK_START = b"-----BEGIN OPENSSH PRIVATE KEY-----" +_SK_END = b"-----END OPENSSH PRIVATE KEY-----" +_BCRYPT = b"bcrypt" +_NONE = b"none" +_DEFAULT_CIPHER = b"aes256-ctr" +_DEFAULT_ROUNDS = 16 +_MAX_PASSWORD = 72 + +# re is only way to work on bytes-like data +_PEM_RC = re.compile(_SK_START + b"(.*?)" + _SK_END, re.DOTALL) + +# padding for max blocksize +_PADDING = memoryview(bytearray(range(1, 1 + 16))) + +# ciphers that are actually used in key wrapping +_SSH_CIPHERS: typing.Dict[ + bytes, + typing.Tuple[ + typing.Type[algorithms.AES], + int, + typing.Union[typing.Type[modes.CTR], typing.Type[modes.CBC]], + int, + ], +] = { + b"aes256-ctr": (algorithms.AES, 32, modes.CTR, 16), + b"aes256-cbc": (algorithms.AES, 32, modes.CBC, 16), +} + +# map local curve name to key type +_ECDSA_KEY_TYPE = { + "secp256r1": _ECDSA_NISTP256, + "secp384r1": _ECDSA_NISTP384, + "secp521r1": _ECDSA_NISTP521, +} + + +def _ecdsa_key_type(public_key: ec.EllipticCurvePublicKey) -> bytes: + """Return SSH key_type and curve_name for private key.""" + curve = public_key.curve + if curve.name not in _ECDSA_KEY_TYPE: + raise ValueError( + f"Unsupported curve for ssh private key: {curve.name!r}" + ) + return _ECDSA_KEY_TYPE[curve.name] + + +def _ssh_pem_encode( + data: bytes, + prefix: bytes = _SK_START + b"\n", + suffix: bytes = _SK_END + b"\n", +) -> bytes: + return b"".join([prefix, _base64_encode(data), suffix]) + + +def _check_block_size(data: bytes, block_len: int) -> None: + """Require data to be full blocks""" + if not data or len(data) % block_len != 0: + raise ValueError("Corrupt data: missing padding") + + +def _check_empty(data: bytes) -> None: + """All data should have been parsed.""" + if data: + raise ValueError("Corrupt data: unparsed data") + + +def _init_cipher( + ciphername: bytes, + password: typing.Optional[bytes], + salt: bytes, + rounds: int, +) -> Cipher[typing.Union[modes.CBC, modes.CTR]]: + """Generate key + iv and return cipher.""" + if not password: + raise ValueError("Key is password-protected.") + + algo, key_len, mode, iv_len = _SSH_CIPHERS[ciphername] + seed = _bcrypt_kdf(password, salt, key_len + iv_len, rounds, True) + return Cipher(algo(seed[:key_len]), mode(seed[key_len:])) + + +def _get_u32(data: memoryview) -> typing.Tuple[int, memoryview]: + """Uint32""" + if len(data) < 4: + raise ValueError("Invalid data") + return int.from_bytes(data[:4], byteorder="big"), data[4:] 
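# Standalone illustration of the SSH wire format consumed by the helpers above
# and below: each field is a big-endian u32 length prefix followed by that many
# bytes, and mpints are the same with a sign-safe big-endian body. The names
# prefixed with _demo are hypothetical and not part of this module.
import struct

def _demo_sshstr(val: bytes) -> bytes:
    # u32 big-endian length, then the raw bytes (mirrors _FragList.put_sshstr)
    return struct.pack(">I", len(val)) + val

_blob = _demo_sshstr(b"ssh-ed25519") + _demo_sshstr(b"\x00" * 32)
_n = int.from_bytes(_blob[:4], "big")                 # what _get_u32 returns
_key_type, _rest = _blob[4:4 + _n], _blob[4 + _n:]    # what _get_sshstr returns
assert _key_type == b"ssh-ed25519"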
+ + +def _get_u64(data: memoryview) -> typing.Tuple[int, memoryview]: + """Uint64""" + if len(data) < 8: + raise ValueError("Invalid data") + return int.from_bytes(data[:8], byteorder="big"), data[8:] + + +def _get_sshstr(data: memoryview) -> typing.Tuple[memoryview, memoryview]: + """Bytes with u32 length prefix""" + n, data = _get_u32(data) + if n > len(data): + raise ValueError("Invalid data") + return data[:n], data[n:] + + +def _get_mpint(data: memoryview) -> typing.Tuple[int, memoryview]: + """Big integer.""" + val, data = _get_sshstr(data) + if val and val[0] > 0x7F: + raise ValueError("Invalid data") + return int.from_bytes(val, "big"), data + + +def _to_mpint(val: int) -> bytes: + """Storage format for signed bigint.""" + if val < 0: + raise ValueError("negative mpint not allowed") + if not val: + return b"" + nbytes = (val.bit_length() + 8) // 8 + return utils.int_to_bytes(val, nbytes) + + +class _FragList: + """Build recursive structure without data copy.""" + + flist: typing.List[bytes] + + def __init__(self, init: typing.List[bytes] = None) -> None: + self.flist = [] + if init: + self.flist.extend(init) + + def put_raw(self, val: bytes) -> None: + """Add plain bytes""" + self.flist.append(val) + + def put_u32(self, val: int) -> None: + """Big-endian uint32""" + self.flist.append(val.to_bytes(length=4, byteorder="big")) + + def put_sshstr(self, val: typing.Union[bytes, "_FragList"]) -> None: + """Bytes prefixed with u32 length""" + if isinstance(val, (bytes, memoryview, bytearray)): + self.put_u32(len(val)) + self.flist.append(val) + else: + self.put_u32(val.size()) + self.flist.extend(val.flist) + + def put_mpint(self, val: int) -> None: + """Big-endian bigint prefixed with u32 length""" + self.put_sshstr(_to_mpint(val)) + + def size(self) -> int: + """Current number of bytes""" + return sum(map(len, self.flist)) + + def render(self, dstbuf: memoryview, pos: int = 0) -> int: + """Write into bytearray""" + for frag in self.flist: + flen = len(frag) + start, pos = pos, pos + flen + dstbuf[start:pos] = frag + return pos + + def tobytes(self) -> bytes: + """Return as bytes""" + buf = memoryview(bytearray(self.size())) + self.render(buf) + return buf.tobytes() + + +class _SSHFormatRSA: + """Format for RSA keys. 
+ + Public: + mpint e, n + Private: + mpint n, e, d, iqmp, p, q + """ + + def get_public(self, data: memoryview): + """RSA public fields""" + e, data = _get_mpint(data) + n, data = _get_mpint(data) + return (e, n), data + + def load_public( + self, data: memoryview + ) -> typing.Tuple[rsa.RSAPublicKey, memoryview]: + """Make RSA public key from data.""" + (e, n), data = self.get_public(data) + public_numbers = rsa.RSAPublicNumbers(e, n) + public_key = public_numbers.public_key() + return public_key, data + + def load_private( + self, data: memoryview, pubfields + ) -> typing.Tuple[rsa.RSAPrivateKey, memoryview]: + """Make RSA private key from data.""" + n, data = _get_mpint(data) + e, data = _get_mpint(data) + d, data = _get_mpint(data) + iqmp, data = _get_mpint(data) + p, data = _get_mpint(data) + q, data = _get_mpint(data) + + if (e, n) != pubfields: + raise ValueError("Corrupt data: rsa field mismatch") + dmp1 = rsa.rsa_crt_dmp1(d, p) + dmq1 = rsa.rsa_crt_dmq1(d, q) + public_numbers = rsa.RSAPublicNumbers(e, n) + private_numbers = rsa.RSAPrivateNumbers( + p, q, d, dmp1, dmq1, iqmp, public_numbers + ) + private_key = private_numbers.private_key() + return private_key, data + + def encode_public( + self, public_key: rsa.RSAPublicKey, f_pub: _FragList + ) -> None: + """Write RSA public key""" + pubn = public_key.public_numbers() + f_pub.put_mpint(pubn.e) + f_pub.put_mpint(pubn.n) + + def encode_private( + self, private_key: rsa.RSAPrivateKey, f_priv: _FragList + ) -> None: + """Write RSA private key""" + private_numbers = private_key.private_numbers() + public_numbers = private_numbers.public_numbers + + f_priv.put_mpint(public_numbers.n) + f_priv.put_mpint(public_numbers.e) + + f_priv.put_mpint(private_numbers.d) + f_priv.put_mpint(private_numbers.iqmp) + f_priv.put_mpint(private_numbers.p) + f_priv.put_mpint(private_numbers.q) + + +class _SSHFormatDSA: + """Format for DSA keys. 
+ + Public: + mpint p, q, g, y + Private: + mpint p, q, g, y, x + """ + + def get_public( + self, data: memoryview + ) -> typing.Tuple[typing.Tuple, memoryview]: + """DSA public fields""" + p, data = _get_mpint(data) + q, data = _get_mpint(data) + g, data = _get_mpint(data) + y, data = _get_mpint(data) + return (p, q, g, y), data + + def load_public( + self, data: memoryview + ) -> typing.Tuple[dsa.DSAPublicKey, memoryview]: + """Make DSA public key from data.""" + (p, q, g, y), data = self.get_public(data) + parameter_numbers = dsa.DSAParameterNumbers(p, q, g) + public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers) + self._validate(public_numbers) + public_key = public_numbers.public_key() + return public_key, data + + def load_private( + self, data: memoryview, pubfields + ) -> typing.Tuple[dsa.DSAPrivateKey, memoryview]: + """Make DSA private key from data.""" + (p, q, g, y), data = self.get_public(data) + x, data = _get_mpint(data) + + if (p, q, g, y) != pubfields: + raise ValueError("Corrupt data: dsa field mismatch") + parameter_numbers = dsa.DSAParameterNumbers(p, q, g) + public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers) + self._validate(public_numbers) + private_numbers = dsa.DSAPrivateNumbers(x, public_numbers) + private_key = private_numbers.private_key() + return private_key, data + + def encode_public( + self, public_key: dsa.DSAPublicKey, f_pub: _FragList + ) -> None: + """Write DSA public key""" + public_numbers = public_key.public_numbers() + parameter_numbers = public_numbers.parameter_numbers + self._validate(public_numbers) + + f_pub.put_mpint(parameter_numbers.p) + f_pub.put_mpint(parameter_numbers.q) + f_pub.put_mpint(parameter_numbers.g) + f_pub.put_mpint(public_numbers.y) + + def encode_private( + self, private_key: dsa.DSAPrivateKey, f_priv: _FragList + ) -> None: + """Write DSA private key""" + self.encode_public(private_key.public_key(), f_priv) + f_priv.put_mpint(private_key.private_numbers().x) + + def _validate(self, public_numbers: dsa.DSAPublicNumbers) -> None: + parameter_numbers = public_numbers.parameter_numbers + if parameter_numbers.p.bit_length() != 1024: + raise ValueError("SSH supports only 1024 bit DSA keys") + + +class _SSHFormatECDSA: + """Format for ECDSA keys. 
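`_validate` above rejects anything but 1024-bit DSA, matching what the ssh-dss wire format supports. A small sketch showing a conforming key rendered as an OpenSSH public-key line (key generated on the fly, for illustration only):

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import dsa

    key = dsa.generate_private_key(key_size=1024)   # the only size accepted by the SSH format here
    line = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
    )
    assert line.startswith(b"ssh-dss ")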
+ + Public: + str curve + bytes point + Private: + str curve + bytes point + mpint secret + """ + + def __init__(self, ssh_curve_name: bytes, curve: ec.EllipticCurve): + self.ssh_curve_name = ssh_curve_name + self.curve = curve + + def get_public( + self, data: memoryview + ) -> typing.Tuple[typing.Tuple, memoryview]: + """ECDSA public fields""" + curve, data = _get_sshstr(data) + point, data = _get_sshstr(data) + if curve != self.ssh_curve_name: + raise ValueError("Curve name mismatch") + if point[0] != 4: + raise NotImplementedError("Need uncompressed point") + return (curve, point), data + + def load_public( + self, data: memoryview + ) -> typing.Tuple[ec.EllipticCurvePublicKey, memoryview]: + """Make ECDSA public key from data.""" + (curve_name, point), data = self.get_public(data) + public_key = ec.EllipticCurvePublicKey.from_encoded_point( + self.curve, point.tobytes() + ) + return public_key, data + + def load_private( + self, data: memoryview, pubfields + ) -> typing.Tuple[ec.EllipticCurvePrivateKey, memoryview]: + """Make ECDSA private key from data.""" + (curve_name, point), data = self.get_public(data) + secret, data = _get_mpint(data) + + if (curve_name, point) != pubfields: + raise ValueError("Corrupt data: ecdsa field mismatch") + private_key = ec.derive_private_key(secret, self.curve) + return private_key, data + + def encode_public( + self, public_key: ec.EllipticCurvePublicKey, f_pub: _FragList + ) -> None: + """Write ECDSA public key""" + point = public_key.public_bytes( + Encoding.X962, PublicFormat.UncompressedPoint + ) + f_pub.put_sshstr(self.ssh_curve_name) + f_pub.put_sshstr(point) + + def encode_private( + self, private_key: ec.EllipticCurvePrivateKey, f_priv: _FragList + ) -> None: + """Write ECDSA private key""" + public_key = private_key.public_key() + private_numbers = private_key.private_numbers() + + self.encode_public(public_key, f_priv) + f_priv.put_mpint(private_numbers.private_value) + + +class _SSHFormatEd25519: + """Format for Ed25519 keys. 
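`get_public` above insists on an uncompressed point (leading 0x04 byte), which is exactly what `encode_public` produces via the X9.62 encoding. A quick check of that encoding for nistp256 (key generated on the fly):

    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

    key = ec.generate_private_key(ec.SECP256R1())
    point = key.public_key().public_bytes(Encoding.X962, PublicFormat.UncompressedPoint)
    assert point[0] == 4 and len(point) == 65   # 0x04 || X (32 bytes) || Y (32 bytes)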
+ + Public: + bytes point + Private: + bytes point + bytes secret_and_point + """ + + def get_public( + self, data: memoryview + ) -> typing.Tuple[typing.Tuple, memoryview]: + """Ed25519 public fields""" + point, data = _get_sshstr(data) + return (point,), data + + def load_public( + self, data: memoryview + ) -> typing.Tuple[ed25519.Ed25519PublicKey, memoryview]: + """Make Ed25519 public key from data.""" + (point,), data = self.get_public(data) + public_key = ed25519.Ed25519PublicKey.from_public_bytes( + point.tobytes() + ) + return public_key, data + + def load_private( + self, data: memoryview, pubfields + ) -> typing.Tuple[ed25519.Ed25519PrivateKey, memoryview]: + """Make Ed25519 private key from data.""" + (point,), data = self.get_public(data) + keypair, data = _get_sshstr(data) + + secret = keypair[:32] + point2 = keypair[32:] + if point != point2 or (point,) != pubfields: + raise ValueError("Corrupt data: ed25519 field mismatch") + private_key = ed25519.Ed25519PrivateKey.from_private_bytes(secret) + return private_key, data + + def encode_public( + self, public_key: ed25519.Ed25519PublicKey, f_pub: _FragList + ) -> None: + """Write Ed25519 public key""" + raw_public_key = public_key.public_bytes( + Encoding.Raw, PublicFormat.Raw + ) + f_pub.put_sshstr(raw_public_key) + + def encode_private( + self, private_key: ed25519.Ed25519PrivateKey, f_priv: _FragList + ) -> None: + """Write Ed25519 private key""" + public_key = private_key.public_key() + raw_private_key = private_key.private_bytes( + Encoding.Raw, PrivateFormat.Raw, NoEncryption() + ) + raw_public_key = public_key.public_bytes( + Encoding.Raw, PublicFormat.Raw + ) + f_keypair = _FragList([raw_private_key, raw_public_key]) + + self.encode_public(public_key, f_priv) + f_priv.put_sshstr(f_keypair) + + +_KEY_FORMATS = { + _SSH_RSA: _SSHFormatRSA(), + _SSH_DSA: _SSHFormatDSA(), + _SSH_ED25519: _SSHFormatEd25519(), + _ECDSA_NISTP256: _SSHFormatECDSA(b"nistp256", ec.SECP256R1()), + _ECDSA_NISTP384: _SSHFormatECDSA(b"nistp384", ec.SECP384R1()), + _ECDSA_NISTP521: _SSHFormatECDSA(b"nistp521", ec.SECP521R1()), +} + + +def _lookup_kformat(key_type: bytes): + """Return valid format or throw error""" + if not isinstance(key_type, bytes): + key_type = memoryview(key_type).tobytes() + if key_type in _KEY_FORMATS: + return _KEY_FORMATS[key_type] + raise UnsupportedAlgorithm(f"Unsupported key type: {key_type!r}") + + +_SSH_PRIVATE_KEY_TYPES = typing.Union[ + ec.EllipticCurvePrivateKey, + rsa.RSAPrivateKey, + dsa.DSAPrivateKey, + ed25519.Ed25519PrivateKey, +] + + +def load_ssh_private_key( + data: bytes, + password: typing.Optional[bytes], + backend: typing.Any = None, +) -> _SSH_PRIVATE_KEY_TYPES: + """Load private key from OpenSSH custom encoding.""" + utils._check_byteslike("data", data) + if password is not None: + utils._check_bytes("password", password) + + m = _PEM_RC.search(data) + if not m: + raise ValueError("Not OpenSSH private key format") + p1 = m.start(1) + p2 = m.end(1) + data = binascii.a2b_base64(memoryview(data)[p1:p2]) + if not data.startswith(_SK_MAGIC): + raise ValueError("Not OpenSSH private key format") + data = memoryview(data)[len(_SK_MAGIC) :] + + # parse header + ciphername, data = _get_sshstr(data) + kdfname, data = _get_sshstr(data) + kdfoptions, data = _get_sshstr(data) + nkeys, data = _get_u32(data) + if nkeys != 1: + raise ValueError("Only one key supported") + + # load public key data + pubdata, data = _get_sshstr(data) + pub_key_type, pubdata = _get_sshstr(pubdata) + kformat = _lookup_kformat(pub_key_type) + 
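The Ed25519 private section stores a 64-byte blob that is simply the 32-byte seed followed by the 32-byte public point; `load_private` splits it and cross-checks the point against the public half. A standalone sketch of that layout:

    from cryptography.hazmat.primitives.asymmetric import ed25519
    from cryptography.hazmat.primitives.serialization import (
        Encoding, NoEncryption, PrivateFormat, PublicFormat,
    )

    key = ed25519.Ed25519PrivateKey.generate()
    secret = key.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption())
    point = key.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)
    keypair = secret + point            # the blob written by encode_private above
    assert len(secret) == 32 and len(point) == 32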
pubfields, pubdata = kformat.get_public(pubdata) + _check_empty(pubdata) + + # load secret data + edata, data = _get_sshstr(data) + _check_empty(data) + + if (ciphername, kdfname) != (_NONE, _NONE): + ciphername_bytes = ciphername.tobytes() + if ciphername_bytes not in _SSH_CIPHERS: + raise UnsupportedAlgorithm( + f"Unsupported cipher: {ciphername_bytes!r}" + ) + if kdfname != _BCRYPT: + raise UnsupportedAlgorithm(f"Unsupported KDF: {kdfname!r}") + blklen = _SSH_CIPHERS[ciphername_bytes][3] + _check_block_size(edata, blklen) + salt, kbuf = _get_sshstr(kdfoptions) + rounds, kbuf = _get_u32(kbuf) + _check_empty(kbuf) + ciph = _init_cipher(ciphername_bytes, password, salt.tobytes(), rounds) + edata = memoryview(ciph.decryptor().update(edata)) + else: + blklen = 8 + _check_block_size(edata, blklen) + ck1, edata = _get_u32(edata) + ck2, edata = _get_u32(edata) + if ck1 != ck2: + raise ValueError("Corrupt data: broken checksum") + + # load per-key struct + key_type, edata = _get_sshstr(edata) + if key_type != pub_key_type: + raise ValueError("Corrupt data: key type mismatch") + private_key, edata = kformat.load_private(edata, pubfields) + comment, edata = _get_sshstr(edata) + + # yes, SSH does padding check *after* all other parsing is done. + # need to follow as it writes zero-byte padding too. + if edata != _PADDING[: len(edata)]: + raise ValueError("Corrupt data: invalid padding") + + return private_key + + +def serialize_ssh_private_key( + private_key: _SSH_PRIVATE_KEY_TYPES, + password: typing.Optional[bytes] = None, +) -> bytes: + """Serialize private key with OpenSSH custom encoding.""" + if password is not None: + utils._check_bytes("password", password) + if password and len(password) > _MAX_PASSWORD: + raise ValueError( + "Passwords longer than 72 bytes are not supported by " + "OpenSSH private key format" + ) + + if isinstance(private_key, ec.EllipticCurvePrivateKey): + key_type = _ecdsa_key_type(private_key.public_key()) + elif isinstance(private_key, rsa.RSAPrivateKey): + key_type = _SSH_RSA + elif isinstance(private_key, dsa.DSAPrivateKey): + key_type = _SSH_DSA + elif isinstance(private_key, ed25519.Ed25519PrivateKey): + key_type = _SSH_ED25519 + else: + raise ValueError("Unsupported key type") + kformat = _lookup_kformat(key_type) + + # setup parameters + f_kdfoptions = _FragList() + if password: + ciphername = _DEFAULT_CIPHER + blklen = _SSH_CIPHERS[ciphername][3] + kdfname = _BCRYPT + rounds = _DEFAULT_ROUNDS + salt = os.urandom(16) + f_kdfoptions.put_sshstr(salt) + f_kdfoptions.put_u32(rounds) + ciph = _init_cipher(ciphername, password, salt, rounds) + else: + ciphername = kdfname = _NONE + blklen = 8 + ciph = None + nkeys = 1 + checkval = os.urandom(4) + comment = b"" + + # encode public and private parts together + f_public_key = _FragList() + f_public_key.put_sshstr(key_type) + kformat.encode_public(private_key.public_key(), f_public_key) + + f_secrets = _FragList([checkval, checkval]) + f_secrets.put_sshstr(key_type) + kformat.encode_private(private_key, f_secrets) + f_secrets.put_sshstr(comment) + f_secrets.put_raw(_PADDING[: blklen - (f_secrets.size() % blklen)]) + + # top-level structure + f_main = _FragList() + f_main.put_raw(_SK_MAGIC) + f_main.put_sshstr(ciphername) + f_main.put_sshstr(kdfname) + f_main.put_sshstr(f_kdfoptions) + f_main.put_u32(nkeys) + f_main.put_sshstr(f_public_key) + f_main.put_sshstr(f_secrets) + + # copy result info bytearray + slen = f_secrets.size() + mlen = f_main.size() + buf = memoryview(bytearray(mlen + blklen)) + f_main.render(buf) + ofs 
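In practice these loaders are reached through the serialization module rather than called directly. A round-trip sketch for an unencrypted and an encrypted OpenSSH private key; the encrypted branch relies on the optional third-party bcrypt package at runtime, since the format's KDF is bcrypt:

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ed25519

    key = ed25519.Ed25519PrivateKey.generate()

    plain = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.OpenSSH,
        serialization.NoEncryption(),
    )
    assert serialization.load_ssh_private_key(plain, password=None)

    # Encrypted variant: the bcrypt KDF options (salt, rounds) end up in the
    # header parsed above; needs the "bcrypt" package to be installed.
    locked = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.OpenSSH,
        serialization.BestAvailableEncryption(b"correct horse"),
    )
    assert serialization.load_ssh_private_key(locked, password=b"correct horse")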
= mlen - slen + + # encrypt in-place + if ciph is not None: + ciph.encryptor().update_into(buf[ofs:mlen], buf[ofs:]) + + txt = _ssh_pem_encode(buf[:mlen]) + buf[ofs:mlen] = bytearray(slen) + return txt + + +_SSH_PUBLIC_KEY_TYPES = typing.Union[ + ec.EllipticCurvePublicKey, + rsa.RSAPublicKey, + dsa.DSAPublicKey, + ed25519.Ed25519PublicKey, +] + + +def load_ssh_public_key( + data: bytes, backend: typing.Any = None +) -> _SSH_PUBLIC_KEY_TYPES: + """Load public key from OpenSSH one-line format.""" + utils._check_byteslike("data", data) + + m = _SSH_PUBKEY_RC.match(data) + if not m: + raise ValueError("Invalid line format") + key_type = orig_key_type = m.group(1) + key_body = m.group(2) + with_cert = False + if _CERT_SUFFIX == key_type[-len(_CERT_SUFFIX) :]: + with_cert = True + key_type = key_type[: -len(_CERT_SUFFIX)] + kformat = _lookup_kformat(key_type) + + try: + rest = memoryview(binascii.a2b_base64(key_body)) + except (TypeError, binascii.Error): + raise ValueError("Invalid key format") + + inner_key_type, rest = _get_sshstr(rest) + if inner_key_type != orig_key_type: + raise ValueError("Invalid key format") + if with_cert: + nonce, rest = _get_sshstr(rest) + public_key, rest = kformat.load_public(rest) + if with_cert: + serial, rest = _get_u64(rest) + cctype, rest = _get_u32(rest) + key_id, rest = _get_sshstr(rest) + principals, rest = _get_sshstr(rest) + valid_after, rest = _get_u64(rest) + valid_before, rest = _get_u64(rest) + crit_options, rest = _get_sshstr(rest) + extensions, rest = _get_sshstr(rest) + reserved, rest = _get_sshstr(rest) + sig_key, rest = _get_sshstr(rest) + signature, rest = _get_sshstr(rest) + _check_empty(rest) + return public_key + + +def serialize_ssh_public_key(public_key: _SSH_PUBLIC_KEY_TYPES) -> bytes: + """One-line public key format for OpenSSH""" + if isinstance(public_key, ec.EllipticCurvePublicKey): + key_type = _ecdsa_key_type(public_key) + elif isinstance(public_key, rsa.RSAPublicKey): + key_type = _SSH_RSA + elif isinstance(public_key, dsa.DSAPublicKey): + key_type = _SSH_DSA + elif isinstance(public_key, ed25519.Ed25519PublicKey): + key_type = _SSH_ED25519 + else: + raise ValueError("Unsupported key type") + kformat = _lookup_kformat(key_type) + + f_pub = _FragList() + f_pub.put_sshstr(key_type) + kformat.encode_public(public_key, f_pub) + + pub = binascii.b2a_base64(f_pub.tobytes()).strip() + return b"".join([key_type, b" ", pub]) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/__init__.py new file mode 100644 index 0000000..8a8b30f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/__init__.py @@ -0,0 +1,7 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InvalidToken(Exception): + pass diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/hotp.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/hotp.py new file mode 100644 index 0000000..9730af2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/hotp.py @@ -0,0 +1,92 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
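The one-line public format handled by load_ssh_public_key / serialize_ssh_public_key is the familiar authorized_keys syntax: a key-type token, a space, and base64. A short round trip through the public serialization API (key generated on the fly):

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ed25519

    pub = ed25519.Ed25519PrivateKey.generate().public_key()
    line = pub.public_bytes(
        serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
    )
    assert line.startswith(b"ssh-ed25519 ")
    assert serialization.load_ssh_public_key(line).public_bytes(
        serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
    ) == line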
+ + +import base64 +import typing +from urllib.parse import quote, urlencode + +from cryptography.hazmat.primitives import constant_time, hmac +from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512 +from cryptography.hazmat.primitives.twofactor import InvalidToken + + +_ALLOWED_HASH_TYPES = typing.Union[SHA1, SHA256, SHA512] + + +def _generate_uri( + hotp: "HOTP", + type_name: str, + account_name: str, + issuer: typing.Optional[str], + extra_parameters: typing.List[typing.Tuple[str, int]], +) -> str: + parameters = [ + ("digits", hotp._length), + ("secret", base64.b32encode(hotp._key)), + ("algorithm", hotp._algorithm.name.upper()), + ] + + if issuer is not None: + parameters.append(("issuer", issuer)) + + parameters.extend(extra_parameters) + + label = ( + f"{quote(issuer)}:{quote(account_name)}" + if issuer + else quote(account_name) + ) + return f"otpauth://{type_name}/{label}?{urlencode(parameters)}" + + +class HOTP: + def __init__( + self, + key: bytes, + length: int, + algorithm: _ALLOWED_HASH_TYPES, + backend: typing.Any = None, + enforce_key_length: bool = True, + ) -> None: + if len(key) < 16 and enforce_key_length is True: + raise ValueError("Key length has to be at least 128 bits.") + + if not isinstance(length, int): + raise TypeError("Length parameter must be an integer type.") + + if length < 6 or length > 8: + raise ValueError("Length of HOTP has to be between 6 to 8.") + + if not isinstance(algorithm, (SHA1, SHA256, SHA512)): + raise TypeError("Algorithm must be SHA1, SHA256 or SHA512.") + + self._key = key + self._length = length + self._algorithm = algorithm + + def generate(self, counter: int) -> bytes: + truncated_value = self._dynamic_truncate(counter) + hotp = truncated_value % (10**self._length) + return "{0:0{1}}".format(hotp, self._length).encode() + + def verify(self, hotp: bytes, counter: int) -> None: + if not constant_time.bytes_eq(self.generate(counter), hotp): + raise InvalidToken("Supplied HOTP value does not match.") + + def _dynamic_truncate(self, counter: int) -> int: + ctx = hmac.HMAC(self._key, self._algorithm) + ctx.update(counter.to_bytes(length=8, byteorder="big")) + hmac_value = ctx.finalize() + + offset = hmac_value[len(hmac_value) - 1] & 0b1111 + p = hmac_value[offset : offset + 4] + return int.from_bytes(p, byteorder="big") & 0x7FFFFFFF + + def get_provisioning_uri( + self, account_name: str, counter: int, issuer: typing.Optional[str] + ) -> str: + return _generate_uri( + self, "hotp", account_name, issuer, [("counter", int(counter))] + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/totp.py b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/totp.py new file mode 100644 index 0000000..317baba --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/hazmat/primitives/twofactor/totp.py @@ -0,0 +1,48 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
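Usage of the HOTP class above is counter-based: both sides must agree on the key and the counter, and verify() compares the codes in constant time. A minimal sketch (random key, for illustration only):

    import os

    from cryptography.hazmat.primitives.hashes import SHA1
    from cryptography.hazmat.primitives.twofactor.hotp import HOTP

    key = os.urandom(20)                 # at least 128 bits, per the constructor check
    hotp = HOTP(key, 6, SHA1())
    code = hotp.generate(0)              # six-digit code as bytes
    hotp.verify(code, 0)                 # raises InvalidToken on mismatch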
+ +import typing + +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.twofactor import InvalidToken +from cryptography.hazmat.primitives.twofactor.hotp import ( + HOTP, + _ALLOWED_HASH_TYPES, + _generate_uri, +) + + +class TOTP: + def __init__( + self, + key: bytes, + length: int, + algorithm: _ALLOWED_HASH_TYPES, + time_step: int, + backend: typing.Any = None, + enforce_key_length: bool = True, + ): + self._time_step = time_step + self._hotp = HOTP( + key, length, algorithm, enforce_key_length=enforce_key_length + ) + + def generate(self, time: typing.Union[int, float]) -> bytes: + counter = int(time / self._time_step) + return self._hotp.generate(counter) + + def verify(self, totp: bytes, time: int) -> None: + if not constant_time.bytes_eq(self.generate(time), totp): + raise InvalidToken("Supplied TOTP value does not match.") + + def get_provisioning_uri( + self, account_name: str, issuer: typing.Optional[str] + ) -> str: + return _generate_uri( + self._hotp, + "totp", + account_name, + issuer, + [("period", int(self._time_step))], + ) diff --git a/myenv/lib/python3.9/site-packages/cryptography/py.typed b/myenv/lib/python3.9/site-packages/cryptography/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/cryptography/utils.py b/myenv/lib/python3.9/site-packages/cryptography/utils.py new file mode 100644 index 0000000..df3abc8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/utils.py @@ -0,0 +1,180 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import enum +import inspect +import sys +import types +import typing +import warnings + + +# We use a UserWarning subclass, instead of DeprecationWarning, because CPython +# decided deprecation warnings should be invisble by default. +class CryptographyDeprecationWarning(UserWarning): + pass + + +# Several APIs were deprecated with no specific end-of-life date because of the +# ubiquity of their use. They should not be removed until we agree on when that +# cycle ends. +PersistentlyDeprecated2019 = CryptographyDeprecationWarning +DeprecatedIn35 = CryptographyDeprecationWarning +DeprecatedIn36 = CryptographyDeprecationWarning +DeprecatedIn37 = CryptographyDeprecationWarning + + +def _check_bytes(name: str, value: bytes) -> None: + if not isinstance(value, bytes): + raise TypeError("{} must be bytes".format(name)) + + +def _check_byteslike(name: str, value: bytes) -> None: + try: + memoryview(value) + except TypeError: + raise TypeError("{} must be bytes-like".format(name)) + + +if typing.TYPE_CHECKING: + from typing_extensions import Protocol + + _T_class = typing.TypeVar("_T_class", bound=type) + + class _RegisterDecoratorType(Protocol): + def __call__( + self, klass: _T_class, *, check_annotations: bool = False + ) -> _T_class: + ... 
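TOTP above simply derives the HOTP counter from wall-clock time divided by the time step, so any timestamp inside the same window verifies. A minimal sketch (random key, for illustration only):

    import os
    import time

    from cryptography.hazmat.primitives.hashes import SHA1
    from cryptography.hazmat.primitives.twofactor.totp import TOTP

    key = os.urandom(20)
    totp = TOTP(key, 6, SHA1(), time_step=30)
    now = time.time()
    code = totp.generate(now)
    totp.verify(code, int(now))          # same 30-second window -> same counter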
+ + +def register_interface(iface: abc.ABCMeta) -> "_RegisterDecoratorType": + def register_decorator( + klass: "_T_class", *, check_annotations: bool = False + ) -> "_T_class": + verify_interface(iface, klass, check_annotations=check_annotations) + iface.register(klass) + return klass + + return register_decorator + + +def int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes: + return integer.to_bytes( + length or (integer.bit_length() + 7) // 8 or 1, "big" + ) + + +class InterfaceNotImplemented(Exception): + pass + + +def strip_annotation(signature: inspect.Signature) -> inspect.Signature: + return inspect.Signature( + [ + param.replace(annotation=inspect.Parameter.empty) + for param in signature.parameters.values() + ] + ) + + +def verify_interface( + iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False +): + for method in iface.__abstractmethods__: + if not hasattr(klass, method): + raise InterfaceNotImplemented( + "{} is missing a {!r} method".format(klass, method) + ) + if isinstance(getattr(iface, method), abc.abstractproperty): + # Can't properly verify these yet. + continue + sig = inspect.signature(getattr(iface, method)) + actual = inspect.signature(getattr(klass, method)) + if check_annotations: + ok = sig == actual + else: + ok = strip_annotation(sig) == strip_annotation(actual) + if not ok: + raise InterfaceNotImplemented( + "{}.{}'s signature differs from the expected. Expected: " + "{!r}. Received: {!r}".format(klass, method, sig, actual) + ) + + +class _DeprecatedValue: + def __init__(self, value: object, message: str, warning_class): + self.value = value + self.message = message + self.warning_class = warning_class + + +class _ModuleWithDeprecations(types.ModuleType): + def __init__(self, module: types.ModuleType): + super().__init__(module.__name__) + self.__dict__["_module"] = module + + def __getattr__(self, attr: str) -> object: + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + obj = obj.value + return obj + + def __setattr__(self, attr: str, value: object) -> None: + setattr(self._module, attr, value) + + def __delattr__(self, attr: str) -> None: + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + + delattr(self._module, attr) + + def __dir__(self) -> typing.Sequence[str]: + return ["_module"] + dir(self._module) + + +def deprecated( + value: object, + module_name: str, + message: str, + warning_class: typing.Type[Warning], + name: typing.Optional[str] = None, +) -> _DeprecatedValue: + module = sys.modules[module_name] + if not isinstance(module, _ModuleWithDeprecations): + sys.modules[module_name] = module = _ModuleWithDeprecations(module) + dv = _DeprecatedValue(value, message, warning_class) + # Maintain backwards compatibility with `name is None` for pyOpenSSL. + if name is not None: + setattr(module, name, dv) + return dv + + +def cached_property(func: typing.Callable) -> property: + cached_name = "_cached_{}".format(func) + sentinel = object() + + def inner(instance: object): + cache = getattr(instance, cached_name, sentinel) + if cache is not sentinel: + return cache + result = func(instance) + setattr(instance, cached_name, result) + return result + + return property(inner) + + +# Python 3.10 changed representation of enums. We use well-defined object +# representation and string representation from Python 3.9. 
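int_to_bytes above picks the minimal big-endian length when none is given, with a one-byte floor for zero; the mpint and serial-number code elsewhere in this diff relies on exactly that behaviour. A few spot checks:

    from cryptography import utils

    assert utils.int_to_bytes(0) == b"\x00"                        # floor of one byte
    assert utils.int_to_bytes(65537) == b"\x01\x00\x01"            # minimal length
    assert utils.int_to_bytes(1, length=4) == b"\x00\x00\x00\x01"  # explicit padding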
+class Enum(enum.Enum): + def __repr__(self) -> str: + return f"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>" + + def __str__(self) -> str: + return f"{self.__class__.__name__}.{self._name_}" diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/__init__.py b/myenv/lib/python3.9/site-packages/cryptography/x509/__init__.py new file mode 100644 index 0000000..906075d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/__init__.py @@ -0,0 +1,249 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +from cryptography.x509 import certificate_transparency +from cryptography.x509.base import ( + Attribute, + AttributeNotFound, + Attributes, + Certificate, + CertificateBuilder, + CertificateRevocationList, + CertificateRevocationListBuilder, + CertificateSigningRequest, + CertificateSigningRequestBuilder, + InvalidVersion, + RevokedCertificate, + RevokedCertificateBuilder, + Version, + load_der_x509_certificate, + load_der_x509_crl, + load_der_x509_csr, + load_pem_x509_certificate, + load_pem_x509_crl, + load_pem_x509_csr, + random_serial_number, +) +from cryptography.x509.extensions import ( + AccessDescription, + AuthorityInformationAccess, + AuthorityKeyIdentifier, + BasicConstraints, + CRLDistributionPoints, + CRLNumber, + CRLReason, + CertificateIssuer, + CertificatePolicies, + DeltaCRLIndicator, + DistributionPoint, + DuplicateExtension, + ExtendedKeyUsage, + Extension, + ExtensionNotFound, + ExtensionType, + Extensions, + FreshestCRL, + GeneralNames, + InhibitAnyPolicy, + InvalidityDate, + IssuerAlternativeName, + IssuingDistributionPoint, + KeyUsage, + NameConstraints, + NoticeReference, + OCSPNoCheck, + OCSPNonce, + PolicyConstraints, + PolicyInformation, + PrecertPoison, + PrecertificateSignedCertificateTimestamps, + ReasonFlags, + SignedCertificateTimestamps, + SubjectAlternativeName, + SubjectInformationAccess, + SubjectKeyIdentifier, + TLSFeature, + TLSFeatureType, + UnrecognizedExtension, + UserNotice, +) +from cryptography.x509.general_name import ( + DNSName, + DirectoryName, + GeneralName, + IPAddress, + OtherName, + RFC822Name, + RegisteredID, + UniformResourceIdentifier, + UnsupportedGeneralNameType, +) +from cryptography.x509.name import ( + Name, + NameAttribute, + RelativeDistinguishedName, +) +from cryptography.x509.oid import ( + AuthorityInformationAccessOID, + CRLEntryExtensionOID, + CertificatePoliciesOID, + ExtendedKeyUsageOID, + ExtensionOID, + NameOID, + ObjectIdentifier, + SignatureAlgorithmOID, +) + + +OID_AUTHORITY_INFORMATION_ACCESS = ExtensionOID.AUTHORITY_INFORMATION_ACCESS +OID_AUTHORITY_KEY_IDENTIFIER = ExtensionOID.AUTHORITY_KEY_IDENTIFIER +OID_BASIC_CONSTRAINTS = ExtensionOID.BASIC_CONSTRAINTS +OID_CERTIFICATE_POLICIES = ExtensionOID.CERTIFICATE_POLICIES +OID_CRL_DISTRIBUTION_POINTS = ExtensionOID.CRL_DISTRIBUTION_POINTS +OID_EXTENDED_KEY_USAGE = ExtensionOID.EXTENDED_KEY_USAGE +OID_FRESHEST_CRL = ExtensionOID.FRESHEST_CRL +OID_INHIBIT_ANY_POLICY = ExtensionOID.INHIBIT_ANY_POLICY +OID_ISSUER_ALTERNATIVE_NAME = ExtensionOID.ISSUER_ALTERNATIVE_NAME +OID_KEY_USAGE = ExtensionOID.KEY_USAGE +OID_NAME_CONSTRAINTS = ExtensionOID.NAME_CONSTRAINTS +OID_OCSP_NO_CHECK = ExtensionOID.OCSP_NO_CHECK +OID_POLICY_CONSTRAINTS = ExtensionOID.POLICY_CONSTRAINTS +OID_POLICY_MAPPINGS = ExtensionOID.POLICY_MAPPINGS +OID_SUBJECT_ALTERNATIVE_NAME = ExtensionOID.SUBJECT_ALTERNATIVE_NAME 
+OID_SUBJECT_DIRECTORY_ATTRIBUTES = ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES +OID_SUBJECT_INFORMATION_ACCESS = ExtensionOID.SUBJECT_INFORMATION_ACCESS +OID_SUBJECT_KEY_IDENTIFIER = ExtensionOID.SUBJECT_KEY_IDENTIFIER + +OID_DSA_WITH_SHA1 = SignatureAlgorithmOID.DSA_WITH_SHA1 +OID_DSA_WITH_SHA224 = SignatureAlgorithmOID.DSA_WITH_SHA224 +OID_DSA_WITH_SHA256 = SignatureAlgorithmOID.DSA_WITH_SHA256 +OID_ECDSA_WITH_SHA1 = SignatureAlgorithmOID.ECDSA_WITH_SHA1 +OID_ECDSA_WITH_SHA224 = SignatureAlgorithmOID.ECDSA_WITH_SHA224 +OID_ECDSA_WITH_SHA256 = SignatureAlgorithmOID.ECDSA_WITH_SHA256 +OID_ECDSA_WITH_SHA384 = SignatureAlgorithmOID.ECDSA_WITH_SHA384 +OID_ECDSA_WITH_SHA512 = SignatureAlgorithmOID.ECDSA_WITH_SHA512 +OID_RSA_WITH_MD5 = SignatureAlgorithmOID.RSA_WITH_MD5 +OID_RSA_WITH_SHA1 = SignatureAlgorithmOID.RSA_WITH_SHA1 +OID_RSA_WITH_SHA224 = SignatureAlgorithmOID.RSA_WITH_SHA224 +OID_RSA_WITH_SHA256 = SignatureAlgorithmOID.RSA_WITH_SHA256 +OID_RSA_WITH_SHA384 = SignatureAlgorithmOID.RSA_WITH_SHA384 +OID_RSA_WITH_SHA512 = SignatureAlgorithmOID.RSA_WITH_SHA512 +OID_RSASSA_PSS = SignatureAlgorithmOID.RSASSA_PSS + +OID_COMMON_NAME = NameOID.COMMON_NAME +OID_COUNTRY_NAME = NameOID.COUNTRY_NAME +OID_DOMAIN_COMPONENT = NameOID.DOMAIN_COMPONENT +OID_DN_QUALIFIER = NameOID.DN_QUALIFIER +OID_EMAIL_ADDRESS = NameOID.EMAIL_ADDRESS +OID_GENERATION_QUALIFIER = NameOID.GENERATION_QUALIFIER +OID_GIVEN_NAME = NameOID.GIVEN_NAME +OID_LOCALITY_NAME = NameOID.LOCALITY_NAME +OID_ORGANIZATIONAL_UNIT_NAME = NameOID.ORGANIZATIONAL_UNIT_NAME +OID_ORGANIZATION_NAME = NameOID.ORGANIZATION_NAME +OID_PSEUDONYM = NameOID.PSEUDONYM +OID_SERIAL_NUMBER = NameOID.SERIAL_NUMBER +OID_STATE_OR_PROVINCE_NAME = NameOID.STATE_OR_PROVINCE_NAME +OID_SURNAME = NameOID.SURNAME +OID_TITLE = NameOID.TITLE + +OID_CLIENT_AUTH = ExtendedKeyUsageOID.CLIENT_AUTH +OID_CODE_SIGNING = ExtendedKeyUsageOID.CODE_SIGNING +OID_EMAIL_PROTECTION = ExtendedKeyUsageOID.EMAIL_PROTECTION +OID_OCSP_SIGNING = ExtendedKeyUsageOID.OCSP_SIGNING +OID_SERVER_AUTH = ExtendedKeyUsageOID.SERVER_AUTH +OID_TIME_STAMPING = ExtendedKeyUsageOID.TIME_STAMPING + +OID_ANY_POLICY = CertificatePoliciesOID.ANY_POLICY +OID_CPS_QUALIFIER = CertificatePoliciesOID.CPS_QUALIFIER +OID_CPS_USER_NOTICE = CertificatePoliciesOID.CPS_USER_NOTICE + +OID_CERTIFICATE_ISSUER = CRLEntryExtensionOID.CERTIFICATE_ISSUER +OID_CRL_REASON = CRLEntryExtensionOID.CRL_REASON +OID_INVALIDITY_DATE = CRLEntryExtensionOID.INVALIDITY_DATE + +OID_CA_ISSUERS = AuthorityInformationAccessOID.CA_ISSUERS +OID_OCSP = AuthorityInformationAccessOID.OCSP + +__all__ = [ + "certificate_transparency", + "load_pem_x509_certificate", + "load_der_x509_certificate", + "load_pem_x509_csr", + "load_der_x509_csr", + "load_pem_x509_crl", + "load_der_x509_crl", + "random_serial_number", + "Attribute", + "AttributeNotFound", + "Attributes", + "InvalidVersion", + "DeltaCRLIndicator", + "DuplicateExtension", + "ExtensionNotFound", + "UnsupportedGeneralNameType", + "NameAttribute", + "Name", + "RelativeDistinguishedName", + "ObjectIdentifier", + "ExtensionType", + "Extensions", + "Extension", + "ExtendedKeyUsage", + "FreshestCRL", + "IssuingDistributionPoint", + "TLSFeature", + "TLSFeatureType", + "OCSPNoCheck", + "BasicConstraints", + "CRLNumber", + "KeyUsage", + "AuthorityInformationAccess", + "SubjectInformationAccess", + "AccessDescription", + "CertificatePolicies", + "PolicyInformation", + "UserNotice", + "NoticeReference", + "SubjectKeyIdentifier", + "NameConstraints", + "CRLDistributionPoints", + "DistributionPoint", + 
"ReasonFlags", + "InhibitAnyPolicy", + "SubjectAlternativeName", + "IssuerAlternativeName", + "AuthorityKeyIdentifier", + "GeneralNames", + "GeneralName", + "RFC822Name", + "DNSName", + "UniformResourceIdentifier", + "RegisteredID", + "DirectoryName", + "IPAddress", + "OtherName", + "Certificate", + "CertificateRevocationList", + "CertificateRevocationListBuilder", + "CertificateSigningRequest", + "RevokedCertificate", + "RevokedCertificateBuilder", + "CertificateSigningRequestBuilder", + "CertificateBuilder", + "Version", + "OID_CA_ISSUERS", + "OID_OCSP", + "CertificateIssuer", + "CRLReason", + "InvalidityDate", + "UnrecognizedExtension", + "PolicyConstraints", + "PrecertificateSignedCertificateTimestamps", + "PrecertPoison", + "OCSPNonce", + "SignedCertificateTimestamps", + "SignatureAlgorithmOID", + "NameOID", +] diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/base.py b/myenv/lib/python3.9/site-packages/cryptography/x509/base.py new file mode 100644 index 0000000..8213682 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/base.py @@ -0,0 +1,1090 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import datetime +import os +import typing + +from cryptography import utils +from cryptography.hazmat.bindings._rust import x509 as rust_x509 +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + dsa, + ec, + ed25519, + ed448, + rsa, + x25519, + x448, +) +from cryptography.hazmat.primitives.asymmetric.types import ( + CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES, + CERTIFICATE_PRIVATE_KEY_TYPES, + CERTIFICATE_PUBLIC_KEY_TYPES, +) +from cryptography.x509.extensions import ( + Extension, + ExtensionType, + Extensions, + _make_sequence_methods, +) +from cryptography.x509.name import Name, _ASN1Type +from cryptography.x509.oid import ObjectIdentifier + + +_EARLIEST_UTC_TIME = datetime.datetime(1950, 1, 1) + + +class AttributeNotFound(Exception): + def __init__(self, msg: str, oid: ObjectIdentifier) -> None: + super(AttributeNotFound, self).__init__(msg) + self.oid = oid + + +def _reject_duplicate_extension( + extension: Extension[ExtensionType], + extensions: typing.List[Extension[ExtensionType]], +) -> None: + # This is quadratic in the number of extensions + for e in extensions: + if e.oid == extension.oid: + raise ValueError("This extension has already been set.") + + +def _reject_duplicate_attribute( + oid: ObjectIdentifier, + attributes: typing.List[ + typing.Tuple[ObjectIdentifier, bytes, typing.Optional[int]] + ], +) -> None: + # This is quadratic in the number of attributes + for attr_oid, _, _ in attributes: + if attr_oid == oid: + raise ValueError("This attribute has already been set.") + + +def _convert_to_naive_utc_time(time: datetime.datetime) -> datetime.datetime: + """Normalizes a datetime to a naive datetime in UTC. + + time -- datetime to normalize. Assumed to be in UTC if not timezone + aware. 
+ """ + if time.tzinfo is not None: + offset = time.utcoffset() + offset = offset if offset else datetime.timedelta() + return time.replace(tzinfo=None) - offset + else: + return time + + +class Attribute: + def __init__( + self, + oid: ObjectIdentifier, + value: bytes, + _type: int = _ASN1Type.UTF8String.value, + ) -> None: + self._oid = oid + self._value = value + self._type = _type + + @property + def oid(self) -> ObjectIdentifier: + return self._oid + + @property + def value(self) -> bytes: + return self._value + + def __repr__(self) -> str: + return "".format(self.oid, self.value) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Attribute): + return NotImplemented + + return ( + self.oid == other.oid + and self.value == other.value + and self._type == other._type + ) + + def __hash__(self) -> int: + return hash((self.oid, self.value, self._type)) + + +class Attributes: + def __init__( + self, + attributes: typing.Iterable[Attribute], + ) -> None: + self._attributes = list(attributes) + + __len__, __iter__, __getitem__ = _make_sequence_methods("_attributes") + + def __repr__(self) -> str: + return "".format(self._attributes) + + def get_attribute_for_oid(self, oid: ObjectIdentifier) -> Attribute: + for attr in self: + if attr.oid == oid: + return attr + + raise AttributeNotFound("No {} attribute was found".format(oid), oid) + + +class Version(utils.Enum): + v1 = 0 + v3 = 2 + + +class InvalidVersion(Exception): + def __init__(self, msg: str, parsed_version: int) -> None: + super(InvalidVersion, self).__init__(msg) + self.parsed_version = parsed_version + + +class Certificate(metaclass=abc.ABCMeta): + @abc.abstractmethod + def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes: + """ + Returns bytes using digest passed. + """ + + @abc.abstractproperty + def serial_number(self) -> int: + """ + Returns certificate serial number + """ + + @abc.abstractproperty + def version(self) -> Version: + """ + Returns the certificate version + """ + + @abc.abstractmethod + def public_key(self) -> CERTIFICATE_PUBLIC_KEY_TYPES: + """ + Returns the public key + """ + + @abc.abstractproperty + def not_valid_before(self) -> datetime.datetime: + """ + Not before time (represented as UTC datetime) + """ + + @abc.abstractproperty + def not_valid_after(self) -> datetime.datetime: + """ + Not after time (represented as UTC datetime) + """ + + @abc.abstractproperty + def issuer(self) -> Name: + """ + Returns the issuer name object. + """ + + @abc.abstractproperty + def subject(self) -> Name: + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm( + self, + ) -> typing.Optional[hashes.HashAlgorithm]: + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self) -> ObjectIdentifier: + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self) -> Extensions: + """ + Returns an Extensions object. + """ + + @abc.abstractproperty + def signature(self) -> bytes: + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certificate_bytes(self) -> bytes: + """ + Returns the tbsCertificate payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Checks equality. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Computes a hash. 
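_convert_to_naive_utc_time above shifts an aware datetime by its UTC offset and drops the tzinfo, so every validity date the builders store is a naive UTC value. The same rule, sketched standalone:

    import datetime

    tz = datetime.timezone(datetime.timedelta(hours=2))
    aware = datetime.datetime(2030, 1, 1, 12, 0, tzinfo=tz)
    naive = aware.replace(tzinfo=None) - aware.utcoffset()   # what the helper does
    assert naive == datetime.datetime(2030, 1, 1, 10, 0)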
+ """ + + @abc.abstractmethod + def public_bytes(self, encoding: serialization.Encoding) -> bytes: + """ + Serializes the certificate to PEM or DER format. + """ + + +# Runtime isinstance checks need this since the rust class is not a subclass. +Certificate.register(rust_x509.Certificate) + + +class RevokedCertificate(metaclass=abc.ABCMeta): + @abc.abstractproperty + def serial_number(self) -> int: + """ + Returns the serial number of the revoked certificate. + """ + + @abc.abstractproperty + def revocation_date(self) -> datetime.datetime: + """ + Returns the date of when this certificate was revoked. + """ + + @abc.abstractproperty + def extensions(self) -> Extensions: + """ + Returns an Extensions object containing a list of Revoked extensions. + """ + + +# Runtime isinstance checks need this since the rust class is not a subclass. +RevokedCertificate.register(rust_x509.RevokedCertificate) + + +class _RawRevokedCertificate(RevokedCertificate): + def __init__( + self, + serial_number: int, + revocation_date: datetime.datetime, + extensions: Extensions, + ): + self._serial_number = serial_number + self._revocation_date = revocation_date + self._extensions = extensions + + @property + def serial_number(self) -> int: + return self._serial_number + + @property + def revocation_date(self) -> datetime.datetime: + return self._revocation_date + + @property + def extensions(self) -> Extensions: + return self._extensions + + +class CertificateRevocationList(metaclass=abc.ABCMeta): + @abc.abstractmethod + def public_bytes(self, encoding: serialization.Encoding) -> bytes: + """ + Serializes the CRL to PEM or DER format. + """ + + @abc.abstractmethod + def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes: + """ + Returns bytes using digest passed. + """ + + @abc.abstractmethod + def get_revoked_certificate_by_serial_number( + self, serial_number: int + ) -> typing.Optional[RevokedCertificate]: + """ + Returns an instance of RevokedCertificate or None if the serial_number + is not in the CRL. + """ + + @abc.abstractproperty + def signature_hash_algorithm( + self, + ) -> typing.Optional[hashes.HashAlgorithm]: + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self) -> ObjectIdentifier: + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def issuer(self) -> Name: + """ + Returns the X509Name with the issuer of this CRL. + """ + + @abc.abstractproperty + def next_update(self) -> typing.Optional[datetime.datetime]: + """ + Returns the date of next update for this CRL. + """ + + @abc.abstractproperty + def last_update(self) -> datetime.datetime: + """ + Returns the date of last update for this CRL. + """ + + @abc.abstractproperty + def extensions(self) -> Extensions: + """ + Returns an Extensions object containing a list of CRL extensions. + """ + + @abc.abstractproperty + def signature(self) -> bytes: + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certlist_bytes(self) -> bytes: + """ + Returns the tbsCertList payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Checks equality. + """ + + @abc.abstractmethod + def __len__(self) -> int: + """ + Number of revoked certificates in the CRL. + """ + + @typing.overload + def __getitem__(self, idx: int) -> RevokedCertificate: + ... 
+ + @typing.overload + def __getitem__(self, idx: slice) -> typing.List[RevokedCertificate]: + ... + + @abc.abstractmethod + def __getitem__( + self, idx: typing.Union[int, slice] + ) -> typing.Union[RevokedCertificate, typing.List[RevokedCertificate]]: + """ + Returns a revoked certificate (or slice of revoked certificates). + """ + + @abc.abstractmethod + def __iter__(self) -> typing.Iterator[RevokedCertificate]: + """ + Iterator over the revoked certificates + """ + + @abc.abstractmethod + def is_signature_valid( + self, public_key: CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES + ) -> bool: + """ + Verifies signature of revocation list against given public key. + """ + + +CertificateRevocationList.register(rust_x509.CertificateRevocationList) + + +class CertificateSigningRequest(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Checks equality. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Computes a hash. + """ + + @abc.abstractmethod + def public_key(self) -> CERTIFICATE_PUBLIC_KEY_TYPES: + """ + Returns the public key + """ + + @abc.abstractproperty + def subject(self) -> Name: + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm( + self, + ) -> typing.Optional[hashes.HashAlgorithm]: + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self) -> ObjectIdentifier: + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self) -> Extensions: + """ + Returns the extensions in the signing request. + """ + + @abc.abstractproperty + def attributes(self) -> Attributes: + """ + Returns an Attributes object. + """ + + @abc.abstractmethod + def public_bytes(self, encoding: serialization.Encoding) -> bytes: + """ + Encodes the request to PEM or DER format. + """ + + @abc.abstractproperty + def signature(self) -> bytes: + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certrequest_bytes(self) -> bytes: + """ + Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC + 2986. + """ + + @abc.abstractproperty + def is_signature_valid(self) -> bool: + """ + Verifies signature of signing request. + """ + + @abc.abstractmethod + def get_attribute_for_oid(self, oid: ObjectIdentifier) -> bytes: + """ + Get the attribute value for a given OID. + """ + + +# Runtime isinstance checks need this since the rust class is not a subclass. +CertificateSigningRequest.register(rust_x509.CertificateSigningRequest) + + +# Backend argument preserved for API compatibility, but ignored. +def load_pem_x509_certificate( + data: bytes, backend: typing.Any = None +) -> Certificate: + return rust_x509.load_pem_x509_certificate(data) + + +# Backend argument preserved for API compatibility, but ignored. +def load_der_x509_certificate( + data: bytes, backend: typing.Any = None +) -> Certificate: + return rust_x509.load_der_x509_certificate(data) + + +# Backend argument preserved for API compatibility, but ignored. +def load_pem_x509_csr( + data: bytes, backend: typing.Any = None +) -> CertificateSigningRequest: + return rust_x509.load_pem_x509_csr(data) + + +# Backend argument preserved for API compatibility, but ignored. 
+def load_der_x509_csr( + data: bytes, backend: typing.Any = None +) -> CertificateSigningRequest: + return rust_x509.load_der_x509_csr(data) + + +# Backend argument preserved for API compatibility, but ignored. +def load_pem_x509_crl( + data: bytes, backend: typing.Any = None +) -> CertificateRevocationList: + return rust_x509.load_pem_x509_crl(data) + + +# Backend argument preserved for API compatibility, but ignored. +def load_der_x509_crl( + data: bytes, backend: typing.Any = None +) -> CertificateRevocationList: + return rust_x509.load_der_x509_crl(data) + + +class CertificateSigningRequestBuilder: + def __init__( + self, + subject_name: typing.Optional[Name] = None, + extensions: typing.List[Extension[ExtensionType]] = [], + attributes: typing.List[ + typing.Tuple[ObjectIdentifier, bytes, typing.Optional[int]] + ] = [], + ): + """ + Creates an empty X.509 certificate request (v1). + """ + self._subject_name = subject_name + self._extensions = extensions + self._attributes = attributes + + def subject_name(self, name: Name) -> "CertificateSigningRequestBuilder": + """ + Sets the certificate requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError("Expecting x509.Name object.") + if self._subject_name is not None: + raise ValueError("The subject name may only be set once.") + return CertificateSigningRequestBuilder( + name, self._extensions, self._attributes + ) + + def add_extension( + self, extval: ExtensionType, critical: bool + ) -> "CertificateSigningRequestBuilder": + """ + Adds an X.509 extension to the certificate request. + """ + if not isinstance(extval, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + + return CertificateSigningRequestBuilder( + self._subject_name, + self._extensions + [extension], + self._attributes, + ) + + def add_attribute( + self, + oid: ObjectIdentifier, + value: bytes, + *, + _tag: typing.Optional[_ASN1Type] = None, + ) -> "CertificateSigningRequestBuilder": + """ + Adds an X.509 attribute with an OID and associated value. + """ + if not isinstance(oid, ObjectIdentifier): + raise TypeError("oid must be an ObjectIdentifier") + + if not isinstance(value, bytes): + raise TypeError("value must be bytes") + + if _tag is not None and not isinstance(_tag, _ASN1Type): + raise TypeError("tag must be _ASN1Type") + + _reject_duplicate_attribute(oid, self._attributes) + + if _tag is not None: + tag = _tag.value + else: + tag = None + + return CertificateSigningRequestBuilder( + self._subject_name, + self._extensions, + self._attributes + [(oid, value, tag)], + ) + + def sign( + self, + private_key: CERTIFICATE_PRIVATE_KEY_TYPES, + algorithm: typing.Optional[hashes.HashAlgorithm], + backend: typing.Any = None, + ) -> CertificateSigningRequest: + """ + Signs the request using the requestor's private key. 
+ """ + if self._subject_name is None: + raise ValueError("A CertificateSigningRequest must have a subject") + return rust_x509.create_x509_csr(self, private_key, algorithm) + + +class CertificateBuilder: + _extensions: typing.List[Extension[ExtensionType]] + + def __init__( + self, + issuer_name: typing.Optional[Name] = None, + subject_name: typing.Optional[Name] = None, + public_key: typing.Optional[CERTIFICATE_PUBLIC_KEY_TYPES] = None, + serial_number: typing.Optional[int] = None, + not_valid_before: typing.Optional[datetime.datetime] = None, + not_valid_after: typing.Optional[datetime.datetime] = None, + extensions: typing.List[Extension[ExtensionType]] = [], + ) -> None: + self._version = Version.v3 + self._issuer_name = issuer_name + self._subject_name = subject_name + self._public_key = public_key + self._serial_number = serial_number + self._not_valid_before = not_valid_before + self._not_valid_after = not_valid_after + self._extensions = extensions + + def issuer_name(self, name: Name) -> "CertificateBuilder": + """ + Sets the CA's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError("Expecting x509.Name object.") + if self._issuer_name is not None: + raise ValueError("The issuer name may only be set once.") + return CertificateBuilder( + name, + self._subject_name, + self._public_key, + self._serial_number, + self._not_valid_before, + self._not_valid_after, + self._extensions, + ) + + def subject_name(self, name: Name) -> "CertificateBuilder": + """ + Sets the requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError("Expecting x509.Name object.") + if self._subject_name is not None: + raise ValueError("The subject name may only be set once.") + return CertificateBuilder( + self._issuer_name, + name, + self._public_key, + self._serial_number, + self._not_valid_before, + self._not_valid_after, + self._extensions, + ) + + def public_key( + self, + key: CERTIFICATE_PUBLIC_KEY_TYPES, + ) -> "CertificateBuilder": + """ + Sets the requestor's public key (as found in the signing request). + """ + if not isinstance( + key, + ( + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + x25519.X25519PublicKey, + x448.X448PublicKey, + ), + ): + raise TypeError( + "Expecting one of DSAPublicKey, RSAPublicKey," + " EllipticCurvePublicKey, Ed25519PublicKey," + " Ed448PublicKey, X25519PublicKey, or " + "X448PublicKey." + ) + if self._public_key is not None: + raise ValueError("The public key may only be set once.") + return CertificateBuilder( + self._issuer_name, + self._subject_name, + key, + self._serial_number, + self._not_valid_before, + self._not_valid_after, + self._extensions, + ) + + def serial_number(self, number: int) -> "CertificateBuilder": + """ + Sets the certificate serial number. + """ + if not isinstance(number, int): + raise TypeError("Serial number must be of integral type.") + if self._serial_number is not None: + raise ValueError("The serial number may only be set once.") + if number <= 0: + raise ValueError("The serial number should be positive.") + + # ASN.1 integers are always signed, so most significant bit must be + # zero. + if number.bit_length() >= 160: # As defined in RFC 5280 + raise ValueError( + "The serial number should not be more than 159 " "bits." 
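CertificateSigningRequestBuilder above is immutable: every setter returns a new builder, and sign() refuses to run without a subject. A minimal CSR sketch using the package's public API (RSA key generated on the fly; the name is a placeholder):

    from cryptography import x509
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    csr = (
        x509.CertificateSigningRequestBuilder()
        .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.test")]))
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName("example.test")]), critical=False
        )
        .sign(key, hashes.SHA256())
    )
    assert csr.is_signature_valid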
+ ) + return CertificateBuilder( + self._issuer_name, + self._subject_name, + self._public_key, + number, + self._not_valid_before, + self._not_valid_after, + self._extensions, + ) + + def not_valid_before( + self, time: datetime.datetime + ) -> "CertificateBuilder": + """ + Sets the certificate activation time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError("Expecting datetime object.") + if self._not_valid_before is not None: + raise ValueError("The not valid before may only be set once.") + time = _convert_to_naive_utc_time(time) + if time < _EARLIEST_UTC_TIME: + raise ValueError( + "The not valid before date must be on or after" + " 1950 January 1)." + ) + if self._not_valid_after is not None and time > self._not_valid_after: + raise ValueError( + "The not valid before date must be before the not valid after " + "date." + ) + return CertificateBuilder( + self._issuer_name, + self._subject_name, + self._public_key, + self._serial_number, + time, + self._not_valid_after, + self._extensions, + ) + + def not_valid_after(self, time: datetime.datetime) -> "CertificateBuilder": + """ + Sets the certificate expiration time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError("Expecting datetime object.") + if self._not_valid_after is not None: + raise ValueError("The not valid after may only be set once.") + time = _convert_to_naive_utc_time(time) + if time < _EARLIEST_UTC_TIME: + raise ValueError( + "The not valid after date must be on or after" + " 1950 January 1." + ) + if ( + self._not_valid_before is not None + and time < self._not_valid_before + ): + raise ValueError( + "The not valid after date must be after the not valid before " + "date." + ) + return CertificateBuilder( + self._issuer_name, + self._subject_name, + self._public_key, + self._serial_number, + self._not_valid_before, + time, + self._extensions, + ) + + def add_extension( + self, extval: ExtensionType, critical: bool + ) -> "CertificateBuilder": + """ + Adds an X.509 extension to the certificate. + """ + if not isinstance(extval, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + + return CertificateBuilder( + self._issuer_name, + self._subject_name, + self._public_key, + self._serial_number, + self._not_valid_before, + self._not_valid_after, + self._extensions + [extension], + ) + + def sign( + self, + private_key: CERTIFICATE_PRIVATE_KEY_TYPES, + algorithm: typing.Optional[hashes.HashAlgorithm], + backend: typing.Any = None, + ) -> Certificate: + """ + Signs the certificate using the CA's private key. 
+ """ + if self._subject_name is None: + raise ValueError("A certificate must have a subject name") + + if self._issuer_name is None: + raise ValueError("A certificate must have an issuer name") + + if self._serial_number is None: + raise ValueError("A certificate must have a serial number") + + if self._not_valid_before is None: + raise ValueError("A certificate must have a not valid before time") + + if self._not_valid_after is None: + raise ValueError("A certificate must have a not valid after time") + + if self._public_key is None: + raise ValueError("A certificate must have a public key") + + return rust_x509.create_x509_certificate(self, private_key, algorithm) + + +class CertificateRevocationListBuilder: + _extensions: typing.List[Extension[ExtensionType]] + _revoked_certificates: typing.List[RevokedCertificate] + + def __init__( + self, + issuer_name: typing.Optional[Name] = None, + last_update: typing.Optional[datetime.datetime] = None, + next_update: typing.Optional[datetime.datetime] = None, + extensions: typing.List[Extension[ExtensionType]] = [], + revoked_certificates: typing.List[RevokedCertificate] = [], + ): + self._issuer_name = issuer_name + self._last_update = last_update + self._next_update = next_update + self._extensions = extensions + self._revoked_certificates = revoked_certificates + + def issuer_name( + self, issuer_name: Name + ) -> "CertificateRevocationListBuilder": + if not isinstance(issuer_name, Name): + raise TypeError("Expecting x509.Name object.") + if self._issuer_name is not None: + raise ValueError("The issuer name may only be set once.") + return CertificateRevocationListBuilder( + issuer_name, + self._last_update, + self._next_update, + self._extensions, + self._revoked_certificates, + ) + + def last_update( + self, last_update: datetime.datetime + ) -> "CertificateRevocationListBuilder": + if not isinstance(last_update, datetime.datetime): + raise TypeError("Expecting datetime object.") + if self._last_update is not None: + raise ValueError("Last update may only be set once.") + last_update = _convert_to_naive_utc_time(last_update) + if last_update < _EARLIEST_UTC_TIME: + raise ValueError( + "The last update date must be on or after" " 1950 January 1." + ) + if self._next_update is not None and last_update > self._next_update: + raise ValueError( + "The last update date must be before the next update date." + ) + return CertificateRevocationListBuilder( + self._issuer_name, + last_update, + self._next_update, + self._extensions, + self._revoked_certificates, + ) + + def next_update( + self, next_update: datetime.datetime + ) -> "CertificateRevocationListBuilder": + if not isinstance(next_update, datetime.datetime): + raise TypeError("Expecting datetime object.") + if self._next_update is not None: + raise ValueError("Last update may only be set once.") + next_update = _convert_to_naive_utc_time(next_update) + if next_update < _EARLIEST_UTC_TIME: + raise ValueError( + "The last update date must be on or after" " 1950 January 1." + ) + if self._last_update is not None and next_update < self._last_update: + raise ValueError( + "The next update date must be after the last update date." + ) + return CertificateRevocationListBuilder( + self._issuer_name, + self._last_update, + next_update, + self._extensions, + self._revoked_certificates, + ) + + def add_extension( + self, extval: ExtensionType, critical: bool + ) -> "CertificateRevocationListBuilder": + """ + Adds an X.509 extension to the certificate revocation list. 
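Putting the CertificateBuilder checks above together: a certificate needs a subject, issuer, public key, serial number and both validity bounds before sign() will proceed. A compact self-signed sketch (EC key generated on the fly; the name is a placeholder):

    import datetime

    from cryptography import x509
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.x509.oid import NameOID

    key = ec.generate_private_key(ec.SECP256R1())
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.test")])
    now = datetime.datetime.utcnow()
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)                           # self-signed: issuer == subject
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())  # stays under 160 bits, as enforced above
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=30))
        .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
        .sign(key, hashes.SHA256())
    )
    assert cert.subject == cert.issuer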
+ """ + if not isinstance(extval, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + return CertificateRevocationListBuilder( + self._issuer_name, + self._last_update, + self._next_update, + self._extensions + [extension], + self._revoked_certificates, + ) + + def add_revoked_certificate( + self, revoked_certificate: RevokedCertificate + ) -> "CertificateRevocationListBuilder": + """ + Adds a revoked certificate to the CRL. + """ + if not isinstance(revoked_certificate, RevokedCertificate): + raise TypeError("Must be an instance of RevokedCertificate") + + return CertificateRevocationListBuilder( + self._issuer_name, + self._last_update, + self._next_update, + self._extensions, + self._revoked_certificates + [revoked_certificate], + ) + + def sign( + self, + private_key: CERTIFICATE_PRIVATE_KEY_TYPES, + algorithm: typing.Optional[hashes.HashAlgorithm], + backend: typing.Any = None, + ) -> CertificateRevocationList: + if self._issuer_name is None: + raise ValueError("A CRL must have an issuer name") + + if self._last_update is None: + raise ValueError("A CRL must have a last update time") + + if self._next_update is None: + raise ValueError("A CRL must have a next update time") + + return rust_x509.create_x509_crl(self, private_key, algorithm) + + +class RevokedCertificateBuilder: + def __init__( + self, + serial_number: typing.Optional[int] = None, + revocation_date: typing.Optional[datetime.datetime] = None, + extensions: typing.List[Extension[ExtensionType]] = [], + ): + self._serial_number = serial_number + self._revocation_date = revocation_date + self._extensions = extensions + + def serial_number(self, number: int) -> "RevokedCertificateBuilder": + if not isinstance(number, int): + raise TypeError("Serial number must be of integral type.") + if self._serial_number is not None: + raise ValueError("The serial number may only be set once.") + if number <= 0: + raise ValueError("The serial number should be positive") + + # ASN.1 integers are always signed, so most significant bit must be + # zero. + if number.bit_length() >= 160: # As defined in RFC 5280 + raise ValueError( + "The serial number should not be more than 159 " "bits." + ) + return RevokedCertificateBuilder( + number, self._revocation_date, self._extensions + ) + + def revocation_date( + self, time: datetime.datetime + ) -> "RevokedCertificateBuilder": + if not isinstance(time, datetime.datetime): + raise TypeError("Expecting datetime object.") + if self._revocation_date is not None: + raise ValueError("The revocation date may only be set once.") + time = _convert_to_naive_utc_time(time) + if time < _EARLIEST_UTC_TIME: + raise ValueError( + "The revocation date must be on or after" " 1950 January 1." 
+ ) + return RevokedCertificateBuilder( + self._serial_number, time, self._extensions + ) + + def add_extension( + self, extval: ExtensionType, critical: bool + ) -> "RevokedCertificateBuilder": + if not isinstance(extval, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + return RevokedCertificateBuilder( + self._serial_number, + self._revocation_date, + self._extensions + [extension], + ) + + def build(self, backend: typing.Any = None) -> RevokedCertificate: + if self._serial_number is None: + raise ValueError("A revoked certificate must have a serial number") + if self._revocation_date is None: + raise ValueError( + "A revoked certificate must have a revocation date" + ) + return _RawRevokedCertificate( + self._serial_number, + self._revocation_date, + Extensions(self._extensions), + ) + + +def random_serial_number() -> int: + return int.from_bytes(os.urandom(20), "big") >> 1 diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/certificate_transparency.py b/myenv/lib/python3.9/site-packages/cryptography/x509/certificate_transparency.py new file mode 100644 index 0000000..8c198a1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/certificate_transparency.py @@ -0,0 +1,48 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import datetime + +from cryptography import utils +from cryptography.hazmat.bindings._rust import x509 as rust_x509 + + +class LogEntryType(utils.Enum): + X509_CERTIFICATE = 0 + PRE_CERTIFICATE = 1 + + +class Version(utils.Enum): + v1 = 0 + + +class SignedCertificateTimestamp(metaclass=abc.ABCMeta): + @abc.abstractproperty + def version(self) -> Version: + """ + Returns the SCT version. + """ + + @abc.abstractproperty + def log_id(self) -> bytes: + """ + Returns an identifier indicating which log this SCT is for. + """ + + @abc.abstractproperty + def timestamp(self) -> datetime.datetime: + """ + Returns the timestamp for this SCT. + """ + + @abc.abstractproperty + def entry_type(self) -> LogEntryType: + """ + Returns whether this is an SCT for a certificate or pre-certificate. + """ + + +SignedCertificateTimestamp.register(rust_x509.Sct) diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/extensions.py b/myenv/lib/python3.9/site-packages/cryptography/x509/extensions.py new file mode 100644 index 0000000..e64f602 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/extensions.py @@ -0,0 +1,2103 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
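The builder chain defined in base.py above is easiest to see end to end. A minimal, illustrative sketch (not part of this diff) of issuing a self-signed certificate through the package's public cryptography.x509 namespace; the key and "example.com" are placeholders:

import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.com")])
now = datetime.datetime.utcnow()
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)  # self-signed: issuer and subject are the same Name
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())  # 159-bit value, per the checks above
    .not_valid_before(now)
    .not_valid_after(now + datetime.timedelta(days=30))
    .sign(key, hashes.SHA256())
)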
+ + +import abc +import datetime +import hashlib +import ipaddress +import typing + +from cryptography import utils +from cryptography.hazmat.bindings._rust import asn1 +from cryptography.hazmat.bindings._rust import x509 as rust_x509 +from cryptography.hazmat.primitives import constant_time, serialization +from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.types import ( + CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES, + CERTIFICATE_PUBLIC_KEY_TYPES, +) +from cryptography.x509.certificate_transparency import ( + SignedCertificateTimestamp, +) +from cryptography.x509.general_name import ( + DNSName, + DirectoryName, + GeneralName, + IPAddress, + OtherName, + RFC822Name, + RegisteredID, + UniformResourceIdentifier, + _IPADDRESS_TYPES, +) +from cryptography.x509.name import Name, RelativeDistinguishedName +from cryptography.x509.oid import ( + CRLEntryExtensionOID, + ExtensionOID, + OCSPExtensionOID, + ObjectIdentifier, +) + +ExtensionTypeVar = typing.TypeVar( + "ExtensionTypeVar", bound="ExtensionType", covariant=True +) + + +def _key_identifier_from_public_key( + public_key: CERTIFICATE_PUBLIC_KEY_TYPES, +) -> bytes: + if isinstance(public_key, RSAPublicKey): + data = public_key.public_bytes( + serialization.Encoding.DER, + serialization.PublicFormat.PKCS1, + ) + elif isinstance(public_key, EllipticCurvePublicKey): + data = public_key.public_bytes( + serialization.Encoding.X962, + serialization.PublicFormat.UncompressedPoint, + ) + else: + # This is a very slow way to do this. + serialized = public_key.public_bytes( + serialization.Encoding.DER, + serialization.PublicFormat.SubjectPublicKeyInfo, + ) + data = asn1.parse_spki_for_data(serialized) + + return hashlib.sha1(data).digest() + + +def _make_sequence_methods(field_name: str): + def len_method(self) -> int: + return len(getattr(self, field_name)) + + def iter_method(self): + return iter(getattr(self, field_name)) + + def getitem_method(self, idx): + return getattr(self, field_name)[idx] + + return len_method, iter_method, getitem_method + + +class DuplicateExtension(Exception): + def __init__(self, msg: str, oid: ObjectIdentifier) -> None: + super(DuplicateExtension, self).__init__(msg) + self.oid = oid + + +class ExtensionNotFound(Exception): + def __init__(self, msg: str, oid: ObjectIdentifier) -> None: + super(ExtensionNotFound, self).__init__(msg) + self.oid = oid + + +class ExtensionType(metaclass=abc.ABCMeta): + oid: typing.ClassVar[ObjectIdentifier] + + def public_bytes(self) -> bytes: + """ + Serializes the extension type to DER. + """ + raise NotImplementedError( + "public_bytes is not implemented for extension type {0!r}".format( + self + ) + ) + + +class Extensions: + def __init__( + self, extensions: typing.Iterable["Extension[ExtensionType]"] + ) -> None: + self._extensions = list(extensions) + + def get_extension_for_oid( + self, oid: ObjectIdentifier + ) -> "Extension[ExtensionType]": + for ext in self: + if ext.oid == oid: + return ext + + raise ExtensionNotFound("No {} extension was found".format(oid), oid) + + def get_extension_for_class( + self, extclass: typing.Type[ExtensionTypeVar] + ) -> "Extension[ExtensionTypeVar]": + if extclass is UnrecognizedExtension: + raise TypeError( + "UnrecognizedExtension can't be used with " + "get_extension_for_class because more than one instance of the" + " class may be present." 
+ )
+
+ for ext in self:
+ if isinstance(ext.value, extclass):
+ return ext
+
+ raise ExtensionNotFound(
+ "No {} extension was found".format(extclass), extclass.oid
+ )
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_extensions")
+
+ def __repr__(self) -> str:
+ return "<Extensions({})>".format(self._extensions)
+
+
+class CRLNumber(ExtensionType):
+ oid = ExtensionOID.CRL_NUMBER
+
+ def __init__(self, crl_number: int) -> None:
+ if not isinstance(crl_number, int):
+ raise TypeError("crl_number must be an integer")
+
+ self._crl_number = crl_number
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, CRLNumber):
+ return NotImplemented
+
+ return self.crl_number == other.crl_number
+
+ def __hash__(self) -> int:
+ return hash(self.crl_number)
+
+ def __repr__(self) -> str:
+ return "<CRLNumber({})>".format(self.crl_number)
+
+ @property
+ def crl_number(self) -> int:
+ return self._crl_number
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
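Extensions.get_extension_for_class above is the usual read path. A small sketch, where cert is assumed to be an already-parsed Certificate (hypothetical variable):

from cryptography import x509

try:
    ext = cert.extensions.get_extension_for_class(x509.BasicConstraints)
    print(ext.critical, ext.value.ca)
except x509.ExtensionNotFound:
    print("no BasicConstraints extension present")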
+
+
+class AuthorityKeyIdentifier(ExtensionType):
+ oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
+
+ def __init__(
+ self,
+ key_identifier: typing.Optional[bytes],
+ authority_cert_issuer: typing.Optional[typing.Iterable[GeneralName]],
+ authority_cert_serial_number: typing.Optional[int],
+ ) -> None:
+ if (authority_cert_issuer is None) != (
+ authority_cert_serial_number is None
+ ):
+ raise ValueError(
+ "authority_cert_issuer and authority_cert_serial_number "
+ "must both be present or both None"
+ )
+
+ if authority_cert_issuer is not None:
+ authority_cert_issuer = list(authority_cert_issuer)
+ if not all(
+ isinstance(x, GeneralName) for x in authority_cert_issuer
+ ):
+ raise TypeError(
+ "authority_cert_issuer must be a list of GeneralName "
+ "objects"
+ )
+
+ if authority_cert_serial_number is not None and not isinstance(
+ authority_cert_serial_number, int
+ ):
+ raise TypeError("authority_cert_serial_number must be an integer")
+
+ self._key_identifier = key_identifier
+ self._authority_cert_issuer = authority_cert_issuer
+ self._authority_cert_serial_number = authority_cert_serial_number
+
+ # This takes a subset of CERTIFICATE_PUBLIC_KEY_TYPES because an issuer
+ # cannot have an X25519/X448 key. This introduces some unfortunate
+ # asymmetry that requires typing users to explicitly
+ # narrow their type, but we should make this accurate and not just
+ # convenient.
+ @classmethod
+ def from_issuer_public_key(
+ cls, public_key: CERTIFICATE_ISSUER_PUBLIC_KEY_TYPES
+ ) -> "AuthorityKeyIdentifier":
+ digest = _key_identifier_from_public_key(public_key)
+ return cls(
+ key_identifier=digest,
+ authority_cert_issuer=None,
+ authority_cert_serial_number=None,
+ )
+
+ @classmethod
+ def from_issuer_subject_key_identifier(
+ cls, ski: "SubjectKeyIdentifier"
+ ) -> "AuthorityKeyIdentifier":
+ return cls(
+ key_identifier=ski.digest,
+ authority_cert_issuer=None,
+ authority_cert_serial_number=None,
+ )
+
+ def __repr__(self) -> str:
+ return (
+ "<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
+ "authority_cert_issuer={0.authority_cert_issuer}, "
+ "authority_cert_serial_number={0.authority_cert_serial_number}"
+ ")>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, AuthorityKeyIdentifier):
+ return NotImplemented
+
+ return (
+ self.key_identifier == other.key_identifier
+ and self.authority_cert_issuer == other.authority_cert_issuer
+ and self.authority_cert_serial_number
+ == other.authority_cert_serial_number
+ )
+
+ def __hash__(self) -> int:
+ if self.authority_cert_issuer is None:
+ aci = None
+ else:
+ aci = tuple(self.authority_cert_issuer)
+ return hash(
+ (self.key_identifier, aci, self.authority_cert_serial_number)
+ )
+
+ @property
+ def key_identifier(self) -> typing.Optional[bytes]:
+ return self._key_identifier
+
+ @property
+ def authority_cert_issuer(
+ self,
+ ) -> typing.Optional[typing.List[GeneralName]]:
+ return self._authority_cert_issuer
+
+ @property
+ def authority_cert_serial_number(self) -> typing.Optional[int]:
+ return self._authority_cert_serial_number
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class SubjectKeyIdentifier(ExtensionType):
+ oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER
+
+ def __init__(self, digest: bytes) -> None:
+ self._digest = digest
+
+ @classmethod
+ def from_public_key(
+ cls, public_key: CERTIFICATE_PUBLIC_KEY_TYPES
+ ) -> "SubjectKeyIdentifier":
+ return cls(_key_identifier_from_public_key(public_key))
+
+ @property
+ def digest(self) -> bytes:
+ return self._digest
+
+ @property
+ def key_identifier(self) -> bytes:
+ return self._digest
+
+ def __repr__(self) -> str:
+ return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, SubjectKeyIdentifier):
+ return NotImplemented
+
+ return constant_time.bytes_eq(self.digest, other.digest)
+
+ def __hash__(self) -> int:
+ return hash(self.digest)
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
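The two key-identifier extensions above are normally derived from keys rather than hand-built. A sketch; issuer_key is a placeholder name, and both identifiers wrap the SHA-1 digest computed by _key_identifier_from_public_key:

from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import rsa

issuer_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
ski = x509.SubjectKeyIdentifier.from_public_key(issuer_key.public_key())
aki = x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski)
assert aki.key_identifier == ski.digest  # same key identifier on both sides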
+
+
+class AuthorityInformationAccess(ExtensionType):
+ oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
+
+ def __init__(
+ self, descriptions: typing.Iterable["AccessDescription"]
+ ) -> None:
+ descriptions = list(descriptions)
+ if not all(isinstance(x, AccessDescription) for x in descriptions):
+ raise TypeError(
+ "Every item in the descriptions list must be an "
+ "AccessDescription"
+ )
+
+ self._descriptions = descriptions
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_descriptions")
+
+ def __repr__(self) -> str:
+ return "<AuthorityInformationAccess({})>".format(self._descriptions)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, AuthorityInformationAccess):
+ return NotImplemented
+
+ return self._descriptions == other._descriptions
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._descriptions))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class SubjectInformationAccess(ExtensionType):
+ oid = ExtensionOID.SUBJECT_INFORMATION_ACCESS
+
+ def __init__(
+ self, descriptions: typing.Iterable["AccessDescription"]
+ ) -> None:
+ descriptions = list(descriptions)
+ if not all(isinstance(x, AccessDescription) for x in descriptions):
+ raise TypeError(
+ "Every item in the descriptions list must be an "
+ "AccessDescription"
+ )
+
+ self._descriptions = descriptions
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_descriptions")
+
+ def __repr__(self) -> str:
+ return "<SubjectInformationAccess({})>".format(self._descriptions)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, SubjectInformationAccess):
+ return NotImplemented
+
+ return self._descriptions == other._descriptions
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._descriptions))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class AccessDescription:
+ def __init__(
+ self, access_method: ObjectIdentifier, access_location: GeneralName
+ ) -> None:
+ if not isinstance(access_method, ObjectIdentifier):
+ raise TypeError("access_method must be an ObjectIdentifier")
+
+ if not isinstance(access_location, GeneralName):
+ raise TypeError("access_location must be a GeneralName")
+
+ self._access_method = access_method
+ self._access_location = access_location
+
+ def __repr__(self) -> str:
+ return (
+ "<AccessDescription(access_method={0.access_method}, "
+ "access_location={0.access_location})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, AccessDescription):
+ return NotImplemented
+
+ return (
+ self.access_method == other.access_method
+ and self.access_location == other.access_location
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.access_method, self.access_location))
+
+ @property
+ def access_method(self) -> ObjectIdentifier:
+ return self._access_method
+
+ @property
+ def access_location(self) -> GeneralName:
+ return self._access_location
+
+
+class BasicConstraints(ExtensionType):
+ oid = ExtensionOID.BASIC_CONSTRAINTS
+
+ def __init__(self, ca: bool, path_length: typing.Optional[int]) -> None:
+ if not isinstance(ca, bool):
+ raise TypeError("ca must be a boolean value")
+
+ if path_length is not None and not ca:
+ raise ValueError("path_length must be None when ca is False")
+
+ if path_length is not None and (
+ not isinstance(path_length, int) or path_length < 0
+ ):
+ raise TypeError(
+ "path_length must be a non-negative integer or None"
+ )
+
+ self._ca = ca
+ self._path_length = path_length
+
+ @property
+ def ca(self) -> bool:
+ return self._ca
+
+ @property
+ def path_length(self) -> typing.Optional[int]:
+ return self._path_length
+
+ def __repr__(self) -> str:
+ return (
+ "<BasicConstraints(ca={0.ca}, " "path_length={0.path_length})>"
+ ).format(self)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, BasicConstraints):
+ return NotImplemented
+
+ return self.ca == other.ca and self.path_length == other.path_length
+
+ def __hash__(self) -> int:
+ return hash((self.ca, self.path_length))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
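BasicConstraints is what distinguishes CA certificates from leaf certificates, and path_length is only meaningful when ca is True, as the validation above enforces. A sketch:

from cryptography import x509

ca_ext = x509.BasicConstraints(ca=True, path_length=0)  # a CA that may only issue leaves
leaf_ext = x509.BasicConstraints(ca=False, path_length=None)
# x509.BasicConstraints(ca=False, path_length=0) would raise ValueError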
+
+
+class DeltaCRLIndicator(ExtensionType):
+ oid = ExtensionOID.DELTA_CRL_INDICATOR
+
+ def __init__(self, crl_number: int) -> None:
+ if not isinstance(crl_number, int):
+ raise TypeError("crl_number must be an integer")
+
+ self._crl_number = crl_number
+
+ @property
+ def crl_number(self) -> int:
+ return self._crl_number
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, DeltaCRLIndicator):
+ return NotImplemented
+
+ return self.crl_number == other.crl_number
+
+ def __hash__(self) -> int:
+ return hash(self.crl_number)
+
+ def __repr__(self) -> str:
+ return "<DeltaCRLIndicator(crl_number={0.crl_number})>".format(self)
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class CRLDistributionPoints(ExtensionType):
+ oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
+
+ def __init__(
+ self, distribution_points: typing.Iterable["DistributionPoint"]
+ ) -> None:
+ distribution_points = list(distribution_points)
+ if not all(
+ isinstance(x, DistributionPoint) for x in distribution_points
+ ):
+ raise TypeError(
+ "distribution_points must be a list of DistributionPoint "
+ "objects"
+ )
+
+ self._distribution_points = distribution_points
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods(
+ "_distribution_points"
+ )
+
+ def __repr__(self) -> str:
+ return "<CRLDistributionPoints({})>".format(self._distribution_points)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, CRLDistributionPoints):
+ return NotImplemented
+
+ return self._distribution_points == other._distribution_points
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._distribution_points))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class FreshestCRL(ExtensionType):
+ oid = ExtensionOID.FRESHEST_CRL
+
+ def __init__(
+ self, distribution_points: typing.Iterable["DistributionPoint"]
+ ) -> None:
+ distribution_points = list(distribution_points)
+ if not all(
+ isinstance(x, DistributionPoint) for x in distribution_points
+ ):
+ raise TypeError(
+ "distribution_points must be a list of DistributionPoint "
+ "objects"
+ )
+
+ self._distribution_points = distribution_points
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods(
+ "_distribution_points"
+ )
+
+ def __repr__(self) -> str:
+ return "<FreshestCRL({})>".format(self._distribution_points)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, FreshestCRL):
+ return NotImplemented
+
+ return self._distribution_points == other._distribution_points
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._distribution_points))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class DistributionPoint:
+ def __init__(
+ self,
+ full_name: typing.Optional[typing.Iterable[GeneralName]],
+ relative_name: typing.Optional[RelativeDistinguishedName],
+ reasons: typing.Optional[typing.FrozenSet["ReasonFlags"]],
+ crl_issuer: typing.Optional[typing.Iterable[GeneralName]],
+ ) -> None:
+ if full_name and relative_name:
+ raise ValueError(
+ "You cannot provide both full_name and relative_name, at "
+ "least one must be None."
+ )
+
+ if full_name is not None:
+ full_name = list(full_name)
+ if not all(isinstance(x, GeneralName) for x in full_name):
+ raise TypeError(
+ "full_name must be a list of GeneralName objects"
+ )
+
+ if relative_name:
+ if not isinstance(relative_name, RelativeDistinguishedName):
+ raise TypeError(
+ "relative_name must be a RelativeDistinguishedName"
+ )
+
+ if crl_issuer is not None:
+ crl_issuer = list(crl_issuer)
+ if not all(isinstance(x, GeneralName) for x in crl_issuer):
+ raise TypeError(
+ "crl_issuer must be None or a list of general names"
+ )
+
+ if reasons and (
+ not isinstance(reasons, frozenset)
+ or not all(isinstance(x, ReasonFlags) for x in reasons)
+ ):
+ raise TypeError("reasons must be None or frozenset of ReasonFlags")
+
+ if reasons and (
+ ReasonFlags.unspecified in reasons
+ or ReasonFlags.remove_from_crl in reasons
+ ):
+ raise ValueError(
+ "unspecified and remove_from_crl are not valid reasons in a "
+ "DistributionPoint"
+ )
+
+ if reasons and not crl_issuer and not (full_name or relative_name):
+ raise ValueError(
+ "You must supply crl_issuer, full_name, or relative_name when "
+ "reasons is not None"
+ )
+
+ self._full_name = full_name
+ self._relative_name = relative_name
+ self._reasons = reasons
+ self._crl_issuer = crl_issuer
+
+ def __repr__(self) -> str:
+ return (
+ "<DistributionPoint(full_name={0.full_name}, relative_name={0."
+ "relative_name}, reasons={0.reasons}, "
+ "crl_issuer={0.crl_issuer})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, DistributionPoint):
+ return NotImplemented
+
+ return (
+ self.full_name == other.full_name
+ and self.relative_name == other.relative_name
+ and self.reasons == other.reasons
+ and self.crl_issuer == other.crl_issuer
+ )
+
+ def __hash__(self) -> int:
+ if self.full_name is not None:
+ fn: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
+ self.full_name
+ )
+ else:
+ fn = None
+
+ if self.crl_issuer is not None:
+ crl_issuer: typing.Optional[
+ typing.Tuple[GeneralName, ...]
+ ] = tuple(self.crl_issuer)
+ else:
+ crl_issuer = None
+
+ return hash((fn, self.relative_name, self.reasons, crl_issuer))
+
+ @property
+ def full_name(self) -> typing.Optional[typing.List[GeneralName]]:
+ return self._full_name
+
+ @property
+ def relative_name(self) -> typing.Optional[RelativeDistinguishedName]:
+ return self._relative_name
+
+ @property
+ def reasons(self) -> typing.Optional[typing.FrozenSet["ReasonFlags"]]:
+ return self._reasons
+
+ @property
+ def crl_issuer(self) -> typing.Optional[typing.List[GeneralName]]:
+ return self._crl_issuer
+
+
+class ReasonFlags(utils.Enum):
+ unspecified = "unspecified"
+ key_compromise = "keyCompromise"
+ ca_compromise = "cACompromise"
+ affiliation_changed = "affiliationChanged"
+ superseded = "superseded"
+ cessation_of_operation = "cessationOfOperation"
+ certificate_hold = "certificateHold"
+ privilege_withdrawn = "privilegeWithdrawn"
+ aa_compromise = "aACompromise"
+ remove_from_crl = "removeFromCRL"
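A DistributionPoint requires full_name or relative_name but not both, as the constructor above enforces. A sketch of the common case, one URI distribution point (the URL is a placeholder):

from cryptography import x509

dp = x509.DistributionPoint(
    full_name=[x509.UniformResourceIdentifier("http://crl.example.com/ca.crl")],
    relative_name=None,
    reasons=None,
    crl_issuer=None,
)
cdp = x509.CRLDistributionPoints([dp])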
+
+
+# These are distribution point bit string mappings. Not to be confused with
+# CRLReason reason flags bit string mappings.
+# ReasonFlags ::= BIT STRING {
+# unused (0),
+# keyCompromise (1),
+# cACompromise (2),
+# affiliationChanged (3),
+# superseded (4),
+# cessationOfOperation (5),
+# certificateHold (6),
+# privilegeWithdrawn (7),
+# aACompromise (8) }
+_REASON_BIT_MAPPING = {
+ 1: ReasonFlags.key_compromise,
+ 2: ReasonFlags.ca_compromise,
+ 3: ReasonFlags.affiliation_changed,
+ 4: ReasonFlags.superseded,
+ 5: ReasonFlags.cessation_of_operation,
+ 6: ReasonFlags.certificate_hold,
+ 7: ReasonFlags.privilege_withdrawn,
+ 8: ReasonFlags.aa_compromise,
+}
+
+
+class PolicyConstraints(ExtensionType):
+ oid = ExtensionOID.POLICY_CONSTRAINTS
+
+ def __init__(
+ self,
+ require_explicit_policy: typing.Optional[int],
+ inhibit_policy_mapping: typing.Optional[int],
+ ) -> None:
+ if require_explicit_policy is not None and not isinstance(
+ require_explicit_policy, int
+ ):
+ raise TypeError(
+ "require_explicit_policy must be a non-negative integer or "
+ "None"
+ )
+
+ if inhibit_policy_mapping is not None and not isinstance(
+ inhibit_policy_mapping, int
+ ):
+ raise TypeError(
+ "inhibit_policy_mapping must be a non-negative integer or None"
+ )
+
+ if inhibit_policy_mapping is None and require_explicit_policy is None:
+ raise ValueError(
+ "At least one of require_explicit_policy and "
+ "inhibit_policy_mapping must not be None"
+ )
+
+ self._require_explicit_policy = require_explicit_policy
+ self._inhibit_policy_mapping = inhibit_policy_mapping
+
+ def __repr__(self) -> str:
+ return (
+ "<PolicyConstraints(require_explicit_policy={0.require_explicit"
+ "_policy}, inhibit_policy_mapping={0.inhibit_policy_"
+ "mapping})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, PolicyConstraints):
+ return NotImplemented
+
+ return (
+ self.require_explicit_policy == other.require_explicit_policy
+ and self.inhibit_policy_mapping == other.inhibit_policy_mapping
+ )
+
+ def __hash__(self) -> int:
+ return hash(
+ (self.require_explicit_policy, self.inhibit_policy_mapping)
+ )
+
+ @property
+ def require_explicit_policy(self) -> typing.Optional[int]:
+ return self._require_explicit_policy
+
+ @property
+ def inhibit_policy_mapping(self) -> typing.Optional[int]:
+ return self._inhibit_policy_mapping
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class CertificatePolicies(ExtensionType):
+ oid = ExtensionOID.CERTIFICATE_POLICIES
+
+ def __init__(self, policies: typing.Iterable["PolicyInformation"]) -> None:
+ policies = list(policies)
+ if not all(isinstance(x, PolicyInformation) for x in policies):
+ raise TypeError(
+ "Every item in the policies list must be a "
+ "PolicyInformation"
+ )
+
+ self._policies = policies
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_policies")
+
+ def __repr__(self) -> str:
+ return "<CertificatePolicies({})>".format(self._policies)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, CertificatePolicies):
+ return NotImplemented
+
+ return self._policies == other._policies
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._policies))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class PolicyInformation:
+ def __init__(
+ self,
+ policy_identifier: ObjectIdentifier,
+ policy_qualifiers: typing.Optional[
+ typing.Iterable[typing.Union[str, "UserNotice"]]
+ ],
+ ) -> None:
+ if not isinstance(policy_identifier, ObjectIdentifier):
+ raise TypeError("policy_identifier must be an ObjectIdentifier")
+
+ self._policy_identifier = policy_identifier
+
+ if policy_qualifiers is not None:
+ policy_qualifiers = list(policy_qualifiers)
+ if not all(
+ isinstance(x, (str, UserNotice)) for x in policy_qualifiers
+ ):
+ raise TypeError(
+ "policy_qualifiers must be a list of strings and/or "
+ "UserNotice objects or None"
+ )
+
+ self._policy_qualifiers = policy_qualifiers
+
+ def __repr__(self) -> str:
+ return (
+ "<PolicyInformation(policy_identifier={0.policy_identifier}, "
+ "policy_qualifiers={0.policy_qualifiers})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, PolicyInformation):
+ return NotImplemented
+
+ return (
+ self.policy_identifier == other.policy_identifier
+ and self.policy_qualifiers == other.policy_qualifiers
+ )
+
+ def __hash__(self) -> int:
+ if self.policy_qualifiers is not None:
+ pq: typing.Optional[
+ typing.Tuple[typing.Union[str, "UserNotice"], ...]
+ ] = tuple(self.policy_qualifiers)
+ else:
+ pq = None
+
+ return hash((self.policy_identifier, pq))
+
+ @property
+ def policy_identifier(self) -> ObjectIdentifier:
+ return self._policy_identifier
+
+ @property
+ def policy_qualifiers(
+ self,
+ ) -> typing.Optional[typing.List[typing.Union[str, "UserNotice"]]]:
+ return self._policy_qualifiers
+
+
+class UserNotice:
+ def __init__(
+ self,
+ notice_reference: typing.Optional["NoticeReference"],
+ explicit_text: typing.Optional[str],
+ ) -> None:
+ if notice_reference and not isinstance(
+ notice_reference, NoticeReference
+ ):
+ raise TypeError(
+ "notice_reference must be None or a NoticeReference"
+ )
+
+ self._notice_reference = notice_reference
+ self._explicit_text = explicit_text
+
+ def __repr__(self) -> str:
+ return (
+ "<UserNotice(notice_reference={0.notice_reference}, "
+ "explicit_text={0.explicit_text!r})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, UserNotice):
+ return NotImplemented
+
+ return (
+ self.notice_reference == other.notice_reference
+ and self.explicit_text == other.explicit_text
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.notice_reference, self.explicit_text))
+
+ @property
+ def notice_reference(self) -> typing.Optional["NoticeReference"]:
+ return self._notice_reference
+
+ @property
+ def explicit_text(self) -> typing.Optional[str]:
+ return self._explicit_text
+
+
+class NoticeReference:
+ def __init__(
+ self,
+ organization: typing.Optional[str],
+ notice_numbers: typing.Iterable[int],
+ ) -> None:
+ self._organization = organization
+ notice_numbers = list(notice_numbers)
+ if not all(isinstance(x, int) for x in notice_numbers):
+ raise TypeError("notice_numbers must be a list of integers")
+
+ self._notice_numbers = notice_numbers
+
+ def __repr__(self) -> str:
+ return (
+ "<NoticeReference(organization={0.organization!r}, "
+ "notice_numbers={0.notice_numbers})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, NoticeReference):
+ return NotImplemented
+
+ return (
+ self.organization == other.organization
+ and self.notice_numbers == other.notice_numbers
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.organization, tuple(self.notice_numbers)))
+
+ @property
+ def organization(self) -> typing.Optional[str]:
+ return self._organization
+
+ @property
+ def notice_numbers(self) -> typing.List[int]:
+ return self._notice_numbers
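PolicyInformation ties a policy OID to optional qualifiers, which may be CPS URI strings or the UserNotice objects defined above. A sketch using the CA/Browser Forum domain-validated OID; the CPS URI is a placeholder:

from cryptography import x509
from cryptography.x509.oid import ObjectIdentifier

policies = x509.CertificatePolicies([
    x509.PolicyInformation(
        ObjectIdentifier("2.23.140.1.2.1"),  # CA/B Forum domain-validated
        ["https://example.com/cps"],  # a CPS URI qualifier, passed as a plain string
    )
])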
+
+
+class ExtendedKeyUsage(ExtensionType):
+ oid = ExtensionOID.EXTENDED_KEY_USAGE
+
+ def __init__(self, usages: typing.Iterable[ObjectIdentifier]) -> None:
+ usages = list(usages)
+ if not all(isinstance(x, ObjectIdentifier) for x in usages):
+ raise TypeError(
+ "Every item in the usages list must be an ObjectIdentifier"
+ )
+
+ self._usages = usages
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_usages")
+
+ def __repr__(self) -> str:
+ return "<ExtendedKeyUsage({})>".format(self._usages)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, ExtendedKeyUsage):
+ return NotImplemented
+
+ return self._usages == other._usages
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._usages))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
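ExtendedKeyUsage is a plain sequence of usage OIDs; the well-known ones live in ExtendedKeyUsageOID. A sketch:

from cryptography import x509
from cryptography.x509.oid import ExtendedKeyUsageOID

eku = x509.ExtendedKeyUsage([
    ExtendedKeyUsageOID.SERVER_AUTH,
    ExtendedKeyUsageOID.CLIENT_AUTH,
])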
+
+
+class OCSPNoCheck(ExtensionType):
+ oid = ExtensionOID.OCSP_NO_CHECK
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, OCSPNoCheck):
+ return NotImplemented
+
+ return True
+
+ def __hash__(self) -> int:
+ return hash(OCSPNoCheck)
+
+ def __repr__(self) -> str:
+ return "<OCSPNoCheck()>"
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class PrecertPoison(ExtensionType):
+ oid = ExtensionOID.PRECERT_POISON
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, PrecertPoison):
+ return NotImplemented
+
+ return True
+
+ def __hash__(self) -> int:
+ return hash(PrecertPoison)
+
+ def __repr__(self) -> str:
+ return "<PrecertPoison()>"
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class TLSFeature(ExtensionType):
+ oid = ExtensionOID.TLS_FEATURE
+
+ def __init__(self, features: typing.Iterable["TLSFeatureType"]) -> None:
+ features = list(features)
+ if (
+ not all(isinstance(x, TLSFeatureType) for x in features)
+ or len(features) == 0
+ ):
+ raise TypeError(
+ "features must be a list of elements from the TLSFeatureType "
+ "enum"
+ )
+
+ self._features = features
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_features")
+
+ def __repr__(self) -> str:
+ return "<TLSFeature(features={0._features})>".format(self)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, TLSFeature):
+ return NotImplemented
+
+ return self._features == other._features
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._features))
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class TLSFeatureType(utils.Enum):
+ # status_request is defined in RFC 6066 and is used for what is commonly
+ # called OCSP Must-Staple when present in the TLS Feature extension in an
+ # X.509 certificate.
+ status_request = 5
+ # status_request_v2 is defined in RFC 6961 and allows multiple OCSP
+ # responses to be provided. It is not currently in use by clients or
+ # servers.
+ status_request_v2 = 17
+
+
+_TLS_FEATURE_TYPE_TO_ENUM = {x.value: x for x in TLSFeatureType}
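As the comments in TLSFeatureType note, status_request in a certificate's TLS Feature extension is what is commonly called OCSP Must-Staple. A one-line sketch:

from cryptography import x509

must_staple = x509.TLSFeature([x509.TLSFeatureType.status_request])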
+
+
+class InhibitAnyPolicy(ExtensionType):
+ oid = ExtensionOID.INHIBIT_ANY_POLICY
+
+ def __init__(self, skip_certs: int) -> None:
+ if not isinstance(skip_certs, int):
+ raise TypeError("skip_certs must be an integer")
+
+ if skip_certs < 0:
+ raise ValueError("skip_certs must be a non-negative integer")
+
+ self._skip_certs = skip_certs
+
+ def __repr__(self) -> str:
+ return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, InhibitAnyPolicy):
+ return NotImplemented
+
+ return self.skip_certs == other.skip_certs
+
+ def __hash__(self) -> int:
+ return hash(self.skip_certs)
+
+ @property
+ def skip_certs(self) -> int:
+ return self._skip_certs
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class KeyUsage(ExtensionType):
+ oid = ExtensionOID.KEY_USAGE
+
+ def __init__(
+ self,
+ digital_signature: bool,
+ content_commitment: bool,
+ key_encipherment: bool,
+ data_encipherment: bool,
+ key_agreement: bool,
+ key_cert_sign: bool,
+ crl_sign: bool,
+ encipher_only: bool,
+ decipher_only: bool,
+ ) -> None:
+ if not key_agreement and (encipher_only or decipher_only):
+ raise ValueError(
+ "encipher_only and decipher_only can only be true when "
+ "key_agreement is true"
+ )
+
+ self._digital_signature = digital_signature
+ self._content_commitment = content_commitment
+ self._key_encipherment = key_encipherment
+ self._data_encipherment = data_encipherment
+ self._key_agreement = key_agreement
+ self._key_cert_sign = key_cert_sign
+ self._crl_sign = crl_sign
+ self._encipher_only = encipher_only
+ self._decipher_only = decipher_only
+
+ @property
+ def digital_signature(self) -> bool:
+ return self._digital_signature
+
+ @property
+ def content_commitment(self) -> bool:
+ return self._content_commitment
+
+ @property
+ def key_encipherment(self) -> bool:
+ return self._key_encipherment
+
+ @property
+ def data_encipherment(self) -> bool:
+ return self._data_encipherment
+
+ @property
+ def key_agreement(self) -> bool:
+ return self._key_agreement
+
+ @property
+ def key_cert_sign(self) -> bool:
+ return self._key_cert_sign
+
+ @property
+ def crl_sign(self) -> bool:
+ return self._crl_sign
+
+ @property
+ def encipher_only(self) -> bool:
+ if not self.key_agreement:
+ raise ValueError(
+ "encipher_only is undefined unless key_agreement is true"
+ )
+ else:
+ return self._encipher_only
+
+ @property
+ def decipher_only(self) -> bool:
+ if not self.key_agreement:
+ raise ValueError(
+ "decipher_only is undefined unless key_agreement is true"
+ )
+ else:
+ return self._decipher_only
+
+ def __repr__(self) -> str:
+ try:
+ encipher_only = self.encipher_only
+ decipher_only = self.decipher_only
+ except ValueError:
+ # Users found None confusing because even though encipher/decipher
+ # have no meaning unless key_agreement is true, to construct an
+ # instance of the class you still need to pass False.
+ encipher_only = False
+ decipher_only = False
+
+ return (
+ "<KeyUsage(digital_signature={0.digital_signature}, "
+ "content_commitment={0.content_commitment}, "
+ "key_encipherment={0.key_encipherment}, "
+ "data_encipherment={0.data_encipherment}, "
+ "key_agreement={0.key_agreement}, "
+ "key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
+ "encipher_only={1}, decipher_only={2})>"
+ ).format(self, encipher_only, decipher_only)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, KeyUsage):
+ return NotImplemented
+
+ return (
+ self.digital_signature == other.digital_signature
+ and self.content_commitment == other.content_commitment
+ and self.key_encipherment == other.key_encipherment
+ and self.data_encipherment == other.data_encipherment
+ and self.key_agreement == other.key_agreement
+ and self.key_cert_sign == other.key_cert_sign
+ and self.crl_sign == other.crl_sign
+ and self._encipher_only == other._encipher_only
+ and self._decipher_only == other._decipher_only
+ )
+
+ def __hash__(self) -> int:
+ return hash(
+ (
+ self.digital_signature,
+ self.content_commitment,
+ self.key_encipherment,
+ self.data_encipherment,
+ self.key_agreement,
+ self.key_cert_sign,
+ self.crl_sign,
+ self._encipher_only,
+ self._decipher_only,
+ )
+ )
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
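KeyUsage takes all nine flags explicitly, and encipher_only/decipher_only may only be True when key_agreement is, per the check above. A sketch of a typical TLS-server profile:

from cryptography import x509

ku = x509.KeyUsage(
    digital_signature=True,
    content_commitment=False,
    key_encipherment=True,
    data_encipherment=False,
    key_agreement=False,
    key_cert_sign=False,
    crl_sign=False,
    encipher_only=False,  # must stay False while key_agreement is False
    decipher_only=False,
)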
+
+
+class NameConstraints(ExtensionType):
+ oid = ExtensionOID.NAME_CONSTRAINTS
+
+ def __init__(
+ self,
+ permitted_subtrees: typing.Optional[typing.Iterable[GeneralName]],
+ excluded_subtrees: typing.Optional[typing.Iterable[GeneralName]],
+ ) -> None:
+ if permitted_subtrees is not None:
+ permitted_subtrees = list(permitted_subtrees)
+ if not permitted_subtrees:
+ raise ValueError(
+ "permitted_subtrees must be a non-empty list or None"
+ )
+ if not all(isinstance(x, GeneralName) for x in permitted_subtrees):
+ raise TypeError(
+ "permitted_subtrees must be a list of GeneralName objects "
+ "or None"
+ )
+
+ self._validate_ip_name(permitted_subtrees)
+
+ if excluded_subtrees is not None:
+ excluded_subtrees = list(excluded_subtrees)
+ if not excluded_subtrees:
+ raise ValueError(
+ "excluded_subtrees must be a non-empty list or None"
+ )
+ if not all(isinstance(x, GeneralName) for x in excluded_subtrees):
+ raise TypeError(
+ "excluded_subtrees must be a list of GeneralName objects "
+ "or None"
+ )
+
+ self._validate_ip_name(excluded_subtrees)
+
+ if permitted_subtrees is None and excluded_subtrees is None:
+ raise ValueError(
+ "At least one of permitted_subtrees and excluded_subtrees "
+ "must not be None"
+ )
+
+ self._permitted_subtrees = permitted_subtrees
+ self._excluded_subtrees = excluded_subtrees
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, NameConstraints):
+ return NotImplemented
+
+ return (
+ self.excluded_subtrees == other.excluded_subtrees
+ and self.permitted_subtrees == other.permitted_subtrees
+ )
+
+ def _validate_ip_name(self, tree: typing.Iterable[GeneralName]) -> None:
+ if any(
+ isinstance(name, IPAddress)
+ and not isinstance(
+ name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
+ )
+ for name in tree
+ ):
+ raise TypeError(
+ "IPAddress name constraints must be an IPv4Network or"
+ " IPv6Network object"
+ )
+
+ def __repr__(self) -> str:
+ return (
+ "<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
+ "excluded_subtrees={0.excluded_subtrees})>".format(self)
+ )
+
+ def __hash__(self) -> int:
+ if self.permitted_subtrees is not None:
+ ps: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
+ self.permitted_subtrees
+ )
+ else:
+ ps = None
+
+ if self.excluded_subtrees is not None:
+ es: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
+ self.excluded_subtrees
+ )
+ else:
+ es = None
+
+ return hash((ps, es))
+
+ @property
+ def permitted_subtrees(
+ self,
+ ) -> typing.Optional[typing.List[GeneralName]]:
+ return self._permitted_subtrees
+
+ @property
+ def excluded_subtrees(
+ self,
+ ) -> typing.Optional[typing.List[GeneralName]]:
+ return self._excluded_subtrees
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
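NameConstraints needs at least one non-empty subtree list, and IPAddress entries must be networks rather than single addresses, per _validate_ip_name above. A sketch (names are placeholders):

import ipaddress
from cryptography import x509

nc = x509.NameConstraints(
    permitted_subtrees=[
        x509.DNSName("example.com"),
        x509.IPAddress(ipaddress.IPv4Network("203.0.113.0/24")),
    ],
    excluded_subtrees=None,
)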
+
+
+class Extension(typing.Generic[ExtensionTypeVar]):
+ def __init__(
+ self, oid: ObjectIdentifier, critical: bool, value: ExtensionTypeVar
+ ) -> None:
+ if not isinstance(oid, ObjectIdentifier):
+ raise TypeError(
+ "oid argument must be an ObjectIdentifier instance."
+ )
+
+ if not isinstance(critical, bool):
+ raise TypeError("critical must be a boolean value")
+
+ self._oid = oid
+ self._critical = critical
+ self._value = value
+
+ @property
+ def oid(self) -> ObjectIdentifier:
+ return self._oid
+
+ @property
+ def critical(self) -> bool:
+ return self._critical
+
+ @property
+ def value(self) -> ExtensionTypeVar:
+ return self._value
+
+ def __repr__(self) -> str:
+ return (
+ "<Extension(oid={0.oid}, critical={0.critical}, "
+ "value={0.value})>"
+ ).format(self)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Extension):
+ return NotImplemented
+
+ return (
+ self.oid == other.oid
+ and self.critical == other.critical
+ and self.value == other.value
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.oid, self.critical, self.value))
+
+
+class GeneralNames:
+ def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
+ general_names = list(general_names)
+ if not all(isinstance(x, GeneralName) for x in general_names):
+ raise TypeError(
+ "Every item in the general_names list must be an "
+ "object conforming to the GeneralName interface"
+ )
+
+ self._general_names = general_names
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[UniformResourceIdentifier],
+ typing.Type[RFC822Name],
+ ],
+ ) -> typing.List[str]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[DirectoryName],
+ ) -> typing.List[Name]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[RegisteredID],
+ ) -> typing.List[ObjectIdentifier]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[IPAddress]
+ ) -> typing.List[_IPADDRESS_TYPES]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[OtherName]
+ ) -> typing.List[OtherName]:
+ ...
+
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[DirectoryName],
+ typing.Type[IPAddress],
+ typing.Type[OtherName],
+ typing.Type[RFC822Name],
+ typing.Type[RegisteredID],
+ typing.Type[UniformResourceIdentifier],
+ ],
+ ) -> typing.Union[
+ typing.List[_IPADDRESS_TYPES],
+ typing.List[str],
+ typing.List[OtherName],
+ typing.List[Name],
+ typing.List[ObjectIdentifier],
+ ]:
+ # Return the value of each GeneralName, except for OtherName instances
+ # which we return directly because it has two important properties not
+ # just one value.
+ objs = (i for i in self if isinstance(i, type))
+ if type != OtherName:
+ return [i.value for i in objs]
+ return list(objs)
+
+ def __repr__(self) -> str:
+ return "<GeneralNames({})>".format(self._general_names)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, GeneralNames):
+ return NotImplemented
+
+ return self._general_names == other._general_names
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._general_names))
+
+
+class SubjectAlternativeName(ExtensionType):
+ oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
+
+ def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
+ self._general_names = GeneralNames(general_names)
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[UniformResourceIdentifier],
+ typing.Type[RFC822Name],
+ ],
+ ) -> typing.List[str]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[DirectoryName],
+ ) -> typing.List[Name]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[RegisteredID],
+ ) -> typing.List[ObjectIdentifier]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[IPAddress]
+ ) -> typing.List[_IPADDRESS_TYPES]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[OtherName]
+ ) -> typing.List[OtherName]:
+ ...
+
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[DirectoryName],
+ typing.Type[IPAddress],
+ typing.Type[OtherName],
+ typing.Type[RFC822Name],
+ typing.Type[RegisteredID],
+ typing.Type[UniformResourceIdentifier],
+ ],
+ ) -> typing.Union[
+ typing.List[_IPADDRESS_TYPES],
+ typing.List[str],
+ typing.List[OtherName],
+ typing.List[Name],
+ typing.List[ObjectIdentifier],
+ ]:
+ return self._general_names.get_values_for_type(type)
+
+ def __repr__(self) -> str:
+ return "<SubjectAlternativeName({})>".format(self._general_names)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, SubjectAlternativeName):
+ return NotImplemented
+
+ return self._general_names == other._general_names
+
+ def __hash__(self) -> int:
+ return hash(self._general_names)
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
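SubjectAlternativeName wraps GeneralNames, so get_values_for_type behaves the same on both. A sketch (names and address are placeholders):

import ipaddress
from cryptography import x509

san = x509.SubjectAlternativeName([
    x509.DNSName("example.com"),
    x509.DNSName("www.example.com"),
    x509.IPAddress(ipaddress.IPv4Address("203.0.113.5")),
])
assert san.get_values_for_type(x509.DNSName) == ["example.com", "www.example.com"]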
+
+
+class IssuerAlternativeName(ExtensionType):
+ oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME
+
+ def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
+ self._general_names = GeneralNames(general_names)
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[UniformResourceIdentifier],
+ typing.Type[RFC822Name],
+ ],
+ ) -> typing.List[str]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[DirectoryName],
+ ) -> typing.List[Name]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[RegisteredID],
+ ) -> typing.List[ObjectIdentifier]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[IPAddress]
+ ) -> typing.List[_IPADDRESS_TYPES]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[OtherName]
+ ) -> typing.List[OtherName]:
+ ...
+
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[DirectoryName],
+ typing.Type[IPAddress],
+ typing.Type[OtherName],
+ typing.Type[RFC822Name],
+ typing.Type[RegisteredID],
+ typing.Type[UniformResourceIdentifier],
+ ],
+ ) -> typing.Union[
+ typing.List[_IPADDRESS_TYPES],
+ typing.List[str],
+ typing.List[OtherName],
+ typing.List[Name],
+ typing.List[ObjectIdentifier],
+ ]:
+ return self._general_names.get_values_for_type(type)
+
+ def __repr__(self) -> str:
+ return "<IssuerAlternativeName({})>".format(self._general_names)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, IssuerAlternativeName):
+ return NotImplemented
+
+ return self._general_names == other._general_names
+
+ def __hash__(self) -> int:
+ return hash(self._general_names)
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class CertificateIssuer(ExtensionType):
+ oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER
+
+ def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
+ self._general_names = GeneralNames(general_names)
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[UniformResourceIdentifier],
+ typing.Type[RFC822Name],
+ ],
+ ) -> typing.List[str]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[DirectoryName],
+ ) -> typing.List[Name]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self,
+ type: typing.Type[RegisteredID],
+ ) -> typing.List[ObjectIdentifier]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[IPAddress]
+ ) -> typing.List[_IPADDRESS_TYPES]:
+ ...
+
+ @typing.overload
+ def get_values_for_type(
+ self, type: typing.Type[OtherName]
+ ) -> typing.List[OtherName]:
+ ...
+
+ def get_values_for_type(
+ self,
+ type: typing.Union[
+ typing.Type[DNSName],
+ typing.Type[DirectoryName],
+ typing.Type[IPAddress],
+ typing.Type[OtherName],
+ typing.Type[RFC822Name],
+ typing.Type[RegisteredID],
+ typing.Type[UniformResourceIdentifier],
+ ],
+ ) -> typing.Union[
+ typing.List[_IPADDRESS_TYPES],
+ typing.List[str],
+ typing.List[OtherName],
+ typing.List[Name],
+ typing.List[ObjectIdentifier],
+ ]:
+ return self._general_names.get_values_for_type(type)
+
+ def __repr__(self) -> str:
+ return "<CertificateIssuer({})>".format(self._general_names)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, CertificateIssuer):
+ return NotImplemented
+
+ return self._general_names == other._general_names
+
+ def __hash__(self) -> int:
+ return hash(self._general_names)
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class CRLReason(ExtensionType):
+ oid = CRLEntryExtensionOID.CRL_REASON
+
+ def __init__(self, reason: ReasonFlags) -> None:
+ if not isinstance(reason, ReasonFlags):
+ raise TypeError("reason must be an element from ReasonFlags")
+
+ self._reason = reason
+
+ def __repr__(self) -> str:
+ return "<CRLReason(reason={})>".format(self._reason)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, CRLReason):
+ return NotImplemented
+
+ return self.reason == other.reason
+
+ def __hash__(self) -> int:
+ return hash(self.reason)
+
+ @property
+ def reason(self) -> ReasonFlags:
+ return self._reason
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class InvalidityDate(ExtensionType):
+ oid = CRLEntryExtensionOID.INVALIDITY_DATE
+
+ def __init__(self, invalidity_date: datetime.datetime) -> None:
+ if not isinstance(invalidity_date, datetime.datetime):
+ raise TypeError("invalidity_date must be a datetime.datetime")
+
+ self._invalidity_date = invalidity_date
+
+ def __repr__(self) -> str:
+ return "<InvalidityDate(invalidity_date={})>".format(
+ self._invalidity_date
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, InvalidityDate):
+ return NotImplemented
+
+ return self.invalidity_date == other.invalidity_date
+
+ def __hash__(self) -> int:
+ return hash(self.invalidity_date)
+
+ @property
+ def invalidity_date(self) -> datetime.datetime:
+ return self._invalidity_date
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
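CRLReason and InvalidityDate are per-entry extensions; they attach to a RevokedCertificate via the RevokedCertificateBuilder from base.py above. A sketch (the serial is a placeholder):

import datetime
from cryptography import x509

revoked = (
    x509.RevokedCertificateBuilder()
    .serial_number(12345)  # placeholder serial number
    .revocation_date(datetime.datetime.utcnow())
    .add_extension(x509.CRLReason(x509.ReasonFlags.key_compromise), critical=False)
    .build()
)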
+
+
+class PrecertificateSignedCertificateTimestamps(ExtensionType):
+ oid = ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS
+
+ def __init__(
+ self,
+ signed_certificate_timestamps: typing.Iterable[
+ SignedCertificateTimestamp
+ ],
+ ) -> None:
+ signed_certificate_timestamps = list(signed_certificate_timestamps)
+ if not all(
+ isinstance(sct, SignedCertificateTimestamp)
+ for sct in signed_certificate_timestamps
+ ):
+ raise TypeError(
+ "Every item in the signed_certificate_timestamps list must be "
+ "a SignedCertificateTimestamp"
+ )
+ self._signed_certificate_timestamps = signed_certificate_timestamps
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods(
+ "_signed_certificate_timestamps"
+ )
+
+ def __repr__(self) -> str:
+ return "<PrecertificateSignedCertificateTimestamps({})>".format(
+ list(self)
+ )
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._signed_certificate_timestamps))
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, PrecertificateSignedCertificateTimestamps):
+ return NotImplemented
+
+ return (
+ self._signed_certificate_timestamps
+ == other._signed_certificate_timestamps
+ )
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class SignedCertificateTimestamps(ExtensionType):
+ oid = ExtensionOID.SIGNED_CERTIFICATE_TIMESTAMPS
+
+ def __init__(
+ self,
+ signed_certificate_timestamps: typing.Iterable[
+ SignedCertificateTimestamp
+ ],
+ ) -> None:
+ signed_certificate_timestamps = list(signed_certificate_timestamps)
+ if not all(
+ isinstance(sct, SignedCertificateTimestamp)
+ for sct in signed_certificate_timestamps
+ ):
+ raise TypeError(
+ "Every item in the signed_certificate_timestamps list must be "
+ "a SignedCertificateTimestamp"
+ )
+ self._signed_certificate_timestamps = signed_certificate_timestamps
+
+ __len__, __iter__, __getitem__ = _make_sequence_methods(
+ "_signed_certificate_timestamps"
+ )
+
+ def __repr__(self) -> str:
+ return "<SignedCertificateTimestamps({})>".format(list(self))
+
+ def __hash__(self) -> int:
+ return hash(tuple(self._signed_certificate_timestamps))
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, SignedCertificateTimestamps):
+ return NotImplemented
+
+ return (
+ self._signed_certificate_timestamps
+ == other._signed_certificate_timestamps
+ )
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class OCSPNonce(ExtensionType):
+ oid = OCSPExtensionOID.NONCE
+
+ def __init__(self, nonce: bytes) -> None:
+ if not isinstance(nonce, bytes):
+ raise TypeError("nonce must be bytes")
+
+ self._nonce = nonce
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, OCSPNonce):
+ return NotImplemented
+
+ return self.nonce == other.nonce
+
+ def __hash__(self) -> int:
+ return hash(self.nonce)
+
+ def __repr__(self) -> str:
+ return "<OCSPNonce(nonce={0.nonce!r})>".format(self)
+
+ @property
+ def nonce(self) -> bytes:
+ return self._nonce
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
+
+
+class IssuingDistributionPoint(ExtensionType):
+ oid = ExtensionOID.ISSUING_DISTRIBUTION_POINT
+
+ def __init__(
+ self,
+ full_name: typing.Optional[typing.Iterable[GeneralName]],
+ relative_name: typing.Optional[RelativeDistinguishedName],
+ only_contains_user_certs: bool,
+ only_contains_ca_certs: bool,
+ only_some_reasons: typing.Optional[typing.FrozenSet[ReasonFlags]],
+ indirect_crl: bool,
+ only_contains_attribute_certs: bool,
+ ) -> None:
+ if full_name is not None:
+ full_name = list(full_name)
+
+ if only_some_reasons and (
+ not isinstance(only_some_reasons, frozenset)
+ or not all(isinstance(x, ReasonFlags) for x in only_some_reasons)
+ ):
+ raise TypeError(
+ "only_some_reasons must be None or frozenset of ReasonFlags"
+ )
+
+ if only_some_reasons and (
+ ReasonFlags.unspecified in only_some_reasons
+ or ReasonFlags.remove_from_crl in only_some_reasons
+ ):
+ raise ValueError(
+ "unspecified and remove_from_crl are not valid reasons in an "
+ "IssuingDistributionPoint"
+ )
+
+ if not (
+ isinstance(only_contains_user_certs, bool)
+ and isinstance(only_contains_ca_certs, bool)
+ and isinstance(indirect_crl, bool)
+ and isinstance(only_contains_attribute_certs, bool)
+ ):
+ raise TypeError(
+ "only_contains_user_certs, only_contains_ca_certs, "
+ "indirect_crl and only_contains_attribute_certs "
+ "must all be boolean."
+ )
+
+ crl_constraints = [
+ only_contains_user_certs,
+ only_contains_ca_certs,
+ indirect_crl,
+ only_contains_attribute_certs,
+ ]
+
+ if len([x for x in crl_constraints if x]) > 1:
+ raise ValueError(
+ "Only one of the following can be set to True: "
+ "only_contains_user_certs, only_contains_ca_certs, "
+ "indirect_crl, only_contains_attribute_certs"
+ )
+
+ if not any(
+ [
+ only_contains_user_certs,
+ only_contains_ca_certs,
+ indirect_crl,
+ only_contains_attribute_certs,
+ full_name,
+ relative_name,
+ only_some_reasons,
+ ]
+ ):
+ raise ValueError(
+ "Cannot create empty extension: "
+ "if only_contains_user_certs, only_contains_ca_certs, "
+ "indirect_crl, and only_contains_attribute_certs are all False"
+ ", then either full_name, relative_name, or only_some_reasons "
+ "must have a value."
+ )
+
+ self._only_contains_user_certs = only_contains_user_certs
+ self._only_contains_ca_certs = only_contains_ca_certs
+ self._indirect_crl = indirect_crl
+ self._only_contains_attribute_certs = only_contains_attribute_certs
+ self._only_some_reasons = only_some_reasons
+ self._full_name = full_name
+ self._relative_name = relative_name
+
+ def __repr__(self) -> str:
+ return (
+ "<IssuingDistributionPoint(full_name={0.full_name}, "
+ "relative_name={0.relative_name}, "
+ "only_contains_user_certs={0.only_contains_user_certs}, "
+ "only_contains_ca_certs={0.only_contains_ca_certs}, "
+ "only_some_reasons={0.only_some_reasons}, "
+ "indirect_crl={0.indirect_crl}, "
+ "only_contains_attribute_certs="
+ "{0.only_contains_attribute_certs})>".format(self)
+ )
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, IssuingDistributionPoint):
+ return NotImplemented
+
+ return (
+ self.full_name == other.full_name
+ and self.relative_name == other.relative_name
+ and self.only_contains_user_certs == other.only_contains_user_certs
+ and self.only_contains_ca_certs == other.only_contains_ca_certs
+ and self.only_some_reasons == other.only_some_reasons
+ and self.indirect_crl == other.indirect_crl
+ and self.only_contains_attribute_certs
+ == other.only_contains_attribute_certs
+ )
+
+ def __hash__(self) -> int:
+ return hash(
+ (
+ self.full_name,
+ self.relative_name,
+ self.only_contains_user_certs,
+ self.only_contains_ca_certs,
+ self.only_some_reasons,
+ self.indirect_crl,
+ self.only_contains_attribute_certs,
+ )
+ )
+
+ @property
+ def full_name(self) -> typing.Optional[typing.List[GeneralName]]:
+ return self._full_name
+
+ @property
+ def relative_name(self) -> typing.Optional[RelativeDistinguishedName]:
+ return self._relative_name
+
+ @property
+ def only_contains_user_certs(self) -> bool:
+ return self._only_contains_user_certs
+
+ @property
+ def only_contains_ca_certs(self) -> bool:
+ return self._only_contains_ca_certs
+
+ @property
+ def only_some_reasons(
+ self,
+ ) -> typing.Optional[typing.FrozenSet[ReasonFlags]]:
+ return self._only_some_reasons
+
+ @property
+ def indirect_crl(self) -> bool:
+ return self._indirect_crl
+
+ @property
+ def only_contains_attribute_certs(self) -> bool:
+ return self._only_contains_attribute_certs
+
+ def public_bytes(self) -> bytes:
+ return rust_x509.encode_extension_value(self)
self.value)) + + def public_bytes(self) -> bytes: + return self.value diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/general_name.py b/myenv/lib/python3.9/site-packages/cryptography/x509/general_name.py new file mode 100644 index 0000000..9939233 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/general_name.py @@ -0,0 +1,284 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +import abc +import ipaddress +import typing +from email.utils import parseaddr + +from cryptography.x509.name import Name +from cryptography.x509.oid import ObjectIdentifier + + +_IPADDRESS_TYPES = typing.Union[ + ipaddress.IPv4Address, + ipaddress.IPv6Address, + ipaddress.IPv4Network, + ipaddress.IPv6Network, +] + + +class UnsupportedGeneralNameType(Exception): + pass + + +class GeneralName(metaclass=abc.ABCMeta): + @abc.abstractproperty + def value(self) -> typing.Any: + """ + Return the value of the object + """ + + +class RFC822Name(GeneralName): + def __init__(self, value: str) -> None: + if isinstance(value, str): + try: + value.encode("ascii") + except UnicodeEncodeError: + raise ValueError( + "RFC822Name values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "a library like idna." + ) + else: + raise TypeError("value must be string") + + name, address = parseaddr(value) + if name or not address: + # parseaddr has found a name (e.g. Name ) or the entire + # value is an empty string. + raise ValueError("Invalid rfc822name value") + + self._value = value + + @property + def value(self) -> str: + return self._value + + @classmethod + def _init_without_validation(cls, value: str) -> "RFC822Name": + instance = cls.__new__(cls) + instance._value = value + return instance + + def __repr__(self) -> str: + return "".format(self.value) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, RFC822Name): + return NotImplemented + + return self.value == other.value + + def __hash__(self) -> int: + return hash(self.value) + + +class DNSName(GeneralName): + def __init__(self, value: str) -> None: + if isinstance(value, str): + try: + value.encode("ascii") + except UnicodeEncodeError: + raise ValueError( + "DNSName values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "a library like idna." + ) + else: + raise TypeError("value must be string") + + self._value = value + + @property + def value(self) -> str: + return self._value + + @classmethod + def _init_without_validation(cls, value: str) -> "DNSName": + instance = cls.__new__(cls) + instance._value = value + return instance + + def __repr__(self) -> str: + return "".format(self.value) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DNSName): + return NotImplemented + + return self.value == other.value + + def __hash__(self) -> int: + return hash(self.value) + + +class UniformResourceIdentifier(GeneralName): + def __init__(self, value: str) -> None: + if isinstance(value, str): + try: + value.encode("ascii") + except UnicodeEncodeError: + raise ValueError( + "URI values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "a library like idna." 
+                )
+        else:
+            raise TypeError("value must be string")
+
+        self._value = value
+
+    @property
+    def value(self) -> str:
+        return self._value
+
+    @classmethod
+    def _init_without_validation(
+        cls, value: str
+    ) -> "UniformResourceIdentifier":
+        instance = cls.__new__(cls)
+        instance._value = value
+        return instance
+
+    def __repr__(self) -> str:
+        return "<UniformResourceIdentifier(value={!r})>".format(self.value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, UniformResourceIdentifier):
+            return NotImplemented
+
+        return self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash(self.value)
+
+
+class DirectoryName(GeneralName):
+    def __init__(self, value: Name) -> None:
+        if not isinstance(value, Name):
+            raise TypeError("value must be a Name")
+
+        self._value = value
+
+    @property
+    def value(self) -> Name:
+        return self._value
+
+    def __repr__(self) -> str:
+        return "<DirectoryName(value={})>".format(self.value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, DirectoryName):
+            return NotImplemented
+
+        return self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash(self.value)
+
+
+class RegisteredID(GeneralName):
+    def __init__(self, value: ObjectIdentifier) -> None:
+        if not isinstance(value, ObjectIdentifier):
+            raise TypeError("value must be an ObjectIdentifier")
+
+        self._value = value
+
+    @property
+    def value(self) -> ObjectIdentifier:
+        return self._value
+
+    def __repr__(self) -> str:
+        return "<RegisteredID(value={})>".format(self.value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, RegisteredID):
+            return NotImplemented
+
+        return self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash(self.value)
+
+
+class IPAddress(GeneralName):
+    def __init__(self, value: _IPADDRESS_TYPES) -> None:
+        if not isinstance(
+            value,
+            (
+                ipaddress.IPv4Address,
+                ipaddress.IPv6Address,
+                ipaddress.IPv4Network,
+                ipaddress.IPv6Network,
+            ),
+        ):
+            raise TypeError(
+                "value must be an instance of ipaddress.IPv4Address, "
+                "ipaddress.IPv6Address, ipaddress.IPv4Network, or "
+                "ipaddress.IPv6Network"
+            )
+
+        self._value = value
+
+    @property
+    def value(self) -> _IPADDRESS_TYPES:
+        return self._value
+
+    def _packed(self) -> bytes:
+        if isinstance(
+            self.value, (ipaddress.IPv4Address, ipaddress.IPv6Address)
+        ):
+            return self.value.packed
+        else:
+            return (
+                self.value.network_address.packed + self.value.netmask.packed
+            )
+
+    def __repr__(self) -> str:
+        return "<IPAddress(value={})>".format(self.value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, IPAddress):
+            return NotImplemented
+
+        return self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash(self.value)
+
+
+class OtherName(GeneralName):
+    def __init__(self, type_id: ObjectIdentifier, value: bytes) -> None:
+        if not isinstance(type_id, ObjectIdentifier):
+            raise TypeError("type_id must be an ObjectIdentifier")
+        if not isinstance(value, bytes):
+            raise TypeError("value must be a binary string")
+
+        self._type_id = type_id
+        self._value = value
+
+    @property
+    def type_id(self) -> ObjectIdentifier:
+        return self._type_id
+
+    @property
+    def value(self) -> bytes:
+        return self._value
+
+    def __repr__(self) -> str:
+        return "<OtherName(type_id={}, value={!r})>".format(
+            self.type_id, self.value
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, OtherName):
+            return NotImplemented
+
+        return self.type_id == other.type_id and self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash((self.type_id, self.value))
diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/name.py
b/myenv/lib/python3.9/site-packages/cryptography/x509/name.py new file mode 100644 index 0000000..4b32115 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/name.py @@ -0,0 +1,445 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import binascii +import re +import sys +import typing +import warnings + +from cryptography import utils +from cryptography.hazmat.bindings._rust import ( + x509 as rust_x509, +) +from cryptography.x509.oid import NameOID, ObjectIdentifier + + +class _ASN1Type(utils.Enum): + BitString = 3 + OctetString = 4 + UTF8String = 12 + NumericString = 18 + PrintableString = 19 + T61String = 20 + IA5String = 22 + UTCTime = 23 + GeneralizedTime = 24 + VisibleString = 26 + UniversalString = 28 + BMPString = 30 + + +_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type} +_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = { + NameOID.COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString, + NameOID.DN_QUALIFIER: _ASN1Type.PrintableString, + NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String, + NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String, +} + +# Type alias +_OidNameMap = typing.Mapping[ObjectIdentifier, str] + +#: Short attribute names from RFC 4514: +#: https://tools.ietf.org/html/rfc4514#page-7 +_NAMEOID_TO_NAME: _OidNameMap = { + NameOID.COMMON_NAME: "CN", + NameOID.LOCALITY_NAME: "L", + NameOID.STATE_OR_PROVINCE_NAME: "ST", + NameOID.ORGANIZATION_NAME: "O", + NameOID.ORGANIZATIONAL_UNIT_NAME: "OU", + NameOID.COUNTRY_NAME: "C", + NameOID.STREET_ADDRESS: "STREET", + NameOID.DOMAIN_COMPONENT: "DC", + NameOID.USER_ID: "UID", +} +_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()} + + +def _escape_dn_value(val: typing.Union[str, bytes]) -> str: + """Escape special characters in RFC4514 Distinguished Name value.""" + + if not val: + return "" + + # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character + # followed by the hexadecimal encoding of the octets. + if isinstance(val, bytes): + return "#" + binascii.hexlify(val).decode("utf8") + + # See https://tools.ietf.org/html/rfc4514#section-2.4 + val = val.replace("\\", "\\\\") + val = val.replace('"', '\\"') + val = val.replace("+", "\\+") + val = val.replace(",", "\\,") + val = val.replace(";", "\\;") + val = val.replace("<", "\\<") + val = val.replace(">", "\\>") + val = val.replace("\0", "\\00") + + if val[0] in ("#", " "): + val = "\\" + val + if val[-1] == " ": + val = val[:-1] + "\\ " + + return val + + +def _unescape_dn_value(val: str) -> str: + if not val: + return "" + + # See https://tools.ietf.org/html/rfc4514#section-3 + + # special = escaped / SPACE / SHARP / EQUALS + # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE + def sub(m): + val = m.group(1) + # Regular escape + if len(val) == 1: + return val + # Hex-value scape + return chr(int(val, 16)) + + return _RFC4514NameParser._PAIR_RE.sub(sub, val) + + +class NameAttribute: + def __init__( + self, + oid: ObjectIdentifier, + value: typing.Union[str, bytes], + _type: typing.Optional[_ASN1Type] = None, + *, + _validate: bool = True, + ) -> None: + if not isinstance(oid, ObjectIdentifier): + raise TypeError( + "oid argument must be an ObjectIdentifier instance." 
+            )
+        if _type == _ASN1Type.BitString:
+            if oid != NameOID.X500_UNIQUE_IDENTIFIER:
+                raise TypeError(
+                    "oid must be X500_UNIQUE_IDENTIFIER for BitString type."
+                )
+            if not isinstance(value, bytes):
+                raise TypeError("value must be bytes for BitString")
+        else:
+            if not isinstance(value, str):
+                raise TypeError("value argument must be a str")
+
+        if (
+            oid == NameOID.COUNTRY_NAME
+            or oid == NameOID.JURISDICTION_COUNTRY_NAME
+        ):
+            assert isinstance(value, str)
+            c_len = len(value.encode("utf8"))
+            if c_len != 2 and _validate is True:
+                raise ValueError(
+                    "Country name must be a 2 character country code"
+                )
+            elif c_len != 2:
+                warnings.warn(
+                    "Country names should be two characters, but the "
+                    "attribute is {} characters in length.".format(c_len),
+                    stacklevel=2,
+                )
+
+        # The appropriate ASN1 string type varies by OID and is defined across
+        # multiple RFCs including 2459, 3280, and 5280. In general UTF8String
+        # is preferred (2459), but 3280 and 5280 specify several OIDs with
+        # alternate types. This means when we see the sentinel value we need
+        # to look up whether the OID has a non-UTF8 type. If it does, set it
+        # to that. Otherwise, UTF8!
+        if _type is None:
+            _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)
+
+        if not isinstance(_type, _ASN1Type):
+            raise TypeError("_type must be from the _ASN1Type enum")
+
+        self._oid = oid
+        self._value = value
+        self._type = _type
+
+    @property
+    def oid(self) -> ObjectIdentifier:
+        return self._oid
+
+    @property
+    def value(self) -> typing.Union[str, bytes]:
+        return self._value
+
+    @property
+    def rfc4514_attribute_name(self) -> str:
+        """
+        The short attribute name (for example "CN") if available,
+        otherwise the OID dotted string.
+        """
+        return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)
+
+    def rfc4514_string(
+        self, attr_name_overrides: typing.Optional[_OidNameMap] = None
+    ) -> str:
+        """
+        Format as RFC4514 Distinguished Name string.
+
+        Use short attribute name if available, otherwise fall back to OID
+        dotted string.
+        """
+        attr_name = (
+            attr_name_overrides.get(self.oid) if attr_name_overrides else None
+        )
+        if attr_name is None:
+            attr_name = self.rfc4514_attribute_name
+
+        return f"{attr_name}={_escape_dn_value(self.value)}"
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, NameAttribute):
+            return NotImplemented
+
+        return self.oid == other.oid and self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash((self.oid, self.value))
+
+    def __repr__(self) -> str:
+        return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
+
+
+class RelativeDistinguishedName:
+    def __init__(self, attributes: typing.Iterable[NameAttribute]):
+        attributes = list(attributes)
+        if not attributes:
+            raise ValueError("a relative distinguished name cannot be empty")
+        if not all(isinstance(x, NameAttribute) for x in attributes):
+            raise TypeError("attributes must be an iterable of NameAttribute")
+
+        # Keep list and frozenset to preserve attribute order where it matters
+        self._attributes = attributes
+        self._attribute_set = frozenset(attributes)
+
+        if len(self._attribute_set) != len(attributes):
+            raise ValueError("duplicate attributes are not allowed")
+
+    def get_attributes_for_oid(
+        self, oid: ObjectIdentifier
+    ) -> typing.List[NameAttribute]:
+        return [i for i in self if i.oid == oid]
+
+    def rfc4514_string(
+        self, attr_name_overrides: typing.Optional[_OidNameMap] = None
+    ) -> str:
+        """
+        Format as RFC4514 Distinguished Name string.
+
+        Within each RDN, attributes are joined by '+', although that is rarely
+        used in certificates.
+        """
+        return "+".join(
+            attr.rfc4514_string(attr_name_overrides)
+            for attr in self._attributes
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, RelativeDistinguishedName):
+            return NotImplemented
+
+        return self._attribute_set == other._attribute_set
+
+    def __hash__(self) -> int:
+        return hash(self._attribute_set)
+
+    def __iter__(self) -> typing.Iterator[NameAttribute]:
+        return iter(self._attributes)
+
+    def __len__(self) -> int:
+        return len(self._attributes)
+
+    def __repr__(self) -> str:
+        return "<RelativeDistinguishedName({})>".format(self.rfc4514_string())
+
+
+class Name:
+    @typing.overload
+    def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:
+        ...
+
+    @typing.overload
+    def __init__(
+        self, attributes: typing.Iterable[RelativeDistinguishedName]
+    ) -> None:
+        ...
+
+    def __init__(
+        self,
+        attributes: typing.Iterable[
+            typing.Union[NameAttribute, RelativeDistinguishedName]
+        ],
+    ) -> None:
+        attributes = list(attributes)
+        if all(isinstance(x, NameAttribute) for x in attributes):
+            self._attributes = [
+                RelativeDistinguishedName([typing.cast(NameAttribute, x)])
+                for x in attributes
+            ]
+        elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):
+            self._attributes = typing.cast(
+                typing.List[RelativeDistinguishedName], attributes
+            )
+        else:
+            raise TypeError(
+                "attributes must be a list of NameAttribute"
+                " or a list RelativeDistinguishedName"
+            )
+
+    @classmethod
+    def from_rfc4514_string(cls, data: str) -> "Name":
+        return _RFC4514NameParser(data).parse()
+
+    def rfc4514_string(
+        self, attr_name_overrides: typing.Optional[_OidNameMap] = None
+    ) -> str:
+        """
+        Format as RFC4514 Distinguished Name string.
+        For example 'CN=foobar.com,O=Foo Corp,C=US'
+
+        An X.509 name is a two-level structure: a list of sets of attributes.
+        Each list element is separated by ',' and within each list element, set
+        elements are separated by '+'. The latter is almost never used in
+        real world certificates. According to RFC4514 section 2.1 the
+        RDNSequence must be reversed when converting to string representation.
+        """
+        return ",".join(
+            attr.rfc4514_string(attr_name_overrides)
+            for attr in reversed(self._attributes)
+        )
+
+    def get_attributes_for_oid(
+        self, oid: ObjectIdentifier
+    ) -> typing.List[NameAttribute]:
+        return [i for i in self if i.oid == oid]
+
+    @property
+    def rdns(self) -> typing.List[RelativeDistinguishedName]:
+        return self._attributes
+
+    def public_bytes(self, backend: typing.Any = None) -> bytes:
+        return rust_x509.encode_name_bytes(self)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Name):
+            return NotImplemented
+
+        return self._attributes == other._attributes
+
+    def __hash__(self) -> int:
+        # TODO: this is relatively expensive, if this looks like a bottleneck
+        # for you, consider optimizing!
+        return hash(tuple(self._attributes))
+
+    def __iter__(self) -> typing.Iterator[NameAttribute]:
+        for rdn in self._attributes:
+            for ava in rdn:
+                yield ava
+
+    def __len__(self) -> int:
+        return sum(len(rdn) for rdn in self._attributes)
+
+    def __repr__(self) -> str:
+        rdns = ",".join(attr.rfc4514_string() for attr in self._attributes)
+        return "<Name({})>".format(rdns)
+
+
+class _RFC4514NameParser:
+    _OID_RE = re.compile(r"(0|([1-9]\d*))(\.(0|([1-9]\d*)))+")
+    _DESCR_RE = re.compile(r"[a-zA-Z][a-zA-Z\d-]*")
+
+    _PAIR = r"\\([\\ #=\"\+,;<>]|[\da-zA-Z]{2})"
+    _PAIR_RE = re.compile(_PAIR)
+    _LUTF1 = r"[\x01-\x1f\x21\x24-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
+    _SUTF1 = r"[\x01-\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
+    _TUTF1 = r"[\x01-\x1F\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
+    _UTFMB = rf"[\x80-{chr(sys.maxunicode)}]"
+    _LEADCHAR = rf"{_LUTF1}|{_UTFMB}"
+    _STRINGCHAR = rf"{_SUTF1}|{_UTFMB}"
+    _TRAILCHAR = rf"{_TUTF1}|{_UTFMB}"
+    _STRING_RE = re.compile(
+        rf"""
+        (
+            ({_LEADCHAR}|{_PAIR})
+            (
+                ({_STRINGCHAR}|{_PAIR})*
+                ({_TRAILCHAR}|{_PAIR})
+            )?
+        )?
+        """,
+        re.VERBOSE,
+    )
+    _HEXSTRING_RE = re.compile(r"#([\da-zA-Z]{2})+")
+
+    def __init__(self, data: str) -> None:
+        self._data = data
+        self._idx = 0
+
+    def _has_data(self) -> bool:
+        return self._idx < len(self._data)
+
+    def _peek(self) -> typing.Optional[str]:
+        if self._has_data():
+            return self._data[self._idx]
+        return None
+
+    def _read_char(self, ch: str) -> None:
+        if self._peek() != ch:
+            raise ValueError
+        self._idx += 1
+
+    def _read_re(self, pat) -> str:
+        match = pat.match(self._data, pos=self._idx)
+        if match is None:
+            raise ValueError
+        val = match.group()
+        self._idx += len(val)
+        return val
+
+    def parse(self) -> Name:
+        rdns = [self._parse_rdn()]
+
+        while self._has_data():
+            self._read_char(",")
+            rdns.append(self._parse_rdn())
+
+        return Name(rdns)
+
+    def _parse_rdn(self) -> RelativeDistinguishedName:
+        nas = [self._parse_na()]
+        while self._peek() == "+":
+            self._read_char("+")
+            nas.append(self._parse_na())
+
+        return RelativeDistinguishedName(nas)
+
+    def _parse_na(self) -> NameAttribute:
+        try:
+            oid_value = self._read_re(self._OID_RE)
+        except ValueError:
+            name = self._read_re(self._DESCR_RE)
+            oid = _NAME_TO_NAMEOID.get(name)
+            if oid is None:
+                raise ValueError
+        else:
+            oid = ObjectIdentifier(oid_value)
+
+        self._read_char("=")
+        if self._peek() == "#":
+            value = self._read_re(self._HEXSTRING_RE)
+            value = binascii.unhexlify(value[1:]).decode()
+        else:
+            raw_value = self._read_re(self._STRING_RE)
+            value = _unescape_dn_value(raw_value)
+
+        return NameAttribute(oid, value)
diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/ocsp.py b/myenv/lib/python3.9/site-packages/cryptography/x509/ocsp.py
new file mode 100644
index 0000000..c01e77a
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/cryptography/x509/ocsp.py
@@ -0,0 +1,551 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+ + +import abc +import datetime +import typing + +from cryptography import utils +from cryptography import x509 +from cryptography.hazmat.bindings._rust import ocsp +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric.types import ( + CERTIFICATE_PRIVATE_KEY_TYPES, +) +from cryptography.x509.base import ( + _EARLIEST_UTC_TIME, + _convert_to_naive_utc_time, + _reject_duplicate_extension, +) + + +class OCSPResponderEncoding(utils.Enum): + HASH = "By Hash" + NAME = "By Name" + + +class OCSPResponseStatus(utils.Enum): + SUCCESSFUL = 0 + MALFORMED_REQUEST = 1 + INTERNAL_ERROR = 2 + TRY_LATER = 3 + SIG_REQUIRED = 5 + UNAUTHORIZED = 6 + + +_ALLOWED_HASHES = ( + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, +) + + +def _verify_algorithm(algorithm: hashes.HashAlgorithm) -> None: + if not isinstance(algorithm, _ALLOWED_HASHES): + raise ValueError( + "Algorithm must be SHA1, SHA224, SHA256, SHA384, or SHA512" + ) + + +class OCSPCertStatus(utils.Enum): + GOOD = 0 + REVOKED = 1 + UNKNOWN = 2 + + +class _SingleResponse: + def __init__( + self, + cert: x509.Certificate, + issuer: x509.Certificate, + algorithm: hashes.HashAlgorithm, + cert_status: OCSPCertStatus, + this_update: datetime.datetime, + next_update: typing.Optional[datetime.datetime], + revocation_time: typing.Optional[datetime.datetime], + revocation_reason: typing.Optional[x509.ReasonFlags], + ): + if not isinstance(cert, x509.Certificate) or not isinstance( + issuer, x509.Certificate + ): + raise TypeError("cert and issuer must be a Certificate") + + _verify_algorithm(algorithm) + if not isinstance(this_update, datetime.datetime): + raise TypeError("this_update must be a datetime object") + if next_update is not None and not isinstance( + next_update, datetime.datetime + ): + raise TypeError("next_update must be a datetime object or None") + + self._cert = cert + self._issuer = issuer + self._algorithm = algorithm + self._this_update = this_update + self._next_update = next_update + + if not isinstance(cert_status, OCSPCertStatus): + raise TypeError( + "cert_status must be an item from the OCSPCertStatus enum" + ) + if cert_status is not OCSPCertStatus.REVOKED: + if revocation_time is not None: + raise ValueError( + "revocation_time can only be provided if the certificate " + "is revoked" + ) + if revocation_reason is not None: + raise ValueError( + "revocation_reason can only be provided if the certificate" + " is revoked" + ) + else: + if not isinstance(revocation_time, datetime.datetime): + raise TypeError("revocation_time must be a datetime object") + + revocation_time = _convert_to_naive_utc_time(revocation_time) + if revocation_time < _EARLIEST_UTC_TIME: + raise ValueError( + "The revocation_time must be on or after" + " 1950 January 1." 
+ ) + + if revocation_reason is not None and not isinstance( + revocation_reason, x509.ReasonFlags + ): + raise TypeError( + "revocation_reason must be an item from the ReasonFlags " + "enum or None" + ) + + self._cert_status = cert_status + self._revocation_time = revocation_time + self._revocation_reason = revocation_reason + + +class OCSPRequest(metaclass=abc.ABCMeta): + @abc.abstractproperty + def issuer_key_hash(self) -> bytes: + """ + The hash of the issuer public key + """ + + @abc.abstractproperty + def issuer_name_hash(self) -> bytes: + """ + The hash of the issuer name + """ + + @abc.abstractproperty + def hash_algorithm(self) -> hashes.HashAlgorithm: + """ + The hash algorithm used in the issuer name and key hashes + """ + + @abc.abstractproperty + def serial_number(self) -> int: + """ + The serial number of the cert whose status is being checked + """ + + @abc.abstractmethod + def public_bytes(self, encoding: serialization.Encoding) -> bytes: + """ + Serializes the request to DER + """ + + @abc.abstractproperty + def extensions(self) -> x509.Extensions: + """ + The list of request extensions. Not single request extensions. + """ + + +class OCSPSingleResponse(metaclass=abc.ABCMeta): + @abc.abstractproperty + def certificate_status(self) -> OCSPCertStatus: + """ + The status of the certificate (an element from the OCSPCertStatus enum) + """ + + @abc.abstractproperty + def revocation_time(self) -> typing.Optional[datetime.datetime]: + """ + The date of when the certificate was revoked or None if not + revoked. + """ + + @abc.abstractproperty + def revocation_reason(self) -> typing.Optional[x509.ReasonFlags]: + """ + The reason the certificate was revoked or None if not specified or + not revoked. + """ + + @abc.abstractproperty + def this_update(self) -> datetime.datetime: + """ + The most recent time at which the status being indicated is known by + the responder to have been correct + """ + + @abc.abstractproperty + def next_update(self) -> typing.Optional[datetime.datetime]: + """ + The time when newer information will be available + """ + + @abc.abstractproperty + def issuer_key_hash(self) -> bytes: + """ + The hash of the issuer public key + """ + + @abc.abstractproperty + def issuer_name_hash(self) -> bytes: + """ + The hash of the issuer name + """ + + @abc.abstractproperty + def hash_algorithm(self) -> hashes.HashAlgorithm: + """ + The hash algorithm used in the issuer name and key hashes + """ + + @abc.abstractproperty + def serial_number(self) -> int: + """ + The serial number of the cert whose status is being checked + """ + + +class OCSPResponse(metaclass=abc.ABCMeta): + @abc.abstractproperty + def responses(self) -> typing.Iterator[OCSPSingleResponse]: + """ + An iterator over the individual SINGLERESP structures in the + response + """ + + @abc.abstractproperty + def response_status(self) -> OCSPResponseStatus: + """ + The status of the response. 
This is a value from the OCSPResponseStatus + enumeration + """ + + @abc.abstractproperty + def signature_algorithm_oid(self) -> x509.ObjectIdentifier: + """ + The ObjectIdentifier of the signature algorithm + """ + + @abc.abstractproperty + def signature_hash_algorithm( + self, + ) -> typing.Optional[hashes.HashAlgorithm]: + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + """ + + @abc.abstractproperty + def signature(self) -> bytes: + """ + The signature bytes + """ + + @abc.abstractproperty + def tbs_response_bytes(self) -> bytes: + """ + The tbsResponseData bytes + """ + + @abc.abstractproperty + def certificates(self) -> typing.List[x509.Certificate]: + """ + A list of certificates used to help build a chain to verify the OCSP + response. This situation occurs when the OCSP responder uses a delegate + certificate. + """ + + @abc.abstractproperty + def responder_key_hash(self) -> typing.Optional[bytes]: + """ + The responder's key hash or None + """ + + @abc.abstractproperty + def responder_name(self) -> typing.Optional[x509.Name]: + """ + The responder's Name or None + """ + + @abc.abstractproperty + def produced_at(self) -> datetime.datetime: + """ + The time the response was produced + """ + + @abc.abstractproperty + def certificate_status(self) -> OCSPCertStatus: + """ + The status of the certificate (an element from the OCSPCertStatus enum) + """ + + @abc.abstractproperty + def revocation_time(self) -> typing.Optional[datetime.datetime]: + """ + The date of when the certificate was revoked or None if not + revoked. + """ + + @abc.abstractproperty + def revocation_reason(self) -> typing.Optional[x509.ReasonFlags]: + """ + The reason the certificate was revoked or None if not specified or + not revoked. + """ + + @abc.abstractproperty + def this_update(self) -> datetime.datetime: + """ + The most recent time at which the status being indicated is known by + the responder to have been correct + """ + + @abc.abstractproperty + def next_update(self) -> typing.Optional[datetime.datetime]: + """ + The time when newer information will be available + """ + + @abc.abstractproperty + def issuer_key_hash(self) -> bytes: + """ + The hash of the issuer public key + """ + + @abc.abstractproperty + def issuer_name_hash(self) -> bytes: + """ + The hash of the issuer name + """ + + @abc.abstractproperty + def hash_algorithm(self) -> hashes.HashAlgorithm: + """ + The hash algorithm used in the issuer name and key hashes + """ + + @abc.abstractproperty + def serial_number(self) -> int: + """ + The serial number of the cert whose status is being checked + """ + + @abc.abstractproperty + def extensions(self) -> x509.Extensions: + """ + The list of response extensions. Not single response extensions. + """ + + @abc.abstractproperty + def single_extensions(self) -> x509.Extensions: + """ + The list of single response extensions. Not response extensions. 
+ """ + + @abc.abstractmethod + def public_bytes(self, encoding: serialization.Encoding) -> bytes: + """ + Serializes the response to DER + """ + + +class OCSPRequestBuilder: + def __init__( + self, + request: typing.Optional[ + typing.Tuple[ + x509.Certificate, x509.Certificate, hashes.HashAlgorithm + ] + ] = None, + extensions: typing.List[x509.Extension[x509.ExtensionType]] = [], + ) -> None: + self._request = request + self._extensions = extensions + + def add_certificate( + self, + cert: x509.Certificate, + issuer: x509.Certificate, + algorithm: hashes.HashAlgorithm, + ) -> "OCSPRequestBuilder": + if self._request is not None: + raise ValueError("Only one certificate can be added to a request") + + _verify_algorithm(algorithm) + if not isinstance(cert, x509.Certificate) or not isinstance( + issuer, x509.Certificate + ): + raise TypeError("cert and issuer must be a Certificate") + + return OCSPRequestBuilder((cert, issuer, algorithm), self._extensions) + + def add_extension( + self, extval: x509.ExtensionType, critical: bool + ) -> "OCSPRequestBuilder": + if not isinstance(extval, x509.ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = x509.Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + + return OCSPRequestBuilder( + self._request, self._extensions + [extension] + ) + + def build(self) -> OCSPRequest: + if self._request is None: + raise ValueError("You must add a certificate before building") + + return ocsp.create_ocsp_request(self) + + +class OCSPResponseBuilder: + def __init__( + self, + response: typing.Optional[_SingleResponse] = None, + responder_id: typing.Optional[ + typing.Tuple[x509.Certificate, OCSPResponderEncoding] + ] = None, + certs: typing.Optional[typing.List[x509.Certificate]] = None, + extensions: typing.List[x509.Extension[x509.ExtensionType]] = [], + ): + self._response = response + self._responder_id = responder_id + self._certs = certs + self._extensions = extensions + + def add_response( + self, + cert: x509.Certificate, + issuer: x509.Certificate, + algorithm: hashes.HashAlgorithm, + cert_status: OCSPCertStatus, + this_update: datetime.datetime, + next_update: typing.Optional[datetime.datetime], + revocation_time: typing.Optional[datetime.datetime], + revocation_reason: typing.Optional[x509.ReasonFlags], + ) -> "OCSPResponseBuilder": + if self._response is not None: + raise ValueError("Only one response per OCSPResponse.") + + singleresp = _SingleResponse( + cert, + issuer, + algorithm, + cert_status, + this_update, + next_update, + revocation_time, + revocation_reason, + ) + return OCSPResponseBuilder( + singleresp, + self._responder_id, + self._certs, + self._extensions, + ) + + def responder_id( + self, encoding: OCSPResponderEncoding, responder_cert: x509.Certificate + ) -> "OCSPResponseBuilder": + if self._responder_id is not None: + raise ValueError("responder_id can only be set once") + if not isinstance(responder_cert, x509.Certificate): + raise TypeError("responder_cert must be a Certificate") + if not isinstance(encoding, OCSPResponderEncoding): + raise TypeError( + "encoding must be an element from OCSPResponderEncoding" + ) + + return OCSPResponseBuilder( + self._response, + (responder_cert, encoding), + self._certs, + self._extensions, + ) + + def certificates( + self, certs: typing.Iterable[x509.Certificate] + ) -> "OCSPResponseBuilder": + if self._certs is not None: + raise ValueError("certificates may only be set once") + certs = list(certs) + if 
len(certs) == 0: + raise ValueError("certs must not be an empty list") + if not all(isinstance(x, x509.Certificate) for x in certs): + raise TypeError("certs must be a list of Certificates") + return OCSPResponseBuilder( + self._response, + self._responder_id, + certs, + self._extensions, + ) + + def add_extension( + self, extval: x509.ExtensionType, critical: bool + ) -> "OCSPResponseBuilder": + if not isinstance(extval, x509.ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = x509.Extension(extval.oid, critical, extval) + _reject_duplicate_extension(extension, self._extensions) + + return OCSPResponseBuilder( + self._response, + self._responder_id, + self._certs, + self._extensions + [extension], + ) + + def sign( + self, + private_key: CERTIFICATE_PRIVATE_KEY_TYPES, + algorithm: typing.Optional[hashes.HashAlgorithm], + ) -> OCSPResponse: + if self._response is None: + raise ValueError("You must add a response before signing") + if self._responder_id is None: + raise ValueError("You must add a responder_id before signing") + + return ocsp.create_ocsp_response( + OCSPResponseStatus.SUCCESSFUL, self, private_key, algorithm + ) + + @classmethod + def build_unsuccessful( + cls, response_status: OCSPResponseStatus + ) -> OCSPResponse: + if not isinstance(response_status, OCSPResponseStatus): + raise TypeError( + "response_status must be an item from OCSPResponseStatus" + ) + if response_status is OCSPResponseStatus.SUCCESSFUL: + raise ValueError("response_status cannot be SUCCESSFUL") + + return ocsp.create_ocsp_response(response_status, None, None, None) + + +def load_der_ocsp_request(data: bytes) -> OCSPRequest: + return ocsp.load_der_ocsp_request(data) + + +def load_der_ocsp_response(data: bytes) -> OCSPResponse: + return ocsp.load_der_ocsp_response(data) diff --git a/myenv/lib/python3.9/site-packages/cryptography/x509/oid.py b/myenv/lib/python3.9/site-packages/cryptography/x509/oid.py new file mode 100644 index 0000000..9bfac75 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/cryptography/x509/oid.py @@ -0,0 +1,32 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from cryptography.hazmat._oid import ( + AttributeOID, + AuthorityInformationAccessOID, + CRLEntryExtensionOID, + CertificatePoliciesOID, + ExtendedKeyUsageOID, + ExtensionOID, + NameOID, + OCSPExtensionOID, + ObjectIdentifier, + SignatureAlgorithmOID, + SubjectInformationAccessOID, +) + + +__all__ = [ + "AttributeOID", + "AuthorityInformationAccessOID", + "CRLEntryExtensionOID", + "CertificatePoliciesOID", + "ExtendedKeyUsageOID", + "ExtensionOID", + "NameOID", + "OCSPExtensionOID", + "ObjectIdentifier", + "SignatureAlgorithmOID", + "SubjectInformationAccessOID", +] diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/LICENSE new file mode 100644 index 0000000..6e9cde5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2004-2016 California Institute of Technology. 
+Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +All rights reserved. + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met:: + + - Redistribution of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistribution in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentations and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/METADATA new file mode 100644 index 0000000..250218c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/METADATA @@ -0,0 +1,269 @@ +Metadata-Version: 2.1 +Name: dill +Version: 0.3.5.1 +Summary: serialize all of python +Home-page: https://github.com/uqfoundation/dill +Download-URL: https://pypi.org/project/dill/#files +Author: Mike McKerns +Author-email: mmckerns@uqfoundation.org +Maintainer: Mike McKerns +Maintainer-email: mmckerns@uqfoundation.org +License: 3-clause BSD +Project-URL: Documentation, http://dill.rtfd.io +Project-URL: Source Code, https://github.com/uqfoundation/dill +Project-URL: Bug Tracker, https://github.com/uqfoundation/dill/issues +Platform: Linux +Platform: Windows +Platform: Mac +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.* +License-File: LICENSE +Provides-Extra: graph +Requires-Dist: objgraph 
(>=1.7.2) ; extra == 'graph' +Provides-Extra: readline + +----------------------------- +dill: serialize all of python +----------------------------- + +About Dill +========== + +``dill`` extends python's ``pickle`` module for serializing and de-serializing +python objects to the majority of the built-in python types. Serialization +is the process of converting an object to a byte stream, and the inverse +of which is converting a byte stream back to a python object hierarchy. + +``dill`` provides the user the same interface as the ``pickle`` module, and +also includes some additional features. In addition to pickling python +objects, ``dill`` provides the ability to save the state of an interpreter +session in a single command. Hence, it would be feasable to save an +interpreter session, close the interpreter, ship the pickled file to +another computer, open a new interpreter, unpickle the session and +thus continue from the 'saved' state of the original interpreter +session. + +``dill`` can be used to store python objects to a file, but the primary +usage is to send python objects across the network as a byte stream. +``dill`` is quite flexible, and allows arbitrary user defined classes +and functions to be serialized. Thus ``dill`` is not intended to be +secure against erroneously or maliciously constructed data. It is +left to the user to decide whether the data they unpickle is from +a trustworthy source. + +``dill`` is part of ``pathos``, a python framework for heterogeneous computing. +``dill`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/dill/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. + + +Major Features +============== + +``dill`` can pickle the following standard types: + + - none, type, bool, int, long, float, complex, str, unicode, + - tuple, list, dict, file, buffer, builtin, + - both old and new style classes, + - instances of old and new style classes, + - set, frozenset, array, functions, exceptions + +``dill`` can also pickle more 'exotic' standard types: + + - functions with yields, nested functions, lambdas, + - cell, method, unboundmethod, module, code, methodwrapper, + - dictproxy, methoddescriptor, getsetdescriptor, memberdescriptor, + - wrapperdescriptor, xrange, slice, + - notimplemented, ellipsis, quit + +``dill`` cannot yet pickle these standard types: + + - frame, generator, traceback + +``dill`` also provides the capability to: + + - save and load python interpreter sessions + - save and extract the source code from functions and classes + - interactively diagnose pickling errors + + +Current Release +=============== + +The latest released version of ``dill`` is available from: + + https://pypi.org/project/dill + +``dill`` is distributed under a 3-clause BSD license. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. 
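+
+As a quick, minimal sketch of the session pickling listed above under Major
+Features (the file name ``session.pkl`` is purely illustrative), a session
+can be saved and later restored in a fresh interpreter::
+
+    >>> import dill
+    >>> x = 42
+    >>> dill.dump_session('session.pkl')  # pickle the __main__ namespace
+    >>> # ...later, possibly in a new interpreter...
+    >>> dill.load_session('session.pkl')  # restore it into __main__
+    >>> x
+    42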
+
+
+Installation
+============
+
+``dill`` can be installed with ``pip``::
+
+    $ pip install dill
+
+To optionally include the ``objgraph`` diagnostic tool in the install::
+
+    $ pip install dill[graph]
+
+For windows users, to optionally install session history tools::
+
+    $ pip install dill[readline]
+
+
+Requirements
+============
+
+``dill`` requires:
+
+ - ``python`` (or ``pypy``), **==2.7** or **>=3.7**
+ - ``setuptools``, **>=42**
+
+Optional requirements:
+
+ - ``objgraph``, **>=1.7.2**
+ - ``pyreadline``, **>=1.7.1** (on windows)
+
+
+Basic Usage
+===========
+
+``dill`` is a drop-in replacement for ``pickle``. Existing code can be
+updated to allow complete pickling using::
+
+    >>> import dill as pickle
+
+or::
+
+    >>> from dill import dumps, loads
+
+``dumps`` converts the object to a unique byte string, and ``loads`` performs
+the inverse operation::
+
+    >>> squared = lambda x: x**2
+    >>> loads(dumps(squared))(3)
+    9
+
+There are a number of options to control serialization which are provided
+as keyword arguments to several ``dill`` functions:
+
+* with *protocol*, the pickle protocol level can be set. This uses the
+  same value as the ``pickle`` module, *HIGHEST_PROTOCOL* or *DEFAULT_PROTOCOL*.
+* with *byref=True*, ``dill`` to behave a lot more like pickle with
+  certain objects (like modules) pickled by reference as opposed to
+  attempting to pickle the object itself.
+* with *recurse=True*, objects referred to in the global dictionary are
+  recursively traced and pickled, instead of the default behavior of
+  attempting to store the entire global dictionary.
+* with *fmode*, the contents of the file can be pickled along with the file
+  handle, which is useful if the object is being sent over the wire to a
+  remote system which does not have the original file on disk. Options are
+  *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content
+  and *FILE_FMODE* for content and handle.
+* with *ignore=False*, objects reconstructed with types defined in the
+  top-level script environment use the existing type in the environment
+  rather than a possibly different reconstructed type.
+
+The default serialization can also be set globally in *dill.settings*.
+Thus, we can modify how ``dill`` handles references to the global dictionary
+locally or globally::
+
+    >>> import dill.settings
+    >>> dumps(absolute) == dumps(absolute, recurse=True)
+    False
+    >>> dill.settings['recurse'] = True
+    >>> dumps(absolute) == dumps(absolute, recurse=True)
+    True
+
+``dill`` also includes source code inspection, as an alternate to pickling::
+
+    >>> import dill.source
+    >>> print(dill.source.getsource(squared))
+    squared = lambda x:x**2
+
+To aid in debugging pickling issues, use *dill.detect* which provides
+tools like pickle tracing::
+
+    >>> import dill.detect
+    >>> dill.detect.trace(True)
+    >>> f = dumps(squared)
+    F1: <function <lambda> at 0x108899e18>
+    F2: <function _create_function at 0x108db7488>
+    # F2
+    Co: <code object <lambda> at 0x10866a270, file "<stdin>", line 1>
+    F2: <function _create_code at 0x108db7510>
+    # F2
+    # Co
+    D1: <dict of module __main__>
+    # D1
+    D2: <dict object at 0x10862b3f0>
+    # D2
+    # F1
+    >>> dill.detect.trace(False)
+
+With trace, we see how ``dill`` stored the lambda (``F1``) by first storing
+``_create_function``, the underlying code object (``Co``) and ``_create_code``
+(which is used to handle code objects), then we handle the reference to
+the global dict (``D2``). A ``#`` marks when the object is actually stored.
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://dill.rtfd.io.
Also see ``dill.tests`` for a set of scripts that +demonstrate how ``dill`` can serialize different python objects. You can +run the test suite with ``python -m dill.tests``. The contents of any +pickle file can be examined with ``undill``. As ``dill`` conforms to +the ``pickle`` interface, the examples and documentation found at +http://docs.python.org/library/pickle.html also apply to ``dill`` +if one will ``import dill as pickle``. The source code is also generally +well documented, so further questions may be resolved by inspecting the +code itself. Please feel free to submit a ticket on github, or ask a +question on stackoverflow (**@Mike McKerns**). +If you would like to share how you use ``dill`` in your work, please send +an email (to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``dill`` to do research that leads to publication, we ask that you +acknowledge use of ``dill`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/RECORD new file mode 100644 index 0000000..da83252 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/RECORD @@ -0,0 +1,44 @@ +dill/__diff.py,sha256=66V4Y3jCZXL7qpCrxp2Emvk8XW-TUqgco_D4kdjvsu4,7211 +dill/__init__.py,sha256=hxoYKydGFTrNUnQArWKrM_ITy83xWAd20fjjgQrK6vU,13437 +dill/_dill.py,sha256=kc3fyUgZQZWxa_6nNkCRO8yumVvMizB01P_StU66MTU,81628 +dill/_objects.py,sha256=Ojaz1_gGrTegRSYCH_-uV8d3RvLXGwNdJKitqcjTZJo,20432 +dill/_shims.py,sha256=RzQ8pGN--AtOc5qnlEprjcVPvpiT79Q77TdtVyl22Gg,9730 +dill/detect.py,sha256=qpaHJXMWMklFDOurFyyN5PLUgkp_wD2nq6acyENqKVM,12336 +dill/objtypes.py,sha256=l3lfnIox6YJQXN-kXejlajubu-rAAfZ8alQF73l2V3U,736 +dill/pointers.py,sha256=7NIitFkL91Z0-_kTBHXGEqL8seI9qWVouFbykgDlcUw,4467 +dill/settings.py,sha256=Waa57PLciMqHzVhFkGEPmObeweLXruDl86tsWXuhb9Y,719 +dill/source.py,sha256=K6Gds4WbAa1Byv7yX-yqbO_njdt_CHLYwcr2NiVpQEM,44917 +dill/temp.py,sha256=yhV-agXaEweDfzzFVNJQCzQ8osTe4ywCvoSjpCft-LM,8242 +dill/tests/__init__.py,sha256=vZabTjRnwYaN-v3uL2fJBqKqj0T7_9Omwy63JSigvTA,501 +dill/tests/__main__.py,sha256=lxSBOWQpr8CWLrlrwZHhIOwS5bbvnU0wzTzwEnJpFVs,834 +dill/tests/test_check.py,sha256=aoqn8iGaJ_v5gP2wrMSDQeEr3QLbuvkARv7FPAkI1_U,1278 +dill/tests/test_classdef.py,sha256=1gkzO-Re6m0LvzrEKbddhPUWbO2NOdbCgBXDDHjPuvM,6944 +dill/tests/test_detect.py,sha256=btoQKJ-K4jeJlmvGoIkTaFak7lHM4OyglgPdbJ8yEsc,4143 +dill/tests/test_dictviews.py,sha256=_A3DiEkOz8aYt8XWQxgMYjsMWOof6qTOjZrB7WL_Vt4,1240 +dill/tests/test_diff.py,sha256=RS1nx0apXUe8jK-W3B7E0lSyHcQxM6fqD5GWU2AzatU,2761 +dill/tests/test_extendpickle.py,sha256=Ltr_pkjtE0gFBuMwx2t-G5aJX0XUi-fLixbXUtzKsS8,1368 +dill/tests/test_fglobals.py,sha256=a3xGhcKEp7ly3YDE1zKTL0NWtp6Hz7NmmCD_2IPDDLA,1679 +dill/tests/test_file.py,sha256=XCt2vYTTvqhDrI-w93IBbIouxU0SjYeMqty_K2pR_LM,13642 +dill/tests/test_functions.py,sha256=DqBQj9unXgN8lnNDcB6CBDa3uEi0FsJxSMacWaiqoZU,2745 +dill/tests/test_functors.py,sha256=Sc9npIl2jTaIWi9Q_1RP37mhLo1ipapilrKN8Pe9JzA,930 
+dill/tests/test_mixins.py,sha256=oVHtv1W3v7VgpiBKtJIR1nqa2D6oDK6Br4zbIfVuXOI,4007 +dill/tests/test_module.py,sha256=HKlpDn0LW6bZ2D4Qv9sJAtiaEvXr9mLl9a1jzYtTt4A,1973 +dill/tests/test_moduledict.py,sha256=GQ-grHuJl1uwEarrhVFoizNvxcfR5pQJDLETfjj481M,1182 +dill/tests/test_nested.py,sha256=5ML9p0OhgII9P_y4waSwVX3LXwh38ual09ujSuKwhpw,3134 +dill/tests/test_objects.py,sha256=zJtLBerYOmBa9TMMgwOJLmwm-xvXZ0IEEhXDdy5VhvE,1779 +dill/tests/test_properties.py,sha256=1mBRrIeYAu_4x14viOtN8uMeefQ8Zwv6Ovvu3EPDg9g,1346 +dill/tests/test_recursive.py,sha256=PvTy1RfVpXbUdtUo-DBhT9rrNQfHeY3kIpfHUCSVlAY,4209 +dill/tests/test_restricted.py,sha256=fcEbNfLbb1ar4EuR_Np9QoOqdnLeRoXSQXnIMvp73Lg,783 +dill/tests/test_selected.py,sha256=Y6U7D4JlNRxXWKTzM_QIGjSPu8dQfZB5VIJoBplF2y0,2533 +dill/tests/test_session.py,sha256=kuX2yOkhd-P10l_rK4wsleKqMCf7KpUZnny3FVnJAOs,8741 +dill/tests/test_source.py,sha256=3K2yLts31FKruAsnvs_8W_VeN8ECS6y4plQtKug2oTI,6375 +dill/tests/test_temp.py,sha256=w2Z-GDhxz3Gx1jXMWpnE1728s7i0eGfXwc1VNWGl1OQ,2619 +dill/tests/test_weakref.py,sha256=S-p_FbiyKPzdpWMsTA8tV-nlYKMBLrgs_ydmINcHF9w,2056 +../../../bin/get_objgraph,sha256=tCXH1qHc46QqOlPMafrRFAHqdExneBYTkUTz3pXDTRk,1692 +../../../bin/undill,sha256=YY_ky_yIW3Zkwodhg9q6vojvt5E-Y-iI0hefJ90VioE,628 +dill-0.3.5.1.dist-info/LICENSE,sha256=evNBT-0eWbe4mO8SKnan2GaHddldBVbI6nbeRKXZoZc,1790 +dill-0.3.5.1.dist-info/METADATA,sha256=tLSZPXPB7L8FBky9p4RQKXS_nPi6VK8BVRB2ELlIqoE,9725 +dill-0.3.5.1.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +dill-0.3.5.1.dist-info/top_level.txt,sha256=HLSIyYIjQzJiBvs3_-16ntezE3j6mWGTW0DT1xDd7X0,5 +dill-0.3.5.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +dill-0.3.5.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/WHEEL new file mode 100644 index 0000000..0b18a28 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/top_level.txt new file mode 100644 index 0000000..85eea70 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill-0.3.5.1.dist-info/top_level.txt @@ -0,0 +1 @@ +dill diff --git a/myenv/lib/python3.9/site-packages/dill/__diff.py b/myenv/lib/python3.9/site-packages/dill/__diff.py new file mode 100644 index 0000000..df2589e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/__diff.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +""" +Module to show if an object has changed since it was memorised +""" + +import os +import sys +import types +try: + import numpy + HAS_NUMPY = True +except: + HAS_NUMPY = False +try: + import builtins +except ImportError: + import __builtin__ as builtins + +# pypy doesn't use reference counting +getrefcount = getattr(sys, 'getrefcount', lambda x:0) + +# memo of objects indexed by id to a tuple (attributes, sequence items) +# attributes is a dict indexed by attribute name to attribute id +# sequence items is either a list of ids, of a dictionary of keys to ids +memo = {} +id_to_obj = {} +# types that cannot have changing attributes +builtins_types = set((str, list, dict, set, frozenset, int)) +dont_memo = set(id(i) for i in (memo, sys.modules, sys.path_importer_cache, + os.environ, id_to_obj)) + + +def get_attrs(obj): + """ + Gets all the attributes of an object though its __dict__ or return None + """ + if type(obj) in builtins_types \ + or type(obj) is type and obj in builtins_types: + return + try: + return obj.__dict__ + except: + return + + +def get_seq(obj, cache={str: False, frozenset: False, list: True, set: True, + dict: True, tuple: True, type: False, + types.ModuleType: False, types.FunctionType: False, + types.BuiltinFunctionType: False}): + """ + Gets all the items in a sequence or return None + """ + try: + o_type = obj.__class__ + except AttributeError: + o_type = type(obj) + hsattr = hasattr + if o_type in cache: + if cache[o_type]: + if hsattr(obj, "copy"): + return obj.copy() + return obj + elif HAS_NUMPY and o_type in (numpy.ndarray, numpy.ma.core.MaskedConstant): + if obj.shape and obj.size: + return obj + else: + return [] + elif hsattr(obj, "__contains__") and hsattr(obj, "__iter__") \ + and hsattr(obj, "__len__") and hsattr(o_type, "__contains__") \ + and hsattr(o_type, "__iter__") and hsattr(o_type, "__len__"): + cache[o_type] = True + if hsattr(obj, "copy"): + return obj.copy() + return obj + else: + cache[o_type] = False + return None + + +def memorise(obj, force=False): + """ + Adds an object to the memo, and recursively adds all the objects + attributes, and if it is a container, its items. Use force=True to update + an object already in the memo. Updating is not recursively done. + """ + obj_id = id(obj) + if obj_id in memo and not force or obj_id in dont_memo: + return + id_ = id + g = get_attrs(obj) + if g is None: + attrs_id = None + else: + attrs_id = dict((key,id_(value)) for key, value in g.items()) + + s = get_seq(obj) + if s is None: + seq_id = None + elif hasattr(s, "items"): + seq_id = dict((id_(key),id_(value)) for key, value in s.items()) + elif not hasattr(s, "__len__"): #XXX: avoid TypeError from unexpected case + seq_id = None + else: + seq_id = [id_(i) for i in s] + + memo[obj_id] = attrs_id, seq_id + id_to_obj[obj_id] = obj + mem = memorise + if g is not None: + [mem(value) for key, value in g.items()] + + if s is not None: + if hasattr(s, "items"): + [(mem(key), mem(item)) + for key, item in s.items()] + else: + if hasattr(s, '__len__'): + [mem(item) for item in s] + else: mem(s) + + +def release_gone(): + itop, mp, src = id_to_obj.pop, memo.pop, getrefcount + [(itop(id_), mp(id_)) for id_, obj in list(id_to_obj.items()) + if src(obj) < 4] #XXX: correct for pypy? + + +def whats_changed(obj, seen=None, simple=False, first=True): + """ + Check an object against the memo. Returns a list in the form + (attribute changes, container changed). 
Attribute changes is a dict of + attribute name to attribute value. container changed is a boolean. + If simple is true, just returns a boolean. None for either item means + that it has not been checked yet + """ + # Special cases + if first: + # ignore the _ variable, which only appears in interactive sessions + if "_" in builtins.__dict__: + del builtins._ + if seen is None: + seen = {} + + obj_id = id(obj) + + if obj_id in seen: + if simple: + return any(seen[obj_id]) + return seen[obj_id] + + # Safety checks + if obj_id in dont_memo: + seen[obj_id] = [{}, False] + if simple: + return False + return seen[obj_id] + elif obj_id not in memo: + if simple: + return True + else: + raise RuntimeError("Object not memorised " + str(obj)) + + seen[obj_id] = ({}, False) + + chngd = whats_changed + id_ = id + + # compare attributes + attrs = get_attrs(obj) + if attrs is None: + changed = {} + else: + obj_attrs = memo[obj_id][0] + obj_get = obj_attrs.get + changed = dict((key,None) for key in obj_attrs if key not in attrs) + for key, o in attrs.items(): + if id_(o) != obj_get(key, None) or chngd(o, seen, True, False): + changed[key] = o + + # compare sequence + items = get_seq(obj) + seq_diff = False + if (items is not None) and (hasattr(items, '__len__')): + obj_seq = memo[obj_id][1] + if (len(items) != len(obj_seq)): + seq_diff = True + elif hasattr(obj, "items"): # dict type obj + obj_get = obj_seq.get + for key, item in items.items(): + if id_(item) != obj_get(id_(key)) \ + or chngd(key, seen, True, False) \ + or chngd(item, seen, True, False): + seq_diff = True + break + else: + for i, j in zip(items, obj_seq): # list type obj + if id_(i) != j or chngd(i, seen, True, False): + seq_diff = True + break + seen[obj_id] = changed, seq_diff + if simple: + return changed or seq_diff + return changed, seq_diff + + +def has_changed(*args, **kwds): + kwds['simple'] = True # ignore simple if passed in + return whats_changed(*args, **kwds) + +__import__ = __import__ + + +def _imp(*args, **kwds): + """ + Replaces the default __import__, to allow a module to be memorised + before the user can change it + """ + before = set(sys.modules.keys()) + mod = __import__(*args, **kwds) + after = set(sys.modules.keys()).difference(before) + for m in after: + memorise(sys.modules[m]) + return mod + +builtins.__import__ = _imp +if hasattr(builtins, "_"): + del builtins._ + +# memorise all already imported modules. This implies that this must be +# imported first for any changes to be recorded +for mod in sys.modules.values(): + memorise(mod) +release_gone() diff --git a/myenv/lib/python3.9/site-packages/dill/__init__.py b/myenv/lib/python3.9/site-packages/dill/__init__.py new file mode 100644 index 0000000..e47f482 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/__init__.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +# author, version, license, and long description +__version__ = '0.3.5.1' +__author__ = 'Mike McKerns' + +__doc__ = """ +----------------------------- +dill: serialize all of python +----------------------------- + +About Dill +========== + +``dill`` extends python's ``pickle`` module for serializing and de-serializing +python objects to the majority of the built-in python types. Serialization +is the process of converting an object to a byte stream, and the inverse +of which is converting a byte stream back to a python object hierarchy. + +``dill`` provides the user the same interface as the ``pickle`` module, and +also includes some additional features. In addition to pickling python +objects, ``dill`` provides the ability to save the state of an interpreter +session in a single command. Hence, it would be feasable to save an +interpreter session, close the interpreter, ship the pickled file to +another computer, open a new interpreter, unpickle the session and +thus continue from the 'saved' state of the original interpreter +session. + +``dill`` can be used to store python objects to a file, but the primary +usage is to send python objects across the network as a byte stream. +``dill`` is quite flexible, and allows arbitrary user defined classes +and functions to be serialized. Thus ``dill`` is not intended to be +secure against erroneously or maliciously constructed data. It is +left to the user to decide whether the data they unpickle is from +a trustworthy source. + +``dill`` is part of ``pathos``, a python framework for heterogeneous computing. +``dill`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/dill/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. + + +Major Features +============== + +``dill`` can pickle the following standard types: + + - none, type, bool, int, long, float, complex, str, unicode, + - tuple, list, dict, file, buffer, builtin, + - both old and new style classes, + - instances of old and new style classes, + - set, frozenset, array, functions, exceptions + +``dill`` can also pickle more 'exotic' standard types: + + - functions with yields, nested functions, lambdas, + - cell, method, unboundmethod, module, code, methodwrapper, + - dictproxy, methoddescriptor, getsetdescriptor, memberdescriptor, + - wrapperdescriptor, xrange, slice, + - notimplemented, ellipsis, quit + +``dill`` cannot yet pickle these standard types: + + - frame, generator, traceback + +``dill`` also provides the capability to: + + - save and load python interpreter sessions + - save and extract the source code from functions and classes + - interactively diagnose pickling errors + + +Current Release +=============== + +The latest released version of ``dill`` is available from: + + https://pypi.org/project/dill + +``dill`` is distributed under a 3-clause BSD license. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. 
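+
+As noted above, ``dill`` can save the state of an interpreter session in a
+single command. A minimal sketch (the session file path is the documented
+default; the surrounding values are illustrative)::
+
+    >>> import dill
+    >>> x = 42
+    >>> dill.dump_session('/tmp/session.pkl')
+
+and then, in a fresh interpreter::
+
+    >>> import dill
+    >>> dill.load_session('/tmp/session.pkl')
+    >>> x
+    42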
+
+
+Installation
+============
+
+``dill`` can be installed with ``pip``::
+
+    $ pip install dill
+
+To optionally include the ``objgraph`` diagnostic tool in the install::
+
+    $ pip install dill[graph]
+
+For Windows users, to optionally install session history tools::
+
+    $ pip install dill[readline]
+
+
+Requirements
+============
+
+``dill`` requires:
+
+    - ``python`` (or ``pypy``), **==2.7** or **>=3.7**
+    - ``setuptools``, **>=42**
+
+Optional requirements:
+
+    - ``objgraph``, **>=1.7.2**
+    - ``pyreadline``, **>=1.7.1** (on windows)
+
+
+Basic Usage
+===========
+
+``dill`` is a drop-in replacement for ``pickle``. Existing code can be
+updated to allow complete pickling using::
+
+    >>> import dill as pickle
+
+or::
+
+    >>> from dill import dumps, loads
+
+``dumps`` converts the object to a unique byte string, and ``loads`` performs
+the inverse operation::
+
+    >>> squared = lambda x: x**2
+    >>> loads(dumps(squared))(3)
+    9
+
+There are a number of options to control serialization which are provided
+as keyword arguments to several ``dill`` functions:
+
+* with *protocol*, the pickle protocol level can be set. This uses the
+  same value as the ``pickle`` module, *HIGHEST_PROTOCOL* or *DEFAULT_PROTOCOL*.
+* with *byref=True*, ``dill`` behaves a lot more like pickle, with
+  certain objects (like modules) pickled by reference as opposed to
+  attempting to pickle the object itself.
+* with *recurse=True*, objects referred to in the global dictionary are
+  recursively traced and pickled, instead of the default behavior of
+  attempting to store the entire global dictionary.
+* with *fmode*, the contents of the file can be pickled along with the file
+  handle, which is useful if the object is being sent over the wire to a
+  remote system which does not have the original file on disk. Options are
+  *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content
+  and *FILE_FMODE* for content and handle.
+* with *ignore=False*, objects reconstructed with types defined in the
+  top-level script environment use the existing type in the environment
+  rather than a possibly different reconstructed type.
+
+The default serialization can also be set globally in *dill.settings*.
+Thus, we can modify how ``dill`` handles references to the global dictionary
+locally or globally::
+
+    >>> import dill.settings
+    >>> dumps(absolute) == dumps(absolute, recurse=True)
+    False
+    >>> dill.settings['recurse'] = True
+    >>> dumps(absolute) == dumps(absolute, recurse=True)
+    True
+
+``dill`` also includes source code inspection, as an alternative to pickling::
+
+    >>> import dill.source
+    >>> print(dill.source.getsource(squared))
+    squared = lambda x:x**2
+
+To aid in debugging pickling issues, use *dill.detect*, which provides
+tools like pickle tracing::
+
+    >>> import dill.detect
+    >>> dill.detect.trace(True)
+    >>> f = dumps(squared)
+    F1: <function <lambda> at 0x108899e18>
+    F2: <function _create_function at 0x108db7488>
+    # F2
+    Co: <code object <lambda> at 0x10866a270, file "<stdin>", line 1>
+    F2: <function _create_code at 0x108db7510>
+    # F2
+    # Co
+    D1: <dict object at 0x10862b3f0>
+    # D1
+    D2: <dict object at 0x108e42ee8>
+    # D2
+    # F1
+    >>> dill.detect.trace(False)
+
+With trace, we see how ``dill`` stored the lambda (``F1``) by first storing
+``_create_function``, the underlying code object (``Co``) and ``_create_code``
+(which is used to handle code objects), and then the reference to
+the global dict (``D2``). A ``#`` marks when the object is actually stored.
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://dill.rtfd.io.
Also see ``dill.tests`` for a set of scripts that +demonstrate how ``dill`` can serialize different python objects. You can +run the test suite with ``python -m dill.tests``. The contents of any +pickle file can be examined with ``undill``. As ``dill`` conforms to +the ``pickle`` interface, the examples and documentation found at +http://docs.python.org/library/pickle.html also apply to ``dill`` +if one will ``import dill as pickle``. The source code is also generally +well documented, so further questions may be resolved by inspecting the +code itself. Please feel free to submit a ticket on github, or ask a +question on stackoverflow (**@Mike McKerns**). +If you would like to share how you use ``dill`` in your work, please send +an email (to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``dill`` to do research that leads to publication, we ask that you +acknowledge use of ``dill`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. + +""" + +__license__ = """ +Copyright (c) 2004-2016 California Institute of Technology. +Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +All rights reserved. + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met:: + + - Redistribution of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistribution in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentations and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" + +from ._dill import dump, dumps, load, loads, dump_session, load_session, \ + Pickler, Unpickler, register, copy, pickle, pickles, check, \ + HIGHEST_PROTOCOL, DEFAULT_PROTOCOL, PicklingError, UnpicklingError, \ + HANDLE_FMODE, CONTENTS_FMODE, FILE_FMODE, PickleError, PickleWarning, \ + PicklingWarning, UnpicklingWarning +from . import source, temp, detect + +# get global settings +from .settings import settings + +# make sure "trace" is turned off +detect.trace(False) + +try: + from importlib import reload +except ImportError: + try: + from imp import reload + except ImportError: + pass + +# put the objects in order, if possible +try: + from collections import OrderedDict as odict +except ImportError: + try: + from ordereddict import OrderedDict as odict + except ImportError: + odict = dict +objects = odict() +# local import of dill._objects +#from . import _objects +#objects.update(_objects.succeeds) +#del _objects + +# local import of dill.objtypes +from . import objtypes as types + +def load_types(pickleable=True, unpickleable=True): + """load pickleable and/or unpickleable types to ``dill.types`` + + ``dill.types`` is meant to mimic the ``types`` module, providing a + registry of object types. By default, the module is empty (for import + speed purposes). Use the ``load_types`` function to load selected object + types to the ``dill.types`` module. + + Args: + pickleable (bool, default=True): if True, load pickleable types. + unpickleable (bool, default=True): if True, load unpickleable types. + + Returns: + None + """ + # local import of dill.objects + from . import _objects + if pickleable: + objects.update(_objects.succeeds) + else: + [objects.pop(obj,None) for obj in _objects.succeeds] + if unpickleable: + objects.update(_objects.failures) + else: + [objects.pop(obj,None) for obj in _objects.failures] + objects.update(_objects.registered) + del _objects + # reset contents of types to 'empty' + [types.__dict__.pop(obj) for obj in list(types.__dict__.keys()) \ + if obj.find('Type') != -1] + # add corresponding types from objects to types + reload(types) + +def extend(use_dill=True): + '''add (or remove) dill types to/from the pickle registry + + by default, ``dill`` populates its types to ``pickle.Pickler.dispatch``. + Thus, all ``dill`` types are available upon calling ``'import pickle'``. + To drop all ``dill`` types from the ``pickle`` dispatch, *use_dill=False*. + + Args: + use_dill (bool, default=True): if True, extend the dispatch table. + + Returns: + None + ''' + from ._dill import _revert_extension, _extend + if use_dill: _extend() + else: _revert_extension() + return + +extend() +del odict + + +def license(): + """print license""" + print (__license__) + return + +def citation(): + """print citation""" + print (__doc__[-491:-118]) + return + +# end of file diff --git a/myenv/lib/python3.9/site-packages/dill/_dill.py b/myenv/lib/python3.9/site-packages/dill/_dill.py new file mode 100644 index 0000000..3f3cb02 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/_dill.py @@ -0,0 +1,2107 @@ +# -*- coding: utf-8 -*- +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2015 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +dill: a utility for serialization of python objects + +Based on code written by Oren Tirosh and Armin Ronacher. 
+Extended to a (near) full set of the builtin types (in types module), +and coded to the pickle interface, by . +Initial port to python3 by Jonathan Dobson, continued by mmckerns. +Test against "all" python types (Std. Lib. CH 1-15 @ 2.7) by mmckerns. +Test against CH16+ Std. Lib. ... TBD. +""" +__all__ = ['dump','dumps','load','loads','dump_session','load_session', + 'Pickler','Unpickler','register','copy','pickle','pickles', + 'check','HIGHEST_PROTOCOL','DEFAULT_PROTOCOL','PicklingError', + 'UnpicklingError','HANDLE_FMODE','CONTENTS_FMODE','FILE_FMODE', + 'PickleError','PickleWarning','PicklingWarning','UnpicklingWarning'] + +import logging +log = logging.getLogger("dill") +log.addHandler(logging.StreamHandler()) +def _trace(boolean): + """print a trace through the stack when pickling; useful for debugging""" + if boolean: log.setLevel(logging.INFO) + else: log.setLevel(logging.WARN) + return +import warnings + +import os +import sys +diff = None +_use_diff = False +PY3 = (sys.hexversion >= 0x3000000) +# OLDER: 3.0 <= x < 3.4 *OR* x < 2.7.10 #NOTE: guessing relevant versions +OLDER = (PY3 and sys.hexversion < 0x3040000) or (sys.hexversion < 0x2070ab1) +OLD33 = (sys.hexversion < 0x3030000) +OLD37 = (sys.hexversion < 0x3070000) +OLD39 = (sys.hexversion < 0x3090000) +OLD310 = (sys.hexversion < 0x30a0000) +PY34 = (0x3040000 <= sys.hexversion < 0x3050000) +if PY3: #XXX: get types from .objtypes ? + import builtins as __builtin__ + from pickle import _Pickler as StockPickler, Unpickler as StockUnpickler + from _thread import LockType + if (sys.hexversion >= 0x30200f0): + from _thread import RLock as RLockType + else: + from threading import _RLock as RLockType + #from io import IOBase + from types import CodeType, FunctionType, MethodType, GeneratorType, \ + TracebackType, FrameType, ModuleType, BuiltinMethodType + BufferType = memoryview #XXX: unregistered + ClassType = type # no 'old-style' classes + EllipsisType = type(Ellipsis) + #FileType = IOBase + NotImplementedType = type(NotImplemented) + SliceType = slice + TypeType = type # 'new-style' classes #XXX: unregistered + XRangeType = range + if OLD33: + DictProxyType = type(object.__dict__) + else: + from types import MappingProxyType as DictProxyType +else: + import __builtin__ + from pickle import Pickler as StockPickler, Unpickler as StockUnpickler + from thread import LockType + from threading import _RLock as RLockType + from types import CodeType, FunctionType, ClassType, MethodType, \ + GeneratorType, DictProxyType, XRangeType, SliceType, TracebackType, \ + NotImplementedType, EllipsisType, FrameType, ModuleType, \ + BufferType, BuiltinMethodType, TypeType +from pickle import HIGHEST_PROTOCOL, PickleError, PicklingError, UnpicklingError +try: + from pickle import DEFAULT_PROTOCOL +except ImportError: + DEFAULT_PROTOCOL = HIGHEST_PROTOCOL +import __main__ as _main_module +import marshal +import gc +# import zlib +from weakref import ReferenceType, ProxyType, CallableProxyType +from functools import partial +from operator import itemgetter, attrgetter +# new in python3.3 +if sys.hexversion < 0x03030000: + FileNotFoundError = IOError +if PY3 and sys.hexversion < 0x03040000: + GENERATOR_FAIL = True +else: GENERATOR_FAIL = False +if PY3: + import importlib.machinery + EXTENSION_SUFFIXES = tuple(importlib.machinery.EXTENSION_SUFFIXES) +else: + import imp + EXTENSION_SUFFIXES = tuple(suffix + for (suffix, _, s_type) in imp.get_suffixes() + if s_type == imp.C_EXTENSION) +try: + import ctypes + HAS_CTYPES = True + # if using `pypy`, 
pythonapi is not found + IS_PYPY = not hasattr(ctypes, 'pythonapi') +except ImportError: + HAS_CTYPES = False + IS_PYPY = False +IS_PYPY2 = IS_PYPY and not PY3 +NumpyUfuncType = None +NumpyDType = None +NumpyArrayType = None +try: + if OLDER: + raise AttributeError('find_spec not found') + import importlib + if not importlib.machinery.PathFinder().find_spec('numpy'): + raise ImportError("No module named 'numpy'") + NumpyUfuncType = True + NumpyDType = True + NumpyArrayType = True +except AttributeError: + try: + import imp + imp.find_module('numpy') + NumpyUfuncType = True + NumpyDType = True + NumpyArrayType = True + except ImportError: + pass +except ImportError: + pass +def __hook__(): + global NumpyArrayType, NumpyDType, NumpyUfuncType + from numpy import ufunc as NumpyUfuncType + from numpy import ndarray as NumpyArrayType + from numpy import dtype as NumpyDType + return True +if NumpyArrayType: # then has numpy + def ndarraysubclassinstance(obj): + if type(obj) in (TypeType, ClassType): + return False # all classes return False + try: # check if is ndarray, and elif is subclass of ndarray + cls = getattr(obj, '__class__', None) + if cls is None: return False + elif cls is TypeType: return False + elif 'numpy.ndarray' not in str(getattr(cls, 'mro', int.mro)()): + return False + except ReferenceError: return False # handle 'R3' weakref in 3.x + except TypeError: return False + # anything below here is a numpy array (or subclass) instance + __hook__() # import numpy (so the following works!!!) + # verify that __reduce__ has not been overridden + NumpyInstance = NumpyArrayType((0,),'int8') + if id(obj.__reduce_ex__) == id(NumpyInstance.__reduce_ex__) and \ + id(obj.__reduce__) == id(NumpyInstance.__reduce__): return True + return False + def numpyufunc(obj): + if type(obj) in (TypeType, ClassType): + return False # all classes return False + try: # check if is ufunc + cls = getattr(obj, '__class__', None) + if cls is None: return False + elif cls is TypeType: return False + if 'numpy.ufunc' not in str(getattr(cls, 'mro', int.mro)()): + return False + except ReferenceError: return False # handle 'R3' weakref in 3.x + except TypeError: return False + # anything below here is a numpy ufunc + return True + def numpydtype(obj): + if type(obj) in (TypeType, ClassType): + return False # all classes return False + try: # check if is dtype + cls = getattr(obj, '__class__', None) + if cls is None: return False + elif cls is TypeType: return False + if 'numpy.dtype' not in str(getattr(obj, 'mro', int.mro)()): + return False + except ReferenceError: return False # handle 'R3' weakref in 3.x + except TypeError: return False + # anything below here is a numpy dtype + __hook__() # import numpy (so the following works!!!) + return type(obj) is type(NumpyDType) # handles subclasses +else: + def ndarraysubclassinstance(obj): return False + def numpyufunc(obj): return False + def numpydtype(obj): return False + +# make sure to add these 'hand-built' types to _typemap +if PY3: + CellType = type((lambda x: lambda y: x)(0).__closure__[0]) +else: + CellType = type((lambda x: lambda y: x)(0).func_closure[0]) +# new in python2.5 +if sys.hexversion >= 0x20500f0: + from types import GetSetDescriptorType + if not IS_PYPY: + from types import MemberDescriptorType + else: + # oddly, MemberDescriptorType is GetSetDescriptorType + # while, member_descriptor does exist otherwise... is this a pypy bug? 
+ class _member(object): + __slots__ = ['descriptor'] + MemberDescriptorType = type(_member.descriptor) +if IS_PYPY: + WrapperDescriptorType = MethodType + MethodDescriptorType = FunctionType + ClassMethodDescriptorType = FunctionType +else: + WrapperDescriptorType = type(type.__repr__) + MethodDescriptorType = type(type.__dict__['mro']) + ClassMethodDescriptorType = type(type.__dict__['__prepare__' if PY3 else 'mro']) + +MethodWrapperType = type([].__repr__) +PartialType = type(partial(int,base=2)) +SuperType = type(super(Exception, TypeError())) +ItemGetterType = type(itemgetter(0)) +AttrGetterType = type(attrgetter('__repr__')) + +try: + from functools import _lru_cache_wrapper as LRUCacheType +except: + LRUCacheType = None + +if not isinstance(LRUCacheType, type): + LRUCacheType = None + +def get_file_type(*args, **kwargs): + open = kwargs.pop("open", __builtin__.open) + f = open(os.devnull, *args, **kwargs) + t = type(f) + f.close() + return t + +FileType = get_file_type('rb', buffering=0) +TextWrapperType = get_file_type('r', buffering=-1) +BufferedRandomType = get_file_type('r+b', buffering=-1) +BufferedReaderType = get_file_type('rb', buffering=-1) +BufferedWriterType = get_file_type('wb', buffering=-1) +try: + from _pyio import open as _open + PyTextWrapperType = get_file_type('r', buffering=-1, open=_open) + PyBufferedRandomType = get_file_type('r+b', buffering=-1, open=_open) + PyBufferedReaderType = get_file_type('rb', buffering=-1, open=_open) + PyBufferedWriterType = get_file_type('wb', buffering=-1, open=_open) +except ImportError: + PyTextWrapperType = PyBufferedRandomType = PyBufferedReaderType = PyBufferedWriterType = None +try: + from cStringIO import StringIO, InputType, OutputType +except ImportError: + if PY3: + from io import BytesIO as StringIO + else: + from StringIO import StringIO + InputType = OutputType = None +if not IS_PYPY2: + from socket import socket as SocketType + try: #FIXME: additionally calls ForkingPickler.register several times + from multiprocessing.reduction import _reduce_socket as reduce_socket + except ImportError: + from multiprocessing.reduction import reduce_socket +try: + __IPYTHON__ is True # is ipython + ExitType = None # IPython.core.autocall.ExitAutocall + singletontypes = ['exit', 'quit', 'get_ipython'] +except NameError: + try: ExitType = type(exit) # apparently 'exit' can be removed + except NameError: ExitType = None + singletontypes = [] + +from collections import OrderedDict + +import inspect + +### Shims for different versions of Python and dill +class Sentinel(object): + """ + Create a unique sentinel object that is pickled as a constant. + """ + def __init__(self, name, module_name=None): + self.name = name + if module_name is None: + # Use the calling frame's module + self.__module__ = inspect.currentframe().f_back.f_globals['__name__'] + else: + self.__module__ = module_name # pragma: no cover + def __repr__(self): + return self.__module__ + '.' + self.name # pragma: no cover + def __copy__(self): + return self # pragma: no cover + def __deepcopy__(self, memo): + return self # pragma: no cover + def __reduce__(self): + return self.name + def __reduce_ex__(self, protocol): + return self.name + +from . import _shims +from ._shims import Reduce, Getattr + +### File modes +#: Pickles the file handle, preserving mode. The position of the unpickled +#: object is as for a new file handle. +HANDLE_FMODE = 0 +#: Pickles the file contents, creating a new file if on load the file does +#: not exist. 
The position = min(pickled position, EOF) and mode is chosen +#: as such that "best" preserves behavior of the original file. +CONTENTS_FMODE = 1 +#: Pickles the entire file (handle and contents), preserving mode and position. +FILE_FMODE = 2 + +### Shorthands (modified from python2.5/lib/pickle.py) +def copy(obj, *args, **kwds): + """ + Use pickling to 'copy' an object (i.e. `loads(dumps(obj))`). + + See :func:`dumps` and :func:`loads` for keyword arguments. + """ + ignore = kwds.pop('ignore', Unpickler.settings['ignore']) + return loads(dumps(obj, *args, **kwds), ignore=ignore) + +def dump(obj, file, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None): + """ + Pickle an object to a file. + + See :func:`dumps` for keyword arguments. + """ + from .settings import settings + protocol = settings['protocol'] if protocol is None else int(protocol) + _kwds = kwds.copy() + _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse)) + Pickler(file, protocol, **_kwds).dump(obj) + return + +def dumps(obj, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None): + """ + Pickle an object to a string. + + *protocol* is the pickler protocol, as defined for Python *pickle*. + + If *byref=True*, then dill behaves a lot more like pickle as certain + objects (like modules) are pickled by reference as opposed to attempting + to pickle the object itself. + + If *recurse=True*, then objects referred to in the global dictionary + are recursively traced and pickled, instead of the default behavior + of attempting to store the entire global dictionary. This is needed for + functions defined via *exec()*. + + *fmode* (:const:`HANDLE_FMODE`, :const:`CONTENTS_FMODE`, + or :const:`FILE_FMODE`) indicates how file handles will be pickled. + For example, when pickling a data file handle for transfer to a remote + compute service, *FILE_FMODE* will include the file contents in the + pickle and cursor position so that a remote method can operate + transparently on an object with an open file handle. + + Default values for keyword arguments can be set in :mod:`dill.settings`. + """ + file = StringIO() + dump(obj, file, protocol, byref, fmode, recurse, **kwds)#, strictio) + return file.getvalue() + +def load(file, ignore=None, **kwds): + """ + Unpickle an object from a file. + + See :func:`loads` for keyword arguments. + """ + return Unpickler(file, ignore=ignore, **kwds).load() + +def loads(str, ignore=None, **kwds): + """ + Unpickle an object from a string. + + If *ignore=False* then objects whose class is defined in the module + *__main__* are updated to reference the existing class in *__main__*, + otherwise they are left to refer to the reconstructed type, which may + be different. + + Default values for keyword arguments can be set in :mod:`dill.settings`. 
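+
+    For example, a simple round trip (illustrative)::
+
+        >>> from dill import dumps, loads
+        >>> loads(dumps({'a': 1}))
+        {'a': 1}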
+
+    """
+    file = StringIO(str)
+    return load(file, ignore, **kwds)
+
+# def dumpzs(obj, protocol=None):
+#     """pickle an object to a compressed string"""
+#     return zlib.compress(dumps(obj, protocol))
+
+# def loadzs(str):
+#     """unpickle an object from a compressed string"""
+#     return loads(zlib.decompress(str))
+
+### End: Shorthands ###
+
+### Pickle the Interpreter Session
+SESSION_IMPORTED_AS_TYPES = (ModuleType, ClassType, TypeType, Exception,
+                             FunctionType, MethodType, BuiltinMethodType)
+
+def _module_map():
+    """get map of imported modules"""
+    from collections import defaultdict, namedtuple
+    modmap = namedtuple('Modmap', ['by_name', 'by_id', 'top_level'])
+    modmap = modmap(defaultdict(list), defaultdict(list), {})
+    items = 'items' if PY3 else 'iteritems'
+    for modname, module in getattr(sys.modules, items)():
+        if not isinstance(module, ModuleType):
+            continue
+        if '.' not in modname:
+            modmap.top_level[id(module)] = modname
+        for objname, modobj in module.__dict__.items():
+            modmap.by_name[objname].append((modobj, modname))
+            modmap.by_id[id(modobj)].append((modobj, objname, modname))
+    return modmap
+
+def _lookup_module(modmap, name, obj, main_module):
+    """lookup name or id of obj if module is imported"""
+    for modobj, modname in modmap.by_name[name]:
+        if modobj is obj and sys.modules[modname] is not main_module:
+            return modname, name
+    if isinstance(obj, SESSION_IMPORTED_AS_TYPES):
+        for modobj, objname, modname in modmap.by_id[id(obj)]:
+            if sys.modules[modname] is not main_module:
+                return modname, objname
+    return None, None
+
+def _stash_modules(main_module):
+    modmap = _module_map()
+    newmod = ModuleType(main_module.__name__)
+
+    imported = []
+    imported_as = []
+    imported_top_level = [] # keep separated for backwards compatibility
+    original = {}
+    items = 'items' if PY3 else 'iteritems'
+    for name, obj in getattr(main_module.__dict__, items)():
+        if obj is main_module:
+            original[name] = newmod # self-reference
+            continue
+
+        # Avoid incorrectly matching a singleton value in another package (ex.: __doc__).
+ if any(obj is singleton for singleton in (None, False, True)) or \ + isinstance(obj, ModuleType) and _is_builtin_module(obj): # always saved by ref + original[name] = obj + continue + + source_module, objname = _lookup_module(modmap, name, obj, main_module) + if source_module: + if objname == name: + imported.append((source_module, name)) + else: + imported_as.append((source_module, objname, name)) + else: + try: + imported_top_level.append((modmap.top_level[id(obj)], name)) + except KeyError: + original[name] = obj + + if len(original) < len(main_module.__dict__): + newmod.__dict__.update(original) + newmod.__dill_imported = imported + newmod.__dill_imported_as = imported_as + newmod.__dill_imported_top_level = imported_top_level + return newmod + else: + return main_module + +def _restore_modules(unpickler, main_module): + try: + for modname, name in main_module.__dict__.pop('__dill_imported'): + main_module.__dict__[name] = unpickler.find_class(modname, name) + for modname, objname, name in main_module.__dict__.pop('__dill_imported_as'): + main_module.__dict__[name] = unpickler.find_class(modname, objname) + for modname, name in main_module.__dict__.pop('__dill_imported_top_level'): + main_module.__dict__[name] = __import__(modname) + except KeyError: + pass + +#NOTE: 06/03/15 renamed main_module to main +def dump_session(filename='/tmp/session.pkl', main=None, byref=False, **kwds): + """pickle the current state of __main__ to a file""" + from .settings import settings + protocol = settings['protocol'] + if main is None: main = _main_module + if hasattr(filename, 'write'): + f = filename + else: + f = open(filename, 'wb') + try: + pickler = Pickler(f, protocol, **kwds) + pickler._original_main = main + if byref: + main = _stash_modules(main) + pickler._main = main #FIXME: dill.settings are disabled + pickler._byref = False # disable pickling by name reference + pickler._recurse = False # disable pickling recursion for globals + pickler._session = True # is best indicator of when pickling a session + pickler._first_pass = True + pickler._main_modified = main is not pickler._original_main + pickler.dump(main) + finally: + if f is not filename: # If newly opened file + f.close() + return + +def load_session(filename='/tmp/session.pkl', main=None, **kwds): + """update the __main__ module with the state from the session file""" + if main is None: main = _main_module + if hasattr(filename, 'read'): + f = filename + else: + f = open(filename, 'rb') + try: #FIXME: dill.settings are disabled + unpickler = Unpickler(f, **kwds) + unpickler._main = main + unpickler._session = True + module = unpickler.load() + unpickler._session = False + main.__dict__.update(module.__dict__) + _restore_modules(unpickler, main) + finally: + if f is not filename: # If newly opened file + f.close() + return + +### End: Pickle the Interpreter + +class MetaCatchingDict(dict): + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __missing__(self, key): + if issubclass(key, type): + return save_type + else: + raise KeyError() + +class PickleWarning(Warning, PickleError): + pass + +class PicklingWarning(PickleWarning, PicklingError): + pass + +class UnpicklingWarning(PickleWarning, UnpicklingError): + pass + +### Extend the Picklers +class Pickler(StockPickler): + """python's Pickler extended to interpreter sessions""" + dispatch = MetaCatchingDict(StockPickler.dispatch.copy()) + _session = False + from .settings import settings + + def __init__(self, *args, **kwds): 
+ settings = Pickler.settings + _byref = kwds.pop('byref', None) + #_strictio = kwds.pop('strictio', None) + _fmode = kwds.pop('fmode', None) + _recurse = kwds.pop('recurse', None) + StockPickler.__init__(self, *args, **kwds) + self._main = _main_module + self._diff_cache = {} + self._byref = settings['byref'] if _byref is None else _byref + self._strictio = False #_strictio + self._fmode = settings['fmode'] if _fmode is None else _fmode + self._recurse = settings['recurse'] if _recurse is None else _recurse + from collections import OrderedDict + self._postproc = OrderedDict() + + def dump(self, obj): #NOTE: if settings change, need to update attributes + # register if the object is a numpy ufunc + # thanks to Paul Kienzle for pointing out ufuncs didn't pickle + if NumpyUfuncType and numpyufunc(obj): + @register(type(obj)) + def save_numpy_ufunc(pickler, obj): + log.info("Nu: %s" % obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + log.info("# Nu") + return + # NOTE: the above 'save' performs like: + # import copy_reg + # def udump(f): return f.__name__ + # def uload(name): return getattr(numpy, name) + # copy_reg.pickle(NumpyUfuncType, udump, uload) + # register if the object is a numpy dtype + if NumpyDType and numpydtype(obj): + @register(type(obj)) + def save_numpy_dtype(pickler, obj): + log.info("Dt: %s" % obj) + pickler.save_reduce(_create_dtypemeta, (obj.type,), obj=obj) + log.info("# Dt") + return + # NOTE: the above 'save' performs like: + # import copy_reg + # def uload(name): return type(NumpyDType(name)) + # def udump(f): return uload, (f.type,) + # copy_reg.pickle(NumpyDTypeType, udump, uload) + # register if the object is a subclassed numpy array instance + if NumpyArrayType and ndarraysubclassinstance(obj): + @register(type(obj)) + def save_numpy_array(pickler, obj): + log.info("Nu: (%s, %s)" % (obj.shape,obj.dtype)) + npdict = getattr(obj, '__dict__', None) + f, args, state = obj.__reduce__() + pickler.save_reduce(_create_array, (f,args,state,npdict), obj=obj) + log.info("# Nu") + return + # end hack + if GENERATOR_FAIL and type(obj) == GeneratorType: + msg = "Can't pickle %s: attribute lookup builtins.generator failed" % GeneratorType + raise PicklingError(msg) + else: + StockPickler.dump(self, obj) + return + dump.__doc__ = StockPickler.dump.__doc__ + pass + +class Unpickler(StockUnpickler): + """python's Unpickler extended to interpreter sessions and more types""" + from .settings import settings + _session = False + + def find_class(self, module, name): + if (module, name) == ('__builtin__', '__main__'): + return self._main.__dict__ #XXX: above set w/save_module_dict + elif (module, name) == ('__builtin__', 'NoneType'): + return type(None) #XXX: special case: NoneType missing + if module == 'dill.dill': module = 'dill._dill' + return StockUnpickler.find_class(self, module, name) + + def __init__(self, *args, **kwds): + settings = Pickler.settings + _ignore = kwds.pop('ignore', None) + StockUnpickler.__init__(self, *args, **kwds) + self._main = _main_module + self._ignore = settings['ignore'] if _ignore is None else _ignore + + def load(self): #NOTE: if settings change, need to update attributes + obj = StockUnpickler.load(self) + if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'): + if not self._ignore: + # point obj class to main + try: obj.__class__ = getattr(self._main, type(obj).__name__) + except (AttributeError,TypeError): pass # defined in a file + 
#_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ? + return obj + load.__doc__ = StockUnpickler.load.__doc__ + pass + +''' +def dispatch_table(): + """get the dispatch table of registered types""" + return Pickler.dispatch +''' + +pickle_dispatch_copy = StockPickler.dispatch.copy() + +def pickle(t, func): + """expose dispatch table for user-created extensions""" + Pickler.dispatch[t] = func + return + +def register(t): + """register type to Pickler's dispatch table """ + def proxy(func): + Pickler.dispatch[t] = func + return func + return proxy + +def _revert_extension(): + """drop dill-registered types from pickle's dispatch table""" + for type, func in list(StockPickler.dispatch.items()): + if func.__module__ == __name__: + del StockPickler.dispatch[type] + if type in pickle_dispatch_copy: + StockPickler.dispatch[type] = pickle_dispatch_copy[type] + +def use_diff(on=True): + """ + Reduces size of pickles by only including object which have changed. + + Decreases pickle size but increases CPU time needed. + Also helps avoid some unpicklable objects. + MUST be called at start of script, otherwise changes will not be recorded. + """ + global _use_diff, diff + _use_diff = on + if _use_diff and diff is None: + try: + from . import diff as d + except: + import diff as d + diff = d + +def _create_typemap(): + import types + if PY3: + d = dict(list(__builtin__.__dict__.items()) + \ + list(types.__dict__.items())).items() + builtin = 'builtins' + else: + d = types.__dict__.iteritems() + builtin = '__builtin__' + for key, value in d: + if getattr(value, '__module__', None) == builtin \ + and type(value) is type: + yield key, value + return +_reverse_typemap = dict(_create_typemap()) +_reverse_typemap.update({ + 'CellType': CellType, + 'MethodWrapperType': MethodWrapperType, + 'PartialType': PartialType, + 'SuperType': SuperType, + 'ItemGetterType': ItemGetterType, + 'AttrGetterType': AttrGetterType, + 'FileType': FileType, + 'BufferedRandomType': BufferedRandomType, + 'BufferedReaderType': BufferedReaderType, + 'BufferedWriterType': BufferedWriterType, + 'TextWrapperType': TextWrapperType, + 'PyBufferedRandomType': PyBufferedRandomType, + 'PyBufferedReaderType': PyBufferedReaderType, + 'PyBufferedWriterType': PyBufferedWriterType, + 'PyTextWrapperType': PyTextWrapperType, +}) +if ExitType: + _reverse_typemap['ExitType'] = ExitType +if InputType: + _reverse_typemap['InputType'] = InputType + _reverse_typemap['OutputType'] = OutputType +if not IS_PYPY: + _reverse_typemap['WrapperDescriptorType'] = WrapperDescriptorType + _reverse_typemap['MethodDescriptorType'] = MethodDescriptorType + _reverse_typemap['ClassMethodDescriptorType'] = ClassMethodDescriptorType +else: + _reverse_typemap['MemberDescriptorType'] = MemberDescriptorType +if PY3: + _typemap = dict((v, k) for k, v in _reverse_typemap.items()) +else: + _typemap = dict((v, k) for k, v in _reverse_typemap.iteritems()) + +def _unmarshal(string): + return marshal.loads(string) + +def _load_type(name): + return _reverse_typemap[name] + +def _create_type(typeobj, *args): + return typeobj(*args) + +def _create_function(fcode, fglobals, fname=None, fdefaults=None, + fclosure=None, fdict=None, fkwdefaults=None): + # same as FunctionType, but enable passing __dict__ to new function, + # __dict__ is the storehouse for attributes added after function creation + func = FunctionType(fcode, fglobals or dict(), fname, fdefaults, fclosure) + if fdict is not None: + func.__dict__.update(fdict) #XXX: better copy? option to copy? 
+    if fkwdefaults is not None:
+        func.__kwdefaults__ = fkwdefaults
+    # 'recurse' only stores referenced modules/objects in fglobals,
+    # thus we need to make sure that we have __builtins__ as well
+    if "__builtins__" not in func.__globals__:
+        func.__globals__["__builtins__"] = globals()["__builtins__"]
+    # assert id(fglobals) == id(func.__globals__)
+    return func
+
+def _create_code(*args):
+    if PY3 and hasattr(args[-3], 'encode'): #FIXME: from PY2 fails (optcode)
+        args = list(args)
+        if len(args) == 20:
+            args[-3] = args[-3].encode() # co_exceptiontable
+            args[-6] = args[-6].encode() # co_lnotab
+            args[-14] = args[-14].encode() # co_code
+            if args[-4] is not None:
+                args[-4] = args[-4].encode() # co_columntable
+            if args[-5] is not None:
+                args[-5] = args[-5].encode() # co_endlinetable
+        else:
+            args[-3] = args[-3].encode() # co_lnotab
+            args[-10] = args[-10].encode() # co_code
+    if hasattr(CodeType, 'co_exceptiontable'):
+        if len(args) == 20: return CodeType(*args)
+        elif len(args) == 16:
+            argz = (None, None, b'')
+            argz = args[:-4] + args[-5:-4] + args[-4:-2] + argz + args[-2:]
+            return CodeType(*argz)
+        elif len(args) == 15:
+            argz = args[1:-4] + args[-5:-4] + args[-4:-2] + argz + args[-2:]
+            return CodeType(args[0], 0, *argz)
+        argz = args[1:-4] + args[-5:-4] + args[-4:-2] + argz + args[-2:]
+        return CodeType(args[0], 0, 0, *argz)
+    elif hasattr(CodeType, 'co_posonlyargcount'):
+        if len(args) == 20:
+            return CodeType(*(args[:12] + args[13:15] + args[18:]))
+        elif len(args) == 16: return CodeType(*args)
+        elif len(args) == 15: return CodeType(args[0], 0, *args[1:])
+        return CodeType(args[0], 0, 0, *args[1:])
+    elif hasattr(CodeType, 'co_kwonlyargcount'):
+        if len(args) == 20:
+            return CodeType(*(args[:1] + args[2:12] + args[13:15] + args[18:]))
+        elif len(args) == 16: return CodeType(args[0], *args[2:])
+        elif len(args) == 15: return CodeType(*args)
+        return CodeType(args[0], 0, *args[1:])
+    if len(args) == 20:
+        return CodeType(*(args[:1] + args[3:12] + args[13:15] + args[18:]))
+    elif len(args) == 16: return CodeType(args[0], *args[3:])
+    elif len(args) == 15: return CodeType(args[0], *args[2:])
+    return CodeType(*args)
+
+def _create_ftype(ftypeobj, func, args, kwds):
+    if kwds is None:
+        kwds = {}
+    if args is None:
+        args = ()
+    return ftypeobj(func, *args, **kwds)
+
+def _create_lock(locked, *args): #XXX: ignores 'blocking'
+    from threading import Lock
+    lock = Lock()
+    if locked:
+        if not lock.acquire(False):
+            raise UnpicklingError("Cannot acquire lock")
+    return lock
+
+def _create_rlock(count, owner, *args): #XXX: ignores 'blocking'
+    lock = RLockType()
+    if owner is not None:
+        lock._acquire_restore((count, owner))
+    if owner and not lock._is_owned():
+        raise UnpicklingError("Cannot acquire lock")
+    return lock
+
+# thanks to matsjoyce for adding all the different file modes
+def _create_filehandle(name, mode, position, closed, open, strictio, fmode, fdata): # buffering=0
+    # only pickles the handle, not the file contents... good? or StringIO(data)?
+    # (for file contents see: http://effbot.org/librarybook/copy-reg.htm)
+    # NOTE: handle special cases first (are there more special cases?)
+    names = {'<stdin>':sys.__stdin__, '<stdout>':sys.__stdout__,
+             '<stderr>':sys.__stderr__} #XXX: better fileno=(0,1,2) ?
+    if name in list(names.keys()):
+        f = names[name] #XXX: safer "f=sys.stdin"
+    elif name == '<tmpfile>':
+        f = os.tmpfile()
+    elif name == '<fdopen>':
+        import tempfile
+        f = tempfile.TemporaryFile(mode)
+    else:
+        # treat x mode as w mode
+        if "x" in mode and sys.hexversion < 0x03030000:
+            raise ValueError("invalid mode: '%s'" % mode)
+        try:
+            exists = os.path.exists(name)
+        except:
+            exists = False
+        if not exists:
+            if strictio:
+                raise FileNotFoundError("[Errno 2] No such file or directory: '%s'" % name)
+            elif "r" in mode and fmode != FILE_FMODE:
+                name = '<fdopen>' # or os.devnull?
+            current_size = 0 # or maintain position?
+        else:
+            current_size = os.path.getsize(name)
+
+        if position > current_size:
+            if strictio:
+                raise ValueError("invalid buffer size")
+            elif fmode == CONTENTS_FMODE:
+                position = current_size
+        # try to open the file by name
+        # NOTE: has different fileno
+        try:
+            #FIXME: missing: *buffering*, encoding, softspace
+            if fmode == FILE_FMODE:
+                f = open(name, mode if "w" in mode else "w")
+                f.write(fdata)
+                if "w" not in mode:
+                    f.close()
+                    f = open(name, mode)
+            elif name == '<fdopen>': # file did not exist
+                import tempfile
+                f = tempfile.TemporaryFile(mode)
+            elif fmode == CONTENTS_FMODE \
+               and ("w" in mode or "x" in mode):
+                # stop truncation when opening
+                flags = os.O_CREAT
+                if "+" in mode:
+                    flags |= os.O_RDWR
+                else:
+                    flags |= os.O_WRONLY
+                f = os.fdopen(os.open(name, flags), mode)
+                # set name to the correct value
+                if PY3:
+                    r = getattr(f, "buffer", f)
+                    r = getattr(r, "raw", r)
+                    r.name = name
+                else:
+                    if not HAS_CTYPES:
+                        raise ImportError("No module named 'ctypes'")
+                    class FILE(ctypes.Structure):
+                        _fields_ = [("refcount", ctypes.c_long),
+                                    ("type_obj", ctypes.py_object),
+                                    ("file_pointer", ctypes.c_voidp),
+                                    ("name", ctypes.py_object)]
+
+                    class PyObject(ctypes.Structure):
+                        _fields_ = [
+                            ("ob_refcnt", ctypes.c_int),
+                            ("ob_type", ctypes.py_object)
+                        ]
+                    #FIXME: CONTENTS_FMODE fails for pypy due to issue #1233
+                    # https://bitbucket.org/pypy/pypy/issues/1233
+                    ctypes.cast(id(f), ctypes.POINTER(FILE)).contents.name = name
+                    ctypes.cast(id(name), ctypes.POINTER(PyObject)).contents.ob_refcnt += 1
+                    assert f.name == name
+            else:
+                f = open(name, mode)
+        except (IOError, FileNotFoundError):
+            err = sys.exc_info()[1]
+            raise UnpicklingError(err)
+    if closed:
+        f.close()
+    elif position >= 0 and fmode != HANDLE_FMODE:
+        f.seek(position)
+    return f
+
+def _create_stringi(value, position, closed):
+    f = StringIO(value)
+    if closed: f.close()
+    else: f.seek(position)
+    return f
+
+def _create_stringo(value, position, closed):
+    f = StringIO()
+    if closed: f.close()
+    else:
+        f.write(value)
+        f.seek(position)
+    return f
+
+class _itemgetter_helper(object):
+    def __init__(self):
+        self.items = []
+    def __getitem__(self, item):
+        self.items.append(item)
+        return
+
+class _attrgetter_helper(object):
+    def __init__(self, attrs, index=None):
+        self.attrs = attrs
+        self.index = index
+    def __getattribute__(self, attr):
+        attrs = object.__getattribute__(self, "attrs")
+        index = object.__getattribute__(self, "index")
+        if index is None:
+            index = len(attrs)
+            attrs.append(attr)
+        else:
+            attrs[index] = ".".join([attrs[index], attr])
+        return type(self)(attrs, index)
+
+class _dictproxy_helper(dict):
+    def __ror__(self, a):
+        return a
+
+_dictproxy_helper_instance = _dictproxy_helper()
+
+__d = {}
+try:
+    # In CPython 3.9 and later, this trick can be used to exploit the
+    # implementation of the __or__ function of MappingProxyType to get the true
+    # mapping referenced by the proxy.
It may work for other implementations, + # but is not guaranteed. + MAPPING_PROXY_TRICK = __d is (DictProxyType(__d) | _dictproxy_helper_instance) +except: + MAPPING_PROXY_TRICK = False +del __d + +# _CELL_REF and _CELL_EMPTY are used to stay compatible with versions of dill +# whose _create_cell functions do not have a default value. +# _CELL_REF can be safely removed entirely (replaced by empty tuples for calls +# to _create_cell) once breaking changes are allowed. +_CELL_REF = None +_CELL_EMPTY = Sentinel('_CELL_EMPTY') + +if PY3: + def _create_cell(contents=None): + if contents is not _CELL_EMPTY: + value = contents + return (lambda: value).__closure__[0] + +else: + def _create_cell(contents=None): + if contents is not _CELL_EMPTY: + value = contents + return (lambda: value).func_closure[0] + + +def _create_weakref(obj, *args): + from weakref import ref + if obj is None: # it's dead + if PY3: + from collections import UserDict + else: + from UserDict import UserDict + return ref(UserDict(), *args) + return ref(obj, *args) + +def _create_weakproxy(obj, callable=False, *args): + from weakref import proxy + if obj is None: # it's dead + if callable: return proxy(lambda x:x, *args) + if PY3: + from collections import UserDict + else: + from UserDict import UserDict + return proxy(UserDict(), *args) + return proxy(obj, *args) + +def _eval_repr(repr_str): + return eval(repr_str) + +def _create_array(f, args, state, npdict=None): + #array = numpy.core.multiarray._reconstruct(*args) + array = f(*args) + array.__setstate__(state) + if npdict is not None: # we also have saved state in __dict__ + array.__dict__.update(npdict) + return array + +def _create_dtypemeta(scalar_type): + if NumpyDType is True: __hook__() # a bit hacky I think + if scalar_type is None: + return NumpyDType + return type(NumpyDType(scalar_type)) + +if OLD37: + def _create_namedtuple(name, fieldnames, modulename, defaults=None): + class_ = _import_module(modulename + '.' + name, safe=True) + if class_ is not None: + return class_ + import collections + t = collections.namedtuple(name, fieldnames) + t.__module__ = modulename + return t +else: + def _create_namedtuple(name, fieldnames, modulename, defaults=None): + class_ = _import_module(modulename + '.' + name, safe=True) + if class_ is not None: + return class_ + import collections + t = collections.namedtuple(name, fieldnames, defaults=defaults, module=modulename) + return t + +def _getattr(objclass, name, repr_str): + # hack to grab the reference directly + try: #XXX: works only for __builtin__ ? + attr = repr_str.split("'")[3] + return eval(attr+'.__dict__["'+name+'"]') + except: + try: + attr = objclass.__dict__ + if type(attr) is DictProxyType: + attr = attr[name] + else: + attr = getattr(objclass,name) + except: + attr = getattr(objclass,name) + return attr + +def _get_attr(self, name): + # stop recursive pickling + return getattr(self, name, None) or getattr(__builtin__, name) + +def _dict_from_dictproxy(dictproxy): + _dict = dictproxy.copy() # convert dictproxy to dict + _dict.pop('__dict__', None) + _dict.pop('__weakref__', None) + _dict.pop('__prepare__', None) + return _dict + +def _import_module(import_name, safe=False): + try: + if '.' 
in import_name: + items = import_name.split('.') + module = '.'.join(items[:-1]) + obj = items[-1] + else: + return __import__(import_name) + return getattr(__import__(module, None, None, [obj]), obj) + except (ImportError, AttributeError): + if safe: + return None + raise + +def _locate_function(obj, pickler=None): + if obj.__module__ in ['__main__', None] or \ + pickler and is_dill(pickler, child=False) and pickler._session and obj.__module__ == pickler._main.__name__: + return False + + found = _import_module(obj.__module__ + '.' + obj.__name__, safe=True) + return found is obj + + +def _setitems(dest, source): + for k, v in source.items(): + dest[k] = v + + +def _save_with_postproc(pickler, reduction, is_pickler_dill=None, obj=Getattr.NO_DEFAULT, postproc_list=None): + if obj is Getattr.NO_DEFAULT: + obj = Reduce(reduction) # pragma: no cover + + if is_pickler_dill is None: + is_pickler_dill = is_dill(pickler, child=True) + if is_pickler_dill: + # assert id(obj) not in pickler._postproc, str(obj) + ' already pushed on stack!' + # if not hasattr(pickler, 'x'): pickler.x = 0 + # print(pickler.x*' ', 'push', obj, id(obj), pickler._recurse) + # pickler.x += 1 + if postproc_list is None: + postproc_list = [] + + # Recursive object not supported. Default to a global instead. + if id(obj) in pickler._postproc: + name = '%s.%s ' % (obj.__module__, getattr(obj, '__qualname__', obj.__name__)) if hasattr(obj, '__module__') else '' + warnings.warn('Cannot pickle %r: %shas recursive self-references that trigger a RecursionError.' % (obj, name), PicklingWarning) + pickler.save_global(obj) + return + pickler._postproc[id(obj)] = postproc_list + + # TODO: Use state_setter in Python 3.8 to allow for faster cPickle implementations + pickler.save_reduce(*reduction, obj=obj) + + if is_pickler_dill: + # pickler.x -= 1 + # print(pickler.x*' ', 'pop', obj, id(obj)) + postproc = pickler._postproc.pop(id(obj)) + # assert postproc_list == postproc, 'Stack tampered!' + for reduction in reversed(postproc): + if reduction[0] is _setitems: + # use the internal machinery of pickle.py to speedup when + # updating a dictionary in postproc + dest, source = reduction[1] + if source: + pickler.write(pickler.get(pickler.memo[id(dest)][0])) + pickler._batch_setitems(iter(source.items())) + else: + # Updating with an empty dictionary. Same as doing nothing. + continue + else: + pickler.save_reduce(*reduction) + # pop None created by calling preprocessing step off stack + if PY3: + pickler.write(bytes('0', 'UTF-8')) + else: + pickler.write('0') + +#@register(CodeType) +#def save_code(pickler, obj): +# log.info("Co: %s" % obj) +# pickler.save_reduce(_unmarshal, (marshal.dumps(obj),), obj=obj) +# log.info("# Co") +# return + +# The following function is based on 'save_codeobject' from 'cloudpickle' +# Copyright (c) 2012, Regents of the University of California. +# Copyright (c) 2009 `PiCloud, Inc. `_. 
+# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
+@register(CodeType)
+def save_code(pickler, obj):
+    log.info("Co: %s" % obj)
+    if PY3:
+        if hasattr(obj, "co_exceptiontable"):
+            args = (
+                obj.co_argcount, obj.co_posonlyargcount,
+                obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+                obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+                obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
+                obj.co_firstlineno, obj.co_lnotab, obj.co_endlinetable,
+                obj.co_columntable, obj.co_exceptiontable, obj.co_freevars,
+                obj.co_cellvars
+            )
+        elif hasattr(obj, "co_posonlyargcount"):
+            args = (
+                obj.co_argcount, obj.co_posonlyargcount,
+                obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+                obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+                obj.co_varnames, obj.co_filename, obj.co_name,
+                obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
+                obj.co_cellvars
+            )
+        else:
+            args = (
+                obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
+                obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
+                obj.co_names, obj.co_varnames, obj.co_filename,
+                obj.co_name, obj.co_firstlineno, obj.co_lnotab,
+                obj.co_freevars, obj.co_cellvars
+            )
+    else:
+        args = (
+            obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags,
+            obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames,
+            obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab,
+            obj.co_freevars, obj.co_cellvars
+        )
+
+    pickler.save_reduce(_create_code, args, obj=obj)
+    log.info("# Co")
+    return
+
+@register(dict)
+def save_module_dict(pickler, obj):
+    if is_dill(pickler, child=False) and obj == pickler._main.__dict__ and \
+            not (pickler._session and pickler._first_pass):
+        log.info("D1: <dict%s" % str(obj.__repr__).split('dict')[-1]) # obj
+        if PY3:
+            pickler.write(bytes('c__builtin__\n__main__\n', 'UTF-8'))
+        else:
+            pickler.write('c__builtin__\n__main__\n')
+        log.info("# D1")
+    elif (not is_dill(pickler, child=False)) and (obj == _main_module.__dict__):
+        log.info("D3: <dict%s" % str(obj.__repr__).split('dict')[-1]) # obj
+        if PY3:
+            pickler.write(bytes('c__main__\n__dict__\n', 'UTF-8'))
+        else:
+            pickler.write('c__main__\n__dict__\n')   #XXX: works in general?
+        log.info("# D3")
+    elif '__name__' in obj and obj != _main_module.__dict__ \
+            and type(obj['__name__']) is str \
+            and obj is getattr(_import_module(obj['__name__'], True), '__dict__', None):
+        log.info("D4: <dict%s" % str(obj.__repr__).split('dict')[-1]) # obj
+        if PY3:
+            pickler.write(bytes('c%s\n__dict__\n' % obj['__name__'], 'UTF-8'))
+        else:
+            pickler.write('c%s\n__dict__\n' % obj['__name__'])
+        log.info("# D4")
+    else:
+        log.info("D2: <dict%s" % str(obj.__repr__).split('dict')[-1]) # obj
+        if is_dill(pickler, child=False) and pickler._session:
+            # we only care about session the first pass thru
+            pickler._first_pass = False
+        StockPickler.save_dict(pickler, obj)
+        log.info("# D2")
+    return
+
+if MAPPING_PROXY_TRICK:
+    def save_dict_view(dicttype):
+        def save_dict_view_for_function(func):
+            def _save_dict_view(pickler, obj):
+                log.info("Dkvi: <%s>" % (obj,))
+                mapping = obj.mapping | _dictproxy_helper_instance
+                pickler.save_reduce(func, (mapping,), obj=obj)
+                log.info("# Dkvi")
+            return _save_dict_view
+        return [
+            (funcname, save_dict_view_for_function(getattr(dicttype, funcname)))
+            for funcname in ('keys', 'values', 'items')
+        ]
+else:
+    # The following functions are based on 'cloudpickle'
+    # https://github.com/cloudpipe/cloudpickle/blob/5d89947288a18029672596a4d719093cc6d5a412/cloudpickle/cloudpickle.py#L922-L940
+    # Copyright (c) 2012, Regents of the University of California.
+    # Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
+ # License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE + def save_dict_view(dicttype): + def save_dict_keys(pickler, obj): + log.info("Dk: <%s>" % (obj,)) + dict_constructor = _shims.Reduce(dicttype.fromkeys, (list(obj),)) + pickler.save_reduce(dicttype.keys, (dict_constructor,), obj=obj) + log.info("# Dk") + + def save_dict_values(pickler, obj): + log.info("Dv: <%s>" % (obj,)) + dict_constructor = _shims.Reduce(dicttype, (enumerate(obj),)) + pickler.save_reduce(dicttype.values, (dict_constructor,), obj=obj) + log.info("# Dv") + + def save_dict_items(pickler, obj): + log.info("Di: <%s>" % (obj,)) + pickler.save_reduce(dicttype.items, (dicttype(obj),), obj=obj) + log.info("# Di") + + return ( + ('keys', save_dict_keys), + ('values', save_dict_values), + ('items', save_dict_items) + ) + +for __dicttype in ( + dict, + OrderedDict +): + __obj = __dicttype() + for __funcname, __savefunc in save_dict_view(__dicttype): + __tview = type(getattr(__obj, __funcname)()) + if __tview not in Pickler.dispatch: + Pickler.dispatch[__tview] = __savefunc +del __dicttype, __obj, __funcname, __tview, __savefunc + + +@register(ClassType) +def save_classobj(pickler, obj): #FIXME: enable pickler._byref + if not _locate_function(obj, pickler): + log.info("C1: %s" % obj) + pickler.save_reduce(ClassType, (obj.__name__, obj.__bases__, + obj.__dict__), obj=obj) + #XXX: or obj.__dict__.copy()), obj=obj) ? + log.info("# C1") + else: + log.info("C2: %s" % obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + log.info("# C2") + return + +@register(LockType) +def save_lock(pickler, obj): + log.info("Lo: %s" % obj) + pickler.save_reduce(_create_lock, (obj.locked(),), obj=obj) + log.info("# Lo") + return + +@register(RLockType) +def save_rlock(pickler, obj): + log.info("RL: %s" % obj) + r = obj.__repr__() # don't use _release_save as it unlocks the lock + count = int(r.split('count=')[1].split()[0].rstrip('>')) + owner = int(r.split('owner=')[1].split()[0]) if PY3 else getattr(obj, '_RLock__owner') + pickler.save_reduce(_create_rlock, (count,owner,), obj=obj) + log.info("# RL") + return + +if not IS_PYPY2: + #@register(SocketType) #FIXME: causes multiprocess test_pickling FAIL + def save_socket(pickler, obj): + log.info("So: %s" % obj) + pickler.save_reduce(*reduce_socket(obj)) + log.info("# So") + return + +if sys.hexversion <= 0x3050000: + @register(ItemGetterType) + def save_itemgetter(pickler, obj): + log.info("Ig: %s" % obj) + helper = _itemgetter_helper() + obj(helper) + pickler.save_reduce(type(obj), tuple(helper.items), obj=obj) + log.info("# Ig") + return + + @register(AttrGetterType) + def save_attrgetter(pickler, obj): + log.info("Ag: %s" % obj) + attrs = [] + helper = _attrgetter_helper(attrs) + obj(helper) + pickler.save_reduce(type(obj), tuple(attrs), obj=obj) + log.info("# Ag") + return + +def _save_file(pickler, obj, open_): + if obj.closed: + position = 0 + else: + obj.flush() + if obj in (sys.__stdout__, sys.__stderr__, sys.__stdin__): + position = -1 + else: + position = obj.tell() + if is_dill(pickler, child=True) and pickler._fmode == FILE_FMODE: + f = open_(obj.name, "r") + fdata = f.read() + f.close() + else: + fdata = "" + if is_dill(pickler, child=True): + strictio = pickler._strictio + fmode = pickler._fmode + else: + strictio = False + fmode = 0 # HANDLE_FMODE + pickler.save_reduce(_create_filehandle, (obj.name, obj.mode, position, + obj.closed, open_, strictio, + fmode, fdata), obj=obj) + return + + 
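+# Illustrative sketch (not part of dill): the same @register pattern used by
+# the savers above also works for user-defined types. 'Point' and its reducer
+# below are hypothetical names.
+#
+#     class Point(object):
+#         def __init__(self, x, y):
+#             self.x, self.y = x, y
+#
+#     @register(Point)
+#     def save_point(pickler, obj):
+#         # reduce to the constructor and its arguments, like the savers above
+#         pickler.save_reduce(Point, (obj.x, obj.y), obj=obj)
+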
+@register(FileType) #XXX: in 3.x has buffer=0, needs different _create? +@register(BufferedRandomType) +@register(BufferedReaderType) +@register(BufferedWriterType) +@register(TextWrapperType) +def save_file(pickler, obj): + log.info("Fi: %s" % obj) + f = _save_file(pickler, obj, open) + log.info("# Fi") + return f + +if PyTextWrapperType: + @register(PyBufferedRandomType) + @register(PyBufferedReaderType) + @register(PyBufferedWriterType) + @register(PyTextWrapperType) + def save_file(pickler, obj): + log.info("Fi: %s" % obj) + f = _save_file(pickler, obj, _open) + log.info("# Fi") + return f + +# The following two functions are based on 'saveCStringIoInput' +# and 'saveCStringIoOutput' from spickle +# Copyright (c) 2011 by science+computing ag +# License: http://www.apache.org/licenses/LICENSE-2.0 +if InputType: + @register(InputType) + def save_stringi(pickler, obj): + log.info("Io: %s" % obj) + if obj.closed: + value = ''; position = 0 + else: + value = obj.getvalue(); position = obj.tell() + pickler.save_reduce(_create_stringi, (value, position, \ + obj.closed), obj=obj) + log.info("# Io") + return + + @register(OutputType) + def save_stringo(pickler, obj): + log.info("Io: %s" % obj) + if obj.closed: + value = ''; position = 0 + else: + value = obj.getvalue(); position = obj.tell() + pickler.save_reduce(_create_stringo, (value, position, \ + obj.closed), obj=obj) + log.info("# Io") + return + +if 0x2050000 <= sys.hexversion < 0x3010000: + @register(PartialType) + def save_functor(pickler, obj): + log.info("Fu: %s" % obj) + pickler.save_reduce(_create_ftype, (type(obj), obj.func, obj.args, + obj.keywords), obj=obj) + log.info("# Fu") + return + +if LRUCacheType is not None: + from functools import lru_cache + @register(LRUCacheType) + def save_lru_cache(pickler, obj): + log.info("LRU: %s" % obj) + if OLD39: + kwargs = obj.cache_info() + args = (kwargs.maxsize,) + else: + kwargs = obj.cache_parameters() + args = (kwargs['maxsize'], kwargs['typed']) + if args != lru_cache.__defaults__: + wrapper = Reduce(lru_cache, args, is_callable=True) + else: + wrapper = lru_cache + pickler.save_reduce(wrapper, (obj.__wrapped__,), obj=obj) + log.info("# LRU") + return + +@register(SuperType) +def save_super(pickler, obj): + log.info("Su: %s" % obj) + pickler.save_reduce(super, (obj.__thisclass__, obj.__self__), obj=obj) + log.info("# Su") + return + +if OLDER or not PY3: + @register(BuiltinMethodType) + def save_builtin_method(pickler, obj): + if obj.__self__ is not None: + if obj.__self__ is __builtin__: + module = 'builtins' if PY3 else '__builtin__' + _t = "B1" + log.info("%s: %s" % (_t, obj)) + else: + module = obj.__self__ + _t = "B3" + log.info("%s: %s" % (_t, obj)) + if is_dill(pickler, child=True): + _recurse = pickler._recurse + pickler._recurse = False + pickler.save_reduce(_get_attr, (module, obj.__name__), obj=obj) + if is_dill(pickler, child=True): + pickler._recurse = _recurse + log.info("# %s" % _t) + else: + log.info("B2: %s" % obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + log.info("# B2") + return + + @register(MethodType) #FIXME: fails for 'hidden' or 'name-mangled' classes + def save_instancemethod0(pickler, obj):# example: cStringIO.StringI + log.info("Me: %s" % obj) #XXX: obj.__dict__ handled elsewhere? 
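+    # A bound method reduces to its components: in Python 3 it is rebuilt as
+    # MethodType(obj.__func__, obj.__self__), while in Python 2 the defining
+    # class is carried along as well; the function and the instance are
+    # themselves pickled recursively.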
+    if PY3:
+        pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj)
+    else:
+        pickler.save_reduce(MethodType, (obj.im_func, obj.im_self,
+                                         obj.im_class), obj=obj)
+    log.info("# Me")
+    return
+
+if sys.hexversion >= 0x20500f0:
+    if not IS_PYPY:
+        @register(MemberDescriptorType)
+        @register(GetSetDescriptorType)
+        @register(MethodDescriptorType)
+        @register(WrapperDescriptorType)
+        @register(ClassMethodDescriptorType)
+        def save_wrapper_descriptor(pickler, obj):
+            log.info("Wr: %s" % obj)
+            pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
+                                           obj.__repr__()), obj=obj)
+            log.info("# Wr")
+            return
+    else:
+        @register(MemberDescriptorType)
+        @register(GetSetDescriptorType)
+        def save_wrapper_descriptor(pickler, obj):
+            log.info("Wr: %s" % obj)
+            pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
+                                           obj.__repr__()), obj=obj)
+            log.info("# Wr")
+            return
+
+    @register(MethodWrapperType)
+    def save_instancemethod(pickler, obj):
+        log.info("Mw: %s" % obj)
+        if IS_PYPY2 and obj.__self__ is None and obj.im_class:
+            # Can be a class method in PyPy 2 if __self__ is None
+            pickler.save_reduce(getattr, (obj.im_class, obj.__name__), obj=obj)
+            return
+        pickler.save_reduce(getattr, (obj.__self__, obj.__name__), obj=obj)
+        log.info("# Mw")
+        return
+
+elif not IS_PYPY:
+    @register(MethodDescriptorType)
+    @register(WrapperDescriptorType)
+    def save_wrapper_descriptor(pickler, obj):
+        log.info("Wr: %s" % obj)
+        pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
+                                       obj.__repr__()), obj=obj)
+        log.info("# Wr")
+        return
+
+@register(CellType)
+def save_cell(pickler, obj):
+    try:
+        f = obj.cell_contents
+    except ValueError: # cell is empty
+        log.info("Ce3: %s" % obj)
+        # _shims._CELL_EMPTY is defined in _shims.py to support PyPy 2.7.
+        # It unpickles to a sentinel object _dill._CELL_EMPTY, also created in
+        # _shims.py. This object is not present in Python 3 because the cell's
+        # contents can be deleted in newer versions of Python. The reduce object
+        # will instead unpickle to None if unpickled in Python 3.
+
+        # When breaking changes are made to dill, (_shims._CELL_EMPTY,) can
+        # be replaced by () OR the delattr function can be removed depending on
+        # whichever is more convenient.
+        pickler.save_reduce(_create_cell, (_shims._CELL_EMPTY,), obj=obj)
+        # Call the function _delattr on the cell's cell_contents attribute
+        # The result of this function call will be None
+        pickler.save_reduce(_shims._delattr, (obj, 'cell_contents'))
+        # pop None created by calling _delattr off stack
+        if PY3:
+            pickler.write(bytes('0', 'UTF-8'))
+        else:
+            pickler.write('0')
+        log.info("# Ce3")
+        return
+    if is_dill(pickler, child=True):
+        if id(f) in pickler._postproc:
+            # Already seen. Add to its postprocessing.
+            postproc = pickler._postproc[id(f)]
+        else:
+            # Haven't seen it. Add to the highest possible object and set its
+            # value as late as possible to prevent cycle.
+            postproc = next(iter(pickler._postproc.values()), None)
+        if postproc is not None:
+            log.info("Ce2: %s" % obj)
+            # _CELL_REF is defined in _shims.py to support older versions of
+            # dill.
When breaking changes are made to dill, (_CELL_REF,) can + # be replaced by () + pickler.save_reduce(_create_cell, (_CELL_REF,), obj=obj) + postproc.append((_shims._setattr, (obj, 'cell_contents', f))) + log.info("# Ce2") + return + log.info("Ce1: %s" % obj) + pickler.save_reduce(_create_cell, (f,), obj=obj) + log.info("# Ce1") + return + +if MAPPING_PROXY_TRICK: + @register(DictProxyType) + def save_dictproxy(pickler, obj): + log.info("Mp: %s" % obj) + mapping = obj | _dictproxy_helper_instance + pickler.save_reduce(DictProxyType, (mapping,), obj=obj) + log.info("# Mp") + return +elif not IS_PYPY: + if not OLD33: + @register(DictProxyType) + def save_dictproxy(pickler, obj): + log.info("Mp: %s" % obj) + pickler.save_reduce(DictProxyType, (obj.copy(),), obj=obj) + log.info("# Mp") + return + else: + # The following function is based on 'saveDictProxy' from spickle + # Copyright (c) 2011 by science+computing ag + # License: http://www.apache.org/licenses/LICENSE-2.0 + @register(DictProxyType) + def save_dictproxy(pickler, obj): + log.info("Dp: %s" % obj) + attr = obj.get('__dict__') + #pickler.save_reduce(_create_dictproxy, (attr,'nested'), obj=obj) + if type(attr) == GetSetDescriptorType and attr.__name__ == "__dict__" \ + and getattr(attr.__objclass__, "__dict__", None) == obj: + pickler.save_reduce(getattr, (attr.__objclass__,"__dict__"),obj=obj) + log.info("# Dp") + return + # all bad below... so throw ReferenceError or TypeError + raise ReferenceError("%s does not reference a class __dict__" % obj) + +@register(SliceType) +def save_slice(pickler, obj): + log.info("Sl: %s" % obj) + pickler.save_reduce(slice, (obj.start, obj.stop, obj.step), obj=obj) + log.info("# Sl") + return + +@register(XRangeType) +@register(EllipsisType) +@register(NotImplementedType) +def save_singleton(pickler, obj): + log.info("Si: %s" % obj) + pickler.save_reduce(_eval_repr, (obj.__repr__(),), obj=obj) + log.info("# Si") + return + +def _proxy_helper(obj): # a dead proxy returns a reference to None + """get memory address of proxy's reference object""" + _repr = repr(obj) + try: _str = str(obj) + except ReferenceError: # it's a dead proxy + return id(None) + if _str == _repr: return id(obj) # it's a repr + try: # either way, it's a proxy from here + address = int(_str.rstrip('>').split(' at ')[-1], base=16) + except ValueError: # special case: proxy of a 'type' + if not IS_PYPY: + address = int(_repr.rstrip('>').split(' at ')[-1], base=16) + else: + objects = iter(gc.get_objects()) + for _obj in objects: + if repr(_obj) == _str: return id(_obj) + # all bad below... nothing found so throw ReferenceError + msg = "Cannot reference object for proxy at '%s'" % id(obj) + raise ReferenceError(msg) + return address + +def _locate_object(address, module=None): + """get object located at the given memory address (inverse of id(obj))""" + special = [None, True, False] #XXX: more...? + for obj in special: + if address == id(obj): return obj + if module: + if PY3: + objects = iter(module.__dict__.values()) + else: + objects = module.__dict__.itervalues() + else: objects = iter(gc.get_objects()) + for obj in objects: + if address == id(obj): return obj + # all bad below... 
nothing found so throw ReferenceError or TypeError + try: address = hex(address) + except TypeError: + raise TypeError("'%s' is not a valid memory address" % str(address)) + raise ReferenceError("Cannot reference object at '%s'" % address) + +@register(ReferenceType) +def save_weakref(pickler, obj): + refobj = obj() + log.info("R1: %s" % obj) + #refobj = ctypes.pythonapi.PyWeakref_GetObject(obj) # dead returns "None" + pickler.save_reduce(_create_weakref, (refobj,), obj=obj) + log.info("# R1") + return + +@register(ProxyType) +@register(CallableProxyType) +def save_weakproxy(pickler, obj): + refobj = _locate_object(_proxy_helper(obj)) + try: + _t = "R2" + log.info("%s: %s" % (_t, obj)) + except ReferenceError: + _t = "R3" + log.info("%s: %s" % (_t, sys.exc_info()[1])) + #callable = bool(getattr(refobj, '__call__', None)) + if type(obj) is CallableProxyType: callable = True + else: callable = False + pickler.save_reduce(_create_weakproxy, (refobj, callable), obj=obj) + log.info("# %s" % _t) + return + +def _is_builtin_module(module): + if not hasattr(module, "__file__"): return True + # If a module file name starts with prefix, it should be a builtin + # module, so should always be pickled as a reference. + names = ["base_prefix", "base_exec_prefix", "exec_prefix", "prefix", "real_prefix"] + return any(os.path.realpath(module.__file__).startswith(os.path.realpath(getattr(sys, name))) + for name in names if hasattr(sys, name)) or \ + module.__file__.endswith(EXTENSION_SUFFIXES) or \ + 'site-packages' in module.__file__ + +@register(ModuleType) +def save_module(pickler, obj): + if False: #_use_diff: + if obj.__name__.split('.', 1)[0] != "dill": + try: + changed = diff.whats_changed(obj, seen=pickler._diff_cache)[0] + except RuntimeError: # not memorised module, probably part of dill + pass + else: + log.info("M2: %s with diff" % obj) + log.info("Diff: %s", changed.keys()) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj, + state=changed) + log.info("# M2") + return + + log.info("M1: %s" % obj) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj) + log.info("# M1") + else: + builtin_mod = _is_builtin_module(obj) + if obj.__name__ not in ("builtins", "dill", "dill._dill") and not builtin_mod or \ + is_dill(pickler, child=True) and obj is pickler._main: + log.info("M1: %s" % obj) + _main_dict = obj.__dict__.copy() #XXX: better no copy? option to copy? 
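+            # drop entries that the import machinery recreates on load, so
+            # they are not duplicated in the module state pickled below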
+ [_main_dict.pop(item, None) for item in singletontypes + + ["__builtins__", "__loader__"]] + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj, + state=_main_dict) + log.info("# M1") + elif PY3 and obj.__name__ == "dill._dill": + log.info("M2: %s" % obj) + pickler.save_global(obj, name="_dill") + log.info("# M2") + else: + log.info("M2: %s" % obj) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj) + log.info("# M2") + return + return + +@register(TypeType) +def save_type(pickler, obj, postproc_list=None): + if obj in _typemap: + log.info("T1: %s" % obj) + pickler.save_reduce(_load_type, (_typemap[obj],), obj=obj) + log.info("# T1") + elif obj.__bases__ == (tuple,) and all([hasattr(obj, attr) for attr in ('_fields','_asdict','_make','_replace')]): + # special case: namedtuples + log.info("T6: %s" % obj) + if OLD37 or (not obj._field_defaults): + pickler.save_reduce(_create_namedtuple, (obj.__name__, obj._fields, obj.__module__), obj=obj) + else: + defaults = [obj._field_defaults[field] for field in obj._fields if field in obj._field_defaults] + pickler.save_reduce(_create_namedtuple, (obj.__name__, obj._fields, obj.__module__, defaults), obj=obj) + log.info("# T6") + return + + # special cases: NoneType, NotImplementedType, EllipsisType + elif obj is type(None): + log.info("T7: %s" % obj) + #XXX: pickler.save_reduce(type, (None,), obj=obj) + if PY3: + pickler.write(bytes('c__builtin__\nNoneType\n', 'UTF-8')) + else: + pickler.write('c__builtin__\nNoneType\n') + log.info("# T7") + elif obj is NotImplementedType: + log.info("T7: %s" % obj) + pickler.save_reduce(type, (NotImplemented,), obj=obj) + log.info("# T7") + elif obj is EllipsisType: + log.info("T7: %s" % obj) + pickler.save_reduce(type, (Ellipsis,), obj=obj) + log.info("# T7") + + else: + obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + _byref = getattr(pickler, '_byref', None) + obj_recursive = id(obj) in getattr(pickler, '_postproc', ()) + incorrectly_named = not _locate_function(obj, pickler) + if not _byref and not obj_recursive and incorrectly_named: # not a function, but the name was held over + if issubclass(type(obj), type): + # thanks to Tom Stepleton pointing out pickler._session unneeded + _t = 'T2' + log.info("%s: %s" % (_t, obj)) + _dict = _dict_from_dictproxy(obj.__dict__) + else: + _t = 'T3' + log.info("%s: %s" % (_t, obj)) + _dict = obj.__dict__ + #print (_dict) + #print ("%s\n%s" % (type(obj), obj.__name__)) + #print ("%s\n%s" % (obj.__bases__, obj.__dict__)) + for name in _dict.get("__slots__", []): + del _dict[name] + if PY3 and obj_name != obj.__name__: + if postproc_list is None: + postproc_list = [] + postproc_list.append((setattr, (obj, '__qualname__', obj_name))) + _save_with_postproc(pickler, (_create_type, ( + type(obj), obj.__name__, obj.__bases__, _dict + )), obj=obj, postproc_list=postproc_list) + log.info("# %s" % _t) + else: + log.info("T4: %s" % obj) + if incorrectly_named: + warnings.warn('Cannot locate reference to %r.' % (obj,), PicklingWarning) + if obj_recursive: + warnings.warn('Cannot pickle %r: %s.%s has recursive self-references that trigger a RecursionError.' 
% (obj, obj.__module__, obj_name), PicklingWarning) + #print (obj.__dict__) + #print ("%s\n%s" % (type(obj), obj.__name__)) + #print ("%s\n%s" % (obj.__bases__, obj.__dict__)) + StockPickler.save_global(pickler, obj, name=obj_name) + log.info("# T4") + return + +# Error in PyPy 2.7 when adding ABC support +if IS_PYPY2: + @register(FrameType) + def save_frame(pickler, obj): + raise PicklingError('Cannot pickle a Python stack frame') + +@register(property) +def save_property(pickler, obj): + log.info("Pr: %s" % obj) + pickler.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), + obj=obj) + log.info("# Pr") + +@register(staticmethod) +@register(classmethod) +def save_classmethod(pickler, obj): + log.info("Cm: %s" % obj) + im_func = '__func__' if PY3 else 'im_func' + try: + orig_func = getattr(obj, im_func) + except AttributeError: # Python 2.6 + orig_func = obj.__get__(None, object) + if isinstance(obj, classmethod): + orig_func = getattr(orig_func, im_func) # Unbind + + # if PY3: + # if type(obj.__dict__) is dict: + # if obj.__dict__: + # state = obj.__dict__ + # else: + # state = None + # else: + # state = (None, {'__dict__', obj.__dict__}) + # else: + # state = None + + pickler.save_reduce(type(obj), (orig_func,), obj=obj) + log.info("# Cm") + +@register(FunctionType) +def save_function(pickler, obj): + if not _locate_function(obj, pickler): + log.info("F1: %s" % obj) + _recurse = getattr(pickler, '_recurse', None) + _byref = getattr(pickler, '_byref', None) + _postproc = getattr(pickler, '_postproc', None) + _main_modified = getattr(pickler, '_main_modified', None) + _original_main = getattr(pickler, '_original_main', __builtin__)#'None' + postproc_list = [] + if _recurse: + # recurse to get all globals referred to by obj + from .detect import globalvars + globs_copy = globalvars(obj, recurse=True, builtin=True) + + # Add the name of the module to the globs dictionary to prevent + # the duplication of the dictionary. Pickle the unpopulated + # globals dictionary and set the remaining items after the function + # is created to correctly handle recursion. + globs = {'__name__': obj.__module__} + else: + globs_copy = obj.__globals__ if PY3 else obj.func_globals + + # If the globals is the __dict__ from the module being saved as a + # session, substitute it by the dictionary being actually saved. + if _main_modified and globs_copy is _original_main.__dict__: + globs_copy = getattr(pickler, '_main', _original_main).__dict__ + globs = globs_copy + # If the globals is a module __dict__, do not save it in the pickle. + elif globs_copy is not None and obj.__module__ is not None and \ + getattr(_import_module(obj.__module__, True), '__dict__', None) is globs_copy: + globs = globs_copy + else: + globs = {'__name__': obj.__module__} + + if globs_copy is not None and globs is not globs_copy: + # In the case that the globals are copied, we need to ensure that + # the globals dictionary is updated when all objects in the + # dictionary are already created. 
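+            # Concretely: if some object already on the pickling stack is one
+            # of the copied globals, defer the _setitems(globs, globs_copy)
+            # update to that object's postprocessing; otherwise fall through
+            # and schedule it on this function's own postproc_list below.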
+            if PY3:
+                glob_ids = {id(g) for g in globs_copy.values()}
+            else:
+                glob_ids = {id(g) for g in globs_copy.itervalues()}
+            for stack_element in _postproc:
+                if stack_element in glob_ids:
+                    _postproc[stack_element].append((_setitems, (globs, globs_copy)))
+                    break
+            else:
+                postproc_list.append((_setitems, (globs, globs_copy)))
+
+        if PY3:
+            closure = obj.__closure__
+            state_dict = {}
+            for fattrname in ('__doc__', '__kwdefaults__', '__annotations__'):
+                fattr = getattr(obj, fattrname, None)
+                if fattr is not None:
+                    state_dict[fattrname] = fattr
+            if obj.__qualname__ != obj.__name__:
+                state_dict['__qualname__'] = obj.__qualname__
+            if '__name__' not in globs or obj.__module__ != globs['__name__']:
+                state_dict['__module__'] = obj.__module__
+
+            state = obj.__dict__
+            if type(state) is not dict:
+                state_dict['__dict__'] = state
+                state = None
+            if state_dict:
+                state = state, state_dict
+
+            _save_with_postproc(pickler, (_create_function, (
+                obj.__code__, globs, obj.__name__, obj.__defaults__,
+                closure
+            ), state), obj=obj, postproc_list=postproc_list)
+        else:
+            closure = obj.func_closure
+            if obj.__doc__ is not None:
+                postproc_list.append((setattr, (obj, '__doc__', obj.__doc__)))
+            if '__name__' not in globs or obj.__module__ != globs['__name__']:
+                postproc_list.append((setattr, (obj, '__module__', obj.__module__)))
+            if obj.__dict__:
+                postproc_list.append((setattr, (obj, '__dict__', obj.__dict__)))
+
+            _save_with_postproc(pickler, (_create_function, (
+                obj.func_code, globs, obj.func_name, obj.func_defaults,
+                closure
+            )), obj=obj, postproc_list=postproc_list)
+
+        # Lift closure cell update to earliest function (#458)
+        if _postproc:
+            topmost_postproc = next(iter(_postproc.values()), None)
+            if closure and topmost_postproc:
+                for cell in closure:
+                    possible_postproc = (setattr, (cell, 'cell_contents', obj))
+                    try:
+                        topmost_postproc.remove(possible_postproc)
+                    except ValueError:
+                        continue
+
+                    # Change the value of the cell
+                    pickler.save_reduce(*possible_postproc)
+                    # pop None created by calling preprocessing step off stack
+                    if PY3:
+                        pickler.write(bytes('0', 'UTF-8'))
+                    else:
+                        pickler.write('0')
+
+        log.info("# F1")
+    else:
+        log.info("F2: %s" % obj)
+        name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+        StockPickler.save_global(pickler, obj, name=name)
+        log.info("# F2")
+    return
+
+# quick sanity checking
+def pickles(obj,exact=False,safe=False,**kwds):
+    """
+    Quick check if object pickles with dill.
+
+    If *exact=True* then an equality test is done to check if the reconstructed
+    object matches the original object.
+
+    If *safe=True* then any exception raised in copy will signal that the
+    object is not picklable, otherwise only pickling errors will be trapped.
+
+    Additional keyword arguments are as :func:`dumps` and :func:`loads`.
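+
+    A short illustration (hypothetical session; exact results vary by Python
+    and dill version):
+
+        >>> import dill
+        >>> dill.pickles([1, 2, 3])
+        True
+        >>> dill.pickles((i for i in range(3)))  # generators do not pickle
+        False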
+ """ + if safe: exceptions = (Exception,) # RuntimeError, ValueError + else: + exceptions = (TypeError, AssertionError, PicklingError, UnpicklingError) + try: + pik = copy(obj, **kwds) + #FIXME: should check types match first, then check content if "exact" + try: + #FIXME: should be "(pik == obj).all()" for numpy comparison, though that'll fail if shapes differ + result = bool(pik.all() == obj.all()) + except AttributeError: + warnings.filterwarnings('ignore') + result = pik == obj + warnings.resetwarnings() + if hasattr(result, 'toarray'): # for unusual types like sparse matrix + result = result.toarray().all() + if result: return True + if not exact: + result = type(pik) == type(obj) + if result: return result + # class instances might have been dumped with byref=False + return repr(type(pik)) == repr(type(obj)) #XXX: InstanceType? + return False + except exceptions: + return False + +def check(obj, *args, **kwds): + """ + Check pickling of an object across another process. + + *python* is the path to the python interpreter (defaults to sys.executable) + + Set *verbose=True* to print the unpickled object in the other process. + + Additional keyword arguments are as :func:`dumps` and :func:`loads`. + """ + # == undocumented == + # python -- the string path or executable name of the selected python + # verbose -- if True, be verbose about printing warning messages + # all other args and kwds are passed to dill.dumps #FIXME: ignore on load + verbose = kwds.pop('verbose', False) + python = kwds.pop('python', None) + if python is None: + import sys + python = sys.executable + # type check + isinstance(python, str) + import subprocess + fail = True + try: + _obj = dumps(obj, *args, **kwds) + fail = False + finally: + if fail and verbose: + print("DUMP FAILED") + #FIXME: fails if python interpreter path contains spaces + # Use the following instead (which also processes the 'ignore' keyword): + # ignore = kwds.pop('ignore', None) + # unpickle = "dill.loads(%s, ignore=%s)"%(repr(_obj), repr(ignore)) + # cmd = [python, "-c", "import dill; print(%s)"%unpickle] + # msg = "SUCCESS" if not subprocess.call(cmd) else "LOAD FAILED" + msg = "%s -c import dill; print(dill.loads(%s))" % (python, repr(_obj)) + msg = "SUCCESS" if not subprocess.call(msg.split(None,2)) else "LOAD FAILED" + if verbose: + print(msg) + return + +# use to protect against missing attributes +def is_dill(pickler, child=None): + "check the dill-ness of your pickler" + if (child is False) or PY34 or (not hasattr(pickler.__class__, 'mro')): + return 'dill' in pickler.__module__ + return Pickler in pickler.__class__.mro() + +def _extend(): + """extend pickle with all of dill's registered types""" + # need to have pickle not choke on _main_module? use is_dill(pickler) + for t,func in Pickler.dispatch.items(): + try: + StockPickler.dispatch[t] = func + except: #TypeError, PicklingError, UnpicklingError + log.info("skip: %s" % t) + else: pass + return + +del diff, _use_diff, use_diff + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/_objects.py b/myenv/lib/python3.9/site-packages/dill/_objects.py new file mode 100644 index 0000000..8b1cb65 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/_objects.py @@ -0,0 +1,558 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +all Python Standard Library objects (currently: CH 1-15 @ 2.7) +and some other common objects (i.e. numpy.ndarray) +""" + +__all__ = ['registered','failures','succeeds'] + +# helper imports +import warnings; warnings.filterwarnings("ignore", category=DeprecationWarning) +import sys +PY3 = (hex(sys.hexversion) >= '0x30000f0') +if PY3: + import queue as Queue + import dbm as anydbm +else: + import Queue + import anydbm + import sets # deprecated/removed + import mutex # removed +try: + from cStringIO import StringIO # has StringI and StringO types +except ImportError: # only has StringIO type + if PY3: + from io import BytesIO as StringIO + else: + from StringIO import StringIO +import re +import array +import collections +import codecs +import struct +import datetime +import calendar +import weakref +import pprint +import decimal +import functools +import itertools +import operator +import tempfile +import shelve +import zlib +import gzip +import zipfile +import tarfile +import xdrlib +import csv +import hashlib +import hmac +import os +import logging +import optparse +#import __hello__ +import threading +import socket +import contextlib +try: + import bz2 + import sqlite3 + if PY3: import dbm.ndbm as dbm + else: import dbm + HAS_ALL = True +except ImportError: # Ubuntu + HAS_ALL = False +try: + #import curses + #from curses import textpad, panel + HAS_CURSES = True +except ImportError: # Windows + HAS_CURSES = False +try: + import ctypes + HAS_CTYPES = True + # if using `pypy`, pythonapi is not found + IS_PYPY = not hasattr(ctypes, 'pythonapi') +except ImportError: # MacPorts + HAS_CTYPES = False + IS_PYPY = False + +# helper objects +class _class: + def _method(self): + pass +# @classmethod +# def _clsmethod(cls): #XXX: test me +# pass +# @staticmethod +# def _static(self): #XXX: test me +# pass +class _class2: + def __call__(self): + pass +_instance2 = _class2() +class _newclass(object): + def _method(self): + pass +# @classmethod +# def _clsmethod(cls): #XXX: test me +# pass +# @staticmethod +# def _static(self): #XXX: test me +# pass +class _newclass2(object): + __slots__ = ['descriptor'] +def _function(x): yield x +def _function2(): + try: raise + except: + from sys import exc_info + e, er, tb = exc_info() + return er, tb +if HAS_CTYPES: + class _Struct(ctypes.Structure): + pass + _Struct._fields_ = [("_field", ctypes.c_int),("next", ctypes.POINTER(_Struct))] +_filedescrip, _tempfile = tempfile.mkstemp('r') # deleted in cleanup +_tmpf = tempfile.TemporaryFile('w') + +# put the objects in order, if possible +try: + from collections import OrderedDict as odict +except ImportError: + try: + from ordereddict import OrderedDict as odict + except ImportError: + odict = dict +# objects used by dill for type declaration +registered = d = odict() +# objects dill fails to pickle +failures = x = odict() +# all other type objects +succeeds = a = odict() + +# types module (part of CH 8) +a['BooleanType'] = bool(1) +a['BuiltinFunctionType'] = len +a['BuiltinMethodType'] = a['BuiltinFunctionType'] +a['BytesType'] = _bytes = codecs.latin_1_encode('\x00')[0] # bytes(1) +a['ClassType'] = _class +a['ComplexType'] = complex(1) +a['DictType'] = _dict = {} +a['DictionaryType'] = a['DictType'] +a['FloatType'] = float(1) +a['FunctionType'] = _function +a['InstanceType'] = _instance = _class() +a['IntType'] = _int = int(1) +a['ListType'] = _list = [] +a['NoneType'] = None +a['ObjectType'] = object() 
+a['StringType'] = _str = str(1) +a['TupleType'] = _tuple = () +a['TypeType'] = type +if PY3: + a['LongType'] = _int + a['UnicodeType'] = _str +else: + a['LongType'] = long(1) + a['UnicodeType'] = unicode(1) +# built-in constants (CH 4) +a['CopyrightType'] = copyright +# built-in types (CH 5) +a['ClassObjectType'] = _newclass # +a['ClassInstanceType'] = _newclass() # +a['SetType'] = _set = set() +a['FrozenSetType'] = frozenset() +# built-in exceptions (CH 6) +a['ExceptionType'] = _exception = _function2()[0] +# string services (CH 7) +a['SREPatternType'] = _srepattern = re.compile('') +# data types (CH 8) +a['ArrayType'] = array.array("f") +a['DequeType'] = collections.deque([0]) +a['DefaultDictType'] = collections.defaultdict(_function, _dict) +a['TZInfoType'] = datetime.tzinfo() +a['DateTimeType'] = datetime.datetime.today() +a['CalendarType'] = calendar.Calendar() +if not PY3: + a['SetsType'] = sets.Set() + a['ImmutableSetType'] = sets.ImmutableSet() + a['MutexType'] = mutex.mutex() +# numeric and mathematical types (CH 9) +a['DecimalType'] = decimal.Decimal(1) +a['CountType'] = itertools.count(0) +# data compression and archiving (CH 12) +a['TarInfoType'] = tarfile.TarInfo() +# generic operating system services (CH 15) +a['LoggerType'] = logging.getLogger() +a['FormatterType'] = logging.Formatter() # pickle ok +a['FilterType'] = logging.Filter() # pickle ok +a['LogRecordType'] = logging.makeLogRecord(_dict) # pickle ok +a['OptionParserType'] = _oparser = optparse.OptionParser() # pickle ok +a['OptionGroupType'] = optparse.OptionGroup(_oparser,"foo") # pickle ok +a['OptionType'] = optparse.Option('--foo') # pickle ok +if HAS_CTYPES: + a['CCharType'] = _cchar = ctypes.c_char() + a['CWCharType'] = ctypes.c_wchar() # fail == 2.6 + a['CByteType'] = ctypes.c_byte() + a['CUByteType'] = ctypes.c_ubyte() + a['CShortType'] = ctypes.c_short() + a['CUShortType'] = ctypes.c_ushort() + a['CIntType'] = ctypes.c_int() + a['CUIntType'] = ctypes.c_uint() + a['CLongType'] = ctypes.c_long() + a['CULongType'] = ctypes.c_ulong() + a['CLongLongType'] = ctypes.c_longlong() + a['CULongLongType'] = ctypes.c_ulonglong() + a['CFloatType'] = ctypes.c_float() + a['CDoubleType'] = ctypes.c_double() + a['CSizeTType'] = ctypes.c_size_t() + a['CLibraryLoaderType'] = ctypes.cdll + a['StructureType'] = _Struct + # if not IS_PYPY: + # a['BigEndianStructureType'] = ctypes.BigEndianStructure() +#NOTE: also LittleEndianStructureType and UnionType... 
abstract classes +#NOTE: remember for ctypesobj.contents creates a new python object +#NOTE: ctypes.c_int._objects is memberdescriptor for object's __dict__ +#NOTE: base class of all ctypes data types is non-public _CData + +try: # python 2.6 + import fractions + import number + import io + from io import StringIO as TextIO + # built-in functions (CH 2) + a['ByteArrayType'] = bytearray([1]) + # numeric and mathematical types (CH 9) + a['FractionType'] = fractions.Fraction() + a['NumberType'] = numbers.Number() + # generic operating system services (CH 15) + a['IOBaseType'] = io.IOBase() + a['RawIOBaseType'] = io.RawIOBase() + a['TextIOBaseType'] = io.TextIOBase() + a['BufferedIOBaseType'] = io.BufferedIOBase() + a['UnicodeIOType'] = TextIO() # the new StringIO + a['LoggingAdapterType'] = logging.LoggingAdapter(_logger,_dict) # pickle ok + if HAS_CTYPES: + a['CBoolType'] = ctypes.c_bool(1) + a['CLongDoubleType'] = ctypes.c_longdouble() +except ImportError: + pass +try: # python 2.7 + import argparse + # data types (CH 8) + a['OrderedDictType'] = collections.OrderedDict(_dict) + a['CounterType'] = collections.Counter(_dict) + if HAS_CTYPES: + a['CSSizeTType'] = ctypes.c_ssize_t() + # generic operating system services (CH 15) + a['NullHandlerType'] = logging.NullHandler() # pickle ok # new 2.7 + a['ArgParseFileType'] = argparse.FileType() # pickle ok +except (AttributeError, ImportError): + pass + +# -- pickle fails on all below here ----------------------------------------- +# types module (part of CH 8) +a['CodeType'] = compile('','','exec') +a['DictProxyType'] = type.__dict__ +a['DictProxyType2'] = _newclass.__dict__ +a['EllipsisType'] = Ellipsis +a['ClosedFileType'] = open(os.devnull, 'wb', buffering=0).close() +a['GetSetDescriptorType'] = array.array.typecode +a['LambdaType'] = _lambda = lambda x: lambda y: x #XXX: works when not imported! +a['MemberDescriptorType'] = _newclass2.descriptor +if not IS_PYPY: + a['MemberDescriptorType2'] = datetime.timedelta.days +a['MethodType'] = _method = _class()._method #XXX: works when not imported! +a['ModuleType'] = datetime +a['NotImplementedType'] = NotImplemented +a['SliceType'] = slice(1) +a['UnboundMethodType'] = _class._method #XXX: works when not imported! 
+a['TextWrapperType'] = open(os.devnull, 'r') # same as mode='w','w+','r+' +a['BufferedRandomType'] = open(os.devnull, 'r+b') # same as mode='w+b' +a['BufferedReaderType'] = open(os.devnull, 'rb') # (default: buffering=-1) +a['BufferedWriterType'] = open(os.devnull, 'wb') +try: # oddities: deprecated + from _pyio import open as _open + a['PyTextWrapperType'] = _open(os.devnull, 'r', buffering=-1) + a['PyBufferedRandomType'] = _open(os.devnull, 'r+b', buffering=-1) + a['PyBufferedReaderType'] = _open(os.devnull, 'rb', buffering=-1) + a['PyBufferedWriterType'] = _open(os.devnull, 'wb', buffering=-1) +except ImportError: + pass +# other (concrete) object types +if PY3: + d['CellType'] = (_lambda)(0).__closure__[0] + a['XRangeType'] = _xrange = range(1) +else: + d['CellType'] = (_lambda)(0).func_closure[0] + a['XRangeType'] = _xrange = xrange(1) +if not IS_PYPY: + d['MethodDescriptorType'] = type.__dict__['mro'] + d['WrapperDescriptorType'] = type.__repr__ + a['WrapperDescriptorType2'] = type.__dict__['__module__'] + d['ClassMethodDescriptorType'] = type.__dict__['__prepare__' if PY3 else 'mro'] +# built-in functions (CH 2) +if PY3 or IS_PYPY: + _methodwrap = (1).__lt__ +else: + _methodwrap = (1).__cmp__ +d['MethodWrapperType'] = _methodwrap +a['StaticMethodType'] = staticmethod(_method) +a['ClassMethodType'] = classmethod(_method) +a['PropertyType'] = property() +d['SuperType'] = super(Exception, _exception) +# string services (CH 7) +if PY3: + _in = _bytes +else: + _in = _str +a['InputType'] = _cstrI = StringIO(_in) +a['OutputType'] = _cstrO = StringIO() +# data types (CH 8) +a['WeakKeyDictionaryType'] = weakref.WeakKeyDictionary() +a['WeakValueDictionaryType'] = weakref.WeakValueDictionary() +a['ReferenceType'] = weakref.ref(_instance) +a['DeadReferenceType'] = weakref.ref(_class()) +a['ProxyType'] = weakref.proxy(_instance) +a['DeadProxyType'] = weakref.proxy(_class()) +a['CallableProxyType'] = weakref.proxy(_instance2) +a['DeadCallableProxyType'] = weakref.proxy(_class2()) +a['QueueType'] = Queue.Queue() +# numeric and mathematical types (CH 9) +d['PartialType'] = functools.partial(int,base=2) +if PY3: + a['IzipType'] = zip('0','1') +else: + a['IzipType'] = itertools.izip('0','1') +a['ChainType'] = itertools.chain('0','1') +d['ItemGetterType'] = operator.itemgetter(0) +d['AttrGetterType'] = operator.attrgetter('__repr__') +# file and directory access (CH 10) +if PY3: _fileW = _cstrO +else: _fileW = _tmpf +# data persistence (CH 11) +if HAS_ALL: + a['ConnectionType'] = _conn = sqlite3.connect(':memory:') + a['CursorType'] = _conn.cursor() +a['ShelveType'] = shelve.Shelf({}) +# data compression and archiving (CH 12) +if HAS_ALL: + if (hex(sys.hexversion) < '0x2070ef0') or PY3: + a['BZ2FileType'] = bz2.BZ2File(os.devnull) #FIXME: fail >= 3.3, 2.7.14 + a['BZ2CompressorType'] = bz2.BZ2Compressor() + a['BZ2DecompressorType'] = bz2.BZ2Decompressor() +#a['ZipFileType'] = _zip = zipfile.ZipFile(os.devnull,'w') #FIXME: fail >= 3.2 +#_zip.write(_tempfile,'x') [causes annoying warning/error printed on import] +#a['ZipInfoType'] = _zip.getinfo('x') +a['TarFileType'] = tarfile.open(fileobj=_fileW,mode='w') +# file formats (CH 13) +a['DialectType'] = csv.get_dialect('excel') +a['PackerType'] = xdrlib.Packer() +# optional operating system services (CH 16) +a['LockType'] = threading.Lock() +a['RLockType'] = threading.RLock() +# generic operating system services (CH 15) # also closed/open and r/w/etc... 
+a['NamedLoggerType'] = _logger = logging.getLogger(__name__) #FIXME: fail >= 3.2 and <= 2.6 +#a['FrozenModuleType'] = __hello__ #FIXME: prints "Hello world..." +# interprocess communication (CH 17) +if PY3: + a['SocketType'] = _socket = socket.socket() #FIXME: fail >= 3.3 + a['SocketPairType'] = socket.socketpair()[0] #FIXME: fail >= 3.3 +else: + a['SocketType'] = _socket = socket.socket() + a['SocketPairType'] = _socket._sock +# python runtime services (CH 27) +if PY3: + a['GeneratorContextManagerType'] = contextlib.contextmanager(max)([1]) +else: + a['GeneratorContextManagerType'] = contextlib.GeneratorContextManager(max) + +try: # ipython + __IPYTHON__ is True # is ipython +except NameError: + # built-in constants (CH 4) + a['QuitterType'] = quit + d['ExitType'] = a['QuitterType'] +try: # numpy #FIXME: slow... 0.05 to 0.1 sec to import numpy + from numpy import ufunc as _numpy_ufunc + from numpy import array as _numpy_array + from numpy import int32 as _numpy_int32 + a['NumpyUfuncType'] = _numpy_ufunc + a['NumpyArrayType'] = _numpy_array + a['NumpyInt32Type'] = _numpy_int32 +except ImportError: + pass +try: # python 2.6 + # numeric and mathematical types (CH 9) + a['ProductType'] = itertools.product('0','1') + # generic operating system services (CH 15) + a['FileHandlerType'] = logging.FileHandler(os.devnull) #FIXME: fail >= 3.2 and <= 2.6 + a['RotatingFileHandlerType'] = logging.handlers.RotatingFileHandler(os.devnull) + a['SocketHandlerType'] = logging.handlers.SocketHandler('localhost',514) + a['MemoryHandlerType'] = logging.handlers.MemoryHandler(1) +except AttributeError: + pass +try: # python 2.7 + # data types (CH 8) + a['WeakSetType'] = weakref.WeakSet() # 2.7 +# # generic operating system services (CH 15) [errors when dill is imported] +# a['ArgumentParserType'] = _parser = argparse.ArgumentParser('PROG') +# a['NamespaceType'] = _parser.parse_args() # pickle ok +# a['SubParsersActionType'] = _parser.add_subparsers() +# a['MutuallyExclusiveGroupType'] = _parser.add_mutually_exclusive_group() +# a['ArgumentGroupType'] = _parser.add_argument_group() +except AttributeError: + pass + +# -- dill fails in some versions below here --------------------------------- +# types module (part of CH 8) +a['FileType'] = open(os.devnull, 'rb', buffering=0) # same 'wb','wb+','rb+' +# FIXME: FileType fails >= 3.1 +# built-in functions (CH 2) +a['ListIteratorType'] = iter(_list) # empty vs non-empty FIXME: fail < 3.2 +a['TupleIteratorType']= iter(_tuple) # empty vs non-empty FIXME: fail < 3.2 +a['XRangeIteratorType'] = iter(_xrange) # empty vs non-empty FIXME: fail < 3.2 +# data types (CH 8) +a['PrettyPrinterType'] = pprint.PrettyPrinter() #FIXME: fail >= 3.2 and == 2.5 +# numeric and mathematical types (CH 9) +a['CycleType'] = itertools.cycle('0') #FIXME: fail < 3.2 +# file and directory access (CH 10) +a['TemporaryFileType'] = _tmpf #FIXME: fail >= 3.2 and == 2.5 +# data compression and archiving (CH 12) +a['GzipFileType'] = gzip.GzipFile(fileobj=_fileW) #FIXME: fail > 3.2 and <= 2.6 +# generic operating system services (CH 15) +a['StreamHandlerType'] = logging.StreamHandler() #FIXME: fail >= 3.2 and == 2.5 +try: # python 2.6 + # numeric and mathematical types (CH 9) + a['PermutationsType'] = itertools.permutations('0') #FIXME: fail < 3.2 + a['CombinationsType'] = itertools.combinations('0',1) #FIXME: fail < 3.2 +except AttributeError: + pass +try: # python 2.7 + # numeric and mathematical types (CH 9) + a['RepeatType'] = itertools.repeat(0) #FIXME: fail < 3.2 + a['CompressType'] = 
itertools.compress('0',[1]) #FIXME: fail < 3.2 + #XXX: ...and etc +except AttributeError: + pass + +# -- dill fails on all below here ------------------------------------------- +# types module (part of CH 8) +x['GeneratorType'] = _generator = _function(1) #XXX: priority +x['FrameType'] = _generator.gi_frame #XXX: inspect.currentframe() +x['TracebackType'] = _function2()[1] #(see: inspect.getouterframes,getframeinfo) +# other (concrete) object types +# (also: Capsule / CObject ?) +# built-in functions (CH 2) +x['SetIteratorType'] = iter(_set) #XXX: empty vs non-empty +# built-in types (CH 5) +if PY3: + x['DictionaryItemIteratorType'] = iter(type.__dict__.items()) + x['DictionaryKeyIteratorType'] = iter(type.__dict__.keys()) + x['DictionaryValueIteratorType'] = iter(type.__dict__.values()) +else: + x['DictionaryItemIteratorType'] = type.__dict__.iteritems() + x['DictionaryKeyIteratorType'] = type.__dict__.iterkeys() + x['DictionaryValueIteratorType'] = type.__dict__.itervalues() +# string services (CH 7) +x['StructType'] = struct.Struct('c') +x['CallableIteratorType'] = _srepattern.finditer('') +x['SREMatchType'] = _srepattern.match('') +x['SREScannerType'] = _srepattern.scanner('') +x['StreamReader'] = codecs.StreamReader(_cstrI) #XXX: ... and etc +# python object persistence (CH 11) +# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo +if HAS_ALL: + x['DbmType'] = dbm.open(_tempfile,'n') +# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo +# x['DbType'] = _dbcursor.db +# data compression and archiving (CH 12) +x['ZlibCompressType'] = zlib.compressobj() +x['ZlibDecompressType'] = zlib.decompressobj() +# file formats (CH 13) +x['CSVReaderType'] = csv.reader(_cstrI) +x['CSVWriterType'] = csv.writer(_cstrO) +x['CSVDictReaderType'] = csv.DictReader(_cstrI) +x['CSVDictWriterType'] = csv.DictWriter(_cstrO,{}) +# cryptographic services (CH 14) +x['HashType'] = hashlib.md5() +if (hex(sys.hexversion) < '0x30800a1'): + x['HMACType'] = hmac.new(_in) +else: + x['HMACType'] = hmac.new(_in, digestmod='md5') +# generic operating system services (CH 15) +if HAS_CURSES: pass + #x['CursesWindowType'] = _curwin = curses.initscr() #FIXME: messes up tty + #x['CursesTextPadType'] = textpad.Textbox(_curwin) + #x['CursesPanelType'] = panel.new_panel(_curwin) +if HAS_CTYPES: + x['CCharPType'] = ctypes.c_char_p() + x['CWCharPType'] = ctypes.c_wchar_p() + x['CVoidPType'] = ctypes.c_void_p() + if sys.platform[:3] == 'win': + x['CDLLType'] = _cdll = ctypes.cdll.msvcrt + else: + x['CDLLType'] = _cdll = ctypes.CDLL(None) + if not IS_PYPY: + x['PyDLLType'] = _pydll = ctypes.pythonapi + x['FuncPtrType'] = _cdll._FuncPtr() + x['CCharArrayType'] = ctypes.create_string_buffer(1) + x['CWCharArrayType'] = ctypes.create_unicode_buffer(1) + x['CParamType'] = ctypes.byref(_cchar) + x['LPCCharType'] = ctypes.pointer(_cchar) + x['LPCCharObjType'] = _lpchar = ctypes.POINTER(ctypes.c_char) + x['NullPtrType'] = _lpchar() + x['NullPyObjectType'] = ctypes.py_object() + x['PyObjectType'] = ctypes.py_object(lambda :None) + x['FieldType'] = _field = _Struct._field + x['CFUNCTYPEType'] = _cfunc = ctypes.CFUNCTYPE(ctypes.c_char) + x['CFunctionType'] = _cfunc(str) +try: # python 2.6 + # numeric and mathematical types (CH 9) + x['MethodCallerType'] = operator.methodcaller('mro') # 2.6 +except AttributeError: + pass +try: # python 2.7 + # built-in types (CH 5) + x['MemoryType'] = memoryview(_in) # 2.7 + x['MemoryType2'] = memoryview(bytearray(_in)) # 2.7 + if PY3: + x['DictItemsType'] = 
_dict.items() # 2.7 + x['DictKeysType'] = _dict.keys() # 2.7 + x['DictValuesType'] = _dict.values() # 2.7 + else: + x['DictItemsType'] = _dict.viewitems() # 2.7 + x['DictKeysType'] = _dict.viewkeys() # 2.7 + x['DictValuesType'] = _dict.viewvalues() # 2.7 + # generic operating system services (CH 15) + x['RawTextHelpFormatterType'] = argparse.RawTextHelpFormatter('PROG') + x['RawDescriptionHelpFormatterType'] = argparse.RawDescriptionHelpFormatter('PROG') + x['ArgDefaultsHelpFormatterType'] = argparse.ArgumentDefaultsHelpFormatter('PROG') +except NameError: + pass +try: # python 2.7 (and not 3.1) + x['CmpKeyType'] = _cmpkey = functools.cmp_to_key(_methodwrap) # 2.7, >=3.2 + x['CmpKeyObjType'] = _cmpkey('0') #2.7, >=3.2 +except AttributeError: + pass +if PY3: # oddities: removed, etc + x['BufferType'] = x['MemoryType'] +else: + x['BufferType'] = buffer('') + +# -- cleanup ---------------------------------------------------------------- +a.update(d) # registered also succeed +if sys.platform[:3] == 'win': + os.close(_filedescrip) # required on win32 +os.remove(_tempfile) + + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/_shims.py b/myenv/lib/python3.9/site-packages/dill/_shims.py new file mode 100644 index 0000000..6bda513 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/_shims.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Anirudh Vegesana (avegesan@stanford.edu) +# Copyright (c) 2021-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Provides shims for compatibility between versions of dill and Python. + +Compatibility shims should be provided in this file. Here are two simple example +use cases. + +Deprecation of constructor function: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Assume that we were transitioning _import_module in _dill.py to +the builtin function importlib.import_module when present. + +@move_to(_dill) +def _import_module(import_name): + ... # code already in _dill.py + +_import_module = Getattr(importlib, 'import_module', Getattr(_dill, '_import_module', None)) + +The code will attempt to find import_module in the importlib module. If not +present, it will use the _import_module function in _dill. + +Emulate new Python behavior in older Python versions: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +CellType.cell_contents behaves differently in Python 3.6 and 3.7. It is +read-only in Python 3.6 and writable and deletable in 3.7. + +if _dill.OLD37 and _dill.HAS_CTYPES and ...: + @move_to(_dill) + def _setattr(object, name, value): + if type(object) is _dill.CellType and name == 'cell_contents': + _PyCell_Set.argtypes = (ctypes.py_object, ctypes.py_object) + _PyCell_Set(object, value) + else: + setattr(object, name, value) +... # more cases below + +_setattr = Getattr(_dill, '_setattr', setattr) + +_dill._setattr will be used when present to emulate Python 3.7 functionality in +older versions of Python while defaulting to the standard setattr in 3.7+. + +See this PR for the discussion that lead to this system: +https://github.com/uqfoundation/dill/pull/443 +""" + +import inspect, sys + +_dill = sys.modules['dill._dill'] + + +class Reduce(object): + """ + Reduce objects are wrappers used for compatibility enforcement during + unpickle-time. They should only be used in calls to pickler.save and + other Reduce objects. 
They are only evaluated within unpickler.load. + + Pickling a Reduce object makes the two implementations equivalent: + + pickler.save(Reduce(*reduction)) + + pickler.save_reduce(*reduction, obj=reduction) + """ + __slots__ = ['reduction'] + def __new__(cls, *reduction, **kwargs): + """ + Args: + *reduction: a tuple that matches the format given here: + https://docs.python.org/3/library/pickle.html#object.__reduce__ + is_callable: a bool to indicate that the object created by + unpickling `reduction` is callable. If true, the current Reduce + is allowed to be used as the function in further save_reduce calls + or Reduce objects. + """ + is_callable = kwargs.get('is_callable', False) # Pleases Py2. Can be removed later + if is_callable: + self = object.__new__(_CallableReduce) + else: + self = object.__new__(Reduce) + self.reduction = reduction + return self + def __repr__(self): + return 'Reduce%s' % (self.reduction,) + def __copy__(self): + return self # pragma: no cover + def __deepcopy__(self, memo): + return self # pragma: no cover + def __reduce__(self): + return self.reduction + def __reduce_ex__(self, protocol): + return self.__reduce__() + +class _CallableReduce(Reduce): + # A version of Reduce for functions. Used to trick pickler.save_reduce into + # thinking that Reduce objects of functions are themselves meaningful functions. + def __call__(self, *args, **kwargs): + reduction = self.__reduce__() + func = reduction[0] + f_args = reduction[1] + obj = func(*f_args) + return obj(*args, **kwargs) + +__NO_DEFAULT = _dill.Sentinel('Getattr.NO_DEFAULT') + +def Getattr(object, name, default=__NO_DEFAULT): + """ + A Reduce object that represents the getattr operation. When unpickled, the + Getattr will access an attribute 'name' of 'object' and return the value + stored there. If the attribute doesn't exist, the default value will be + returned if present. + + The following statements are equivalent: + + Getattr(collections, 'OrderedDict') + Getattr(collections, 'spam', None) + Getattr(*args) + + Reduce(getattr, (collections, 'OrderedDict')) + Reduce(getattr, (collections, 'spam', None)) + Reduce(getattr, args) + + During unpickling, the first two will result in collections.OrderedDict and + None respectively because the first attribute exists and the second one does + not, forcing it to use the default value given in the third argument. 
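+
+    A rough sketch of the round trip (illustrative, assuming dill's top-level
+    dumps/loads):
+
+        import collections, dill
+        payload = dill.dumps(Getattr(collections, 'OrderedDict'))
+        assert dill.loads(payload) is collections.OrderedDict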
+ """ + + if default is Getattr.NO_DEFAULT: + reduction = (getattr, (object, name)) + else: + reduction = (getattr, (object, name, default)) + + return Reduce(*reduction, is_callable=callable(default)) + +Getattr.NO_DEFAULT = __NO_DEFAULT +del __NO_DEFAULT + +def move_to(module, name=None): + def decorator(func): + if name is None: + fname = func.__name__ + else: + fname = name + module.__dict__[fname] = func + func.__module__ = module.__name__ + return func + return decorator + +###################### +## Compatibility Shims are defined below +###################### + +_CELL_EMPTY = Getattr(_dill, '_CELL_EMPTY', None) + +if _dill.OLD37: + if _dill.HAS_CTYPES and hasattr(_dill.ctypes, 'pythonapi') and hasattr(_dill.ctypes.pythonapi, 'PyCell_Set'): + # CPython + ctypes = _dill.ctypes + + _PyCell_Set = ctypes.pythonapi.PyCell_Set + + @move_to(_dill) + def _setattr(object, name, value): + if type(object) is _dill.CellType and name == 'cell_contents': + _PyCell_Set.argtypes = (ctypes.py_object, ctypes.py_object) + _PyCell_Set(object, value) + else: + setattr(object, name, value) + + @move_to(_dill) + def _delattr(object, name): + if type(object) is _dill.CellType and name == 'cell_contents': + _PyCell_Set.argtypes = (ctypes.py_object, ctypes.c_void_p) + _PyCell_Set(object, None) + else: + delattr(object, name) + + # General Python (not CPython) up to 3.6 is in a weird case, where it is + # possible to pickle recursive cells, but we can't assign directly to the + # cell. + elif _dill.PY3: + # Use nonlocal variables to reassign the cell value. + # https://stackoverflow.com/a/59276835 + __nonlocal = ('nonlocal cell',) + exec('''def _setattr(cell, name, value): + if type(cell) is _dill.CellType and name == 'cell_contents': + def cell_setter(value): + %s + cell = value # pylint: disable=unused-variable + func = _dill.FunctionType(cell_setter.__code__, globals(), "", None, (cell,)) # same as cell_setter, but with cell being the cell's contents + func(value) + else: + setattr(cell, name, value)''' % __nonlocal) + move_to(_dill)(_setattr) + + exec('''def _delattr(cell, name): + if type(cell) is _dill.CellType and name == 'cell_contents': + try: + cell.cell_contents + except: + return + def cell_deleter(): + %s + del cell # pylint: disable=unused-variable + func = _dill.FunctionType(cell_deleter.__code__, globals(), "", None, (cell,)) # same as cell_deleter, but with cell being the cell's contents + func() + else: + delattr(cell, name)''' % __nonlocal) + move_to(_dill)(_delattr) + + else: + # Likely PyPy 2.7. Simulate the nonlocal keyword with bytecode + # manipulation. + + # The following function is based on 'cell_set' from 'cloudpickle' + # https://github.com/cloudpipe/cloudpickle/blob/5d89947288a18029672596a4d719093cc6d5a412/cloudpickle/cloudpickle.py#L393-L482 + # Copyright (c) 2012, Regents of the University of California. + # Copyright (c) 2009 `PiCloud, Inc. `_. 
+ # License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE + @move_to(_dill) + def _setattr(cell, name, value): + if type(cell) is _dill.CellType and name == 'cell_contents': + _cell_set = _dill.FunctionType( + _cell_set_template_code, {}, '_cell_set', (), (cell,),) + _cell_set(value) + else: + setattr(cell, name, value) + + def _cell_set_factory(value): + lambda: cell + cell = value + + co = _cell_set_factory.__code__ + + _cell_set_template_code = _dill.CodeType( + co.co_argcount, + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + co.co_cellvars, # co_freevars is initialized with co_cellvars + (), # co_cellvars is made empty + ) + + del co + + @move_to(_dill) + def _delattr(cell, name): + if type(cell) is _dill.CellType and name == 'cell_contents': + pass + else: + delattr(cell, name) + +_setattr = Getattr(_dill, '_setattr', setattr) +_delattr = Getattr(_dill, '_delattr', delattr) diff --git a/myenv/lib/python3.9/site-packages/dill/detect.py b/myenv/lib/python3.9/site-packages/dill/detect.py new file mode 100644 index 0000000..4157520 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/detect.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Methods for detecting objects leading to pickling failures. +""" + +import dis +from inspect import ismethod, isfunction, istraceback, isframe, iscode +from .pointers import parent, reference, at, parents, children + +from ._dill import _trace as trace +from ._dill import PY3 + +__all__ = ['baditems','badobjects','badtypes','code','errors','freevars', + 'getmodule','globalvars','nestedcode','nestedglobals','outermost', + 'referredglobals','referrednested','trace','varnames'] + +def getmodule(object, _filename=None, force=False): + """get the module of the object""" + from inspect import getmodule as getmod + module = getmod(object, _filename) + if module or not force: return module + if PY3: builtins = 'builtins' + else: builtins = '__builtin__' + builtins = __import__(builtins) + from .source import getname + name = getname(object, force=True) + return builtins if name in vars(builtins).keys() else None + +def outermost(func): # is analogous to getsource(func,enclosing=True) + """get outermost enclosing object (i.e. the outer function in a closure) + + NOTE: this is the object-equivalent of getsource(func, enclosing=True) + """ + if PY3: + if ismethod(func): + _globals = func.__func__.__globals__ or {} + elif isfunction(func): + _globals = func.__globals__ or {} + else: + return #XXX: or raise? no matches + _globals = _globals.items() + else: + if ismethod(func): + _globals = func.im_func.func_globals or {} + elif isfunction(func): + _globals = func.func_globals or {} + else: + return #XXX: or raise? 
no matches
+        _globals = _globals.iteritems()
+    # get the enclosing source
+    from .source import getsourcelines
+    try: lines,lnum = getsourcelines(func, enclosing=True)
+    except: #TypeError, IOError
+        lines,lnum = [],None
+    code = ''.join(lines)
+    # get all possible names,objects that are named in the enclosing source
+    _locals = ((name,obj) for (name,obj) in _globals if name in code)
+    # now only save the objects that generate the enclosing block
+    for name,obj in _locals: #XXX: don't really need 'name'
+        try:
+            if getsourcelines(obj) == (lines,lnum): return obj
+        except: #TypeError, IOError
+            pass
+    return #XXX: or raise? no matches
+
+def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?
+    """get the code objects for any nested functions (e.g. in a closure)"""
+    func = code(func)
+    if not iscode(func): return [] #XXX: or raise? no matches
+    nested = set()
+    for co in func.co_consts:
+        if co is None: continue
+        co = code(co)
+        if co:
+            nested.add(co)
+            if recurse: nested |= set(nestedcode(co, recurse=True))
+    return list(nested)
+
+def code(func):
+    '''get the code object for the given function or method
+
+    NOTE: use dill.source.getsource(CODEOBJ) to get the source code
+    '''
+    if PY3:
+        im_func = '__func__'
+        func_code = '__code__'
+    else:
+        im_func = 'im_func'
+        func_code = 'func_code'
+    if ismethod(func): func = getattr(func, im_func)
+    if isfunction(func): func = getattr(func, func_code)
+    if istraceback(func): func = func.tb_frame
+    if isframe(func): func = func.f_code
+    if iscode(func): return func
+    return
+
+#XXX: ugly: parse dis.dis for name after "<code object" in line and in globals?
+def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
+    """get functions defined inside of func (e.g. inner functions in a closure)
+
+    NOTE: results may differ if the function has been executed or not.
+    If len(nestedcode(func)) > len(referrednested(func)), try calling func().
+    If possible, python builds code objects, but delays building functions
+    until func() is called.
+    """
+    if PY3:
+        att1 = '__code__'
+        att0 = '__func__'
+    else:
+        att1 = 'func_code' # functions
+        att0 = 'im_func' # methods
+
+    import gc
+    funcs = set()
+    # get the code objects, and try to track down by reference
+    for co in nestedcode(func, recurse):
+        # look for function objects that refer to the code object
+        for obj in gc.get_referrers(co):
+            # get methods
+            _ = getattr(obj, att0, None) # ismethod
+            if getattr(_, att1, None) is co: funcs.add(obj)
+            # get functions
+            elif getattr(obj, att1, None) is co: funcs.add(obj)
+            # get frame objects
+            elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
+            # get code objects
+            elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
+# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars
+# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames
+# frameobjs are not found, however funcobjs are...
+# (see: test_mixins.quad ...
and test_mixins.wtf) +# after execution, code objects get compiled, and then may be found by gc + return list(funcs) + + +def freevars(func): + """get objects defined in enclosing code that are referred to by func + + returns a dict of {name:object}""" + if PY3: + im_func = '__func__' + func_code = '__code__' + func_closure = '__closure__' + else: + im_func = 'im_func' + func_code = 'func_code' + func_closure = 'func_closure' + if ismethod(func): func = getattr(func, im_func) + if isfunction(func): + closures = getattr(func, func_closure) or () + func = getattr(func, func_code).co_freevars # get freevars + else: + return {} + + def get_cell_contents(): + for (name,c) in zip(func,closures): + try: + cell_contents = c.cell_contents + except: + continue + yield (name,c.cell_contents) + + return dict(get_cell_contents()) + +# thanks to Davies Liu for recursion of globals +def nestedglobals(func, recurse=True): + """get the names of any globals found within func""" + func = code(func) + if func is None: return list() + import sys + from .temp import capture + CAN_NULL = sys.hexversion >= 51052711 #NULL may be prepended >= 3.11a7 + names = set() + with capture('stdout') as out: + dis.dis(func) #XXX: dis.dis(None) disassembles last traceback + for line in out.getvalue().splitlines(): + if '_GLOBAL' in line: + name = line.split('(')[-1].split(')')[0] + if CAN_NULL: + names.add(name.replace('NULL + ', '')) + else: + names.add(name) + for co in getattr(func, 'co_consts', tuple()): + if co and recurse and iscode(co): + names.update(nestedglobals(co, recurse=True)) + return list(names) + +def referredglobals(func, recurse=True, builtin=False): + """get the names of objects in the global scope referred to by func""" + return globalvars(func, recurse, builtin).keys() + +def globalvars(func, recurse=True, builtin=False): + """get objects defined in global scope that are referred to by func + + return a dict of {name:object}""" + if PY3: + im_func = '__func__' + func_code = '__code__' + func_globals = '__globals__' + func_closure = '__closure__' + else: + im_func = 'im_func' + func_code = 'func_code' + func_globals = 'func_globals' + func_closure = 'func_closure' + if ismethod(func): func = getattr(func, im_func) + if isfunction(func): + globs = vars(getmodule(sum)).copy() if builtin else {} + # get references from within closure + orig_func, func = func, set() + for obj in getattr(orig_func, func_closure) or {}: + try: + cell_contents = obj.cell_contents + except: + pass + else: + _vars = globalvars(cell_contents, recurse, builtin) or {} + func.update(_vars) #XXX: (above) be wary of infinte recursion? + globs.update(_vars) + # get globals + globs.update(getattr(orig_func, func_globals) or {}) + # get names of references + if not recurse: + func.update(getattr(orig_func, func_code).co_names) + else: + func.update(nestedglobals(getattr(orig_func, func_code))) + # find globals for all entries of func + for key in func.copy(): #XXX: unnecessary...? + nested_func = globs.get(key) + if nested_func is orig_func: + #func.remove(key) if key in func else None + continue #XXX: globalvars(func, False)? + func.update(globalvars(nested_func, True, builtin)) + elif iscode(func): + globs = vars(getmodule(sum)).copy() if builtin else {} + #globs.update(globals()) + if not recurse: + func = func.co_names # get names + else: + orig_func = func.co_name # to stop infinite recursion + func = set(nestedglobals(func)) + # find globals for all entries of func + for key in func.copy(): #XXX: unnecessary...? 
+ if key is orig_func: + #func.remove(key) if key in func else None + continue #XXX: globalvars(func, False)? + nested_func = globs.get(key) + func.update(globalvars(nested_func, True, builtin)) + else: + return {} + #NOTE: if name not in func_globals, then we skip it... + return dict((name,globs[name]) for name in func if name in globs) + + +def varnames(func): + """get names of variables defined by func + + returns a tuple (local vars, local vars referrenced by nested functions)""" + func = code(func) + if not iscode(func): + return () #XXX: better ((),())? or None? + return func.co_varnames, func.co_cellvars + + +def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ? + """get items in object that fail to pickle""" + if not hasattr(obj,'__iter__'): # is not iterable + return [j for j in (badobjects(obj,0,exact,safe),) if j is not None] + obj = obj.values() if getattr(obj,'values',None) else obj + _obj = [] # can't use a set, as items may be unhashable + [_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj] + return [j for j in _obj if j is not None] + + +def badobjects(obj, depth=0, exact=False, safe=False): + """get objects that fail to pickle""" + from dill import pickles + if not depth: + if pickles(obj,exact,safe): return None + return obj + return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \ + for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe))) + +def badtypes(obj, depth=0, exact=False, safe=False): + """get types for objects that fail to pickle""" + from dill import pickles + if not depth: + if pickles(obj,exact,safe): return None + return type(obj) + return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \ + for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe))) + +def errors(obj, depth=0, exact=False, safe=False): + """get errors for objects that fail to pickle""" + from dill import pickles, copy + if not depth: + try: + pik = copy(obj) + if exact: + assert pik == obj, \ + "Unpickling produces %s instead of %s" % (pik,obj) + assert type(pik) == type(obj), \ + "Unpickling produces %s instead of %s" % (type(pik),type(obj)) + return None + except Exception: + import sys + return sys.exc_info()[1] + _dict = {} + for attr in dir(obj): + try: + _attr = getattr(obj,attr) + except Exception: + import sys + _dict[attr] = sys.exc_info()[1] + continue + if not pickles(_attr,exact,safe): + _dict[attr] = errors(_attr,depth-1,exact,safe) + return _dict + + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/objtypes.py b/myenv/lib/python3.9/site-packages/dill/objtypes.py new file mode 100644 index 0000000..37fbbc4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/objtypes.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +all Python Standard Library object types (currently: CH 1-15 @ 2.7) +and some other common object types (i.e. 
numpy.ndarray) + +to load more objects and types, use dill.load_types() +""" + +# non-local import of dill.objects +from dill import objects +for _type in objects.keys(): + exec("%s = type(objects['%s'])" % (_type,_type)) + +del objects +try: + del _type +except NameError: + pass diff --git a/myenv/lib/python3.9/site-packages/dill/pointers.py b/myenv/lib/python3.9/site-packages/dill/pointers.py new file mode 100644 index 0000000..ca3a198 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/pointers.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +__all__ = ['parent', 'reference', 'at', 'parents', 'children'] + +import gc +import sys + +from ._dill import _proxy_helper as reference +from ._dill import _locate_object as at + +def parent(obj, objtype, ignore=()): + """ +>>> listiter = iter([4,5,6,7]) +>>> obj = parent(listiter, list) +>>> obj == [4,5,6,7] # actually 'is', but don't have handle any longer +True + +NOTE: objtype can be a single type (e.g. int or list) or a tuple of types. + +WARNING: if obj is a sequence (e.g. list), may produce unexpected results. +Parent finds *one* parent (e.g. the last member of the sequence). + """ + depth = 1 #XXX: always looking for the parent (only, right?) + chain = parents(obj, objtype, depth, ignore) + parent = chain.pop() + if parent is obj: + return None + return parent + + +def parents(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ? + """Find the chain of referents for obj. Chain will end with obj. + + objtype: an object type or tuple of types to search for + depth: search depth (e.g. depth=2 is 'grandparents') + ignore: an object or tuple of objects to ignore in the search + """ + edge_func = gc.get_referents # looking for refs, not back_refs + predicate = lambda x: isinstance(x, objtype) # looking for parent type + #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ? + ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore + ignore = (id(obj) for obj in ignore) + chain = find_chain(obj, predicate, edge_func, depth)[::-1] + #XXX: should pop off obj... ? + return chain + + +def children(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ? + """Find the chain of referrers for obj. Chain will start with obj. + + objtype: an object type or tuple of types to search for + depth: search depth (e.g. depth=2 is 'grandchildren') + ignore: an object or tuple of objects to ignore in the search + + NOTE: a common thing to ignore is all globals, 'ignore=(globals(),)' + + NOTE: repeated calls may yield different results, as python stores + the last value in the special variable '_'; thus, it is often good + to execute something to replace '_' (e.g. >>> 1+1). + """ + edge_func = gc.get_referrers # looking for back_refs, not refs + predicate = lambda x: isinstance(x, objtype) # looking for child type + #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ? + ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore + ignore = (id(obj) for obj in ignore) + chain = find_chain(obj, predicate, edge_func, depth, ignore) + #XXX: should pop off obj... ? 
+ return chain + + +# more generic helper function (cut-n-paste from objgraph) +# Source at http://mg.pov.lt/objgraph/ +# Copyright (c) 2008-2010 Marius Gedminas +# Copyright (c) 2010 Stefano Rivera +# Released under the MIT licence (see objgraph/objgrah.py) + +def find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()): + queue = [obj] + depth = {id(obj): 0} + parent = {id(obj): None} + ignore = set(extra_ignore) + ignore.add(id(extra_ignore)) + ignore.add(id(queue)) + ignore.add(id(depth)) + ignore.add(id(parent)) + ignore.add(id(ignore)) + ignore.add(id(sys._getframe())) # this function + ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain, likely + gc.collect() + while queue: + target = queue.pop(0) + if predicate(target): + chain = [target] + while parent[id(target)] is not None: + target = parent[id(target)] + chain.append(target) + return chain + tdepth = depth[id(target)] + if tdepth < max_depth: + referrers = edge_func(target) + ignore.add(id(referrers)) + for source in referrers: + if id(source) in ignore: + continue + if id(source) not in depth: + depth[id(source)] = tdepth + 1 + parent[id(source)] = target + queue.append(source) + return [obj] # not found + + +# backward compatability +refobject = at + + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/settings.py b/myenv/lib/python3.9/site-packages/dill/settings.py new file mode 100644 index 0000000..4d0226b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/settings.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +global settings for Pickler +""" + +try: + from pickle import DEFAULT_PROTOCOL +except ImportError: + from pickle import HIGHEST_PROTOCOL as DEFAULT_PROTOCOL + +settings = { + #'main' : None, + 'protocol' : DEFAULT_PROTOCOL, + 'byref' : False, + #'strictio' : False, + 'fmode' : 0, #HANDLE_FMODE + 'recurse' : False, + 'ignore' : False, +} + +del DEFAULT_PROTOCOL + diff --git a/myenv/lib/python3.9/site-packages/dill/source.py b/myenv/lib/python3.9/site-packages/dill/source.py new file mode 100644 index 0000000..47064a1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/source.py @@ -0,0 +1,1023 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +# +# inspired by inspect.py from Python-2.7.6 +# inspect.py author: 'Ka-Ping Yee ' +# inspect.py merged into original dill.source by Mike McKerns 4/13/14 +""" +Extensions to python's 'inspect' module, which can be used +to retrieve information from live python objects. The methods +defined in this module are augmented to facilitate access to +source code of interactively defined functions and classes, +as well as provide access to source code for objects defined +in a file. 
+""" + +__all__ = ['findsource', 'getsourcelines', 'getsource', 'indent', 'outdent', \ + '_wrap', 'dumpsource', 'getname', '_namespace', 'getimport', \ + '_importable', 'importable','isdynamic', 'isfrommain'] + +import linecache +import re +from inspect import (getblock, getfile, getmodule, getsourcefile, indentsize, + isbuiltin, isclass, iscode, isframe, isfunction, ismethod, + ismodule, istraceback) +from tokenize import TokenError + +from ._dill import PY3 + + +def isfrommain(obj): + "check if object was built in __main__" + module = getmodule(obj) + if module and module.__name__ == '__main__': + return True + return False + + +def isdynamic(obj): + "check if object was built in the interpreter" + try: file = getfile(obj) + except TypeError: file = None + if file == '' and isfrommain(obj): + return True + return False + + +def _matchlambda(func, line): + """check if lambda object 'func' matches raw line of code 'line'""" + from .detect import code as getcode + from .detect import freevars, globalvars, varnames + dummy = lambda : '__this_is_a_big_dummy_function__' + # process the line (removing leading whitespace, etc) + lhs,rhs = line.split('lambda ',1)[-1].split(":", 1) #FIXME: if !1 inputs + try: #FIXME: unsafe + _ = eval("lambda %s : %s" % (lhs,rhs), globals(),locals()) + except: _ = dummy + # get code objects, for comparison + _, code = getcode(_).co_code, getcode(func).co_code + # check if func is in closure + _f = [line.count(i) for i in freevars(func).keys()] + if not _f: # not in closure + # check if code matches + if _ == code: return True + return False + # weak check on freevars + if not all(_f): return False #XXX: VERY WEAK + # weak check on varnames and globalvars + _f = varnames(func) + _f = [line.count(i) for i in _f[0]+_f[1]] + if _f and not all(_f): return False #XXX: VERY WEAK + _f = [line.count(i) for i in globalvars(func).keys()] + if _f and not all(_f): return False #XXX: VERY WEAK + # check if func is a double lambda + if (line.count('lambda ') > 1) and (lhs in freevars(func).keys()): + _lhs,_rhs = rhs.split('lambda ',1)[-1].split(":",1) #FIXME: if !1 inputs + try: #FIXME: unsafe + _f = eval("lambda %s : %s" % (_lhs,_rhs), globals(),locals()) + except: _f = dummy + # get code objects, for comparison + _, code = getcode(_f).co_code, getcode(func).co_code + if len(_) != len(code): return False + #NOTE: should be same code same order, but except for 't' and '\x88' + _ = set((i,j) for (i,j) in zip(_,code) if i != j) + if len(_) != 1: return False #('t','\x88') + return True + # check indentsize + if not indentsize(line): return False #FIXME: is this a good check??? + # check if code 'pattern' matches + #XXX: or pattern match against dis.dis(code)? (or use uncompyle2?) + _ = _.split(_[0]) # 't' #XXX: remove matching values if starts the same? + _f = code.split(code[0]) # '\x88' + #NOTE: should be same code different order, with different first element + _ = dict(re.match(r'([\W\D\S])(.*)', _[i]).groups() for i in range(1,len(_))) + _f = dict(re.match(r'([\W\D\S])(.*)', _f[i]).groups() for i in range(1,len(_f))) + if (_.keys() == _f.keys()) and (sorted(_.values()) == sorted(_f.values())): + return True + return False + + +def findsource(object): + """Return the entire source file and starting line number for an object. + For interactively-defined objects, the 'file' is the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. 
The source code is returned as a list of all the lines + in the file and the line number indexes a line in that list. An IOError + is raised if the source code cannot be retrieved, while a TypeError is + raised for objects where the source code is unavailable (e.g. builtins).""" + + module = getmodule(object) + try: file = getfile(module) + except TypeError: file = None + # use readline when working in interpreter (i.e. __main__ and not file) + if module and module.__name__ == '__main__' and not file: + try: + import readline + err = '' + except: + import sys + err = sys.exc_info()[1].args[0] + if sys.platform[:3] == 'win': + err += ", please install 'pyreadline'" + if err: + raise IOError(err) + lbuf = readline.get_current_history_length() + lines = [readline.get_history_item(i)+'\n' for i in range(1,lbuf)] + else: + try: # special handling for class instances + if not isclass(object) and isclass(type(object)): # __class__ + file = getfile(module) + sourcefile = getsourcefile(module) + else: # builtins fail with a TypeError + file = getfile(object) + sourcefile = getsourcefile(object) + except (TypeError, AttributeError): # fail with better error + file = getfile(object) + sourcefile = getsourcefile(object) + if not sourcefile and file[:1] + file[-1:] != '<>': + raise IOError('source code not available') + file = sourcefile if sourcefile else file + + module = getmodule(object, file) + if module: + lines = linecache.getlines(file, module.__dict__) + else: + lines = linecache.getlines(file) + + if not lines: + raise IOError('could not extract source code') + + #FIXME: all below may fail if exec used (i.e. exec('f = lambda x:x') ) + if ismodule(object): + return lines, 0 + + #NOTE: beneficial if search goes from end to start of buffer history + name = pat1 = obj = '' + pat2 = r'^(\s*@)' +# pat1b = r'^(\s*%s\W*=)' % name #FIXME: finds 'f = decorate(f)', not exec + if ismethod(object): + name = object.__name__ + if name == '': pat1 = r'(.*(?': + pat1 = r'(.*(?' + if stdin: + lnum = len(lines) - 1 # can't get lnum easily, so leverage pat + if not pat1: pat1 = r'^(\s*def\s)|(.*(? 0: #XXX: won't find decorators in ? + line = lines[lnum] + if pat1.match(line): + if not stdin: break # co_firstlineno does the job + if name == '': # hackery needed to confirm a match + if _matchlambda(obj, line): break + else: # not a lambda, just look for the name + if name in line: # need to check for decorator... + hats = 0 + for _lnum in range(lnum-1,-1,-1): + if pat2.match(lines[_lnum]): hats += 1 + else: break + lnum = lnum - hats + break + lnum = lnum - 1 + return lines, lnum + + try: # turn instances into classes + if not isclass(object) and isclass(type(object)): # __class__ + object = object.__class__ #XXX: sometimes type(class) is better? + #XXX: we don't find how the instance was built + except AttributeError: pass + if isclass(object): + name = object.__name__ + pat = re.compile(r'^(\s*)class\s*' + name + r'\b') + # make some effort to find the best matching class definition: + # use the one with the least indentation, which is the one + # that's most probably not inside a function definition. 
+ candidates = [] + for i in range(len(lines)-1,-1,-1): + match = pat.match(lines[i]) + if match: + # if it's at toplevel, it's already the best one + if lines[i][0] == 'c': + return lines, i + # else add whitespace to candidate list + candidates.append((match.group(1), i)) + if candidates: + # this will sort by whitespace, and by line number, + # less whitespace first #XXX: should sort high lnum before low + candidates.sort() + return lines, candidates[0][1] + else: + raise IOError('could not find class definition') + raise IOError('could not find code object') + + +def getblocks(object, lstrip=False, enclosing=False, locate=False): + """Return a list of source lines and starting line number for an object. + Interactively-defined objects refer to lines in the interpreter's history. + + If enclosing=True, then also return any enclosing code. + If lstrip=True, ensure there is no indentation in the first line of code. + If locate=True, then also return the line number for the block of code. + + DEPRECATED: use 'getsourcelines' instead + """ + lines, lnum = findsource(object) + + if ismodule(object): + if lstrip: lines = _outdent(lines) + return ([lines], [0]) if locate is True else [lines] + + #XXX: 'enclosing' means: closures only? or classes and files? + indent = indentsize(lines[lnum]) + block = getblock(lines[lnum:]) #XXX: catch any TokenError here? + + if not enclosing or not indent: + if lstrip: block = _outdent(block) + return ([block], [lnum]) if locate is True else [block] + + pat1 = r'^(\s*def\s)|(.*(? indent: #XXX: should be >= ? + line += len(code) - skip + elif target in ''.join(code): + blocks.append(code) # save code block as the potential winner + _lnum.append(line - skip) # save the line number for the match + line += len(code) - skip + else: + line += 1 + skip = 0 + # find skip: the number of consecutive decorators + elif pat2.match(lines[line]): + try: code = getblock(lines[line:]) + except TokenError: code = [lines[line]] + skip = 1 + for _line in code[1:]: # skip lines that are decorators + if not pat2.match(_line): break + skip += 1 + line += skip + # no match: reset skip and go to the next line + else: + line +=1 + skip = 0 + + if not blocks: + blocks = [block] + _lnum = [lnum] + if lstrip: blocks = [_outdent(block) for block in blocks] + # return last match + return (blocks, _lnum) if locate is True else blocks + + +def getsourcelines(object, lstrip=False, enclosing=False): + """Return a list of source lines and starting line number for an object. + Interactively-defined objects refer to lines in the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of the lines + corresponding to the object and the line number indicates where in the + original source file the first line of code was found. An IOError is + raised if the source code cannot be retrieved, while a TypeError is + raised for objects where the source code is unavailable (e.g. builtins). + + If lstrip=True, ensure there is no indentation in the first line of code. + If enclosing=True, then also return any enclosing code.""" + code, n = getblocks(object, lstrip=lstrip, enclosing=enclosing, locate=True) + return code[-1], n[-1] + + +#NOTE: broke backward compatibility 4/16/14 (was lstrip=True, force=True) +def getsource(object, alias='', lstrip=False, enclosing=False, \ + force=False, builtin=False): + """Return the text of the source code for an object. 
The source code for + interactively-defined objects are extracted from the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a single string. An + IOError is raised if the source code cannot be retrieved, while a + TypeError is raised for objects where the source code is unavailable + (e.g. builtins). + + If alias is provided, then add a line of code that renames the object. + If lstrip=True, ensure there is no indentation in the first line of code. + If enclosing=True, then also return any enclosing code. + If force=True, catch (TypeError,IOError) and try to use import hooks. + If builtin=True, force an import for any builtins + """ + # hascode denotes a callable + hascode = _hascode(object) + # is a class instance type (and not in builtins) + instance = _isinstance(object) + + # get source lines; if fail, try to 'force' an import + try: # fails for builtins, and other assorted object types + lines, lnum = getsourcelines(object, enclosing=enclosing) + except (TypeError, IOError): # failed to get source, resort to import hooks + if not force: # don't try to get types that findsource can't get + raise + if not getmodule(object): # get things like 'None' and '1' + if not instance: return getimport(object, alias, builtin=builtin) + # special handling (numpy arrays, ...) + _import = getimport(object, builtin=builtin) + name = getname(object, force=True) + _alias = "%s = " % alias if alias else "" + if alias == name: _alias = "" + return _import+_alias+"%s\n" % name + else: #FIXME: could use a good bit of cleanup, since using getimport... + if not instance: return getimport(object, alias, builtin=builtin) + # now we are dealing with an instance... + name = object.__class__.__name__ + module = object.__module__ + if module in ['builtins','__builtin__']: + return getimport(object, alias, builtin=builtin) + else: #FIXME: leverage getimport? use 'from module import name'? + lines, lnum = ["%s = __import__('%s', fromlist=['%s']).%s\n" % (name,module,name,name)], 0 + obj = eval(lines[0].lstrip(name + ' = ')) + lines, lnum = getsourcelines(obj, enclosing=enclosing) + + # strip leading indent (helps ensure can be imported) + if lstrip or alias: + lines = _outdent(lines) + + # instantiate, if there's a nice repr #XXX: BAD IDEA??? + if instance: #and force: #XXX: move into findsource or getsourcelines ? + if '(' in repr(object): lines.append('%r\n' % object) + #else: #XXX: better to somehow to leverage __reduce__ ? + # reconstructor,args = object.__reduce__() + # _ = reconstructor(*args) + else: # fall back to serialization #XXX: bad idea? + #XXX: better not duplicate work? #XXX: better new/enclose=True? + lines = dumpsource(object, alias='', new=force, enclose=False) + lines, lnum = [line+'\n' for line in lines.split('\n')][:-1], 0 + #else: object.__code__ # raise AttributeError + + # add an alias to the source code + if alias: + if hascode: + skip = 0 + for line in lines: # skip lines that are decorators + if not line.startswith('@'): break + skip += 1 + #XXX: use regex from findsource / getsourcelines ? 
+ if lines[skip].lstrip().startswith('def '): # we have a function + if alias != object.__name__: + lines.append('\n%s = %s\n' % (alias, object.__name__)) + elif 'lambda ' in lines[skip]: # we have a lambda + if alias != lines[skip].split('=')[0].strip(): + lines[skip] = '%s = %s' % (alias, lines[skip]) + else: # ...try to use the object's name + if alias != object.__name__: + lines.append('\n%s = %s\n' % (alias, object.__name__)) + else: # class or class instance + if instance: + if alias != lines[-1].split('=')[0].strip(): + lines[-1] = ('%s = ' % alias) + lines[-1] + else: + name = getname(object, force=True) or object.__name__ + if alias != name: + lines.append('\n%s = %s\n' % (alias, name)) + return ''.join(lines) + + +def _hascode(object): + '''True if object has an attribute that stores it's __code__''' + return getattr(object,'__code__',None) or getattr(object,'func_code',None) + +def _isinstance(object): + '''True if object is a class instance type (and is not a builtin)''' + if _hascode(object) or isclass(object) or ismodule(object): + return False + if istraceback(object) or isframe(object) or iscode(object): + return False + # special handling (numpy arrays, ...) + if not getmodule(object) and getmodule(type(object)).__name__ in ['numpy']: + return True +# # check if is instance of a builtin +# if not getmodule(object) and getmodule(type(object)).__name__ in ['__builtin__','builtins']: +# return False + _types = ('") + if not repr(type(object)).startswith(_types): #FIXME: weak hack + return False + if not getmodule(object) or object.__module__ in ['builtins','__builtin__'] or getname(object, force=True) in ['array']: + return False + return True # by process of elimination... it's what we want + + +def _intypes(object): + '''check if object is in the 'types' module''' + import types + # allow user to pass in object or object.__name__ + if type(object) is not type(''): + object = getname(object, force=True) + if object == 'ellipsis': object = 'EllipsisType' + return True if hasattr(types, object) else False + + +def _isstring(object): #XXX: isstringlike better? + '''check if object is a string-like type''' + if PY3: return isinstance(object, (str, bytes)) + return isinstance(object, basestring) + + +def indent(code, spaces=4): + '''indent a block of code with whitespace (default is 4 spaces)''' + indent = indentsize(code) + if type(spaces) is int: spaces = ' '*spaces + # if '\t' is provided, will indent with a tab + nspaces = indentsize(spaces) + # blank lines (etc) need to be ignored + lines = code.split('\n') +## stq = "'''"; dtq = '"""' +## in_stq = in_dtq = False + for i in range(len(lines)): + #FIXME: works... 
but shouldn't indent 2nd+ lines of multiline doc + _indent = indentsize(lines[i]) + if indent > _indent: continue + lines[i] = spaces+lines[i] +## #FIXME: may fail when stq and dtq in same line (depends on ordering) +## nstq, ndtq = lines[i].count(stq), lines[i].count(dtq) +## if not in_dtq and not in_stq: +## lines[i] = spaces+lines[i] # we indent +## # entering a comment block +## if nstq%2: in_stq = not in_stq +## if ndtq%2: in_dtq = not in_dtq +## # leaving a comment block +## elif in_dtq and ndtq%2: in_dtq = not in_dtq +## elif in_stq and nstq%2: in_stq = not in_stq +## else: pass + if lines[-1].strip() == '': lines[-1] = '' + return '\n'.join(lines) + + +def _outdent(lines, spaces=None, all=True): + '''outdent lines of code, accounting for docs and line continuations''' + indent = indentsize(lines[0]) + if spaces is None or spaces > indent or spaces < 0: spaces = indent + for i in range(len(lines) if all else 1): + #FIXME: works... but shouldn't outdent 2nd+ lines of multiline doc + _indent = indentsize(lines[i]) + if spaces > _indent: _spaces = _indent + else: _spaces = spaces + lines[i] = lines[i][_spaces:] + return lines + +def outdent(code, spaces=None, all=True): + '''outdent a block of code (default is to strip all leading whitespace)''' + indent = indentsize(code) + if spaces is None or spaces > indent or spaces < 0: spaces = indent + #XXX: will this delete '\n' in some cases? + if not all: return code[spaces:] + return '\n'.join(_outdent(code.split('\n'), spaces=spaces, all=all)) + + +#XXX: not sure what the point of _wrap is... +#exec_ = lambda s, *a: eval(compile(s, '', 'exec'), *a) +__globals__ = globals() +__locals__ = locals() +wrap2 = ''' +def _wrap(f): + """ encapsulate a function and it's __import__ """ + def func(*args, **kwds): + try: + # _ = eval(getsource(f, force=True)) #XXX: safer but less robust + exec getimportable(f, alias='_') in %s, %s + except: + raise ImportError('cannot import name ' + f.__name__) + return _(*args, **kwds) + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + return func +''' % ('__globals__', '__locals__') +wrap3 = ''' +def _wrap(f): + """ encapsulate a function and it's __import__ """ + def func(*args, **kwds): + try: + # _ = eval(getsource(f, force=True)) #XXX: safer but less robust + exec(getimportable(f, alias='_'), %s, %s) + except: + raise ImportError('cannot import name ' + f.__name__) + return _(*args, **kwds) + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + return func +''' % ('__globals__', '__locals__') +if PY3: + exec(wrap3) +else: + exec(wrap2) +del wrap2, wrap3 + + +def _enclose(object, alias=''): #FIXME: needs alias to hold returned object + """create a function enclosure around the source of some object""" + #XXX: dummy and stub should append a random string + dummy = '__this_is_a_big_dummy_enclosing_function__' + stub = '__this_is_a_stub_variable__' + code = 'def %s():\n' % dummy + code += indent(getsource(object, alias=stub, lstrip=True, force=True)) + code += indent('return %s\n' % stub) + if alias: code += '%s = ' % alias + code += '%s(); del %s\n' % (dummy, dummy) + #code += "globals().pop('%s',lambda :None)()\n" % dummy + return code + + +def dumpsource(object, alias='', new=False, enclose=True): + """'dump to source', where the code includes a pickled object. + + If new=True and object is a class instance, then create a new + instance using the unpacked class source code. If enclose, then + create the object inside a function enclosure (thus minimizing + any global namespace pollution). 
+ """ + from dill import dumps + pik = repr(dumps(object)) + code = 'import dill\n' + if enclose: + stub = '__this_is_a_stub_variable__' #XXX: *must* be same _enclose.stub + pre = '%s = ' % stub + new = False #FIXME: new=True doesn't work with enclose=True + else: + stub = alias + pre = '%s = ' % stub if alias else alias + + # if a 'new' instance is not needed, then just dump and load + if not new or not _isinstance(object): + code += pre + 'dill.loads(%s)\n' % pik + else: #XXX: other cases where source code is needed??? + code += getsource(object.__class__, alias='', lstrip=True, force=True) + mod = repr(object.__module__) # should have a module (no builtins here) + if PY3: + code += pre + 'dill.loads(%s.replace(b%s,bytes(__name__,"UTF-8")))\n' % (pik,mod) + else: + code += pre + 'dill.loads(%s.replace(%s,__name__))\n' % (pik,mod) + #code += 'del %s' % object.__class__.__name__ #NOTE: kills any existing! + + if enclose: + # generation of the 'enclosure' + dummy = '__this_is_a_big_dummy_object__' + dummy = _enclose(dummy, alias=alias) + # hack to replace the 'dummy' with the 'real' code + dummy = dummy.split('\n') + code = dummy[0]+'\n' + indent(code) + '\n'.join(dummy[-3:]) + + return code #XXX: better 'dumpsourcelines', returning list of lines? + + +def getname(obj, force=False, fqn=False): #XXX: throw(?) to raise error on fail? + """get the name of the object. for lambdas, get the name of the pointer """ + if fqn: return '.'.join(_namespace(obj)) + module = getmodule(obj) + if not module: # things like "None" and "1" + if not force: return None + return repr(obj) + try: + #XXX: 'wrong' for decorators and curried functions ? + # if obj.func_closure: ...use logic from getimportable, etc ? + name = obj.__name__ + if name == '': + return getsource(obj).split('=',1)[0].strip() + # handle some special cases + if module.__name__ in ['builtins','__builtin__']: + if name == 'ellipsis': name = 'EllipsisType' + return name + except AttributeError: #XXX: better to just throw AttributeError ? + if not force: return None + name = repr(obj) + if name.startswith('<'): # or name.split('('): + return None + return name + + +def _namespace(obj): + """_namespace(obj); return namespace hierarchy (as a list of names) + for the given object. For an instance, find the class hierarchy. + + For example: + + >>> from functools import partial + >>> p = partial(int, base=2) + >>> _namespace(p) + [\'functools\', \'partial\'] + """ + # mostly for functions and modules and such + #FIXME: 'wrong' for decorators and curried functions + try: #XXX: needs some work and testing on different types + module = qual = str(getmodule(obj)).split()[1].strip('>').strip('"').strip("'") + qual = qual.split('.') + if ismodule(obj): + return qual + # get name of a lambda, function, etc + name = getname(obj) or obj.__name__ # failing, raise AttributeError + # check special cases (NoneType, ...) + if module in ['builtins','__builtin__']: # BuiltinFunctionType + if _intypes(name): return ['types'] + [name] + return qual + [name] #XXX: can be wrong for some aliased objects + except: pass + # special case: numpy.inf and numpy.nan (we don't want them as floats) + if str(obj) in ['inf','nan','Inf','NaN']: # is more, but are they needed? 
+ return ['numpy'] + [str(obj)] + # mostly for classes and class instances and such + module = getattr(obj.__class__, '__module__', None) + qual = str(obj.__class__) + try: qual = qual[qual.index("'")+1:-2] + except ValueError: pass # str(obj.__class__) made the 'try' unnecessary + qual = qual.split(".") + if module in ['builtins','__builtin__']: + # check special cases (NoneType, Ellipsis, ...) + if qual[-1] == 'ellipsis': qual[-1] = 'EllipsisType' + if _intypes(qual[-1]): module = 'types' #XXX: BuiltinFunctionType + qual = [module] + qual + return qual + + +#NOTE: 05/25/14 broke backward compatability: added 'alias' as 3rd argument +def _getimport(head, tail, alias='', verify=True, builtin=False): + """helper to build a likely import string from head and tail of namespace. + ('head','tail') are used in the following context: "from head import tail" + + If verify=True, then test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + If alias is provided, then rename the object on import. + """ + # special handling for a few common types + if tail in ['Ellipsis', 'NotImplemented'] and head in ['types']: + head = len.__module__ + elif tail in ['None'] and head in ['types']: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + # we don't need to import from builtins, so return '' +# elif tail in ['NoneType','int','float','long','complex']: return '' #XXX: ? + if head in ['builtins','__builtin__']: + # special cases (NoneType, Ellipsis, ...) #XXX: BuiltinFunctionType + if tail == 'ellipsis': tail = 'EllipsisType' + if _intypes(tail): head = 'types' + elif not builtin: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + else: pass # handle builtins below + # get likely import string + if not head: _str = "import %s" % tail + else: _str = "from %s import %s" % (head, tail) + _alias = " as %s\n" % alias if alias else "\n" + if alias == tail: _alias = "\n" + _str += _alias + # FIXME: fails on most decorators, currying, and such... + # (could look for magic __wrapped__ or __func__ attr) + # (could fix in 'namespace' to check obj for closure) + if verify and not head.startswith('dill.'):# weird behavior for dill + #print(_str) + try: exec(_str) #XXX: check if == obj? (name collision) + except ImportError: #XXX: better top-down or bottom-up recursion? + _head = head.rsplit(".",1)[0] #(or get all, then compare == obj?) + if not _head: raise + if _head != head: + _str = _getimport(_head, tail, alias, verify) + return _str + + +#XXX: rename builtin to force? vice versa? verify to force? (as in getsource) +#NOTE: 05/25/14 broke backward compatability: added 'alias' as 2nd argument +def getimport(obj, alias='', verify=True, builtin=False, enclosing=False): + """get the likely import string for the given object + + obj is the object to inspect + If verify=True, then test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + If enclosing=True, get the import for the outermost enclosing callable. + If alias is provided, then rename the object on import. + """ + if enclosing: + from .detect import outermost + _obj = outermost(obj) + obj = _obj if _obj else obj + # get the namespace + qual = _namespace(obj) + head = '.'.join(qual[:-1]) + tail = qual[-1] + # for named things... with a nice repr #XXX: move into _namespace? 
+ try: # look for '<...>' and be mindful it might be in lists, dicts, etc... + name = repr(obj).split('<',1)[1].split('>',1)[1] + name = None # we have a 'object'-style repr + except: # it's probably something 'importable' + if head in ['builtins','__builtin__']: + name = repr(obj) #XXX: catch [1,2], (1,2), set([1,2])... others? + else: + name = repr(obj).split('(')[0] + #if not repr(obj).startswith('<'): name = repr(obj).split('(')[0] + #else: name = None + if name: # try using name instead of tail + try: return _getimport(head, name, alias, verify, builtin) + except ImportError: pass + except SyntaxError: + if head in ['builtins','__builtin__']: + _alias = '%s = ' % alias if alias else '' + if alias == name: _alias = '' + return _alias+'%s\n' % name + else: pass + try: + #if type(obj) is type(abs): _builtin = builtin # BuiltinFunctionType + #else: _builtin = False + return _getimport(head, tail, alias, verify, builtin) + except ImportError: + raise # could do some checking against obj + except SyntaxError: + if head in ['builtins','__builtin__']: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + raise # could do some checking against obj + + +def _importable(obj, alias='', source=None, enclosing=False, force=True, \ + builtin=True, lstrip=True): + """get an import string (or the source code) for the given object + + This function will attempt to discover the name of the object, or the repr + of the object, or the source code for the object. To attempt to force + discovery of the source code, use source=True, to attempt to force the + use of an import, use source=False; otherwise an import will be sought + for objects not defined in __main__. The intent is to build a string + that can be imported from a python file. obj is the object to inspect. + If alias is provided, then rename the object with the given alias. + + If source=True, use these options: + If enclosing=True, then also return any enclosing code. + If force=True, catch (TypeError,IOError) and try to use import hooks. + If lstrip=True, ensure there is no indentation in the first line of code. + + If source=False, use these options: + If enclosing=True, get the import for the outermost enclosing callable. + If force=True, then don't test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + """ + if source is None: + source = True if isfrommain(obj) else False + if source: # first try to get the source + try: + return getsource(obj, alias, enclosing=enclosing, \ + force=force, lstrip=lstrip, builtin=builtin) + except: pass + try: + if not _isinstance(obj): + return getimport(obj, alias, enclosing=enclosing, \ + verify=(not force), builtin=builtin) + # first 'get the import', then 'get the instance' + _import = getimport(obj, enclosing=enclosing, \ + verify=(not force), builtin=builtin) + name = getname(obj, force=True) + if not name: + raise AttributeError("object has no atribute '__name__'") + _alias = "%s = " % alias if alias else "" + if alias == name: _alias = "" + return _import+_alias+"%s\n" % name + + except: pass + if not source: # try getsource, only if it hasn't been tried yet + try: + return getsource(obj, alias, enclosing=enclosing, \ + force=force, lstrip=lstrip, builtin=builtin) + except: pass + # get the name (of functions, lambdas, and classes) + # or hope that obj can be built from the __repr__ + #XXX: what to do about class instances and such? 
+ obj = getname(obj, force=force) + # we either have __repr__ or __name__ (or None) + if not obj or obj.startswith('<'): + raise AttributeError("object has no atribute '__name__'") + _alias = '%s = ' % alias if alias else '' + if alias == obj: _alias = '' + return _alias+'%s\n' % obj + #XXX: possible failsafe... (for example, for instances when source=False) + # "import dill; result = dill.loads(); # repr()" + +def _closuredimport(func, alias='', builtin=False): + """get import for closured objects; return a dict of 'name' and 'import'""" + import re + from .detect import freevars, outermost + free_vars = freevars(func) + func_vars = {} + # split into 'funcs' and 'non-funcs' + for name,obj in list(free_vars.items()): + if not isfunction(obj): continue + # get import for 'funcs' + fobj = free_vars.pop(name) + src = getsource(fobj) + if src.lstrip().startswith('@'): # we have a decorator + src = getimport(fobj, alias=alias, builtin=builtin) + else: # we have to "hack" a bit... and maybe be lucky + encl = outermost(func) + # pattern: 'func = enclosing(fobj' + pat = r'.*[\w\s]=\s*'+getname(encl)+r'\('+getname(fobj) + mod = getname(getmodule(encl)) + #HACK: get file containing 'outer' function; is func there? + lines,_ = findsource(encl) + candidate = [line for line in lines if getname(encl) in line and \ + re.match(pat, line)] + if not candidate: + mod = getname(getmodule(fobj)) + #HACK: get file containing 'inner' function; is func there? + lines,_ = findsource(fobj) + candidate = [line for line in lines \ + if getname(fobj) in line and re.match(pat, line)] + if not len(candidate): raise TypeError('import could not be found') + candidate = candidate[-1] + name = candidate.split('=',1)[0].split()[-1].strip() + src = _getimport(mod, name, alias=alias, builtin=builtin) + func_vars[name] = src + if not func_vars: + name = outermost(func) + mod = getname(getmodule(name)) + if not mod or name is func: # then it can be handled by getimport + name = getname(func, force=True) #XXX: better key? 
+ src = getimport(func, alias=alias, builtin=builtin) + else: + lines,_ = findsource(name) + # pattern: 'func = enclosing(' + candidate = [line for line in lines if getname(name) in line and \ + re.match(r'.*[\w\s]=\s*'+getname(name)+r'\(', line)] + if not len(candidate): raise TypeError('import could not be found') + candidate = candidate[-1] + name = candidate.split('=',1)[0].split()[-1].strip() + src = _getimport(mod, name, alias=alias, builtin=builtin) + func_vars[name] = src + return func_vars + +#XXX: should be able to use __qualname__ +def _closuredsource(func, alias=''): + """get source code for closured objects; return a dict of 'name' + and 'code blocks'""" + #FIXME: this entire function is a messy messy HACK + # - pollutes global namespace + # - fails if name of freevars are reused + # - can unnecessarily duplicate function code + from .detect import freevars + free_vars = freevars(func) + func_vars = {} + # split into 'funcs' and 'non-funcs' + for name,obj in list(free_vars.items()): + if not isfunction(obj): + # get source for 'non-funcs' + free_vars[name] = getsource(obj, force=True, alias=name) + continue + # get source for 'funcs' + fobj = free_vars.pop(name) + src = getsource(fobj, alias) # DO NOT include dependencies + # if source doesn't start with '@', use name as the alias + if not src.lstrip().startswith('@'): #FIXME: 'enclose' in dummy; + src = importable(fobj,alias=name)# wrong ref 'name' + org = getsource(func, alias, enclosing=False, lstrip=True) + src = (src, org) # undecorated first, then target + else: #NOTE: reproduces the code! + org = getsource(func, enclosing=True, lstrip=False) + src = importable(fobj, alias, source=True) # include dependencies + src = (org, src) # target first, then decorated + func_vars[name] = src + src = ''.join(free_vars.values()) + if not func_vars: #FIXME: 'enclose' in dummy; wrong ref 'name' + org = getsource(func, alias, force=True, enclosing=False, lstrip=True) + src = (src, org) # variables first, then target + else: + src = (src, None) # just variables (better '' instead of None?) + func_vars[None] = src + # FIXME: remove duplicates (however, order is important...) + return func_vars + +def importable(obj, alias='', source=None, builtin=True): + """get an importable string (i.e. source code or the import string) + for the given object, including any required objects from the enclosing + and global scope + + This function will attempt to discover the name of the object, or the repr + of the object, or the source code for the object. To attempt to force + discovery of the source code, use source=True, to attempt to force the + use of an import, use source=False; otherwise an import will be sought + for objects not defined in __main__. The intent is to build a string + that can be imported from a python file. + + obj is the object to inspect. If alias is provided, then rename the + object with the given alias. If builtin=True, then force an import for + builtins where possible. 
+ """ + #NOTE: we always 'force', and 'lstrip' as necessary + #NOTE: for 'enclosing', use importable(outermost(obj)) + if source is None: + source = True if isfrommain(obj) else False + elif builtin and isbuiltin(obj): + source = False + tried_source = tried_import = False + while True: + if not source: # we want an import + try: + if _isinstance(obj): # for instances, punt to _importable + return _importable(obj, alias, source=False, builtin=builtin) + src = _closuredimport(obj, alias=alias, builtin=builtin) + if len(src) == 0: + raise NotImplementedError('not implemented') + if len(src) > 1: + raise NotImplementedError('not implemented') + return list(src.values())[0] + except: + if tried_source: raise + tried_import = True + # we want the source + try: + src = _closuredsource(obj, alias=alias) + if len(src) == 0: + raise NotImplementedError('not implemented') + # groan... an inline code stitcher + def _code_stitcher(block): + "stitch together the strings in tuple 'block'" + if block[0] and block[-1]: block = '\n'.join(block) + elif block[0]: block = block[0] + elif block[-1]: block = block[-1] + else: block = '' + return block + # get free_vars first + _src = _code_stitcher(src.pop(None)) + _src = [_src] if _src else [] + # get func_vars + for xxx in src.values(): + xxx = _code_stitcher(xxx) + if xxx: _src.append(xxx) + # make a single source string + if not len(_src): + src = '' + elif len(_src) == 1: + src = _src[0] + else: + src = '\n'.join(_src) + # get source code of objects referred to by obj in global scope + from .detect import globalvars + obj = globalvars(obj) #XXX: don't worry about alias? recurse? etc? + obj = list(getsource(_obj,name,force=True) for (name,_obj) in obj.items() if not isbuiltin(_obj)) + obj = '\n'.join(obj) if obj else '' + # combine all referred-to source (global then enclosing) + if not obj: return src + if not src: return obj + return obj + src + except: + if tried_import: raise + tried_source = True + source = not source + # should never get here + return + + +# backward compatability +def getimportable(obj, alias='', byname=True, explicit=False): + return importable(obj,alias,source=(not byname),builtin=explicit) + #return outdent(_importable(obj,alias,source=(not byname),builtin=explicit)) +def likely_import(obj, passive=False, explicit=False): + return getimport(obj, verify=(not passive), builtin=explicit) +def _likely_import(first, last, passive=False, explicit=True): + return _getimport(first, last, verify=(not passive), builtin=explicit) +_get_name = getname +getblocks_from_history = getblocks + + + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/temp.py b/myenv/lib/python3.9/site-packages/dill/temp.py new file mode 100644 index 0000000..251a8e3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/temp.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Methods for serialized objects (or source code) stored in temporary files +and file-like objects. +""" +#XXX: better instead to have functions write to any given file-like object ? +#XXX: currently, all file-like objects are created by the function... 
+ +__all__ = ['dump_source', 'dump', 'dumpIO_source', 'dumpIO',\ + 'load_source', 'load', 'loadIO_source', 'loadIO',\ + 'capture'] + +import contextlib +from ._dill import PY3 + + +@contextlib.contextmanager +def capture(stream='stdout'): + """builds a context that temporarily replaces the given stream name + + >>> with capture('stdout') as out: + ... print ("foo!") + ... + >>> print (out.getvalue()) + foo! + + """ + import sys + if PY3: + from io import StringIO + else: + from StringIO import StringIO + orig = getattr(sys, stream) + setattr(sys, stream, StringIO()) + try: + yield getattr(sys, stream) + finally: + setattr(sys, stream, orig) + + +def b(x): # deal with b'foo' versus 'foo' + import codecs + return codecs.latin_1_encode(x)[0] + +def load_source(file, **kwds): + """load an object that was stored with dill.temp.dump_source + + file: filehandle + alias: string name of stored object + mode: mode to open the file, one of: {'r', 'rb'} + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, alias='_f') + >>> _f = dill.temp.load_source(pyfile) + >>> _f(4) + 16 + """ + alias = kwds.pop('alias', None) + mode = kwds.pop('mode', 'r') + fname = getattr(file, 'name', file) # fname=file.name or fname=file (if str) + source = open(fname, mode=mode, **kwds).read() + if not alias: + tag = source.strip().splitlines()[-1].split() + if tag[0] != '#NAME:': + stub = source.splitlines()[0] + raise IOError("unknown name for code: %s" % stub) + alias = tag[-1] + local = {} + exec(source, local) + _ = eval("%s" % alias, local) + return _ + +def dump_source(object, **kwds): + """write object source to a NamedTemporaryFile (instead of dill.dump) +Loads with "import" or "dill.temp.load_source". Returns the filehandle. + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, alias='_f') + >>> _f = dill.temp.load_source(pyfile) + >>> _f(4) + 16 + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, dir='.') + >>> modulename = os.path.basename(pyfile.name).split('.py')[0] + >>> exec('from %s import f as _f' % modulename) + >>> _f(4) + 16 + +Optional kwds: + If 'alias' is specified, the object will be renamed to the given string. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + +NOTE: Keep the return value for as long as you want your file to exist ! + """ #XXX: write a "load_source"? 
+ from .source import importable, getname + import tempfile + kwds.pop('suffix', '') # this is *always* '.py' + alias = kwds.pop('alias', '') #XXX: include an alias so a name is known + name = str(alias) or getname(object) + name = "\n#NAME: %s\n" % name + #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH + file = tempfile.NamedTemporaryFile(suffix='.py', **kwds) + file.write(b(''.join([importable(object, alias=alias),name]))) + file.flush() + return file + +def load(file, **kwds): + """load an object that was stored with dill.temp.dump + + file: filehandle + mode: mode to open the file, one of: {'r', 'rb'} + + >>> dumpfile = dill.temp.dump([1, 2, 3, 4, 5]) + >>> dill.temp.load(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + mode = kwds.pop('mode', 'rb') + name = getattr(file, 'name', file) # name=file.name or name=file (if str) + return pickle.load(open(name, mode=mode, **kwds)) + +def dump(object, **kwds): + """dill.dump of object to a NamedTemporaryFile. +Loads with "dill.temp.load". Returns the filehandle. + + >>> dumpfile = dill.temp.dump([1, 2, 3, 4, 5]) + >>> dill.temp.load(dumpfile) + [1, 2, 3, 4, 5] + +Optional kwds: + If 'suffix' is specified, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + +NOTE: Keep the return value for as long as you want your file to exist ! + """ + import dill as pickle + import tempfile + file = tempfile.NamedTemporaryFile(**kwds) + pickle.dump(object, file) + file.flush() + return file + +def loadIO(buffer, **kwds): + """load an object that was stored with dill.temp.dumpIO + + buffer: buffer object + + >>> dumpfile = dill.temp.dumpIO([1, 2, 3, 4, 5]) + >>> dill.temp.loadIO(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + if PY3: + from io import BytesIO as StringIO + else: + from StringIO import StringIO + value = getattr(buffer, 'getvalue', buffer) # value or buffer.getvalue + if value != buffer: value = value() # buffer.getvalue() + return pickle.load(StringIO(value)) + +def dumpIO(object, **kwds): + """dill.dump of object to a buffer. +Loads with "dill.temp.loadIO". Returns the buffer object. 
+ + >>> dumpfile = dill.temp.dumpIO([1, 2, 3, 4, 5]) + >>> dill.temp.loadIO(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + if PY3: + from io import BytesIO as StringIO + else: + from StringIO import StringIO + file = StringIO() + pickle.dump(object, file) + file.flush() + return file + +def loadIO_source(buffer, **kwds): + """load an object that was stored with dill.temp.dumpIO_source + + buffer: buffer object + alias: string name of stored object + + >>> f = lambda x:x**2 + >>> pyfile = dill.temp.dumpIO_source(f, alias='_f') + >>> _f = dill.temp.loadIO_source(pyfile) + >>> _f(4) + 16 + """ + alias = kwds.pop('alias', None) + source = getattr(buffer, 'getvalue', buffer) # source or buffer.getvalue + if source != buffer: source = source() # buffer.getvalue() + if PY3: source = source.decode() # buffer to string + if not alias: + tag = source.strip().splitlines()[-1].split() + if tag[0] != '#NAME:': + stub = source.splitlines()[0] + raise IOError("unknown name for code: %s" % stub) + alias = tag[-1] + local = {} + exec(source, local) + _ = eval("%s" % alias, local) + return _ + +def dumpIO_source(object, **kwds): + """write object source to a buffer (instead of dill.dump) +Loads by with dill.temp.loadIO_source. Returns the buffer object. + + >>> f = lambda x:x**2 + >>> pyfile = dill.temp.dumpIO_source(f, alias='_f') + >>> _f = dill.temp.loadIO_source(pyfile) + >>> _f(4) + 16 + +Optional kwds: + If 'alias' is specified, the object will be renamed to the given string. + """ + from .source import importable, getname + if PY3: + from io import BytesIO as StringIO + else: + from StringIO import StringIO + alias = kwds.pop('alias', '') #XXX: include an alias so a name is known + name = str(alias) or getname(object) + name = "\n#NAME: %s\n" % name + #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH + file = StringIO() + file.write(b(''.join([importable(object, alias=alias),name]))) + file.flush() + return file + + +del contextlib + + +# EOF diff --git a/myenv/lib/python3.9/site-packages/dill/tests/__init__.py b/myenv/lib/python3.9/site-packages/dill/tests/__init__.py new file mode 100644 index 0000000..cfd05c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/__init__.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +to run this test suite, first build and install `dill`. + + $ python setup.py build + $ python setup.py install + + +then run the tests with: + + $ python -m dill.tests + + +or, if `nose` is installed: + + $ nosetests + +""" diff --git a/myenv/lib/python3.9/site-packages/dill/tests/__main__.py b/myenv/lib/python3.9/site-packages/dill/tests/__main__.py new file mode 100644 index 0000000..e82993c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/__main__.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from __future__ import print_function +import glob +import os +try: + import pox + python = pox.which_python(version=True, fullpath=False) or 'python' +except ImportError: + python = 'python' +import subprocess as sp +from sys import platform +shell = platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') + + +if __name__ == '__main__': + + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if not p: + print('.', end='') + print('') diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_check.py b/myenv/lib/python3.9/site-packages/dill/tests/test_check.py new file mode 100644 index 0000000..134b8b0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_check.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill import check +import sys + +from dill.temp import capture +from dill._dill import PY3 + + +#FIXME: this doesn't catch output... it's from the internal call +def raise_check(func, **kwds): + try: + with capture('stdout') as out: + check(func, **kwds) + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert 'Traceback' not in out.getvalue() + finally: + out.close() + + +f = lambda x:x**2 + + +def test_simple(): + raise_check(f) + + +def test_recurse(): + raise_check(f, recurse=True) + + +def test_byref(): + raise_check(f, byref=True) + + +def test_protocol(): + raise_check(f, protocol=True) + + +def test_python(): + raise_check(f, python=None) + + +#TODO: test incompatible versions +#TODO: test dump failure +#TODO: test load failure + + +if __name__ == '__main__': + test_simple() + test_recurse() + test_byref() + test_protocol() + test_python() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_classdef.py b/myenv/lib/python3.9/site-packages/dill/tests/test_classdef.py new file mode 100644 index 0000000..e23bef0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_classdef.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
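
Editor's note: test_check.py above drives dill.check, which round-trips an object through a separate Python process. A minimal sketch of the calls it exercises (check pickles the object, spawns a child interpreter, and prints the reconstructed object there):

from dill import check

f = lambda x: x**2
check(f)                 # pickle f, spawn python, unpickle and repr it
check(f, recurse=True)   # same, but recurse into referenced globals
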
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +import sys +dill.settings['recurse'] = True + +# test classdefs +class _class: + def _method(self): + pass + def ok(self): + return True + +class _class2: + def __call__(self): + pass + def ok(self): + return True + +class _newclass(object): + def _method(self): + pass + def ok(self): + return True + +class _newclass2(object): + def __call__(self): + pass + def ok(self): + return True + +class _meta(type): + pass + +def __call__(self): + pass +def ok(self): + return True + +_mclass = _meta("_mclass", (object,), {"__call__": __call__, "ok": ok}) + +del __call__ +del ok + +o = _class() +oc = _class2() +n = _newclass() +nc = _newclass2() +m = _mclass() + +# test pickles for class instances +def test_class_instances(): + assert dill.pickles(o) + assert dill.pickles(oc) + assert dill.pickles(n) + assert dill.pickles(nc) + assert dill.pickles(m) + +def test_class_objects(): + clslist = [_class,_class2,_newclass,_newclass2,_mclass] + objlist = [o,oc,n,nc,m] + _clslist = [dill.dumps(obj) for obj in clslist] + _objlist = [dill.dumps(obj) for obj in objlist] + + for obj in clslist: + globals().pop(obj.__name__) + del clslist + for obj in ['o','oc','n','nc']: + globals().pop(obj) + del objlist + del obj + + for obj,cls in zip(_objlist,_clslist): + _cls = dill.loads(cls) + _obj = dill.loads(obj) + assert _obj.ok() + assert _cls.ok(_cls()) + if _cls.__name__ == "_mclass": + assert type(_cls).__name__ == "_meta" + +# test NoneType +def test_specialtypes(): + assert dill.pickles(type(None)) + assert dill.pickles(type(NotImplemented)) + assert dill.pickles(type(Ellipsis)) + +if hex(sys.hexversion) >= '0x20600f0': + from collections import namedtuple + Z = namedtuple("Z", ['a','b']) + Zi = Z(0,1) + X = namedtuple("Y", ['a','b']) + X.__name__ = "X" + if hex(sys.hexversion) >= '0x30300f0': + X.__qualname__ = "X" #XXX: name must 'match' or fails to pickle + Xi = X(0,1) + Bad = namedtuple("FakeName", ['a','b']) + Badi = Bad(0,1) +else: + Z = Zi = X = Xi = Bad = Badi = None + +# test namedtuple +def test_namedtuple(): + assert Z is dill.loads(dill.dumps(Z)) + assert Zi == dill.loads(dill.dumps(Zi)) + assert X is dill.loads(dill.dumps(X)) + assert Xi == dill.loads(dill.dumps(Xi)) + assert Bad is not dill.loads(dill.dumps(Bad)) + assert Bad._fields == dill.loads(dill.dumps(Bad))._fields + assert tuple(Badi) == tuple(dill.loads(dill.dumps(Badi))) + + class A: + class B(namedtuple("B", ["one", "two"])): + '''docstring''' + B.__module__ = 'testing' + + a = A() + assert dill.copy(a) + + assert dill.copy(A.B).__name__ == 'B' + if dill._dill.PY3: + assert dill.copy(A.B).__qualname__.endswith('..A.B') + assert dill.copy(A.B).__doc__ == 'docstring' + assert dill.copy(A.B).__module__ == 'testing' + +def test_dtype(): + try: + import numpy as np + + dti = np.dtype('int') + assert np.dtype == dill.loads(dill.dumps(np.dtype)) + assert dti == dill.loads(dill.dumps(dti)) + except ImportError: pass + + +def test_array_nested(): + try: + import numpy as np + + x = np.array([1]) + y = (x,) + dill.dumps(x) + assert y == dill.loads(dill.dumps(y)) + + except ImportError: pass + + +def test_array_subclass(): + try: + import numpy as np + + class TestArray(np.ndarray): + def __new__(cls, input_array, color): + obj = np.asarray(input_array).view(cls) + obj.color = color + return obj + def __array_finalize__(self, obj): + if obj is None: + return + if isinstance(obj, type(self)): + self.color = obj.color + def 
__getnewargs__(self): + return np.asarray(self), self.color + + a1 = TestArray(np.zeros(100), color='green') + if dill._dill.PY3 and not dill._dill.IS_PYPY: + assert dill.pickles(a1) + assert a1.__dict__ == dill.copy(a1).__dict__ + + a2 = a1[0:9] + if dill._dill.PY3 and not dill._dill.IS_PYPY: + assert dill.pickles(a2) + assert a2.__dict__ == dill.copy(a2).__dict__ + + class TestArray2(np.ndarray): + color = 'blue' + + a3 = TestArray2([1,2,3,4,5]) + a3.color = 'green' + if dill._dill.PY3 and not dill._dill.IS_PYPY: + assert dill.pickles(a3) + assert a3.__dict__ == dill.copy(a3).__dict__ + + except ImportError: pass + + +def test_method_decorator(): + class A(object): + @classmethod + def test(cls): + pass + + a = A() + + res = dill.dumps(a) + new_obj = dill.loads(res) + new_obj.__class__.test() + +# test slots +class Y(object): + __slots__ = ['y'] + def __init__(self, y): + self.y = y + +value = 123 +y = Y(value) + +def test_slots(): + assert dill.pickles(Y) + assert dill.pickles(y) + assert dill.pickles(Y.y) + assert dill.copy(y).y == value + +def test_metaclass(): + if dill._dill.PY3: + class metaclass_with_new(type): + def __new__(mcls, name, bases, ns, **kwds): + cls = super().__new__(mcls, name, bases, ns, **kwds) + assert mcls is not None + assert cls.method(mcls) + return cls + def method(cls, mcls): + return isinstance(cls, mcls) + + l = locals() + exec("""class subclass_with_new(metaclass=metaclass_with_new): + def __new__(cls): + self = super().__new__(cls) + return self""", None, l) + subclass_with_new = l['subclass_with_new'] + else: + class metaclass_with_new(type): + def __new__(mcls, name, bases, ns, **kwds): + cls = super(mcls, metaclass_with_new).__new__(mcls, name, bases, ns, **kwds) + assert mcls is not None + assert cls.method(mcls) + return cls + def method(cls, mcls): + return isinstance(cls, mcls) + + class subclass_with_new: + __metaclass__ = metaclass_with_new + def __new__(cls): + self = super(subclass_with_new, cls).__new__(cls) + return self + + assert dill.copy(subclass_with_new()) + + +if __name__ == '__main__': + test_class_instances() + test_class_objects() + test_specialtypes() + test_namedtuple() + test_dtype() + test_array_nested() + test_array_subclass() + test_method_decorator() + test_slots() + test_metaclass() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_detect.py b/myenv/lib/python3.9/site-packages/dill/tests/test_detect.py new file mode 100644 index 0000000..06339ac --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_detect.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
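
Editor's note: a condensed version of the namedtuple property that test_classdef.py asserts above: when the class's __name__ matches the name it is bound to, dill resolves the pickle back to the very same class object, while a mismatched name forces reconstruction.

import dill
from collections import namedtuple
dill.settings['recurse'] = True

Z = namedtuple("Z", ["a", "b"])
assert dill.loads(dill.dumps(Z)) is Z              # same class object
assert dill.loads(dill.dumps(Z(0, 1))) == Z(0, 1)  # instances round-trip too

Bad = namedtuple("FakeName", ["a", "b"])           # declared name != binding
assert dill.loads(dill.dumps(Bad)) is not Bad      # rebuilt, not referenced
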
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill.detect import baditems, badobjects, badtypes, errors, parent, at, globalvars +from dill import settings +from dill._dill import IS_PYPY, IS_PYPY2 +from pickle import PicklingError + +import inspect +import sys +import os + +def test_bad_things(): + f = inspect.currentframe() + assert baditems(f) == [f] + #assert baditems(globals()) == [f] #XXX + assert badobjects(f) is f + assert badtypes(f) == type(f) + assert type(errors(f)) is PicklingError if IS_PYPY2 else TypeError + d = badtypes(f, 1) + assert isinstance(d, dict) + assert list(badobjects(f, 1).keys()) == list(d.keys()) + assert list(errors(f, 1).keys()) == list(d.keys()) + s = set([(err.__class__.__name__,err.args[0]) for err in list(errors(f, 1).values())]) + a = dict(s) + if not os.environ.get('COVERAGE'): #XXX: travis-ci + assert len(s) is len(a) # TypeError (and possibly PicklingError) + n = 1 if IS_PYPY2 else 2 + assert len(a) is n if 'PicklingError' in a.keys() else n-1 + +def test_parent(): + x = [4,5,6,7] + listiter = iter(x) + obj = parent(listiter, list) + assert obj is x + + if IS_PYPY: assert parent(obj, int) is None + else: assert parent(obj, int) is x[-1] # python oddly? finds last int + assert at(id(at)) is at + +a, b, c = 1, 2, 3 + +def squared(x): + return a+x**2 + +def foo(x): + def bar(y): + return squared(x)+y + return bar + +class _class: + def _method(self): + pass + def ok(self): + return True + +def test_globals(): + def f(): + a + def g(): + b + def h(): + c + assert globalvars(f) == dict(a=1, b=2, c=3) + + res = globalvars(foo, recurse=True) + assert set(res) == set(['squared', 'a']) + res = globalvars(foo, recurse=False) + assert res == {} + zap = foo(2) + res = globalvars(zap, recurse=True) + assert set(res) == set(['squared', 'a']) + res = globalvars(zap, recurse=False) + assert set(res) == set(['squared']) + del zap + res = globalvars(squared) + assert set(res) == set(['a']) + # FIXME: should find referenced __builtins__ + #res = globalvars(_class, recurse=True) + #assert set(res) == set(['True']) + #res = globalvars(_class, recurse=False) + #assert res == {} + #res = globalvars(_class.ok, recurse=True) + #assert set(res) == set(['True']) + #res = globalvars(_class.ok, recurse=False) + #assert set(res) == set(['True']) + + +#98 dill ignores __getstate__ in interactive lambdas +bar = [0] + +class Foo(object): + def __init__(self): + pass + def __getstate__(self): + bar[0] = bar[0]+1 + return {} + def __setstate__(self, data): + pass + +f = Foo() + +def test_getstate(): + from dill import dumps, loads + dumps(f) + b = bar[0] + dumps(lambda: f, recurse=False) # doesn't call __getstate__ + assert bar[0] == b + dumps(lambda: f, recurse=True) # calls __getstate__ + assert bar[0] == b + 1 + +#97 serialize lambdas in test files +def test_deleted(): + global sin + from dill import dumps, loads + from math import sin, pi + + def sinc(x): + return sin(x)/x + + settings['recurse'] = True + _sinc = dumps(sinc) + sin = globals().pop('sin') + sin = 1 + del sin + sinc_ = loads(_sinc) # no NameError... 
pickling preserves 'sin' + res = sinc_(1) + from math import sin + assert sinc(1) == res + + +def test_lambdify(): + try: + from sympy import symbols, lambdify + except ImportError: + return + settings['recurse'] = True + x = symbols("x") + y = x**2 + f = lambdify([x], y) + z = min + d = globals() + globalvars(f, recurse=True, builtin=True) + assert z is min + assert d is globals() + + +if __name__ == '__main__': + test_bad_things() + test_parent() + test_globals() + test_getstate() + test_deleted() + test_lambdify() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_dictviews.py b/myenv/lib/python3.9/site-packages/dill/tests/test_dictviews.py new file mode 100644 index 0000000..3bbc5d6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_dictviews.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2021 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +from dill._dill import OLD310, MAPPING_PROXY_TRICK + +def test_dictviews(): + x = {'a': 1} + assert dill.copy(x.keys()) + assert dill.copy(x.values()) + assert dill.copy(x.items()) + +def test_dictproxy_trick(): + if not OLD310 and MAPPING_PROXY_TRICK: + x = {'a': 1} + all_views = (x.values(), x.items(), x.keys(), x) + seperate_views = dill.copy(all_views) + new_x = seperate_views[-1] + new_x['b'] = 2 + new_x['c'] = 1 + assert len(new_x) == 3 and len(x) == 1 + assert len(seperate_views[0]) == 3 and len(all_views[0]) == 1 + assert len(seperate_views[1]) == 3 and len(all_views[1]) == 1 + assert len(seperate_views[2]) == 3 and len(all_views[2]) == 1 + assert dict(all_views[1]) == x + assert dict(seperate_views[1]) == new_x + +if __name__ == '__main__': + test_dictviews() + test_dictproxy_trick() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_diff.py b/myenv/lib/python3.9/site-packages/dill/tests/test_diff.py new file mode 100644 index 0000000..3277682 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_diff.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
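
Editor's note: the heart of test_detect.py above is dill.detect.globalvars, which reports the global names a function depends on, optionally chasing indirect references with recurse=True. A small sketch, mirroring the test's own fixtures:

from dill.detect import globalvars

a = 1
def squared(x):
    return a + x**2

def foo(x):
    def bar(y):
        return squared(x) + y
    return bar

assert set(globalvars(squared)) == {"a"}
assert set(globalvars(foo, recurse=True)) == {"squared", "a"}
assert globalvars(foo, recurse=False) == {}   # no direct globals in foo itself
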
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill import __diff as diff + +import sys +IS_PYPY = not hasattr(sys, 'getrefcount') + +class A: + pass + +def test_diff(): + a = A() + b = A() + c = A() + a.a = b + b.a = c + diff.memorise(a) + assert not diff.has_changed(a) + c.a = 1 + assert diff.has_changed(a) + diff.memorise(c, force=True) + assert not diff.has_changed(a) + c.a = 2 + assert diff.has_changed(a) + changed = diff.whats_changed(a) + assert list(changed[0].keys()) == ["a"] + assert not changed[1] + + a2 = [] + b2 = [a2] + c2 = [b2] + diff.memorise(c2) + assert not diff.has_changed(c2) + a2.append(1) + assert diff.has_changed(c2) + changed = diff.whats_changed(c2) + assert changed[0] == {} + assert changed[1] + + a3 = {} + b3 = {1: a3} + c3 = {1: b3} + diff.memorise(c3) + assert not diff.has_changed(c3) + a3[1] = 1 + assert diff.has_changed(c3) + changed = diff.whats_changed(c3) + assert changed[0] == {} + assert changed[1] + + if not IS_PYPY: + try: + import abc + # make sure the "_abc_invaldation_counter" doesn't make test fail + diff.memorise(abc.ABCMeta, force=True) + assert not diff.has_changed(abc) + abc.ABCMeta.zzz = 1 + assert diff.has_changed(abc) + changed = diff.whats_changed(abc) + assert list(changed[0].keys()) == ["ABCMeta"] + assert not changed[1] + except ImportError: + pass + + ''' + import Queue + diff.memorise(Queue, force=True) + assert not diff.has_changed(Queue) + Queue.Queue.zzz = 1 + assert diff.has_changed(Queue) + changed = diff.whats_changed(Queue) + assert list(changed[0].keys()) == ["Queue"] + assert not changed[1] + + import math + diff.memorise(math, force=True) + assert not diff.has_changed(math) + math.zzz = 1 + assert diff.has_changed(math) + changed = diff.whats_changed(math) + assert list(changed[0].keys()) == ["zzz"] + assert not changed[1] + ''' + + a = A() + b = A() + c = A() + a.a = b + b.a = c + diff.memorise(a) + assert not diff.has_changed(a) + c.a = 1 + assert diff.has_changed(a) + diff.memorise(c, force=True) + assert not diff.has_changed(a) + del c.a + assert diff.has_changed(a) + changed = diff.whats_changed(a) + assert list(changed[0].keys()) == ["a"] + assert not changed[1] + + +if __name__ == '__main__': + test_diff() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_extendpickle.py b/myenv/lib/python3.9/site-packages/dill/tests/test_extendpickle.py new file mode 100644 index 0000000..49b6f15 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_extendpickle.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
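
Editor's note: test_diff.py above pokes the private dill.__diff module: memorise snapshots an object graph, and has_changed/whats_changed compare the live graph against that snapshot. A sketch of the same call pattern (this is a private API, so treat the names as internals that may move):

from dill import __diff as diff

class A: pass
a, b, c = A(), A(), A()
a.a = b
b.a = c                     # a -> b -> c
diff.memorise(a)            # snapshot the graph rooted at a
assert not diff.has_changed(a)

c.a = 1                     # mutate deep inside the graph
assert diff.has_changed(a)
changed = diff.whats_changed(a)
assert list(changed[0].keys()) == ["a"]   # the attribute leading to the change
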
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill as pickle +try: + from StringIO import StringIO +except ImportError: + from io import BytesIO as StringIO + + +def my_fn(x): + return x * 17 + + +def test_extend(): + obj = lambda : my_fn(34) + assert obj() == 578 + + obj_io = StringIO() + pickler = pickle.Pickler(obj_io) + pickler.dump(obj) + + obj_str = obj_io.getvalue() + + obj2_io = StringIO(obj_str) + unpickler = pickle.Unpickler(obj2_io) + obj2 = unpickler.load() + + assert obj2() == 578 + + +def test_isdill(): + obj_io = StringIO() + pickler = pickle.Pickler(obj_io) + assert pickle._dill.is_dill(pickler) is True + + pickler = pickle._dill.StockPickler(obj_io) + assert pickle._dill.is_dill(pickler) is False + + try: + import multiprocess as mp + pickler = mp.reduction.ForkingPickler(obj_io) + assert pickle._dill.is_dill(pickler, child=True) is True + assert pickle._dill.is_dill(pickler, child=False) is False + except: + pass + + +if __name__ == '__main__': + test_extend() + test_isdill() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_fglobals.py b/myenv/lib/python3.9/site-packages/dill/tests/test_fglobals.py new file mode 100644 index 0000000..eea3c3a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_fglobals.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2021-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + +def get_fun_with_strftime(): + def fun_with_strftime(): + import datetime + return datetime.datetime.strptime("04-01-1943", "%d-%m-%Y").strftime( + "%Y-%m-%d %H:%M:%S" + ) + return fun_with_strftime + + +def get_fun_with_strftime2(): + import datetime + return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + + +def test_doc_dill_issue_219(): + back_fn = dill.loads(dill.dumps(get_fun_with_strftime())) + assert back_fn() == "1943-01-04 00:00:00" + dupl = dill.loads(dill.dumps(get_fun_with_strftime2)) + assert dupl() == get_fun_with_strftime2() + + +def get_fun_with_internal_import(): + def fun_with_import(): + import re + return re.compile("$") + return fun_with_import + + +def test_method_with_internal_import_should_work(): + import re + back_fn = dill.loads(dill.dumps(get_fun_with_internal_import())) + import inspect + if hasattr(inspect, 'getclosurevars'): + vars = inspect.getclosurevars(back_fn) + assert vars.globals == {} + assert vars.nonlocals == {} + assert back_fn() == re.compile("$") + assert "__builtins__" in back_fn.__globals__ + + +if __name__ == "__main__": + import sys + if (sys.version_info[:3] != (3,10,0) or sys.version_info[3] != 'alpha'): + test_doc_dill_issue_219() + test_method_with_internal_import_should_work() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_file.py b/myenv/lib/python3.9/site-packages/dill/tests/test_file.py new file mode 100644 index 0000000..8118e3a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_file.py @@ -0,0 +1,502 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
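
Editor's note: test_extendpickle.py above shows that dill's Pickler/Unpickler are drop-in replacements for the stdlib stream API, and is_dill is the internal probe it uses to tell them apart. The round trip, compacted:

import dill as pickle
from io import BytesIO

obj = lambda: 17 * 34
buf = BytesIO()
pickle.Pickler(buf).dump(obj)        # stream-based pickling, dill types allowed

buf2 = BytesIO(buf.getvalue())
clone = pickle.Unpickler(buf2).load()
assert clone() == 578
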
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import os +import sys +import string +import random + +import dill + + +dill.settings['recurse'] = True + +fname = "_test_file.txt" +rand_chars = list(string.ascii_letters) + ["\n"] * 40 # bias newline + +if sys.hexversion < 0x03030000: + FileNotFoundError = IOError +buffer_error = ValueError("invalid buffer size") +dne_error = FileNotFoundError("[Errno 2] No such file or directory: '%s'" % fname) + + +def write_randomness(number=200): + f = open(fname, "w") + for i in range(number): + f.write(random.choice(rand_chars)) + f.close() + f = open(fname, "r") + contents = f.read() + f.close() + return contents + + +def trunc_file(): + open(fname, "w").close() + + +def throws(op, args, exc): + try: + op(*args) + except type(exc): + return sys.exc_info()[1].args == exc.args + else: + return False + + +def teardown_module(): + if os.path.exists(fname): + os.remove(fname) + + +def bench(strictio, fmode, skippypy): + import platform + if skippypy and platform.python_implementation() == 'PyPy': + # Skip for PyPy... + return + + # file exists, with same contents + # read + + write_randomness() + + f = open(fname, "r") + _f = dill.loads(dill.dumps(f, fmode=fmode))#, strictio=strictio)) + assert _f.mode == f.mode + assert _f.tell() == f.tell() + assert _f.read() == f.read() + f.close() + _f.close() + + # write + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + f2 = dill.loads(f_dumped) #FIXME: fails due to pypy/issues/1233 + # TypeError: expected py_object instance instead of str + f2mode = f2.mode + f2tell = f2.tell() + f2name = f2.name + f2.write(" world!") + f2.close() + + if fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + assert f2name == fname + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" 
+ assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # file exists, with different contents (smaller size) + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + _flen = 150 + _fstr = write_randomness(number=_flen) + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert f2.tell() == _flen + assert f2.read() == "" + f2.seek(0) + assert f2.read() == _fstr + assert f2.tell() == _flen # 150 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == _fstr + assert f2.tell() == _flen # 150 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # write + + write_randomness() + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + fstr = open(fname).read() + + f = open(fname, "w") + f.write("h") + _ftell = f.tell() + f.close() + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "h world!" + assert f2mode == f1mode + assert f2tell == _ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + fstr = open(fname).read() + + f = open(fname, "w") + f.write("h") + _ftell = f.tell() + f.close() + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + # position of writes cannot be changed on some OSs + assert open(fname).read() == "h world!" + assert f2tell == _ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "h world!" + assert f2tell == _ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" 
+ assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # file does not exist + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + # FIXME: this fails on systems where f2.tell() always returns 0 + # assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == "" + assert f2.tell() == 0 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == "" + assert f2.tell() == 0 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # write + + write_randomness() + + f = open(fname, "w+") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + ftell = f.tell() + f1mode = f.mode + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == " world!" + assert f2mode == 'w+' + assert f2tell == 0 + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + ftell = f.tell() + f1mode = f.mode + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == " world!" + assert f2tell == 0 + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # file exists, with different contents (larger size) + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + _flen = 250 + _fstr = write_randomness(number=_flen) + + # XXX: no safe_file: no way to be 'safe'? 
+ + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == _fstr[ftell:] + f2.seek(0) + assert f2.read() == _fstr + assert f2.tell() == _flen # 250 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == _fstr + assert f2.tell() == _flen # 250 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() # XXX: other alternatives? + + # write + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + + fstr = open(fname).read() + + f.write(" and goodbye!") + _ftell = f.tell() + f.close() + + # XXX: no safe_file: no way to be 'safe'? + + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!odbye!" + assert f2mode == f1mode + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + fstr = open(fname).read() + + f.write(" and goodbye!") + _ftell = f.tell() + f.close() + + # XXX: no safe_file: no way to be 'safe'? + + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello and goodbye! world!" + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "hello and goodbye! world!" + assert f2tell == _ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + +def test_nostrictio_handlefmode(): + bench(False, dill.HANDLE_FMODE, False) + teardown_module() + + +def test_nostrictio_filefmode(): + bench(False, dill.FILE_FMODE, False) + teardown_module() + + +def test_nostrictio_contentsfmode(): + bench(False, dill.CONTENTS_FMODE, True) + teardown_module() + + +#bench(True, dill.HANDLE_FMODE, False) +#bench(True, dill.FILE_FMODE, False) +#bench(True, dill.CONTENTS_FMODE, True) + + +if __name__ == '__main__': + test_nostrictio_handlefmode() + test_nostrictio_filefmode() + test_nostrictio_contentsfmode() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_functions.py b/myenv/lib/python3.9/site-packages/dill/tests/test_functions.py new file mode 100644 index 0000000..ec9670e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_functions.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2019-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
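
Editor's note: the bench() matrix above exercises dill's three file-pickling strategies: HANDLE_FMODE (just the handle, position reset), CONTENTS_FMODE (handle plus cursor position), and FILE_FMODE (a full snapshot of the contents). A minimal sketch of FILE_FMODE, the only mode that survives deletion of the underlying file; the file name here is a placeholder:

import os
import dill

with open("_demo.txt", "w") as out:
    out.write("hello")

f = open("_demo.txt", "r")
payload = dill.dumps(f, fmode=dill.FILE_FMODE)  # snapshot name, mode, position, contents
f.close()
os.remove("_demo.txt")                          # the original file is gone...

f2 = dill.loads(payload)                        # ...but FILE_FMODE recreates it
assert f2.read() == "hello"
f2.close()
os.remove("_demo.txt")
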
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import functools +import dill +import sys +dill.settings['recurse'] = True + + +def is_py3(): + return hex(sys.hexversion) >= '0x30000f0' + + +def function_a(a): + return a + + +def function_b(b, b1): + return b + b1 + + +def function_c(c, c1=1): + return c + c1 + + +def function_d(d, d1, d2=1): + """doc string""" + return d + d1 + d2 + +function_d.__module__ = 'a module' + + +if is_py3(): + exec(''' +def function_e(e, *e1, e2=1, e3=2): + return e + sum(e1) + e2 + e3''') + + globalvar = 0 + + @functools.lru_cache(None) + def function_with_cache(x): + global globalvar + globalvar += x + return globalvar + + +def function_with_unassigned_variable(): + if False: + value = None + return (lambda: value) + + +def test_functions(): + dumped_func_a = dill.dumps(function_a) + assert dill.loads(dumped_func_a)(0) == 0 + + dumped_func_b = dill.dumps(function_b) + assert dill.loads(dumped_func_b)(1,2) == 3 + + dumped_func_c = dill.dumps(function_c) + assert dill.loads(dumped_func_c)(1) == 2 + assert dill.loads(dumped_func_c)(1, 2) == 3 + + dumped_func_d = dill.dumps(function_d) + assert dill.loads(dumped_func_d).__doc__ == function_d.__doc__ + assert dill.loads(dumped_func_d).__module__ == function_d.__module__ + assert dill.loads(dumped_func_d)(1, 2) == 4 + assert dill.loads(dumped_func_d)(1, 2, 3) == 6 + assert dill.loads(dumped_func_d)(1, 2, d2=3) == 6 + + if is_py3(): + function_with_cache(1) + globalvar = 0 + dumped_func_cache = dill.dumps(function_with_cache) + assert function_with_cache(2) == 3 + assert function_with_cache(1) == 1 + assert function_with_cache(3) == 6 + assert function_with_cache(2) == 3 + + empty_cell = function_with_unassigned_variable() + cell_copy = dill.loads(dill.dumps(empty_cell)) + assert 'empty' in str(cell_copy.__closure__[0]) + try: + cell_copy() + except: + # this is good + pass + else: + raise AssertionError('cell_copy() did not read an empty cell') + + if is_py3(): + exec(''' +dumped_func_e = dill.dumps(function_e) +assert dill.loads(dumped_func_e)(1, 2) == 6 +assert dill.loads(dumped_func_e)(1, 2, 3) == 9 +assert dill.loads(dumped_func_e)(1, 2, e2=3) == 8 +assert dill.loads(dumped_func_e)(1, 2, e2=3, e3=4) == 10 +assert dill.loads(dumped_func_e)(1, 2, 3, e2=4) == 12 +assert dill.loads(dumped_func_e)(1, 2, 3, e2=4, e3=5) == 15''') + +if __name__ == '__main__': + test_functions() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_functors.py b/myenv/lib/python3.9/site-packages/dill/tests/test_functors.py new file mode 100644 index 0000000..952a546 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_functors.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
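
Editor's note: test_functions.py above checks that dill serialises plain functions together with their defaults, docstring, and __module__. The same property in miniature:

import dill

def add(a, b=2):
    """add with a default"""
    return a + b

clone = dill.loads(dill.dumps(add))
assert clone(1) == 3 and clone(1, 5) == 6
assert clone.__doc__ == "add with a default"
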
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import functools +import dill +dill.settings['recurse'] = True + + +def f(a, b, c): # without keywords + pass + + +def g(a, b, c=2): # with keywords + pass + + +def h(a=1, b=2, c=3): # without args + pass + + +def test_functools(): + fp = functools.partial(f, 1, 2) + gp = functools.partial(g, 1, c=2) + hp = functools.partial(h, 1, c=2) + bp = functools.partial(int, base=2) + + assert dill.pickles(fp, safe=True) + assert dill.pickles(gp, safe=True) + assert dill.pickles(hp, safe=True) + assert dill.pickles(bp, safe=True) + + +if __name__ == '__main__': + test_functools() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_mixins.py b/myenv/lib/python3.9/site-packages/dill/tests/test_mixins.py new file mode 100644 index 0000000..a2296c2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_mixins.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + + +def wtf(x,y,z): + def zzz(): + return x + def yyy(): + return y + def xxx(): + return z + return zzz,yyy + + +def quad(a=1, b=1, c=0): + inverted = [False] + def invert(): + inverted[0] = not inverted[0] + def dec(f): + def func(*args, **kwds): + x = f(*args, **kwds) + if inverted[0]: x = -x + return a*x**2 + b*x + c + func.__wrapped__ = f + func.invert = invert + func.inverted = inverted + return func + return dec + + +@quad(a=0,b=2) +def double_add(*args): + return sum(args) + + +fx = sum([1,2,3]) + + +### to make it interesting... +def quad_factory(a=1,b=1,c=0): + def dec(f): + def func(*args,**kwds): + fx = f(*args,**kwds) + return a*fx**2 + b*fx + c + return func + return dec + + +@quad_factory(a=0,b=4,c=0) +def quadish(x): + return x+1 + + +quadratic = quad_factory() + + +def doubler(f): + def inner(*args, **kwds): + fx = f(*args, **kwds) + return 2*fx + return inner + + +@doubler +def quadruple(x): + return 2*x + + +def test_mixins(): + # test mixins + assert double_add(1,2,3) == 2*fx + double_add.invert() + assert double_add(1,2,3) == -2*fx + + _d = dill.copy(double_add) + assert _d(1,2,3) == -2*fx + #_d.invert() #FIXME: fails seemingly randomly + #assert _d(1,2,3) == 2*fx + + assert _d.__wrapped__(1,2,3) == fx + + # XXX: issue or feature? 
in python3.4, inverted is linked through copy + if not double_add.inverted[0]: + double_add.invert() + + # test some stuff from source and pointers + ds = dill.source + dd = dill.detect + assert ds.getsource(dd.freevars(quadish)['f']) == '@quad_factory(a=0,b=4,c=0)\ndef quadish(x):\n return x+1\n' + assert ds.getsource(dd.freevars(quadruple)['f']) == '@doubler\ndef quadruple(x):\n return 2*x\n' + assert ds.importable(quadish, source=False) == 'from %s import quadish\n' % __name__ + assert ds.importable(quadruple, source=False) == 'from %s import quadruple\n' % __name__ + assert ds.importable(quadratic, source=False) == 'from %s import quadratic\n' % __name__ + assert ds.importable(double_add, source=False) == 'from %s import double_add\n' % __name__ + assert ds.importable(quadruple, source=True) == 'def doubler(f):\n def inner(*args, **kwds):\n fx = f(*args, **kwds)\n return 2*fx\n return inner\n\n@doubler\ndef quadruple(x):\n return 2*x\n' + #***** #FIXME: this needs work + result = ds.importable(quadish, source=True) + a,b,c,_,result = result.split('\n',4) + assert result == 'def quad_factory(a=1,b=1,c=0):\n def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n return dec\n\n@quad_factory(a=0,b=4,c=0)\ndef quadish(x):\n return x+1\n' + assert set([a,b,c]) == set(['a = 0', 'c = 0', 'b = 4']) + result = ds.importable(quadratic, source=True) + a,b,c,result = result.split('\n',3) + assert result == '\ndef dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n' + assert set([a,b,c]) == set(['a = 1', 'c = 0', 'b = 1']) + result = ds.importable(double_add, source=True) + a,b,c,d,_,result = result.split('\n',5) + assert result == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n\n@quad(a=0,b=2)\ndef double_add(*args):\n return sum(args)\n' + assert set([a,b,c,d]) == set(['a = 0', 'c = 0', 'b = 2', 'inverted = [True]']) + #***** + + +if __name__ == '__main__': + test_mixins() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_module.py b/myenv/lib/python3.9/site-packages/dill/tests/test_module.py new file mode 100644 index 0000000..5c5e000 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_module.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
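
Editor's note: the mixin assertions above lean on dill.source: getsource recovers the text of a live function, and importable renders either an import line or the source needed to rebuild it. A sketch; run it as a script file so there is a source file for inspection to read:

import dill.source as ds

def squared(x):
    return x**2

print(ds.getsource(squared))                 # the def block, as text
print(ds.importable(squared, source=False))  # e.g. 'from __main__ import squared'
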
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys +import dill +import test_mixins as module +try: from importlib import reload +except ImportError: pass +dill.settings['recurse'] = True + +cached = (module.__cached__ if hasattr(module, "__cached__") + else module.__file__.split(".", 1)[0] + ".pyc") + +module.a = 1234 + +pik_mod = dill.dumps(module) + +module.a = 0 + +# remove module +del sys.modules[module.__name__] +del module + +module = dill.loads(pik_mod) +def test_attributes(): + #assert hasattr(module, "a") and module.a == 1234 #FIXME: -m dill.tests + assert module.double_add(1, 2, 3) == 2 * module.fx + +# Restart, and test use_diff + +reload(module) + +try: + dill.use_diff() + + module.a = 1234 + + pik_mod = dill.dumps(module) + + module.a = 0 + + # remove module + del sys.modules[module.__name__] + del module + + module = dill.loads(pik_mod) + def test_diff_attributes(): + assert hasattr(module, "a") and module.a == 1234 + assert module.double_add(1, 2, 3) == 2 * module.fx + +except AttributeError: + def test_diff_attributes(): + pass + +# clean up +import os +if os.path.exists(cached): + os.remove(cached) +pycache = os.path.join(os.path.dirname(module.__file__), "__pycache__") +if os.path.exists(pycache) and not os.listdir(pycache): + os.removedirs(pycache) + + +# test when module is None +import math + +def get_lambda(str, **kwarg): + return eval(str, kwarg, None) + +obj = get_lambda('lambda x: math.exp(x)', math=math) + +def test_module_is_none(): + assert obj.__module__ is None + assert dill.copy(obj)(3) == obj(3) + + +if __name__ == '__main__': + test_attributes() + test_diff_attributes() + test_module_is_none() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_moduledict.py b/myenv/lib/python3.9/site-packages/dill/tests/test_moduledict.py new file mode 100644 index 0000000..1d4d77a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_moduledict.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
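
Editor's note: test_module.py above pickles an entire module object, removes it from sys.modules, and restores it from the pickle. The same round trip with a stdlib module:

import sys
import dill
import json

pik = dill.dumps(json)
del sys.modules['json']    # drop the module, as the test does
json = dill.loads(pik)     # loading re-imports / rebuilds it
assert json.loads('{"a": 1}') == {"a": 1}
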
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + +def f(func): + def w(*args): + return f(*args) + return w + +@f +def f2(): pass + +# check when __main__ and on import +def test_decorated(): + assert dill.pickles(f2) + + +import doctest +import logging +logging.basicConfig(level=logging.DEBUG) + +class SomeUnreferencedUnpicklableClass(object): + def __reduce__(self): + raise Exception + +unpicklable = SomeUnreferencedUnpicklableClass() + +# This works fine outside of Doctest: +def test_normal(): + serialized = dill.dumps(lambda x: x) + +# should not try to pickle unpicklable object in __globals__ +def tests(): + """ + >>> serialized = dill.dumps(lambda x: x) + """ + return + +#print("\n\nRunning Doctest:") +def test_doctest(): + doctest.testmod() + + +if __name__ == '__main__': + test_decorated() + test_normal() + test_doctest() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_nested.py b/myenv/lib/python3.9/site-packages/dill/tests/test_nested.py new file mode 100644 index 0000000..144f54b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_nested.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test dill's ability to handle nested functions +""" + +import os +import math + +import dill as pickle +pickle.settings['recurse'] = True + + +# the nested function: pickle should fail here, but dill is ok. +def adder(augend): + zero = [0] + + def inner(addend): + return addend + augend + zero[0] + return inner + + +# rewrite the nested function using a class: standard pickle should work here. +class cadder(object): + def __init__(self, augend): + self.augend = augend + self.zero = [0] + + def __call__(self, addend): + return addend + self.augend + self.zero[0] + + +# rewrite again, but as an old-style class +class c2adder: + def __init__(self, augend): + self.augend = augend + self.zero = [0] + + def __call__(self, addend): + return addend + self.augend + self.zero[0] + + +# some basic class stuff +class basic(object): + pass + + +class basic2: + pass + + +x = 5 +y = 1 + + +def test_basic(): + a = [0, 1, 2] + pa = pickle.dumps(a) + pmath = pickle.dumps(math) #XXX: FAILS in pickle + pmap = pickle.dumps(map) + # ... 
+ la = pickle.loads(pa) + lmath = pickle.loads(pmath) + lmap = pickle.loads(pmap) + assert list(map(math.sin, a)) == list(lmap(lmath.sin, la)) + + +def test_basic_class(): + pbasic2 = pickle.dumps(basic2) + _pbasic2 = pickle.loads(pbasic2)() + pbasic = pickle.dumps(basic) + _pbasic = pickle.loads(pbasic)() + + +def test_c2adder(): + pc2adder = pickle.dumps(c2adder) + pc2add5 = pickle.loads(pc2adder)(x) + assert pc2add5(y) == x+y + + +def test_pickled_cadder(): + pcadder = pickle.dumps(cadder) + pcadd5 = pickle.loads(pcadder)(x) + assert pcadd5(y) == x+y + + +def test_raw_adder_and_inner(): + add5 = adder(x) + assert add5(y) == x+y + + +def test_pickled_adder(): + padder = pickle.dumps(adder) + padd5 = pickle.loads(padder)(x) + assert padd5(y) == x+y + + +def test_pickled_inner(): + add5 = adder(x) + pinner = pickle.dumps(add5) #XXX: FAILS in pickle + p5add = pickle.loads(pinner) + assert p5add(y) == x+y + + +def test_moduledict_where_not_main(): + try: + from . import test_moduledict + except: + import test_moduledict + name = 'test_moduledict.py' + if os.path.exists(name) and os.path.exists(name+'c'): + os.remove(name+'c') + + if os.path.exists(name) and hasattr(test_moduledict, "__cached__") \ + and os.path.exists(test_moduledict.__cached__): + os.remove(getattr(test_moduledict, "__cached__")) + + if os.path.exists("__pycache__") and not os.listdir("__pycache__"): + os.removedirs("__pycache__") + + +if __name__ == '__main__': + test_basic() + test_basic_class() + test_c2adder() + test_pickled_cadder() + test_raw_adder_and_inner() + test_pickled_adder() + test_pickled_inner() + test_moduledict_where_not_main() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_objects.py b/myenv/lib/python3.9/site-packages/dill/tests/test_objects.py new file mode 100644 index 0000000..985041b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_objects.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
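
Editor's note: test_nested.py above is the classic dill selling point: closures such as adder/inner fail under stdlib pickle but round-trip under dill once recurse is enabled. Reduced to its core:

import dill
dill.settings['recurse'] = True

def adder(augend):
    def inner(addend):
        return addend + augend
    return inner

add5 = adder(5)
clone = dill.loads(dill.dumps(add5))   # stdlib pickle raises on add5
assert clone(1) == 6
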
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +demonstrate dill's ability to pickle different python types +test pickling of all Python Standard Library objects (currently: CH 1-14 @ 2.7) +""" + +import dill as pickle +pickle.settings['recurse'] = True +#pickle.detect.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types, objects, extend +load_types(pickleable=True,unpickleable=False) + +# uncomment the next two lines to test cloudpickle +#extend(False) +#import cloudpickle as pickle + +# helper objects +class _class: + def _method(self): + pass + +# objects that *fail* if imported +special = {} +special['LambdaType'] = _lambda = lambda x: lambda y: x +special['MethodType'] = _method = _class()._method +special['UnboundMethodType'] = _class._method +objects.update(special) + +def pickles(name, exact=False): + """quick check if object pickles with dill""" + obj = objects[name] + try: + pik = pickle.loads(pickle.dumps(obj)) + if exact: + try: + assert pik == obj + except AssertionError: + assert type(obj) == type(pik) + print ("weak: %s %s" % (name, type(obj))) + else: + assert type(obj) == type(pik) + except Exception: + print ("fails: %s %s" % (name, type(obj))) + + +def test_objects(): + for member in objects.keys(): + #pickles(member, exact=True) + pickles(member, exact=False) + + +if __name__ == '__main__': + test_objects() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_properties.py b/myenv/lib/python3.9/site-packages/dill/tests/test_properties.py new file mode 100644 index 0000000..36589d5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_properties.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys + +import dill +dill.settings['recurse'] = True + + +class Foo(object): + def __init__(self): + self._data = 1 + + def _get_data(self): + return self._data + + def _set_data(self, x): + self._data = x + + data = property(_get_data, _set_data) + + +def test_data_not_none(): + FooS = dill.copy(Foo) + assert FooS.data.fget is not None + assert FooS.data.fset is not None + assert FooS.data.fdel is None + + +def test_data_unchanged(): + FooS = dill.copy(Foo) + try: + res = FooS().data + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert res == 1 + + +def test_data_changed(): + FooS = dill.copy(Foo) + try: + f = FooS() + f.data = 1024 + res = f.data + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert res == 1024 + + +if __name__ == '__main__': + test_data_not_none() + test_data_unchanged() + test_data_changed() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_recursive.py b/myenv/lib/python3.9/site-packages/dill/tests/test_recursive.py new file mode 100644 index 0000000..ee71a68 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_recursive.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2019-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
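
Editor's note: the sweep above relies on dill.pickles, a convenience probe that round-trips an object and reports whether the copy came back as expected. It is usable on its own as a quick go/no-go check:

import dill
import inspect

assert dill.pickles(lambda x: x**2)              # lambdas: fine under dill
assert dill.pickles(type(None))                  # NoneType itself: fine
assert not dill.pickles(inspect.currentframe())  # frames: not picklable (CPython)
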
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +from dill._dill import PY3 +from functools import partial +import warnings + + +def copy(obj, byref=False, recurse=False): + if byref: + try: + return dill.copy(obj, byref=byref, recurse=recurse) + except: + pass + else: + raise AssertionError('Copy of %s with byref=True should have given a warning!' % (obj,)) + + warnings.simplefilter('ignore') + val = dill.copy(obj, byref=byref, recurse=recurse) + warnings.simplefilter('error') + return val + else: + return dill.copy(obj, byref=byref, recurse=recurse) + + +class obj1(object): + def __init__(self): + super(obj1, self).__init__() + +class obj2(object): + def __init__(self): + super(obj2, self).__init__() + +class obj3(object): + super_ = super + def __init__(self): + obj3.super_(obj3, self).__init__() + + +def test_super(): + assert copy(obj1(), byref=True) + assert copy(obj1(), byref=True, recurse=True) + assert copy(obj1(), recurse=True) + assert copy(obj1()) + + assert copy(obj2(), byref=True) + assert copy(obj2(), byref=True, recurse=True) + assert copy(obj2(), recurse=True) + assert copy(obj2()) + + assert copy(obj3(), byref=True) + assert copy(obj3(), byref=True, recurse=True) + assert copy(obj3(), recurse=True) + assert copy(obj3()) + + +def get_trigger(model): + pass + +class Machine(object): + def __init__(self): + self.child = Model() + self.trigger = partial(get_trigger, self) + self.child.trigger = partial(get_trigger, self.child) + +class Model(object): + pass + + + +def test_partial(): + assert copy(Machine(), byref=True) + assert copy(Machine(), byref=True, recurse=True) + assert copy(Machine(), recurse=True) + assert copy(Machine()) + + +class Machine2(object): + def __init__(self): + self.go = partial(self.member, self) + def member(self, model): + pass + + +class SubMachine(Machine2): + def __init__(self): + super(SubMachine, self).__init__() + + +def test_partials(): + assert copy(SubMachine(), byref=True) + assert copy(SubMachine(), byref=True, recurse=True) + assert copy(SubMachine(), recurse=True) + assert copy(SubMachine()) + + +class obj4(object): + def __init__(self): + super(obj4, self).__init__() + a = self + class obj5(object): + def __init__(self): + super(obj5, self).__init__() + self.a = a + self.b = obj5() + + +def test_circular_reference(): + assert copy(obj4()) + obj4_copy = dill.loads(dill.dumps(obj4())) + if PY3: + assert type(obj4_copy) is type(obj4_copy).__init__.__closure__[0].cell_contents + assert type(obj4_copy.b) is type(obj4_copy.b).__init__.__closure__[0].cell_contents + + +def f(): + def g(): + return g + return g + + +def test_function_cells(): + assert copy(f()) + + +def fib(n): + assert n >= 0 + if n <= 1: + return n + else: + return fib(n-1) + fib(n-2) + + +def test_recursive_function(): + global fib + fib2 = copy(fib, recurse=True) + fib3 = copy(fib) + fib4 = fib + del fib + assert fib2(5) == 5 + for _fib in (fib3, fib4): + try: + _fib(5) + except: + # This is expected to fail because fib no longer exists + pass + else: + raise AssertionError("Function fib shouldn't have been found") + fib = fib4 + + +def collection_function_recursion(): + d = {} + def g(): + return d + d['g'] = g + return g + + +def test_collection_function_recursion(): + g = copy(collection_function_recursion()) + assert g()['g'] is g + + +if __name__ == '__main__': + with warnings.catch_warnings(): + warnings.simplefilter('error') + test_super() + test_partial() + test_partials() + 
test_circular_reference() + test_function_cells() + test_recursive_function() + test_collection_function_recursion() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_restricted.py b/myenv/lib/python3.9/site-packages/dill/tests/test_restricted.py new file mode 100644 index 0000000..c9d6e48 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_restricted.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# +# Author: Kirill Makhonin (@kirillmakhonin) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill + +class RestrictedType: + def __bool__(*args, **kwargs): + raise Exception('Restricted function') + + __eq__ = __lt__ = __le__ = __ne__ = __gt__ = __ge__ = __hash__ = __bool__ + +glob_obj = RestrictedType() + +def restricted_func(): + a = glob_obj + +def test_function_with_restricted_object(): + deserialized = dill.loads(dill.dumps(restricted_func, recurse=True)) + + +if __name__ == '__main__': + test_function_with_restricted_object() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_selected.py b/myenv/lib/python3.9/site-packages/dill/tests/test_selected.py new file mode 100644 index 0000000..bd79083 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_selected.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +testing some selected object types +""" + +import dill +dill.settings['recurse'] = True + +verbose = False + +def test_dict_contents(): + c = type.__dict__ + for i,j in c.items(): + #try: + ok = dill.pickles(j) + #except: + # print ("FAIL: %s with %s" % (i, dill.detect.errors(j))) + if verbose: print ("%s: %s, %s" % (ok, type(j), j)) + assert ok + if verbose: print ("") + +def _g(x): yield x; + +def _f(): + try: raise + except: + from sys import exc_info + e, er, tb = exc_info() + return er, tb + +class _d(object): + def _method(self): + pass + +from dill import objects +from dill import load_types +load_types(pickleable=True,unpickleable=False) +_newclass = objects['ClassObjectType'] +del objects + +# getset_descriptor for new-style classes (fails on '_method', if not __main__) +def test_class_descriptors(): + d = _d.__dict__ + for i in d.values(): + ok = dill.pickles(i) + if verbose: print ("%s: %s, %s" % (ok, type(i), i)) + assert ok + if verbose: print ("") + od = _newclass.__dict__ + for i in od.values(): + ok = dill.pickles(i) + if verbose: print ("%s: %s, %s" % (ok, type(i), i)) + assert ok + if verbose: print ("") + +# (__main__) class instance for new-style classes +def test_class(): + o = _d() + oo = _newclass() + ok = dill.pickles(o) + if verbose: print ("%s: %s, %s" % (ok, type(o), o)) + assert ok + ok = dill.pickles(oo) + if verbose: print ("%s: %s, %s" % (ok, type(oo), oo)) + assert ok + if verbose: print ("") + +# frames, generators, and tracebacks (all depend on frame) +def test_frame_related(): + g = _g(1) + f = g.gi_frame + e,t = _f() + _is = lambda ok: not ok if dill._dill.IS_PYPY2 else ok + ok = dill.pickles(f) + if verbose: print ("%s: %s, %s" % (ok, type(f), f)) + assert not 
ok + ok = dill.pickles(g) + if verbose: print ("%s: %s, %s" % (ok, type(g), g)) + assert _is(not ok) #XXX: dill fails + ok = dill.pickles(t) + if verbose: print ("%s: %s, %s" % (ok, type(t), t)) + assert not ok #XXX: dill fails + ok = dill.pickles(e) + if verbose: print ("%s: %s, %s" % (ok, type(e), e)) + assert ok + if verbose: print ("") + + +if __name__ == '__main__': + test_frame_related() + test_dict_contents() + test_class() + test_class_descriptors() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_session.py b/myenv/lib/python3.9/site-packages/dill/tests/test_session.py new file mode 100644 index 0000000..fd71ea0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_session.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python + +# Author: Leonardo Gama (@leogama) +# Copyright (c) 2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from __future__ import print_function +import atexit, dill, os, sys, __main__ + +session_file = os.path.join(os.path.dirname(__file__), 'session-byref-%s.pkl') + +def test_modules(main, byref): + main_dict = main.__dict__ + + try: + for obj in ('json', 'url', 'local_mod', 'sax', 'dom'): + assert main_dict[obj].__name__ in sys.modules + + for obj in ('Calendar', 'isleap'): + assert main_dict[obj] is sys.modules['calendar'].__dict__[obj] + assert main.day_name.__module__ == 'calendar' + if byref: + assert main.day_name is sys.modules['calendar'].__dict__['day_name'] + + assert main.complex_log is sys.modules['cmath'].__dict__['log'] + + except AssertionError: + import traceback + error_line = traceback.format_exc().splitlines()[-2].replace('[obj]', '['+repr(obj)+']') + print("Error while testing (byref=%s):" % byref, error_line, sep="\n", file=sys.stderr) + raise + + +# Test session loading in a fresh interpreter session. +if __name__ == '__main__' and len(sys.argv) >= 3 and sys.argv[1] == '--child': + byref = sys.argv[2] == 'True' + dill.load_session(session_file % byref) + test_modules(__main__, byref) + sys.exit() + +del test_modules + + +def _clean_up_cache(module): + cached = module.__file__.split('.', 1)[0] + '.pyc' + cached = module.__cached__ if hasattr(module, '__cached__') else cached + pycache = os.path.join(os.path.dirname(module.__file__), '__pycache__') + for remove, file in [(os.remove, cached), (os.removedirs, pycache)]: + try: + remove(file) + except OSError: + pass + + +# To clean up namespace before loading the session. +original_modules = set(sys.modules.keys()) - \ + set(['json', 'urllib', 'xml.sax', 'xml.dom.minidom', 'calendar', 'cmath']) +original_objects = set(__main__.__dict__.keys()) +original_objects.add('original_objects') + + +# Create various kinds of objects to test different internal logics. + +## Modules. +import json # top-level module +import urllib as url # top-level module under alias +from xml import sax # submodule +import xml.dom.minidom as dom # submodule under alias +import test_dictviews as local_mod # non-builtin top-level module +atexit.register(_clean_up_cache, local_mod) + +## Imported objects. +from calendar import Calendar, isleap, day_name # class, function, other object +from cmath import log as complex_log # imported with alias + +## Local objects. 
+x = 17 +empty = None +names = ['Alice', 'Bob', 'Carol'] +def squared(x): return x**2 +cubed = lambda x: x**3 +class Person: + def __init__(self, name, age): + self.name = name + self.age = age +person = Person(names[0], x) +class CalendarSubclass(Calendar): + def weekdays(self): + return [day_name[i] for i in self.iterweekdays()] +cal = CalendarSubclass() +selfref = __main__ + + +def test_objects(main, copy_dict, byref): + main_dict = main.__dict__ + + try: + for obj in ('json', 'url', 'local_mod', 'sax', 'dom'): + assert main_dict[obj].__name__ == copy_dict[obj].__name__ + + #FIXME: In the second test call, 'calendar' is not included in + # sys.modules, independent of the value of byref. Tried to run garbage + # collection before with no luck. This block fails even with + # "import calendar" before it. Needed to restore the original modules + # with the 'copy_modules' object. (Moved to "test_session_{1,2}.py".) + + #for obj in ('Calendar', 'isleap'): + # assert main_dict[obj] is sys.modules['calendar'].__dict__[obj] + #assert main_dict['day_name'].__module__ == 'calendar' + #if byref: + # assert main_dict['day_name'] is sys.modules['calendar'].__dict__['day_name'] + + for obj in ('x', 'empty', 'names'): + assert main_dict[obj] == copy_dict[obj] + + globs = '__globals__' if dill._dill.PY3 else 'func_globals' + for obj in ['squared', 'cubed']: + assert getattr(main_dict[obj], globs) is main_dict + assert main_dict[obj](3) == copy_dict[obj](3) + + assert main.Person.__module__ == main.__name__ + assert isinstance(main.person, main.Person) + assert main.person.age == copy_dict['person'].age + + assert issubclass(main.CalendarSubclass, main.Calendar) + assert isinstance(main.cal, main.CalendarSubclass) + assert main.cal.weekdays() == copy_dict['cal'].weekdays() + + assert main.selfref is main + + except AssertionError: + import traceback + error_line = traceback.format_exc().splitlines()[-2].replace('[obj]', '['+repr(obj)+']') + print("Error while testing (byref=%s):" % byref, error_line, sep="\n", file=sys.stderr) + raise + + +if __name__ == '__main__': + + # Test dump_session() and load_session(). + for byref in (False, True): + if byref: + # Test unpickleable imported object in main. + from sys import flags + + #print(sorted(set(sys.modules.keys()) - original_modules)) + dill._test_file = dill._dill.StringIO() + try: + # For the subprocess. + dill.dump_session(session_file % byref, byref=byref) + + dill.dump_session(dill._test_file, byref=byref) + dump = dill._test_file.getvalue() + dill._test_file.close() + + import __main__ + copy_dict = __main__.__dict__.copy() + copy_modules = sys.modules.copy() + del copy_dict['dump'] + del copy_dict['__main__'] + for name in copy_dict.keys(): + if name not in original_objects: + del __main__.__dict__[name] + for module in list(sys.modules.keys()): + if module not in original_modules: + del sys.modules[module] + + dill._test_file = dill._dill.StringIO(dump) + dill.load_session(dill._test_file) + #print(sorted(set(sys.modules.keys()) - original_modules)) + + # Test session loading in a new session. 
+ from dill.tests.__main__ import python, shell, sp + error = sp.call([python, __file__, '--child', str(byref)], shell=shell) + if error: sys.exit(error) + del python, shell, sp + + finally: + dill._test_file.close() + try: + os.remove(session_file % byref) + except OSError: + pass + + test_objects(__main__, copy_dict, byref) + __main__.__dict__.update(copy_dict) + sys.modules.update(copy_modules) + del __main__, copy_dict, copy_modules, dump + + + # This is for code coverage, tests the use case of dump_session(byref=True) + # without imported objects in the namespace. It's a contrived example because + # even dill can't be in it. + from types import ModuleType + modname = '__test_main__' + main = ModuleType(modname) + main.x = 42 + + _main = dill._dill._stash_modules(main) + if _main is not main: + print("There are objects to save by referenece that shouldn't be:", + _main.__dill_imported, _main.__dill_imported_as, _main.__dill_imported_top_level, + file=sys.stderr) + + test_file = dill._dill.StringIO() + try: + dill.dump_session(test_file, main=main, byref=True) + dump = test_file.getvalue() + test_file.close() + + sys.modules[modname] = ModuleType(modname) # empty + # This should work after fixing https://github.com/uqfoundation/dill/issues/462 + test_file = dill._dill.StringIO(dump) + dill.load_session(test_file) + finally: + test_file.close() + + assert x == 42 + + + # Dump session for module that is not __main__: + import test_classdef as module + atexit.register(_clean_up_cache, module) + module.selfref = module + dict_objects = [obj for obj in module.__dict__.keys() if not obj.startswith('__')] + + test_file = dill._dill.StringIO() + try: + dill.dump_session(test_file, main=module) + dump = test_file.getvalue() + test_file.close() + + for obj in dict_objects: + del module.__dict__[obj] + + test_file = dill._dill.StringIO(dump) + dill.load_session(test_file, main=module) + finally: + test_file.close() + + assert all(obj in module.__dict__ for obj in dict_objects) + assert module.selfref is module diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_source.py b/myenv/lib/python3.9/site-packages/dill/tests/test_source.py new file mode 100644 index 0000000..57d25d1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_source.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill.source import getsource, getname, _wrap, likely_import +from dill.source import getimportable +from dill._dill import IS_PYPY + +import sys +PY3 = sys.version_info[0] >= 3 +IS_PYPY3 = IS_PYPY and PY3 +PY310b = '0x30a00b1' + +f = lambda x: x**2 +def g(x): return f(x) - x + +def h(x): + def g(x): return x + return g(x) - x + +class Foo(object): + def bar(self, x): + return x*x+x +_foo = Foo() + +def add(x,y): + return x+y + +# yes, same as 'f', but things are tricky when it comes to pointers +squared = lambda x:x**2 + +class Bar: + pass +_bar = Bar() + + # inspect.getsourcelines # dill.source.getblocks +def test_getsource(): + assert getsource(f) == 'f = lambda x: x**2\n' + assert getsource(g) == 'def g(x): return f(x) - x\n' + assert getsource(h) == 'def h(x):\n def g(x): return x\n return g(x) - x\n' + assert getname(f) == 'f' + assert getname(g) == 'g' + assert getname(h) == 'h' + assert _wrap(f)(4) == 16 + assert _wrap(g)(4) == 12 + assert _wrap(h)(4) == 0 + + assert getname(Foo) == 'Foo' + assert getname(Bar) == 'Bar' + assert getsource(Bar) == 'class Bar:\n pass\n' + assert getsource(Foo) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n' + #XXX: add getsource for _foo, _bar + +# test itself +def test_itself(): + assert likely_import(likely_import)=='from dill.source import likely_import\n' + +# builtin functions and objects +def test_builtin(): + if PY3: builtin = 'builtins' + else: builtin = '__builtin__' + assert likely_import(pow) == 'pow\n' + assert likely_import(100) == '100\n' + assert likely_import(True) == 'True\n' + assert likely_import(pow, explicit=True) == 'from %s import pow\n' % builtin + assert likely_import(100, explicit=True) == '100\n' + assert likely_import(True, explicit=True) == 'True\n' if PY3 else 'from %s import True\n' % builtin + # this is kinda BS... you can't import a None + assert likely_import(None) == 'None\n' + assert likely_import(None, explicit=True) == 'None\n' + + +# other imported functions +def test_imported(): + from math import sin + assert likely_import(sin) == 'from math import sin\n' + +# interactively defined functions +def test_dynamic(): + assert likely_import(add) == 'from %s import add\n' % __name__ + # interactive lambdas + assert likely_import(squared) == 'from %s import squared\n' % __name__ + +# classes and class instances +def test_classes(): + try: #XXX: should this be a 'special case'? 
+ from StringIO import StringIO + y = "from StringIO import StringIO\n" + x = y + except ImportError: + from io import BytesIO as StringIO + y = "from _io import BytesIO\n" + x = y if (IS_PYPY3 or hex(sys.hexversion) >= PY310b) else "from io import BytesIO\n" + s = StringIO() + + assert likely_import(StringIO) == x + assert likely_import(s) == y + # interactively defined classes and class instances + assert likely_import(Foo) == 'from %s import Foo\n' % __name__ + assert likely_import(_foo) == 'from %s import Foo\n' % __name__ + + +# test getimportable +def test_importable(): + assert getimportable(add) == 'from %s import add\n' % __name__ + assert getimportable(squared) == 'from %s import squared\n' % __name__ + assert getimportable(Foo) == 'from %s import Foo\n' % __name__ + assert getimportable(Foo.bar) == 'from %s import bar\n' % __name__ + assert getimportable(_foo.bar) == 'from %s import bar\n' % __name__ + assert getimportable(None) == 'None\n' + assert getimportable(100) == '100\n' + + assert getimportable(add, byname=False) == 'def add(x,y):\n return x+y\n' + assert getimportable(squared, byname=False) == 'squared = lambda x:x**2\n' + assert getimportable(None, byname=False) == 'None\n' + assert getimportable(Bar, byname=False) == 'class Bar:\n pass\n' + assert getimportable(Foo, byname=False) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n' + assert getimportable(Foo.bar, byname=False) == 'def bar(self, x):\n return x*x+x\n' + assert getimportable(Foo.bar, byname=True) == 'from %s import bar\n' % __name__ + assert getimportable(Foo.bar, alias='memo', byname=True) == 'from %s import bar as memo\n' % __name__ + assert getimportable(Foo, alias='memo', byname=True) == 'from %s import Foo as memo\n' % __name__ + assert getimportable(squared, alias='memo', byname=True) == 'from %s import squared as memo\n' % __name__ + assert getimportable(squared, alias='memo', byname=False) == 'memo = squared = lambda x:x**2\n' + assert getimportable(add, alias='memo', byname=False) == 'def add(x,y):\n return x+y\n\nmemo = add\n' + assert getimportable(None, alias='memo', byname=False) == 'memo = None\n' + assert getimportable(100, alias='memo', byname=False) == 'memo = 100\n' + assert getimportable(add, explicit=True) == 'from %s import add\n' % __name__ + assert getimportable(squared, explicit=True) == 'from %s import squared\n' % __name__ + assert getimportable(Foo, explicit=True) == 'from %s import Foo\n' % __name__ + assert getimportable(Foo.bar, explicit=True) == 'from %s import bar\n' % __name__ + assert getimportable(_foo.bar, explicit=True) == 'from %s import bar\n' % __name__ + assert getimportable(None, explicit=True) == 'None\n' + assert getimportable(100, explicit=True) == '100\n' + + +def test_numpy(): + try: + from numpy import array + x = array([1,2,3]) + assert getimportable(x) == 'from numpy import array\narray([1, 2, 3])\n' + assert getimportable(array) == 'from %s import array\n' % array.__module__ + assert getimportable(x, byname=False) == 'from numpy import array\narray([1, 2, 3])\n' + assert getimportable(array, byname=False) == 'from %s import array\n' % array.__module__ + except ImportError: pass + +#NOTE: if before likely_import(pow), will cause pow to throw AssertionError +def test_foo(): + assert getimportable(_foo, byname=False).startswith("import dill\nclass Foo(object):\n def bar(self, x):\n return x*x+x\ndill.loads(") + +if __name__ == '__main__': + test_getsource() + test_itself() + test_builtin() + test_imported() + test_dynamic() + test_classes() + 
test_importable() + test_numpy() + test_foo() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_temp.py b/myenv/lib/python3.9/site-packages/dill/tests/test_temp.py new file mode 100644 index 0000000..bcf7594 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_temp.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys +from dill.temp import dump, dump_source, dumpIO, dumpIO_source +from dill.temp import load, load_source, loadIO, loadIO_source +WINDOWS = sys.platform[:3] == 'win' + + +f = lambda x: x**2 +x = [1,2,3,4,5] + +# source code to tempfile +def test_code_to_tempfile(): + if not WINDOWS: #see: https://bugs.python.org/issue14243 + pyfile = dump_source(f, alias='_f') + _f = load_source(pyfile) + assert _f(4) == f(4) + +# source code to stream +def test_code_to_stream(): + pyfile = dumpIO_source(f, alias='_f') + _f = loadIO_source(pyfile) + assert _f(4) == f(4) + +# pickle to tempfile +def test_pickle_to_tempfile(): + if not WINDOWS: #see: https://bugs.python.org/issue14243 + dumpfile = dump(x) + _x = load(dumpfile) + assert _x == x + +# pickle to stream +def test_pickle_to_stream(): + dumpfile = dumpIO(x) + _x = loadIO(dumpfile) + assert _x == x + +### now testing the objects ### +f = lambda x: x**2 +def g(x): return f(x) - x + +def h(x): + def g(x): return x + return g(x) - x + +class Foo(object): + def bar(self, x): + return x*x+x +_foo = Foo() + +def add(x,y): + return x+y + +# yes, same as 'f', but things are tricky when it comes to pointers +squared = lambda x:x**2 + +class Bar: + pass +_bar = Bar() + + +# test function-type objects that take 2 args +def test_two_arg_functions(): + for obj in [add]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj(4,2) == obj(4,2) + +# test function-type objects that take 1 arg +def test_one_arg_functions(): + for obj in [g, h, squared]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj(4) == obj(4) + +# test instance-type objects +#for obj in [_bar, _foo]: +# pyfile = dumpIO_source(obj, alias='_obj') +# _obj = loadIO_source(pyfile) +# assert type(_obj) == type(obj) + +# test the rest of the objects +def test_the_rest(): + for obj in [Bar, Foo, Foo.bar, _foo.bar]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj.__name__ == obj.__name__ + + +if __name__ == '__main__': + test_code_to_tempfile() + test_code_to_stream() + test_pickle_to_tempfile() + test_pickle_to_stream() + test_two_arg_functions() + test_one_arg_functions() + test_the_rest() diff --git a/myenv/lib/python3.9/site-packages/dill/tests/test_weakref.py b/myenv/lib/python3.9/site-packages/dill/tests/test_weakref.py new file mode 100644 index 0000000..ada7d14 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dill/tests/test_weakref.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True +import weakref + +class _class: + def _method(self): + pass + +class _class2: + def __call__(self): + pass + +class _newclass(object): + def _method(self): + pass + +class _newclass2(object): + def __call__(self): + pass + +def _function(): + pass + + +def test_weakref(): + o = _class() + oc = _class2() + n = _newclass() + nc = _newclass2() + f = _function + z = _class + x = _newclass + + r = weakref.ref(o) + dr = weakref.ref(_class()) + p = weakref.proxy(o) + dp = weakref.proxy(_class()) + c = weakref.proxy(oc) + dc = weakref.proxy(_class2()) + + m = weakref.ref(n) + dm = weakref.ref(_newclass()) + t = weakref.proxy(n) + dt = weakref.proxy(_newclass()) + d = weakref.proxy(nc) + dd = weakref.proxy(_newclass2()) + + fr = weakref.ref(f) + fp = weakref.proxy(f) + #zr = weakref.ref(z) #XXX: weakrefs not allowed for classobj objects + #zp = weakref.proxy(z) #XXX: weakrefs not allowed for classobj objects + xr = weakref.ref(x) + xp = weakref.proxy(x) + + objlist = [r,dr,m,dm,fr,xr, p,dp,t,dt, c,dc,d,dd, fp,xp] + #dill.detect.trace(True) + + for obj in objlist: + res = dill.detect.errors(obj) + if res: + print ("%s" % res) + #print ("%s:\n %s" % (obj, res)) + # else: + # print ("PASS: %s" % obj) + assert not res + +def test_dictproxy(): + from dill._dill import DictProxyType + try: + m = DictProxyType({"foo": "bar"}) + except: + m = type.__dict__ + mp = dill.copy(m) + assert mp.items() == m.items() + + +if __name__ == '__main__': + test_weakref() + from dill._dill import IS_PYPY + if not IS_PYPY: + test_dictproxy() diff --git a/myenv/lib/python3.9/site-packages/dotenv/__init__.py b/myenv/lib/python3.9/site-packages/dotenv/__init__.py new file mode 100644 index 0000000..3512d10 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dotenv/__init__.py @@ -0,0 +1,49 @@ +from typing import Any, Optional + +from .main import (dotenv_values, find_dotenv, get_key, load_dotenv, set_key, + unset_key) + + +def load_ipython_extension(ipython: Any) -> None: + from .ipython import load_ipython_extension + load_ipython_extension(ipython) + + +def get_cli_string( + path: Optional[str] = None, + action: Optional[str] = None, + key: Optional[str] = None, + value: Optional[str] = None, + quote: Optional[str] = None, +): + """Returns a string suitable for running as a shell script. + + Useful for converting a arguments passed to a fabric task + to be passed to a `local` or `run` command. + """ + command = ['dotenv'] + if quote: + command.append('-q %s' % quote) + if path: + command.append('-f %s' % path) + if action: + command.append(action) + if key: + command.append(key) + if value: + if ' ' in value: + command.append('"%s"' % value) + else: + command.append(value) + + return ' '.join(command).strip() + + +__all__ = ['get_cli_string', + 'load_dotenv', + 'dotenv_values', + 'get_key', + 'set_key', + 'unset_key', + 'find_dotenv', + 'load_ipython_extension'] diff --git a/myenv/lib/python3.9/site-packages/dotenv/cli.py b/myenv/lib/python3.9/site-packages/dotenv/cli.py new file mode 100644 index 0000000..b7ae24a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dotenv/cli.py @@ -0,0 +1,164 @@ +import os +import sys +from subprocess import Popen +from typing import Any, Dict, List + +try: + import click +except ImportError: + sys.stderr.write('It seems python-dotenv is not installed with cli option. 
\n' + 'Run pip install "python-dotenv[cli]" to fix this.') + sys.exit(1) + +from .main import dotenv_values, get_key, set_key, unset_key +from .version import __version__ + + +@click.group() +@click.option('-f', '--file', default=os.path.join(os.getcwd(), '.env'), + type=click.Path(file_okay=True), + help="Location of the .env file, defaults to .env file in current working directory.") +@click.option('-q', '--quote', default='always', + type=click.Choice(['always', 'never', 'auto']), + help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.") +@click.option('-e', '--export', default=False, + type=click.BOOL, + help="Whether to write the dot file as an executable bash script.") +@click.version_option(version=__version__) +@click.pass_context +def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None: + '''This script is used to set, get or unset values from a .env file.''' + ctx.obj = {} + ctx.obj['QUOTE'] = quote + ctx.obj['EXPORT'] = export + ctx.obj['FILE'] = file + + +@cli.command() +@click.pass_context +def list(ctx: click.Context) -> None: + '''Display all the stored key/value.''' + file = ctx.obj['FILE'] + if not os.path.isfile(file): + raise click.BadParameter( + 'Path "%s" does not exist.' % (file), + ctx=ctx + ) + dotenv_as_dict = dotenv_values(file) + for k, v in dotenv_as_dict.items(): + click.echo('%s=%s' % (k, v)) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +@click.argument('value', required=True) +def set(ctx: click.Context, key: Any, value: Any) -> None: + '''Store the given key/value.''' + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + export = ctx.obj['EXPORT'] + success, key, value = set_key(file, key, value, quote, export) + if success: + click.echo('%s=%s' % (key, value)) + else: + exit(1) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +def get(ctx: click.Context, key: Any) -> None: + '''Retrieve the value for the given key.''' + file = ctx.obj['FILE'] + if not os.path.isfile(file): + raise click.BadParameter( + 'Path "%s" does not exist.' % (file), + ctx=ctx + ) + stored_value = get_key(file, key) + if stored_value: + click.echo(stored_value) + else: + exit(1) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +def unset(ctx: click.Context, key: Any) -> None: + '''Removes the given key.''' + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + success, key = unset_key(file, key, quote) + if success: + click.echo("Successfully removed %s" % key) + else: + exit(1) + + +@cli.command(context_settings={'ignore_unknown_options': True}) +@click.pass_context +@click.option( + "--override/--no-override", + default=True, + help="Override variables from the environment file with those from the .env file.", +) +@click.argument('commandline', nargs=-1, type=click.UNPROCESSED) +def run(ctx: click.Context, override: bool, commandline: List[str]) -> None: + """Run command with environment variables present.""" + file = ctx.obj['FILE'] + if not os.path.isfile(file): + raise click.BadParameter( + 'Invalid value for \'-f\' "%s" does not exist.' % (file), + ctx=ctx + ) + dotenv_as_dict = { + k: v + for (k, v) in dotenv_values(file).items() + if v is not None and (override or k not in os.environ) + } + + if not commandline: + click.echo('No command given.') + exit(1) + ret = run_command(commandline, dotenv_as_dict) + exit(ret) + + +def run_command(command: List[str], env: Dict[str, str]) -> int: + """Run command in sub process. 
+
+    Runs the command in a sub process with the variables from `env`
+    added to the current environment variables.
+
+    Parameters
+    ----------
+    command: List[str]
+        The command and its parameters
+    env: Dict
+        The additional environment variables
+
+    Returns
+    -------
+    int
+        The return code of the command
+
+    """
+    # copy the current environment variables and add the values from
+    # `env`
+    cmd_env = os.environ.copy()
+    cmd_env.update(env)
+
+    p = Popen(command,
+              universal_newlines=True,
+              bufsize=0,
+              shell=False,
+              env=cmd_env)
+    _, _ = p.communicate()
+
+    return p.returncode
+
+
+if __name__ == "__main__":
+    cli()
diff --git a/myenv/lib/python3.9/site-packages/dotenv/ipython.py b/myenv/lib/python3.9/site-packages/dotenv/ipython.py
new file mode 100644
index 0000000..7df727c
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/dotenv/ipython.py
@@ -0,0 +1,39 @@
+from IPython.core.magic import Magics, line_magic, magics_class  # type: ignore
+from IPython.core.magic_arguments import (argument, magic_arguments,  # type: ignore
+                                          parse_argstring)  # type: ignore
+
+from .main import find_dotenv, load_dotenv
+
+
+@magics_class
+class IPythonDotEnv(Magics):
+
+    @magic_arguments()
+    @argument(
+        '-o', '--override', action='store_true',
+        help="Indicate to override existing variables"
+    )
+    @argument(
+        '-v', '--verbose', action='store_true',
+        help="Indicate function calls to be verbose"
+    )
+    @argument('dotenv_path', nargs='?', type=str, default='.env',
+              help='Search in increasingly higher folders for the `dotenv_path`')
+    @line_magic
+    def dotenv(self, line):
+        args = parse_argstring(self.dotenv, line)
+        # Locate the .env file
+        dotenv_path = args.dotenv_path
+        try:
+            dotenv_path = find_dotenv(dotenv_path, True, True)
+        except IOError:
+            print("cannot find .env file")
+            return
+
+        # Load the .env file
+        load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
+
+
+def load_ipython_extension(ipython):
+    """Register the %dotenv magic."""
+    ipython.register_magics(IPythonDotEnv)
diff --git a/myenv/lib/python3.9/site-packages/dotenv/main.py b/myenv/lib/python3.9/site-packages/dotenv/main.py
new file mode 100644
index 0000000..20ac61b
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/dotenv/main.py
@@ -0,0 +1,373 @@
+import io
+import logging
+import os
+import shutil
+import sys
+import tempfile
+from collections import OrderedDict
+from contextlib import contextmanager
+from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Tuple,
+                    Union)
+
+from .parser import Binding, parse_stream
+from .variables import parse_variables
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info >= (3, 6):
+    _PathLike = os.PathLike
+else:
+    _PathLike = str
+
+
+def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding]:
+    for mapping in mappings:
+        if mapping.error:
+            logger.warning(
+                "Python-dotenv could not parse statement starting at line %s",
+                mapping.original.line,
+            )
+        yield mapping
+
+
+class DotEnv():
+    def __init__(
+        self,
+        dotenv_path: Optional[Union[str, _PathLike]],
+        stream: Optional[IO[str]] = None,
+        verbose: bool = False,
+        encoding: Union[None, str] = None,
+        interpolate: bool = True,
+        override: bool = True,
+    ) -> None:
+        self.dotenv_path = dotenv_path  # type: Optional[Union[str, _PathLike]]
+        self.stream = stream  # type: Optional[IO[str]]
+        self._dict = None  # type: Optional[Dict[str, Optional[str]]]
+        self.verbose = verbose  # type: bool
+        self.encoding = encoding  # type: Union[None, str]
+        self.interpolate = interpolate  # 
type: bool + self.override = override # type: bool + + @contextmanager + def _get_stream(self) -> Iterator[IO[str]]: + if self.dotenv_path and os.path.isfile(self.dotenv_path): + with io.open(self.dotenv_path, encoding=self.encoding) as stream: + yield stream + elif self.stream is not None: + yield self.stream + else: + if self.verbose: + logger.info( + "Python-dotenv could not find configuration file %s.", + self.dotenv_path or '.env', + ) + yield io.StringIO('') + + def dict(self) -> Dict[str, Optional[str]]: + """Return dotenv as dict""" + if self._dict: + return self._dict + + raw_values = self.parse() + + if self.interpolate: + self._dict = OrderedDict(resolve_variables(raw_values, override=self.override)) + else: + self._dict = OrderedDict(raw_values) + + return self._dict + + def parse(self) -> Iterator[Tuple[str, Optional[str]]]: + with self._get_stream() as stream: + for mapping in with_warn_for_invalid_lines(parse_stream(stream)): + if mapping.key is not None: + yield mapping.key, mapping.value + + def set_as_environment_variables(self) -> bool: + """ + Load the current dotenv as system environment variable. + """ + for k, v in self.dict().items(): + if k in os.environ and not self.override: + continue + if v is not None: + os.environ[k] = v + + return True + + def get(self, key: str) -> Optional[str]: + """ + """ + data = self.dict() + + if key in data: + return data[key] + + if self.verbose: + logger.warning("Key %s not found in %s.", key, self.dotenv_path) + + return None + + +def get_key( + dotenv_path: Union[str, _PathLike], + key_to_get: str, + encoding: Optional[str] = "utf-8", +) -> Optional[str]: + """ + Get the value of a given key from the given .env. + + Returns `None` if the key isn't found or doesn't have a value. + """ + return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get) + + +@contextmanager +def rewrite( + path: Union[str, _PathLike], + encoding: Optional[str], +) -> Iterator[Tuple[IO[str], IO[str]]]: + try: + if not os.path.isfile(path): + with io.open(path, "w+", encoding=encoding) as source: + source.write("") + with tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) as dest: + with io.open(path, encoding=encoding) as source: + yield (source, dest) # type: ignore + except BaseException: + if os.path.isfile(dest.name): + os.unlink(dest.name) + raise + else: + shutil.move(dest.name, path) + + +def set_key( + dotenv_path: Union[str, _PathLike], + key_to_set: str, + value_to_set: str, + quote_mode: str = "always", + export: bool = False, + encoding: Optional[str] = "utf-8", +) -> Tuple[Optional[bool], str, str]: + """ + Adds or Updates a key/value to the given .env + + If the .env path given doesn't exist, fails instead of risking creating + an orphan .env somewhere in the filesystem + """ + if quote_mode not in ("always", "auto", "never"): + raise ValueError("Unknown quote_mode: {}".format(quote_mode)) + + quote = ( + quote_mode == "always" + or (quote_mode == "auto" and not value_to_set.isalnum()) + ) + + if quote: + value_out = "'{}'".format(value_to_set.replace("'", "\\'")) + else: + value_out = value_to_set + if export: + line_out = 'export {}={}\n'.format(key_to_set, value_out) + else: + line_out = "{}={}\n".format(key_to_set, value_out) + + with rewrite(dotenv_path, encoding=encoding) as (source, dest): + replaced = False + missing_newline = False + for mapping in with_warn_for_invalid_lines(parse_stream(source)): + if mapping.key == key_to_set: + dest.write(line_out) + replaced = True + else: + 
dest.write(mapping.original.string) + missing_newline = not mapping.original.string.endswith("\n") + if not replaced: + if missing_newline: + dest.write("\n") + dest.write(line_out) + + return True, key_to_set, value_to_set + + +def unset_key( + dotenv_path: Union[str, _PathLike], + key_to_unset: str, + quote_mode: str = "always", + encoding: Optional[str] = "utf-8", +) -> Tuple[Optional[bool], str]: + """ + Removes a given key from the given .env + + If the .env path given doesn't exist, fails + If the given key doesn't exist in the .env, fails + """ + if not os.path.exists(dotenv_path): + logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path) + return None, key_to_unset + + removed = False + with rewrite(dotenv_path, encoding=encoding) as (source, dest): + for mapping in with_warn_for_invalid_lines(parse_stream(source)): + if mapping.key == key_to_unset: + removed = True + else: + dest.write(mapping.original.string) + + if not removed: + logger.warning("Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path) + return None, key_to_unset + + return removed, key_to_unset + + +def resolve_variables( + values: Iterable[Tuple[str, Optional[str]]], + override: bool, +) -> Mapping[str, Optional[str]]: + new_values = {} # type: Dict[str, Optional[str]] + + for (name, value) in values: + if value is None: + result = None + else: + atoms = parse_variables(value) + env = {} # type: Dict[str, Optional[str]] + if override: + env.update(os.environ) # type: ignore + env.update(new_values) + else: + env.update(new_values) + env.update(os.environ) # type: ignore + result = "".join(atom.resolve(env) for atom in atoms) + + new_values[name] = result + + return new_values + + +def _walk_to_root(path: str) -> Iterator[str]: + """ + Yield directories starting from the given directory up to the root + """ + if not os.path.exists(path): + raise IOError('Starting path not found') + + if os.path.isfile(path): + path = os.path.dirname(path) + + last_dir = None + current_dir = os.path.abspath(path) + while last_dir != current_dir: + yield current_dir + parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir)) + last_dir, current_dir = current_dir, parent_dir + + +def find_dotenv( + filename: str = '.env', + raise_error_if_not_found: bool = False, + usecwd: bool = False, +) -> str: + """ + Search in increasingly higher folders for the given file + + Returns path to the file if found, or an empty string otherwise + """ + + def _is_interactive(): + """ Decide whether this is running in a REPL or IPython notebook """ + main = __import__('__main__', None, None, fromlist=['__file__']) + return not hasattr(main, '__file__') + + if usecwd or _is_interactive() or getattr(sys, 'frozen', False): + # Should work without __file__, e.g. in REPL or IPython notebook. 
+        path = os.getcwd()
+    else:
+        # will work for .py files
+        frame = sys._getframe()
+        current_file = __file__
+
+        while frame.f_code.co_filename == current_file:
+            assert frame.f_back is not None
+            frame = frame.f_back
+        frame_filename = frame.f_code.co_filename
+        path = os.path.dirname(os.path.abspath(frame_filename))
+
+    for dirname in _walk_to_root(path):
+        check_path = os.path.join(dirname, filename)
+        if os.path.isfile(check_path):
+            return check_path
+
+    if raise_error_if_not_found:
+        raise IOError('File not found')
+
+    return ''
+
+
+def load_dotenv(
+    dotenv_path: Union[str, _PathLike, None] = None,
+    stream: Optional[IO[str]] = None,
+    verbose: bool = False,
+    override: bool = False,
+    interpolate: bool = True,
+    encoding: Optional[str] = "utf-8",
+) -> bool:
+    """Parse a .env file and then load all the variables found as environment variables.
+
+    - *dotenv_path*: absolute or relative path to .env file.
+    - *stream*: Text stream (such as `io.StringIO`) with .env content, used if
+      `dotenv_path` is `None`.
+    - *verbose*: whether to output a warning if the .env file is missing. Defaults to
+      `False`.
+    - *override*: whether to override the system environment variables with the variables
+      in `.env` file. Defaults to `False`.
+    - *encoding*: encoding to be used to read the file.
+
+    If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the .env file.
+    """
+    if dotenv_path is None and stream is None:
+        dotenv_path = find_dotenv()
+
+    dotenv = DotEnv(
+        dotenv_path=dotenv_path,
+        stream=stream,
+        verbose=verbose,
+        interpolate=interpolate,
+        override=override,
+        encoding=encoding,
+    )
+    return dotenv.set_as_environment_variables()
+
+
+def dotenv_values(
+    dotenv_path: Union[str, _PathLike, None] = None,
+    stream: Optional[IO[str]] = None,
+    verbose: bool = False,
+    interpolate: bool = True,
+    encoding: Optional[str] = "utf-8",
+) -> Dict[str, Optional[str]]:
+    """
+    Parse a .env file and return its content as a dict.
+
+    - *dotenv_path*: absolute or relative path to .env file.
+    - *stream*: `StringIO` object with .env content, used if `dotenv_path` is `None`.
+    - *verbose*: whether to output a warning if the .env file is missing. Defaults to
+      `False`.
+    - *encoding*: encoding to be used to read the file.
+
+    If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the .env file. 
+ """ + if dotenv_path is None and stream is None: + dotenv_path = find_dotenv() + + return DotEnv( + dotenv_path=dotenv_path, + stream=stream, + verbose=verbose, + interpolate=interpolate, + override=True, + encoding=encoding, + ).dict() diff --git a/myenv/lib/python3.9/site-packages/dotenv/parser.py b/myenv/lib/python3.9/site-packages/dotenv/parser.py new file mode 100644 index 0000000..398bd49 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dotenv/parser.py @@ -0,0 +1,182 @@ +import codecs +import re +from typing import (IO, Iterator, Match, NamedTuple, Optional, # noqa:F401 + Pattern, Sequence, Tuple) + + +def make_regex(string: str, extra_flags: int = 0) -> Pattern[str]: + return re.compile(string, re.UNICODE | extra_flags) + + +_newline = make_regex(r"(\r\n|\n|\r)") +_multiline_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE) +_whitespace = make_regex(r"[^\S\r\n]*") +_export = make_regex(r"(?:export[^\S\r\n]+)?") +_single_quoted_key = make_regex(r"'([^']+)'") +_unquoted_key = make_regex(r"([^=\#\s]+)") +_equal_sign = make_regex(r"(=[^\S\r\n]*)") +_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'") +_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"') +_unquoted_value = make_regex(r"([^\r\n]*)") +_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?") +_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)") +_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?") +_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]") +_single_quote_escapes = make_regex(r"\\[\\']") + + +Original = NamedTuple( + "Original", + [ + ("string", str), + ("line", int), + ], +) + +Binding = NamedTuple( + "Binding", + [ + ("key", Optional[str]), + ("value", Optional[str]), + ("original", Original), + ("error", bool), + ], +) + + +class Position: + def __init__(self, chars: int, line: int) -> None: + self.chars = chars + self.line = line + + @classmethod + def start(cls) -> "Position": + return cls(chars=0, line=1) + + def set(self, other: "Position") -> None: + self.chars = other.chars + self.line = other.line + + def advance(self, string: str) -> None: + self.chars += len(string) + self.line += len(re.findall(_newline, string)) + + +class Error(Exception): + pass + + +class Reader: + def __init__(self, stream: IO[str]) -> None: + self.string = stream.read() + self.position = Position.start() + self.mark = Position.start() + + def has_next(self) -> bool: + return self.position.chars < len(self.string) + + def set_mark(self) -> None: + self.mark.set(self.position) + + def get_marked(self) -> Original: + return Original( + string=self.string[self.mark.chars:self.position.chars], + line=self.mark.line, + ) + + def peek(self, count: int) -> str: + return self.string[self.position.chars:self.position.chars + count] + + def read(self, count: int) -> str: + result = self.string[self.position.chars:self.position.chars + count] + if len(result) < count: + raise Error("read: End of string") + self.position.advance(result) + return result + + def read_regex(self, regex: Pattern[str]) -> Sequence[str]: + match = regex.match(self.string, self.position.chars) + if match is None: + raise Error("read_regex: Pattern not found") + self.position.advance(self.string[match.start():match.end()]) + return match.groups() + + +def decode_escapes(regex: Pattern[str], string: str) -> str: + def decode_match(match: Match[str]) -> str: + return codecs.decode(match.group(0), 'unicode-escape') # type: ignore + + return regex.sub(decode_match, string) + + +def parse_key(reader: Reader) -> Optional[str]: + char = 
reader.peek(1)
+    if char == "#":
+        return None
+    elif char == "'":
+        (key,) = reader.read_regex(_single_quoted_key)
+    else:
+        (key,) = reader.read_regex(_unquoted_key)
+    return key
+
+
+def parse_unquoted_value(reader: Reader) -> str:
+    (part,) = reader.read_regex(_unquoted_value)
+    return re.sub(r"\s+#.*", "", part).rstrip()
+
+
+def parse_value(reader: Reader) -> str:
+    char = reader.peek(1)
+    if char == u"'":
+        (value,) = reader.read_regex(_single_quoted_value)
+        return decode_escapes(_single_quote_escapes, value)
+    elif char == u'"':
+        (value,) = reader.read_regex(_double_quoted_value)
+        return decode_escapes(_double_quote_escapes, value)
+    elif char in (u"", u"\n", u"\r"):
+        return u""
+    else:
+        return parse_unquoted_value(reader)
+
+
+def parse_binding(reader: Reader) -> Binding:
+    reader.set_mark()
+    try:
+        reader.read_regex(_multiline_whitespace)
+        if not reader.has_next():
+            return Binding(
+                key=None,
+                value=None,
+                original=reader.get_marked(),
+                error=False,
+            )
+        reader.read_regex(_export)
+        key = parse_key(reader)
+        reader.read_regex(_whitespace)
+        if reader.peek(1) == "=":
+            reader.read_regex(_equal_sign)
+            value = parse_value(reader)  # type: Optional[str]
+        else:
+            value = None
+        reader.read_regex(_comment)
+        reader.read_regex(_end_of_line)
+        return Binding(
+            key=key,
+            value=value,
+            original=reader.get_marked(),
+            error=False,
+        )
+    except Error:
+        reader.read_regex(_rest_of_line)
+        return Binding(
+            key=None,
+            value=None,
+            original=reader.get_marked(),
+            error=True,
+        )
+
+
+def parse_stream(stream: IO[str]) -> Iterator[Binding]:
+    reader = Reader(stream)
+    while reader.has_next():
+        yield parse_binding(reader)
diff --git a/myenv/lib/python3.9/site-packages/dotenv/py.typed b/myenv/lib/python3.9/site-packages/dotenv/py.typed
new file mode 100644
index 0000000..7632ecf
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/dotenv/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561
diff --git a/myenv/lib/python3.9/site-packages/dotenv/variables.py b/myenv/lib/python3.9/site-packages/dotenv/variables.py
new file mode 100644
index 0000000..d77b700
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/dotenv/variables.py
@@ -0,0 +1,88 @@
+import re
+from abc import ABCMeta
+from typing import Iterator, Mapping, Optional, Pattern
+
+_posix_variable = re.compile(
+    r"""
+    \$\{
+    (?P<name>[^\}:]*)
+    (?::-
+    (?P<default>[^\}]*)
+    )?
+ \} + """, + re.VERBOSE, +) # type: Pattern[str] + + +class Atom(): + __metaclass__ = ABCMeta + + def __ne__(self, other: object) -> bool: + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + return not result + + def resolve(self, env: Mapping[str, Optional[str]]) -> str: + raise NotImplementedError + + +class Literal(Atom): + def __init__(self, value: str) -> None: + self.value = value + + def __repr__(self) -> str: + return "Literal(value={})".format(self.value) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return NotImplemented + return self.value == other.value + + def __hash__(self) -> int: + return hash((self.__class__, self.value)) + + def resolve(self, env: Mapping[str, Optional[str]]) -> str: + return self.value + + +class Variable(Atom): + def __init__(self, name: str, default: Optional[str]) -> None: + self.name = name + self.default = default + + def __repr__(self) -> str: + return "Variable(name={}, default={})".format(self.name, self.default) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return NotImplemented + return (self.name, self.default) == (other.name, other.default) + + def __hash__(self) -> int: + return hash((self.__class__, self.name, self.default)) + + def resolve(self, env: Mapping[str, Optional[str]]) -> str: + default = self.default if self.default is not None else "" + result = env.get(self.name, default) + return result if result is not None else "" + + +def parse_variables(value: str) -> Iterator[Atom]: + cursor = 0 + + for match in _posix_variable.finditer(value): + (start, end) = match.span() + name = match.groupdict()["name"] + default = match.groupdict()["default"] + + if start > cursor: + yield Literal(value=value[cursor:start]) + + yield Variable(name=name, default=default) + cursor = end + + length = len(value) + if cursor < length: + yield Literal(value=value[cursor:length]) diff --git a/myenv/lib/python3.9/site-packages/dotenv/version.py b/myenv/lib/python3.9/site-packages/dotenv/version.py new file mode 100644 index 0000000..5f4bb0b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/dotenv/version.py @@ -0,0 +1 @@ +__version__ = "0.20.0" diff --git a/myenv/lib/python3.9/site-packages/easy_install.py b/myenv/lib/python3.9/site-packages/easy_install.py new file mode 100644 index 0000000..d87e984 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/LICENSE new file mode 100644 index 0000000..474479a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +"python-ecdsa" Copyright (c) 2010 Brian Warner + +Portions written in 2005 by Peter Pearson and placed in the public domain. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/METADATA new file mode 100644 index 0000000..fa1c5fe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/METADATA @@ -0,0 +1,675 @@ +Metadata-Version: 2.1 +Name: ecdsa +Version: 0.18.0 +Summary: ECDSA cryptographic signature library (pure python) +Home-page: http://github.com/tlsfuzzer/python-ecdsa +Author: Brian Warner +Author-email: warner@lothar.com +License: MIT +Platform: UNKNOWN +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.* +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: six (>=1.9.0) +Provides-Extra: gmpy +Requires-Dist: gmpy ; extra == 'gmpy' +Provides-Extra: gmpy2 +Requires-Dist: gmpy2 ; extra == 'gmpy2' + +# Pure-Python ECDSA and ECDH + +[![Build Status](https://github.com/tlsfuzzer/python-ecdsa/workflows/GitHub%20CI/badge.svg?branch=master)](https://github.com/tlsfuzzer/python-ecdsa/actions?query=workflow%3A%22GitHub+CI%22+branch%3Amaster) +[![Documentation Status](https://readthedocs.org/projects/ecdsa/badge/?version=latest)](https://ecdsa.readthedocs.io/en/latest/?badge=latest) +[![Coverage Status](https://coveralls.io/repos/github/tlsfuzzer/python-ecdsa/badge.svg?branch=master)](https://coveralls.io/github/tlsfuzzer/python-ecdsa?branch=master) +![condition coverage](https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/tomato42/9b6ca1f3410207fbeca785a178781651/raw/python-ecdsa-condition-coverage.json) +[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/tlsfuzzer/python-ecdsa.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/tlsfuzzer/python-ecdsa/context:python) +[![Total 
alerts](https://img.shields.io/lgtm/alerts/g/tlsfuzzer/python-ecdsa.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/tlsfuzzer/python-ecdsa/alerts/)
+[![Latest Version](https://img.shields.io/pypi/v/ecdsa.svg?style=flat)](https://pypi.python.org/pypi/ecdsa/)
+![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat)
+
+
+This is an easy-to-use implementation of ECC (Elliptic Curve Cryptography)
+with support for ECDSA (Elliptic Curve Digital Signature Algorithm),
+EdDSA (Edwards-curve Digital Signature Algorithm) and ECDH
+(Elliptic Curve Diffie-Hellman), implemented purely in Python, released under
+the MIT license. With this library, you can quickly create key pairs (signing
+key and verifying key), sign messages, and verify the signatures. You can
+also agree on a shared secret key based on exchanged public keys.
+The keys and signatures are very short, making them easy to handle and
+incorporate into other protocols.
+
+**NOTE: This library should not be used in production settings; see [Security](#Security) for more details.**
+
+## Features
+
+This library provides key generation, signing, verifying, and shared secret
+derivation for five
+popular NIST "Suite B" GF(p) (_prime field_) curves, with key lengths of 192,
+224, 256, 384, and 521 bits. The "short names" for these curves, as known by
+the OpenSSL tool (`openssl ecparam -list_curves`), are: `prime192v1`,
+`secp224r1`, `prime256v1`, `secp384r1`, and `secp521r1`. It includes the
+256-bit curve `secp256k1` used by Bitcoin. There is also support for the
+regular (non-twisted) variants of Brainpool curves from 160 to 512 bits. The
+"short names" of those curves are: `brainpoolP160r1`, `brainpoolP192r1`,
+`brainpoolP224r1`, `brainpoolP256r1`, `brainpoolP320r1`, `brainpoolP384r1`,
+`brainpoolP512r1`. A few of the small curves from the SEC standard are also
+included (mainly to speed up testing of the library); those are:
+`secp112r1`, `secp112r2`, `secp128r1`, and `secp160r1`.
+Key generation, signing and verifying are also supported for the Ed25519 and
+Ed448 curves.
+No other curves are included, but it is not too hard to add support for more
+curves over prime fields.
+
+## Dependencies
+
+This library uses only Python and the 'six' package. It is compatible with
+Python 2.6, 2.7, and 3.3+. It also supports execution on alternative
+implementations like pypy and pypy3.
+
+If `gmpy2` or `gmpy` is installed, it will be used for faster arithmetic.
+Either of them can be installed after this library is installed;
+`python-ecdsa` will detect their presence on start-up and use them
+automatically.
+You should prefer `gmpy2` on Python 3 for optimal performance.
+
+To run the OpenSSL compatibility tests, the 'openssl' tool must be in your
+`PATH`. This release has been tested successfully against OpenSSL 0.9.8o,
+1.0.0a, 1.0.2f, 1.1.1d and 3.0.1 (among others).
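
To make the backend detection described above concrete, here is a minimal sketch of the kind of import probe involved; this is an illustration only, since `python-ecdsa` performs its own equivalent check internally at start-up:

```
try:
    import gmpy2  # noqa: F401  # preferred native backend
    backend = "gmpy2"
except ImportError:
    try:
        import gmpy  # noqa: F401  # older, slower alternative
        backend = "gmpy"
    except ImportError:
        backend = "pure Python"

print("fastest arithmetic available to python-ecdsa:", backend)
```

If the probe reports "pure Python", installing `gmpy2` (no code changes required) is what the Dependencies section above suggests for better performance.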
+ + +## Installation + +This library is available on PyPI, it's recommended to install it using `pip`: + +``` +pip install ecdsa +``` + +In case higher performance is wanted and using native code is not a problem, +it's possible to specify installation together with `gmpy2`: + +``` +pip install ecdsa[gmpy2] +``` + +or (slower, legacy option): +``` +pip install ecdsa[gmpy] +``` + +## Speed + +The following table shows how long this library takes to generate key pairs +(`keygen`), to sign data (`sign`), to verify those signatures (`verify`), +to derive a shared secret (`ecdh`), and +to verify the signatures with no key-specific precomputation (`no PC verify`). +All those values are in seconds. +For convenience, the inverses of those values are also provided: +how many keys per second can be generated (`keygen/s`), how many signatures +can be made per second (`sign/s`), how many signatures can be verified +per second (`verify/s`), how many shared secrets can be derived per second +(`ecdh/s`), and how many signatures with no key specific +precomputation can be verified per second (`no PC verify/s`). The size of raw +signature (generally the smallest +the way a signature can be encoded) is also provided in the `siglen` column. +Use `tox -e speed` to generate this table on your own computer. +On an Intel Core i7 4790K @ 4.0GHz I'm getting the following performance: + +``` + siglen keygen keygen/s sign sign/s verify verify/s no PC verify no PC verify/s + NIST192p: 48 0.00032s 3134.06 0.00033s 2985.53 0.00063s 1598.36 0.00129s 774.43 + NIST224p: 56 0.00040s 2469.24 0.00042s 2367.88 0.00081s 1233.41 0.00170s 586.66 + NIST256p: 64 0.00051s 1952.73 0.00054s 1867.80 0.00098s 1021.86 0.00212s 471.27 + NIST384p: 96 0.00107s 935.92 0.00111s 904.23 0.00203s 491.77 0.00446s 224.00 + NIST521p: 132 0.00210s 475.52 0.00215s 464.16 0.00398s 251.28 0.00874s 114.39 + SECP256k1: 64 0.00052s 1921.54 0.00054s 1847.49 0.00105s 948.68 0.00210s 477.01 + BRAINPOOLP160r1: 40 0.00025s 4003.88 0.00026s 3845.12 0.00053s 1893.93 0.00105s 949.92 + BRAINPOOLP192r1: 48 0.00033s 3043.97 0.00034s 2975.98 0.00063s 1581.50 0.00135s 742.29 + BRAINPOOLP224r1: 56 0.00041s 2436.44 0.00043s 2315.51 0.00078s 1278.49 0.00180s 556.16 + BRAINPOOLP256r1: 64 0.00053s 1892.49 0.00054s 1846.24 0.00114s 875.64 0.00229s 437.25 + BRAINPOOLP320r1: 80 0.00073s 1361.26 0.00076s 1309.25 0.00143s 699.29 0.00322s 310.49 + BRAINPOOLP384r1: 96 0.00107s 931.29 0.00111s 901.80 0.00230s 434.19 0.00476s 210.20 + BRAINPOOLP512r1: 128 0.00207s 483.41 0.00212s 471.42 0.00425s 235.43 0.00912s 109.61 + SECP112r1: 28 0.00015s 6672.53 0.00016s 6440.34 0.00031s 3265.41 0.00056s 1774.20 + SECP112r2: 28 0.00015s 6697.11 0.00015s 6479.98 0.00028s 3524.72 0.00058s 1716.16 + SECP128r1: 32 0.00018s 5497.65 0.00019s 5272.89 0.00036s 2747.39 0.00072s 1396.16 + SECP160r1: 42 0.00025s 3949.32 0.00026s 3894.45 0.00046s 2153.85 0.00102s 985.07 + Ed25519: 64 0.00076s 1324.48 0.00042s 2405.01 0.00109s 918.05 0.00344s 290.50 + Ed448: 114 0.00176s 569.53 0.00115s 870.94 0.00282s 355.04 0.01024s 97.69 + + ecdh ecdh/s + NIST192p: 0.00104s 964.89 + NIST224p: 0.00134s 748.63 + NIST256p: 0.00170s 587.08 + NIST384p: 0.00352s 283.90 + NIST521p: 0.00717s 139.51 + SECP256k1: 0.00154s 648.40 + BRAINPOOLP160r1: 0.00082s 1220.70 + BRAINPOOLP192r1: 0.00105s 956.75 + BRAINPOOLP224r1: 0.00136s 734.52 + BRAINPOOLP256r1: 0.00178s 563.32 + BRAINPOOLP320r1: 0.00252s 397.23 + BRAINPOOLP384r1: 0.00376s 266.27 + BRAINPOOLP512r1: 0.00733s 136.35 + SECP112r1: 0.00046s 2180.40 + SECP112r2: 0.00045s 2229.14 
+       SECP128r1:  0.00054s   1868.15
+       SECP160r1:  0.00080s   1243.98
+```
+
+To test performance with `gmpy2` loaded, use `tox -e speedgmpy2`.
+On the same machine I'm getting the following performance with `gmpy2`:
+```
+                  siglen  keygen   keygen/s     sign    sign/s   verify  verify/s  no PC verify  no PC verify/s
+        NIST192p:     48  0.00017s  5933.40  0.00017s  5751.70  0.00032s  3125.28      0.00067s         1502.41
+        NIST224p:     56  0.00021s  4782.87  0.00022s  4610.05  0.00040s  2487.04      0.00089s         1126.90
+        NIST256p:     64  0.00023s  4263.98  0.00024s  4125.16  0.00045s  2200.88      0.00098s         1016.82
+        NIST384p:     96  0.00041s  2449.54  0.00042s  2399.96  0.00083s  1210.57      0.00172s          581.43
+        NIST521p:    132  0.00071s  1416.07  0.00072s  1389.81  0.00144s   692.93      0.00312s          320.40
+       SECP256k1:     64  0.00024s  4245.05  0.00024s  4122.09  0.00045s  2206.40      0.00094s         1068.32
+ BRAINPOOLP160r1:     40  0.00014s  6939.17  0.00015s  6681.55  0.00029s  3452.43      0.00057s         1769.81
+ BRAINPOOLP192r1:     48  0.00017s  5920.05  0.00017s  5774.36  0.00034s  2979.00      0.00069s         1453.19
+ BRAINPOOLP224r1:     56  0.00021s  4732.12  0.00022s  4622.65  0.00041s  2422.47      0.00087s         1149.87
+ BRAINPOOLP256r1:     64  0.00024s  4233.02  0.00024s  4115.20  0.00047s  2143.27      0.00098s         1015.60
+ BRAINPOOLP320r1:     80  0.00032s  3162.38  0.00032s  3077.62  0.00063s  1598.83      0.00136s          737.34
+ BRAINPOOLP384r1:     96  0.00041s  2436.88  0.00042s  2395.62  0.00083s  1202.68      0.00178s          562.85
+ BRAINPOOLP512r1:    128  0.00063s  1587.60  0.00064s  1558.83  0.00125s   799.96      0.00281s          355.83
+       SECP112r1:     28  0.00009s 11118.66  0.00009s 10775.48  0.00018s  5456.00      0.00033s         3020.83
+       SECP112r2:     28  0.00009s 11322.97  0.00009s 10857.71  0.00017s  5748.77      0.00032s         3094.28
+       SECP128r1:     32  0.00010s 10078.39  0.00010s  9665.27  0.00019s  5200.58      0.00036s         2760.88
+       SECP160r1:     42  0.00015s  6875.51  0.00015s  6647.35  0.00029s  3422.41      0.00057s         1768.35
+         Ed25519:     64  0.00030s  3322.56  0.00018s  5568.63  0.00046s  2165.35      0.00153s          654.02
+           Ed448:    114  0.00060s  1680.53  0.00039s  2567.40  0.00096s  1036.67      0.00350s          285.62
+
+                      ecdh     ecdh/s
+        NIST192p:  0.00050s   1985.70
+        NIST224p:  0.00066s   1524.16
+        NIST256p:  0.00071s   1413.07
+        NIST384p:  0.00127s    788.89
+        NIST521p:  0.00230s    434.85
+       SECP256k1:  0.00071s   1409.95
+ BRAINPOOLP160r1:  0.00042s   2374.65
+ BRAINPOOLP192r1:  0.00051s   1960.01
+ BRAINPOOLP224r1:  0.00066s   1518.37
+ BRAINPOOLP256r1:  0.00071s   1399.90
+ BRAINPOOLP320r1:  0.00100s    997.21
+ BRAINPOOLP384r1:  0.00129s    777.51
+ BRAINPOOLP512r1:  0.00210s    475.99
+       SECP112r1:  0.00022s   4457.70
+       SECP112r2:  0.00024s   4252.33
+       SECP128r1:  0.00028s   3589.31
+       SECP160r1:  0.00043s   2305.02
+```
+
+(there is also a `gmpy` variant; run it with `tox -e speedgmpy`)
+
+For comparison, a highly optimised implementation (including curve-specific
+assembly for some curves), like the one in OpenSSL 1.1.1d, provides the
+following performance numbers on the same machine.
+Run `openssl speed ecdsa` and `openssl speed ecdh` to reproduce these numbers:
+```
+                                     sign     verify    sign/s  verify/s
+ 192 bits ecdsa (nistp192)         0.0002s   0.0002s    4785.6    5380.7
+ 224 bits ecdsa (nistp224)         0.0000s   0.0001s   22475.6    9822.0
+ 256 bits ecdsa (nistp256)         0.0000s   0.0001s   45069.6   14166.6
+ 384 bits ecdsa (nistp384)         0.0008s   0.0006s    1265.6    1648.1
+ 521 bits ecdsa (nistp521)         0.0003s   0.0005s    3753.1    1819.5
+ 256 bits ecdsa (brainpoolP256r1)  0.0003s   0.0003s    2983.5    3333.2
+ 384 bits ecdsa (brainpoolP384r1)  0.0008s   0.0007s    1258.8    1528.1
+ 512 bits ecdsa (brainpoolP512r1)  0.0015s   0.0012s     675.1     860.1
+
+                                     sign     verify    sign/s  verify/s
+ 253 bits EdDSA (Ed25519)          0.0000s   0.0001s   28217.9   10897.7
+ 456 bits EdDSA (Ed448)            0.0003s   0.0005s    3926.5    2147.7
+
+                                     op       op/s
+ 192 bits ecdh (nistp192)          0.0002s   4853.4
+ 224 bits ecdh (nistp224)          0.0001s  15252.1
+ 256 bits ecdh (nistp256)          0.0001s  18436.3
+ 384 bits ecdh (nistp384)          0.0008s   1292.7
+ 521 bits ecdh (nistp521)          0.0003s   2884.7
+ 256 bits ecdh (brainpoolP256r1)   0.0003s   3066.5
+ 384 bits ecdh (brainpoolP384r1)   0.0008s   1298.0
+ 512 bits ecdh (brainpoolP512r1)   0.0014s    694.8
+```
+
+Keys and signatures can be serialized in different ways (see Usage, below).
+For a NIST192p key, the basic representations require strings of the
+following lengths (in bytes):
+
+    to_string:  signkey= 24, verifykey= 48, signature=48
+    compressed: signkey=n/a, verifykey= 25, signature=n/a
+    DER:        signkey=106, verifykey= 80, signature=55
+    PEM:        signkey=278, verifykey=162, (no support for PEM signatures)
+
+## History
+
+In 2006, Peter Pearson announced his pure-python implementation of ECDSA in a
+[message to sci.crypt][1], available from his [download site][2]. In 2010,
+Brian Warner wrote a wrapper around this code to make it a bit easier and
+safer to use. In 2020, Hubert Kario included an implementation of elliptic
+curve cryptography that uses Jacobian coordinates internally, improving
+performance about 20-fold. You are looking at the README for this wrapper.
+
+[1]: http://www.derkeiler.com/Newsgroups/sci.crypt/2006-01/msg00651.html
+[2]: http://webpages.charter.net/curryfans/peter/downloads.html
+
+## Testing
+
+To run the full test suite, do this:
+
+    tox -e coverage
+
+On an Intel Core i7 4790K @ 4.0GHz, the tests take about 18 seconds to execute.
+The test suite uses
+[`hypothesis`](https://github.com/HypothesisWorks/hypothesis) so there is some
+inherent variability in its execution time.
+
+One part of `test_pyecdsa.py` and `test_ecdh.py` checks compatibility with
+OpenSSL by running the `openssl` CLI tool; make sure it's in your `PATH` if
+you want to test compatibility with it (if OpenSSL is missing, too old, or
+doesn't support all the curves supported in upstream releases, you will see
+skipped tests in the above `coverage` run).
+
+## Security
+
+This library was not designed with security in mind. If you are processing
+data that needs to be protected, we suggest you use a quality wrapper around
+OpenSSL. [pyca/cryptography](https://cryptography.io) is one example of such
+a wrapper. The primary use-case of this library is as a portable library for
+interoperability testing and as a teaching tool.
+
+**This library does not protect against side-channel attacks.**
+
+Do not allow attackers to measure how long it takes you to generate a key pair
+or sign a message. Do not allow attackers to run code on the same physical
+machine when key pair generation or signing is taking place (this includes
+virtual machines).
+Do not allow attackers to measure how much power your
+computer uses while generating the key pair or signing a message. Do not allow
+attackers to measure RF interference coming from your computer while generating
+a key pair or signing a message. Note: just loading the private key will cause
+key pair generation. Other operations or attack vectors may also be
+vulnerable to attacks. **For a sophisticated attacker, observing just one
+operation with a private key can be sufficient to completely
+reconstruct the private key.**
+
+Please also note that any pure-Python cryptographic library will be vulnerable
+to the same side-channel attacks. This is because Python does not provide
+side-channel-secure primitives (with the exception of
+[`hmac.compare_digest()`][3]), making side-channel-secure programming
+impossible.
+
+This library depends upon a strong source of random numbers. Do not use it on
+a system where `os.urandom()` does not provide cryptographically secure
+random numbers.
+
+[3]: https://docs.python.org/3/library/hmac.html#hmac.compare_digest
+
+## Usage
+
+You start by creating a `SigningKey`. You can use this to sign data, by passing
+in the data as a byte string and getting back the signature (also a byte
+string). You can also ask a `SigningKey` to give you the corresponding
+`VerifyingKey`. The `VerifyingKey` can be used to verify a signature by
+passing it both the data string and the signature byte string: it either
+returns `True` or raises `BadSignatureError`.
+
+```python
+from ecdsa import SigningKey
+sk = SigningKey.generate()  # uses NIST192p
+vk = sk.verifying_key
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+Each `SigningKey`/`VerifyingKey` is associated with a specific curve, like
+NIST192p (the default one). Longer curves are more secure, but take longer to
+use, and result in longer keys and signatures.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+The `SigningKey` can be serialized into several different formats: the shortest
+is to call `s = sk.to_string()`, and then re-create it with
+`SigningKey.from_string(s, curve)`. This short form does not record the
+curve, so you must be sure to pass to `from_string()` the same curve you used
+for the original key. The short form of a NIST192p-based signing key is just 24
+bytes long. If a point encoding is invalid or the point does not lie on the
+specified curve, `from_string()` will raise `MalformedPointError`.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+sk_string = sk.to_string()
+sk2 = SigningKey.from_string(sk_string, curve=NIST384p)
+print(sk_string.hex())
+print(sk2.to_string().hex())
+```
+
+Note: while the methods are called `to_string()`, the type they return is
+actually `bytes`; the "string" part is a leftover from Python 2.
+
+`sk.to_pem()` and `sk.to_der()` will serialize the signing key into the same
+formats that OpenSSL uses. The PEM file looks like the familiar ASCII-armored
+`"-----BEGIN EC PRIVATE KEY-----"` base64-encoded format, and the DER format
+is a shorter binary form of the same data.
+`SigningKey.from_pem()`/`.from_der()` will undo this serialization. These
+formats include the curve name, so you do not need to pass in a curve
+identifier to the deserializer.
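+As a quick illustration, a DER round trip looks the same as the PEM one
+shown just below; this is a minimal sketch using only the calls described
+above, with an added assertion to show the keys match:
+
+```python
+from ecdsa import SigningKey, NIST384p
+
+sk = SigningKey.generate(curve=NIST384p)
+sk_der = sk.to_der()  # compact binary form; the curve name is included
+sk2 = SigningKey.from_der(sk_der)
+# sk and sk2 are the same key
+assert sk.to_string() == sk2.to_string()
+```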
+If the file is malformed, `from_der()` and `from_pem()` will raise
+`UnexpectedDER` or `MalformedPointError`.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+sk_pem = sk.to_pem()
+sk2 = SigningKey.from_pem(sk_pem)
+# sk and sk2 are the same key
+```
+
+Likewise, the `VerifyingKey` can be serialized in the same way:
+`vk.to_string()`/`VerifyingKey.from_string()`, `to_pem()`/`from_pem()`, and
+`to_der()`/`from_der()`. The same `curve=` argument is needed for
+`VerifyingKey.from_string()`.
+
+```python
+from ecdsa import SigningKey, VerifyingKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk_string = vk.to_string()
+vk2 = VerifyingKey.from_string(vk_string, curve=NIST384p)
+# vk and vk2 are the same key
+
+from ecdsa import SigningKey, VerifyingKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk_pem = vk.to_pem()
+vk2 = VerifyingKey.from_pem(vk_pem)
+# vk and vk2 are the same key
+```
+
+There are a couple of different ways to compute a signature. Fundamentally,
+ECDSA takes a number that represents the data being signed, and returns a
+pair of numbers that represent the signature. The `hashfunc=` argument to
+`sk.sign()` and `vk.verify()` is used to turn an arbitrary string into a
+fixed-length digest, which is then turned into a number that ECDSA can sign,
+and both sign and verify must use the same approach. The default value is
+`hashlib.sha1`, but if you use NIST256p or a longer curve, you can use
+`hashlib.sha256` instead.
+
+There are also multiple ways to represent a signature. The default
+`sk.sign()` and `vk.verify()` methods present it as a short string, for
+simplicity and minimal overhead. To use a different scheme, use the
+`sk.sign(sigencode=)` and `vk.verify(sigdecode=)` arguments. There are helper
+functions in the `ecdsa.util` module that can be useful here; a short sketch
+combining the `hashfunc=` and `sigencode=` options appears below.
+
+It is also possible to create a `SigningKey` from a "seed", which is
+deterministic. This can be used in protocols where you want to derive
+consistent signing keys from some other secret, for example when you want
+three separate keys and only want to store a single master secret. You should
+start with a uniformly-distributed unguessable seed with about `curve.baselen`
+bytes of entropy, then use one of the helper functions in `ecdsa.util` to
+convert it into an integer in the correct range, and finally pass it
+into `SigningKey.from_secret_exponent()`, like this:
+
+```python
+import os
+from ecdsa import NIST384p, SigningKey
+from ecdsa.util import randrange_from_seed__trytryagain
+
+def make_key(seed):
+    secexp = randrange_from_seed__trytryagain(seed, NIST384p.order)
+    return SigningKey.from_secret_exponent(secexp, curve=NIST384p)
+
+seed = os.urandom(NIST384p.baselen)  # or other starting point
+sk1a = make_key(seed)
+sk1b = make_key(seed)
+# note: sk1a and sk1b are the same key
+assert sk1a.to_string() == sk1b.to_string()
+sk2 = make_key(b"2-" + seed)  # different key
+assert sk1a.to_string() != sk2.to_string()
+```
+
+If the application will verify a lot of signatures made with a single
+key, it's possible to precompute some of the internal values to make
+signature verification significantly faster. The break-even point occurs at
+about 100 verified signatures.
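+Here is the combined-options sketch promised above: signing with a SHA-256
+digest and a DER-encoded signature. The `sigencode_der`/`sigdecode_der`
+helpers are the same ones used in the OpenSSL Compatibility section below;
+the message bytes are arbitrary example data:
+
+```python
+import hashlib
+from ecdsa import SigningKey, NIST256p
+from ecdsa.util import sigencode_der, sigdecode_der
+
+sk = SigningKey.generate(curve=NIST256p)
+vk = sk.verifying_key
+# hash the message with SHA-256 and emit the signature as DER
+signature = sk.sign(b"message", hashfunc=hashlib.sha256,
+                    sigencode=sigencode_der)
+# verification must use the same hash function and signature encoding
+assert vk.verify(signature, b"message", hashfunc=hashlib.sha256,
+                 sigdecode=sigdecode_der)
+```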
+
+To perform the precomputation mentioned above, call the `precompute()` method
+on the `VerifyingKey` instance:
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk.precompute()
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+Once `precompute()` has been called, all signature verifications with this key
+will be faster to execute.
+
+## OpenSSL Compatibility
+
+To produce signatures that can be verified by OpenSSL tools, or to verify
+signatures that were produced by those tools, use:
+
+```python
+# openssl ecparam -name prime256v1 -genkey -out sk.pem
+# openssl ec -in sk.pem -pubout -out vk.pem
+# echo "data for signing" > data
+# openssl dgst -sha256 -sign sk.pem -out data.sig data
+# openssl dgst -sha256 -verify vk.pem -signature data.sig data
+# openssl dgst -sha256 -prverify sk.pem -signature data.sig data
+
+import hashlib
+from ecdsa import SigningKey, VerifyingKey
+from ecdsa.util import sigencode_der, sigdecode_der
+
+with open("vk.pem") as f:
+    vk = VerifyingKey.from_pem(f.read())
+
+with open("data", "rb") as f:
+    data = f.read()
+
+with open("data.sig", "rb") as f:
+    signature = f.read()
+
+assert vk.verify(signature, data, hashlib.sha256, sigdecode=sigdecode_der)
+
+with open("sk.pem") as f:
+    sk = SigningKey.from_pem(f.read(), hashlib.sha256)
+
+new_signature = sk.sign_deterministic(data, sigencode=sigencode_der)
+
+with open("data.sig2", "wb") as f:
+    f.write(new_signature)
+
+# openssl dgst -sha256 -verify vk.pem -signature data.sig2 data
+```
+
+Note: if compatibility with OpenSSL 1.0.0 or earlier is necessary, use
+`sigencode_string` and `sigdecode_string` from `ecdsa.util` for writing and
+reading the signatures, respectively.
+
+The keys can also be written in a format that OpenSSL can handle:
+
+```python
+from ecdsa import SigningKey, VerifyingKey
+
+with open("sk.pem") as f:
+    sk = SigningKey.from_pem(f.read())
+with open("sk.pem", "wb") as f:
+    f.write(sk.to_pem())
+
+with open("vk.pem") as f:
+    vk = VerifyingKey.from_pem(f.read())
+with open("vk.pem", "wb") as f:
+    f.write(vk.to_pem())
+```
+
+## Entropy
+
+Creating a signing key with `SigningKey.generate()` requires some form of
+entropy (as opposed to
+`from_secret_exponent`/`from_string`/`from_der`/`from_pem`,
+which are deterministic and do not require an entropy source). The default
+source is `os.urandom()`, but you can pass any other function that behaves
+like `os.urandom` as the `entropy=` argument to do something different. This
+may be useful in unit tests, where you want to achieve repeatable results. The
+`ecdsa.util.PRNG` utility is handy here: it takes a seed and produces a strong
+pseudo-random stream from it:
+
+```python
+from ecdsa.util import PRNG
+from ecdsa import SigningKey
+rng1 = PRNG(b"seed")
+sk1 = SigningKey.generate(entropy=rng1)
+rng2 = PRNG(b"seed")
+sk2 = SigningKey.generate(entropy=rng2)
+# sk1 and sk2 are the same key
+```
+
+Likewise, ECDSA signature generation requires a random number, and each
+signature must use a different one (using the same number twice will
+immediately reveal the private signing key). The `sk.sign()` method takes an
+`entropy=` argument which behaves the same as `SigningKey.generate(entropy=)`.
+
+## Deterministic Signatures
+
+If you call `SigningKey.sign_deterministic(data)` instead of `.sign(data)`,
+the code will generate a deterministic signature instead of a random one.
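+A quick check of that property (a minimal sketch; the message bytes are
+arbitrary):
+
+```python
+from ecdsa import SigningKey, NIST256p
+
+sk = SigningKey.generate(curve=NIST256p)
+# signing the same message twice yields byte-identical signatures
+sig1 = sk.sign_deterministic(b"message")
+sig2 = sk.sign_deterministic(b"message")
+assert sig1 == sig2
+```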
+Deterministic signing uses the algorithm from RFC 6979 to safely generate a
+unique `k` value, derived from the private key and the message being signed.
+Each time you sign the same message with the same key, you will get the same
+signature (using the same `k`).
+
+This may become the default in a future version, as it is not vulnerable to
+failures of the entropy source.
+
+## Examples
+
+Create a NIST192p key pair and immediately save both to disk:
+
+```python
+from ecdsa import SigningKey
+sk = SigningKey.generate()
+vk = sk.verifying_key
+with open("private.pem", "wb") as f:
+    f.write(sk.to_pem())
+with open("public.pem", "wb") as f:
+    f.write(vk.to_pem())
+```
+
+Load a signing key from disk, use it to sign a message (using SHA-1), and write
+the signature to disk:
+
+```python
+from ecdsa import SigningKey
+with open("private.pem") as f:
+    sk = SigningKey.from_pem(f.read())
+with open("message", "rb") as f:
+    message = f.read()
+sig = sk.sign(message)
+with open("signature", "wb") as f:
+    f.write(sig)
+```
+
+Load the verifying key, message, and signature from disk, and verify the
+signature (assuming a SHA-1 hash):
+
+```python
+from ecdsa import VerifyingKey, BadSignatureError
+with open("public.pem") as f:
+    vk = VerifyingKey.from_pem(f.read())
+with open("message", "rb") as f:
+    message = f.read()
+with open("signature", "rb") as f:
+    sig = f.read()
+try:
+    vk.verify(sig, message)
+    print("good signature")
+except BadSignatureError:
+    print("BAD SIGNATURE")
+```
+
+Create a NIST521p key pair:
+
+```python
+from ecdsa import SigningKey, NIST521p
+sk = SigningKey.generate(curve=NIST521p)
+vk = sk.verifying_key
+```
+
+Create three independent signing keys from a master seed:
+
+```python
+import os
+from ecdsa import NIST192p, SigningKey
+from ecdsa.util import randrange_from_seed__trytryagain
+
+def make_key_from_seed(seed, curve=NIST192p):
+    secexp = randrange_from_seed__trytryagain(seed, curve.order)
+    return SigningKey.from_secret_exponent(secexp, curve)
+
+seed = os.urandom(NIST192p.baselen)  # or some other master secret
+sk1 = make_key_from_seed(b"1:" + seed)
+sk2 = make_key_from_seed(b"2:" + seed)
+sk3 = make_key_from_seed(b"3:" + seed)
+```
+
+Load a verifying key from disk and print it using hex encoding in
+uncompressed and compressed format (defined in X9.62 and SEC1 standards):
+
+```python
+from ecdsa import VerifyingKey
+
+with open("public.pem") as f:
+    vk = VerifyingKey.from_pem(f.read())
+
+print("uncompressed: {0}".format(vk.to_string("uncompressed").hex()))
+print("compressed: {0}".format(vk.to_string("compressed").hex()))
+```
+
+Load a verifying key from a hex string in compressed format and output it
+uncompressed:
+
+```python
+from ecdsa import VerifyingKey, NIST256p
+
+comp_str = '022799c0d0ee09772fdd337d4f28dc155581951d07082fb19a38aa396b67e77759'
+vk = VerifyingKey.from_string(bytearray.fromhex(comp_str), curve=NIST256p)
+print(vk.to_string("uncompressed").hex())
+```
+
+ECDH key exchange with a remote party:
+
+```python
+from ecdsa import ECDH, NIST256p
+
+ecdh = ECDH(curve=NIST256p)
+ecdh.generate_private_key()
+local_public_key = ecdh.get_public_key()
+# send `local_public_key` to the remote party and receive
+# `remote_public_key` from the remote party
+with open("remote_public_key.pem") as e:
+    remote_public_key = e.read()
+ecdh.load_received_public_key_pem(remote_public_key)
+secret = ecdh.generate_sharedsecret_bytes()
+```
+
+
diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/RECORD
new file mode 100644
index 0000000..722d990
--- /dev/null
+++
b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/RECORD @@ -0,0 +1,35 @@ +ecdsa/__init__.py,sha256=m8jWFcZ9E-iiDqVaTy7ABtvEyTJlF58tZ45UAKj3UWo,1637 +ecdsa/_compat.py,sha256=EhUF8-sFu1dKKGDibkmItbYm_nKoklSIBgkIburUoAg,4619 +ecdsa/_rwlock.py,sha256=CAwHp2V65ksI8B1UqY7EccK9LaUToiv6pDLVzm44eag,2849 +ecdsa/_sha3.py,sha256=DJs7QLmdkQMU35llyD8HQeAXNvf5sMcujO6oFdScIqI,4747 +ecdsa/_version.py,sha256=YZ3BGOHr1Ltse4LfX7F80J6qmKFA-NS-G2eYUuw2WnU,498 +ecdsa/curves.py,sha256=Na5rpnuADNvWkCTlUGbs9xwVogY6Vl2_3uNzpVGgxtE,14390 +ecdsa/der.py,sha256=OmfH8fojeqfJnasAt7I1P8j_qfwcwl-W4gDx1-cO8M0,14109 +ecdsa/ecdh.py,sha256=Tiirawt5xegVDrY9eS-ATvvfmTIznUyv5fy2k7VnzTk,11011 +ecdsa/ecdsa.py,sha256=LPRHHXNvGyZ67lyM6cWqUuhQceCwoktfPij20UTsdJo,24955 +ecdsa/eddsa.py,sha256=IzsGzoGAefcoYjF7DVjFkX5ZJqiK2LTOmAMe6wyf4UU,7170 +ecdsa/ellipticcurve.py,sha256=HwlFqrihf7Q2GQTLYQ0PeCwsgMhk_GlkZz3I2Xj4-eI,53625 +ecdsa/errors.py,sha256=b4mhnmIpRnEdHzbectHAA5F7O9MtSaI-fYoc13_vBxQ,130 +ecdsa/keys.py,sha256=plsomRHYrN3Z3iigUqKM5An8_6TDk1nJNpFv_cPGAvM,65124 +ecdsa/numbertheory.py,sha256=XWugBG59BxrpvjZm7Ytsnmkv770vK8IkClObThpbvAM,17479 +ecdsa/rfc6979.py,sha256=zwzo33lsZJA9r2dSf7HCliI_yIbw5cJ0Ek9tLdRRO40,2850 +ecdsa/test_curves.py,sha256=l5N-m4Yo5IAy4a8aJMsBamaSlLAfSoYjYqCj1HDEVpU,13081 +ecdsa/test_der.py,sha256=q2mr4HS_JyUxojWTSLJu-MQZiTwaCE7W_VG3rwwPEas,14956 +ecdsa/test_ecdh.py,sha256=hUJXTo_Cr9ji9-EpPvpQf7-TgB97WUj2tWcwU-LqCXc,15238 +ecdsa/test_ecdsa.py,sha256=1clBfDtA0zdF-13BoKXsJffL5K-iFutlMgov0kmGro0,23923 +ecdsa/test_eddsa.py,sha256=Vlv5J0C4zNJu5zzr756qpm0AJmARpzBKCWrhHaF_bR4,32615 +ecdsa/test_ellipticcurve.py,sha256=K6W_EQunOfE-RVSux6d1O7LXzSuAsVk9F1elwyY-rYA,6085 +ecdsa/test_jacobi.py,sha256=JDQeM_JKwPfwWBd4IgqtOp1rboeQNUIPPA334b-nmLQ,18388 +ecdsa/test_keys.py,sha256=n_IYLxG4JwD84dYLDmRjV2A-NqsSrrR1P0XBJOCZsEI,32833 +ecdsa/test_malformed_sigs.py,sha256=hiV2vwzFrIdNIC-inYUJIKboyAAw2TKAIVXtFadyojg,10857 +ecdsa/test_numbertheory.py,sha256=g6hi7NZFKuMSAxJSAYW5sWM7ivSCiw8g5-PeoDyowgY,11619 +ecdsa/test_pyecdsa.py,sha256=WeRujEKpkZzHvEeXNnnhM1QdMH0Lxou7Bl4u8RXY-jM,82757 +ecdsa/test_rw_lock.py,sha256=byv0_FTM90cbuHPCI6__LeQJkHL_zYEeVYIBO8e2LLc,7021 +ecdsa/test_sha3.py,sha256=oKULy5KOTaXjpLXSyuHrB1wjPiQDxB6INp7Tf1EU8Ko,3022 +ecdsa/util.py,sha256=cOEN3_c8p79Dc8a-LcUQP2ctIsYky35jhSWc9hLP1qc,14618 +ecdsa-0.18.0.dist-info/LICENSE,sha256=PsqYRXc9LluMydjBGdNF8ApIBuS9Zg1KPWzfnA6di7I,1147 +ecdsa-0.18.0.dist-info/METADATA,sha256=vesFVMWT6uSeOuNwGGxtBm8nm6GROqNNRO28jr8wWqM,29750 +ecdsa-0.18.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +ecdsa-0.18.0.dist-info/top_level.txt,sha256=7ovPHfAPyTou19f8gOSbHm6B9dGjTibWolcCB7Zjovs,6 +ecdsa-0.18.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +ecdsa-0.18.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/WHEEL new file mode 100644 index 0000000..01b8fc7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/top_level.txt new file mode 100644 index 0000000..aa5efdb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa-0.18.0.dist-info/top_level.txt @@ -0,0 +1 @@ +ecdsa diff --git a/myenv/lib/python3.9/site-packages/ecdsa/__init__.py 
b/myenv/lib/python3.9/site-packages/ecdsa/__init__.py new file mode 100644 index 0000000..ce8749a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/__init__.py @@ -0,0 +1,90 @@ +# while we don't use six in this file, we did bundle it for a long time, so +# keep as part of module in a virtual way (through __all__) +import six +from .keys import ( + SigningKey, + VerifyingKey, + BadSignatureError, + BadDigestError, + MalformedPointError, +) +from .curves import ( + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + SECP256k1, + BRAINPOOLP160r1, + BRAINPOOLP192r1, + BRAINPOOLP224r1, + BRAINPOOLP256r1, + BRAINPOOLP320r1, + BRAINPOOLP384r1, + BRAINPOOLP512r1, + SECP112r1, + SECP112r2, + SECP128r1, + SECP160r1, + Ed25519, + Ed448, +) +from .ecdh import ( + ECDH, + NoKeyError, + NoCurveError, + InvalidCurveError, + InvalidSharedSecretError, +) +from .der import UnexpectedDER +from . import _version + +# This code comes from http://github.com/tlsfuzzer/python-ecdsa +__all__ = [ + "curves", + "der", + "ecdsa", + "ellipticcurve", + "keys", + "numbertheory", + "test_pyecdsa", + "util", + "six", +] + +_hush_pyflakes = [ + SigningKey, + VerifyingKey, + BadSignatureError, + BadDigestError, + MalformedPointError, + UnexpectedDER, + InvalidCurveError, + NoKeyError, + InvalidSharedSecretError, + ECDH, + NoCurveError, + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + SECP256k1, + BRAINPOOLP160r1, + BRAINPOOLP192r1, + BRAINPOOLP224r1, + BRAINPOOLP256r1, + BRAINPOOLP320r1, + BRAINPOOLP384r1, + BRAINPOOLP512r1, + SECP112r1, + SECP112r2, + SECP128r1, + SECP160r1, + Ed25519, + Ed448, + six.b(""), +] +del _hush_pyflakes + +__version__ = _version.get_versions()["version"] diff --git a/myenv/lib/python3.9/site-packages/ecdsa/_compat.py b/myenv/lib/python3.9/site-packages/ecdsa/_compat.py new file mode 100644 index 0000000..83d41a5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/_compat.py @@ -0,0 +1,153 @@ +""" +Common functions for providing cross-python version compatibility. +""" +import sys +import re +import binascii +from six import integer_types + + +def str_idx_as_int(string, index): + """Take index'th byte from string, return as integer""" + val = string[index] + if isinstance(val, integer_types): + return val + return ord(val) + + +if sys.version_info < (3, 0): # pragma: no branch + import platform + + def normalise_bytes(buffer_object): + """Cast the input into array of bytes.""" + # flake8 runs on py3 where `buffer` indeed doesn't exist... 
+ return buffer(buffer_object) # noqa: F821 + + def hmac_compat(ret): + return ret + + if ( + sys.version_info < (2, 7) + or sys.version_info < (2, 7, 4) + or platform.system() == "Java" + ): # pragma: no branch + + def remove_whitespace(text): + """Removes all whitespace from passed in string""" + return re.sub(r"\s+", "", text) + + def compat26_str(val): + return str(val) + + def bit_length(val): + if val == 0: + return 0 + return len(bin(val)) - 2 + + else: + + def remove_whitespace(text): + """Removes all whitespace from passed in string""" + return re.sub(r"\s+", "", text, flags=re.UNICODE) + + def compat26_str(val): + return val + + def bit_length(val): + """Return number of bits necessary to represent an integer.""" + return val.bit_length() + + def b2a_hex(val): + return binascii.b2a_hex(compat26_str(val)) + + def a2b_hex(val): + try: + return bytearray(binascii.a2b_hex(val)) + except Exception as e: + raise ValueError("base16 error: %s" % e) + + def bytes_to_int(val, byteorder): + """Convert bytes to an int.""" + if not val: + return 0 + if byteorder == "big": + return int(b2a_hex(val), 16) + if byteorder == "little": + return int(b2a_hex(val[::-1]), 16) + raise ValueError("Only 'big' and 'little' endian supported") + + def int_to_bytes(val, length=None, byteorder="big"): + """Return number converted to bytes""" + if length is None: + length = byte_length(val) + if byteorder == "big": + return bytearray( + (val >> i) & 0xFF for i in reversed(range(0, length * 8, 8)) + ) + if byteorder == "little": + return bytearray( + (val >> i) & 0xFF for i in range(0, length * 8, 8) + ) + raise ValueError("Only 'big' or 'little' endian supported") + +else: + if sys.version_info < (3, 4): # pragma: no branch + # on python 3.3 hmac.hmac.update() accepts only bytes, on newer + # versions it does accept memoryview() also + def hmac_compat(data): + if not isinstance(data, bytes): # pragma: no branch + return bytes(data) + return data + + def normalise_bytes(buffer_object): + """Cast the input into array of bytes.""" + if not buffer_object: + return b"" + return memoryview(buffer_object).cast("B") + + else: + + def hmac_compat(data): + return data + + def normalise_bytes(buffer_object): + """Cast the input into array of bytes.""" + return memoryview(buffer_object).cast("B") + + def compat26_str(val): + return val + + def remove_whitespace(text): + """Removes all whitespace from passed in string""" + return re.sub(r"\s+", "", text, flags=re.UNICODE) + + def a2b_hex(val): + try: + return bytearray(binascii.a2b_hex(bytearray(val, "ascii"))) + except Exception as e: + raise ValueError("base16 error: %s" % e) + + # pylint: disable=invalid-name + # pylint is stupid here and doesn't notice it's a function, not + # constant + bytes_to_int = int.from_bytes + # pylint: enable=invalid-name + + def bit_length(val): + """Return number of bits necessary to represent an integer.""" + return val.bit_length() + + def int_to_bytes(val, length=None, byteorder="big"): + """Convert integer to bytes.""" + if length is None: + length = byte_length(val) + # for gmpy we need to convert back to native int + if type(val) != int: + val = int(val) + return bytearray(val.to_bytes(length=length, byteorder=byteorder)) + + +def byte_length(val): + """Return number of bytes necessary to represent an integer.""" + length = bit_length(val) + return (length + 7) // 8 diff --git a/myenv/lib/python3.9/site-packages/ecdsa/_rwlock.py b/myenv/lib/python3.9/site-packages/ecdsa/_rwlock.py new file mode 100644 index 0000000..010e498 --- 
/dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/_rwlock.py @@ -0,0 +1,86 @@ +# Copyright Mateusz Kobos, (c) 2011 +# https://code.activestate.com/recipes/577803-reader-writer-lock-with-priority-for-writers/ +# released under the MIT licence + +import threading + + +__author__ = "Mateusz Kobos" + + +class RWLock: + """ + Read-Write locking primitive + + Synchronization object used in a solution of so-called second + readers-writers problem. In this problem, many readers can simultaneously + access a share, and a writer has an exclusive access to this share. + Additionally, the following constraints should be met: + 1) no reader should be kept waiting if the share is currently opened for + reading unless a writer is also waiting for the share, + 2) no writer should be kept waiting for the share longer than absolutely + necessary. + + The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7] + with a modification -- adding an additional lock (C{self.__readers_queue}) + -- in accordance with [2]. + + Sources: + [1] A.B. Downey: "The little book of semaphores", Version 2.1.5, 2008 + [2] P.J. Courtois, F. Heymans, D.L. Parnas: + "Concurrent Control with 'Readers' and 'Writers'", + Communications of the ACM, 1971 (via [3]) + [3] http://en.wikipedia.org/wiki/Readers-writers_problem + """ + + def __init__(self): + """ + A lock giving an even higher priority to the writer in certain + cases (see [2] for a discussion). + """ + self.__read_switch = _LightSwitch() + self.__write_switch = _LightSwitch() + self.__no_readers = threading.Lock() + self.__no_writers = threading.Lock() + self.__readers_queue = threading.Lock() + + def reader_acquire(self): + self.__readers_queue.acquire() + self.__no_readers.acquire() + self.__read_switch.acquire(self.__no_writers) + self.__no_readers.release() + self.__readers_queue.release() + + def reader_release(self): + self.__read_switch.release(self.__no_writers) + + def writer_acquire(self): + self.__write_switch.acquire(self.__no_readers) + self.__no_writers.acquire() + + def writer_release(self): + self.__no_writers.release() + self.__write_switch.release(self.__no_readers) + + +class _LightSwitch: + """An auxiliary "light switch"-like object. The first thread turns on the + "switch", the last one turns it off (see [1, sec. 4.2.2] for details).""" + + def __init__(self): + self.__counter = 0 + self.__mutex = threading.Lock() + + def acquire(self, lock): + self.__mutex.acquire() + self.__counter += 1 + if self.__counter == 1: + lock.acquire() + self.__mutex.release() + + def release(self, lock): + self.__mutex.acquire() + self.__counter -= 1 + if self.__counter == 0: + lock.release() + self.__mutex.release() diff --git a/myenv/lib/python3.9/site-packages/ecdsa/_sha3.py b/myenv/lib/python3.9/site-packages/ecdsa/_sha3.py new file mode 100644 index 0000000..2db0058 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/_sha3.py @@ -0,0 +1,181 @@ +""" +Implementation of the SHAKE-256 algorithm for Ed448 +""" + +try: + import hashlib + + hashlib.new("shake256").digest(64) + + def shake_256(msg, outlen): + return hashlib.new("shake256", msg).digest(outlen) + +except (TypeError, ValueError): + + from ._compat import bytes_to_int, int_to_bytes + + # From little endian. + def _from_le(s): + return bytes_to_int(s, byteorder="little") + + # Rotate a word x by b places to the left. + def _rol(x, b): + return ((x << b) | (x >> (64 - b))) & (2**64 - 1) + + # Do the SHA-3 state transform on state s. 
+ def _sha3_transform(s): + ROTATIONS = [ + 0, + 1, + 62, + 28, + 27, + 36, + 44, + 6, + 55, + 20, + 3, + 10, + 43, + 25, + 39, + 41, + 45, + 15, + 21, + 8, + 18, + 2, + 61, + 56, + 14, + ] + PERMUTATION = [ + 1, + 6, + 9, + 22, + 14, + 20, + 2, + 12, + 13, + 19, + 23, + 15, + 4, + 24, + 21, + 8, + 16, + 5, + 3, + 18, + 17, + 11, + 7, + 10, + ] + RC = [ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, + ] + + for rnd in range(0, 24): + # AddColumnParity (Theta) + c = [0] * 5 + d = [0] * 5 + for i in range(0, 25): + c[i % 5] ^= s[i] + for i in range(0, 5): + d[i] = c[(i + 4) % 5] ^ _rol(c[(i + 1) % 5], 1) + for i in range(0, 25): + s[i] ^= d[i % 5] + # RotateWords (Rho) + for i in range(0, 25): + s[i] = _rol(s[i], ROTATIONS[i]) + # PermuteWords (Pi) + t = s[PERMUTATION[0]] + for i in range(0, len(PERMUTATION) - 1): + s[PERMUTATION[i]] = s[PERMUTATION[i + 1]] + s[PERMUTATION[-1]] = t + # NonlinearMixRows (Chi) + for i in range(0, 25, 5): + t = [ + s[i], + s[i + 1], + s[i + 2], + s[i + 3], + s[i + 4], + s[i], + s[i + 1], + ] + for j in range(0, 5): + s[i + j] = t[j] ^ ((~t[j + 1]) & (t[j + 2])) + # AddRoundConstant (Iota) + s[0] ^= RC[rnd] + + # Reinterpret octet array b to word array and XOR it to state s. + def _reinterpret_to_words_and_xor(s, b): + for j in range(0, len(b) // 8): + s[j] ^= _from_le(b[8 * j : 8 * j + 8]) + + # Reinterpret word array w to octet array and return it. + def _reinterpret_to_octets(w): + mp = bytearray() + for j in range(0, len(w)): + mp += int_to_bytes(w[j], 8, byteorder="little") + return mp + + def _sha3_raw(msg, r_w, o_p, e_b): + """Semi-generic SHA-3 implementation""" + r_b = 8 * r_w + s = [0] * 25 + # Handle whole blocks. + idx = 0 + blocks = len(msg) // r_b + for i in range(0, blocks): + _reinterpret_to_words_and_xor(s, msg[idx : idx + r_b]) + idx += r_b + _sha3_transform(s) + # Handle last block padding. + m = bytearray(msg[idx:]) + m.append(o_p) + while len(m) < r_b: + m.append(0) + m[len(m) - 1] |= 128 + # Handle padded last block. + _reinterpret_to_words_and_xor(s, m) + _sha3_transform(s) + # Output. + out = bytearray() + while len(out) < e_b: + out += _reinterpret_to_octets(s[:r_w]) + _sha3_transform(s) + return out[:e_b] + + def shake_256(msg, outlen): + return _sha3_raw(msg, 17, 31, outlen) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/_version.py b/myenv/lib/python3.9/site-packages/ecdsa/_version.py new file mode 100644 index 0000000..96aae17 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/_version.py @@ -0,0 +1,21 @@ + +# This file was generated by 'versioneer.py' (0.21) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. 
+ +import json + +version_json = ''' +{ + "date": "2022-07-09T14:49:17+0200", + "dirty": false, + "error": null, + "full-revisionid": "341e0d8be9fedf66fbc9a95630b4ed2138343380", + "version": "0.18.0" +} +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/curves.py b/myenv/lib/python3.9/site-packages/ecdsa/curves.py new file mode 100644 index 0000000..1119ee5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/curves.py @@ -0,0 +1,513 @@ +from __future__ import division + +from six import PY2 +from . import der, ecdsa, ellipticcurve, eddsa +from .util import orderlen, number_to_string, string_to_number +from ._compat import normalise_bytes, bit_length + + +# orderlen was defined in this module previously, so keep it in __all__, +# will need to mark it as deprecated later +__all__ = [ + "UnknownCurveError", + "orderlen", + "Curve", + "SECP112r1", + "SECP112r2", + "SECP128r1", + "SECP160r1", + "NIST192p", + "NIST224p", + "NIST256p", + "NIST384p", + "NIST521p", + "curves", + "find_curve", + "curve_by_name", + "SECP256k1", + "BRAINPOOLP160r1", + "BRAINPOOLP192r1", + "BRAINPOOLP224r1", + "BRAINPOOLP256r1", + "BRAINPOOLP320r1", + "BRAINPOOLP384r1", + "BRAINPOOLP512r1", + "PRIME_FIELD_OID", + "CHARACTERISTIC_TWO_FIELD_OID", + "Ed25519", + "Ed448", +] + + +PRIME_FIELD_OID = (1, 2, 840, 10045, 1, 1) +CHARACTERISTIC_TWO_FIELD_OID = (1, 2, 840, 10045, 1, 2) + + +class UnknownCurveError(Exception): + pass + + +class Curve: + def __init__(self, name, curve, generator, oid, openssl_name=None): + self.name = name + self.openssl_name = openssl_name # maybe None + self.curve = curve + self.generator = generator + self.order = generator.order() + if isinstance(curve, ellipticcurve.CurveEdTw): + # EdDSA keys are special in that both private and public + # are the same size (as it's defined only with compressed points) + + # +1 for the sign bit and then round up + self.baselen = (bit_length(curve.p()) + 1 + 7) // 8 + self.verifying_key_length = self.baselen + else: + self.baselen = orderlen(self.order) + self.verifying_key_length = 2 * orderlen(curve.p()) + self.signature_length = 2 * self.baselen + self.oid = oid + if oid: + self.encoded_oid = der.encode_oid(*oid) + + def __eq__(self, other): + if isinstance(other, Curve): + return ( + self.curve == other.curve and self.generator == other.generator + ) + return NotImplemented + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return self.name + + def to_der(self, encoding=None, point_encoding="uncompressed"): + """Serialise the curve parameters to binary string. + + :param str encoding: the format to save the curve parameters in. + Default is ``named_curve``, with fallback being the ``explicit`` + if the OID is not set for the curve. + :param str point_encoding: the point encoding of the generator when + explicit curve encoding is used. Ignored for ``named_curve`` + format. 
+ + :return: DER encoded ECParameters structure + :rtype: bytes + """ + if encoding is None: + if self.oid: + encoding = "named_curve" + else: + encoding = "explicit" + + if encoding not in ("named_curve", "explicit"): + raise ValueError( + "Only 'named_curve' and 'explicit' encodings supported" + ) + + if encoding == "named_curve": + if not self.oid: + raise UnknownCurveError( + "Can't encode curve using named_curve encoding without " + "associated curve OID" + ) + return der.encode_oid(*self.oid) + elif isinstance(self.curve, ellipticcurve.CurveEdTw): + assert encoding == "explicit" + raise UnknownCurveError( + "Twisted Edwards curves don't support explicit encoding" + ) + + # encode the ECParameters sequence + curve_p = self.curve.p() + version = der.encode_integer(1) + field_id = der.encode_sequence( + der.encode_oid(*PRIME_FIELD_OID), der.encode_integer(curve_p) + ) + curve = der.encode_sequence( + der.encode_octet_string( + number_to_string(self.curve.a() % curve_p, curve_p) + ), + der.encode_octet_string( + number_to_string(self.curve.b() % curve_p, curve_p) + ), + ) + base = der.encode_octet_string(self.generator.to_bytes(point_encoding)) + order = der.encode_integer(self.generator.order()) + seq_elements = [version, field_id, curve, base, order] + if self.curve.cofactor(): + cofactor = der.encode_integer(self.curve.cofactor()) + seq_elements.append(cofactor) + + return der.encode_sequence(*seq_elements) + + def to_pem(self, encoding=None, point_encoding="uncompressed"): + """ + Serialise the curve parameters to the :term:`PEM` format. + + :param str encoding: the format to save the curve parameters in. + Default is ``named_curve``, with fallback being the ``explicit`` + if the OID is not set for the curve. + :param str point_encoding: the point encoding of the generator when + explicit curve encoding is used. Ignored for ``named_curve`` + format. + + :return: PEM encoded ECParameters structure + :rtype: str + """ + return der.topem( + self.to_der(encoding, point_encoding), "EC PARAMETERS" + ) + + @staticmethod + def from_der(data, valid_encodings=None): + """Decode the curve parameters from DER file. 
+ + :param data: the binary string to decode the parameters from + :type data: :term:`bytes-like object` + :param valid_encodings: set of names of allowed encodings, by default + all (set by passing ``None``), supported ones are ``named_curve`` + and ``explicit`` + :type valid_encodings: :term:`set-like object` + """ + if not valid_encodings: + valid_encodings = set(("named_curve", "explicit")) + if not all(i in ["named_curve", "explicit"] for i in valid_encodings): + raise ValueError( + "Only named_curve and explicit encodings supported" + ) + data = normalise_bytes(data) + if not der.is_sequence(data): + if "named_curve" not in valid_encodings: + raise der.UnexpectedDER( + "named_curve curve parameters not allowed" + ) + oid, empty = der.remove_object(data) + if empty: + raise der.UnexpectedDER("Unexpected data after OID") + return find_curve(oid) + + if "explicit" not in valid_encodings: + raise der.UnexpectedDER("explicit curve parameters not allowed") + + seq, empty = der.remove_sequence(data) + if empty: + raise der.UnexpectedDER( + "Unexpected data after ECParameters structure" + ) + # decode the ECParameters sequence + version, rest = der.remove_integer(seq) + if version != 1: + raise der.UnexpectedDER("Unknown parameter encoding format") + field_id, rest = der.remove_sequence(rest) + curve, rest = der.remove_sequence(rest) + base_bytes, rest = der.remove_octet_string(rest) + order, rest = der.remove_integer(rest) + cofactor = None + if rest: + # the ASN.1 specification of ECParameters allows for future + # extensions of the sequence, so ignore the remaining bytes + cofactor, _ = der.remove_integer(rest) + + # decode the ECParameters.fieldID sequence + field_type, rest = der.remove_object(field_id) + if field_type == CHARACTERISTIC_TWO_FIELD_OID: + raise UnknownCurveError("Characteristic 2 curves unsupported") + if field_type != PRIME_FIELD_OID: + raise UnknownCurveError( + "Unknown field type: {0}".format(field_type) + ) + prime, empty = der.remove_integer(rest) + if empty: + raise der.UnexpectedDER( + "Unexpected data after ECParameters.fieldID.Prime-p element" + ) + + # decode the ECParameters.curve sequence + curve_a_bytes, rest = der.remove_octet_string(curve) + curve_b_bytes, rest = der.remove_octet_string(rest) + # seed can be defined here, but we don't parse it, so ignore `rest` + + curve_a = string_to_number(curve_a_bytes) + curve_b = string_to_number(curve_b_bytes) + + curve_fp = ellipticcurve.CurveFp(prime, curve_a, curve_b, cofactor) + + # decode the ECParameters.base point + + base = ellipticcurve.PointJacobi.from_bytes( + curve_fp, + base_bytes, + valid_encodings=("uncompressed", "compressed", "hybrid"), + order=order, + generator=True, + ) + tmp_curve = Curve("unknown", curve_fp, base, None) + + # if the curve matches one of the well-known ones, use the well-known + # one in preference, as it will have the OID and name associated + for i in curves: + if tmp_curve == i: + return i + return tmp_curve + + @classmethod + def from_pem(cls, string, valid_encodings=None): + """Decode the curve parameters from PEM file. 
+ + :param str string: the text string to decode the parameters from + :param valid_encodings: set of names of allowed encodings, by default + all (set by passing ``None``), supported ones are ``named_curve`` + and ``explicit`` + :type valid_encodings: :term:`set-like object` + """ + if not PY2 and isinstance(string, str): # pragma: no branch + string = string.encode() + + ec_param_index = string.find(b"-----BEGIN EC PARAMETERS-----") + if ec_param_index == -1: + raise der.UnexpectedDER("EC PARAMETERS PEM header not found") + + return cls.from_der( + der.unpem(string[ec_param_index:]), valid_encodings + ) + + +# the SEC curves +SECP112r1 = Curve( + "SECP112r1", + ecdsa.curve_112r1, + ecdsa.generator_112r1, + (1, 3, 132, 0, 6), + "secp112r1", +) + + +SECP112r2 = Curve( + "SECP112r2", + ecdsa.curve_112r2, + ecdsa.generator_112r2, + (1, 3, 132, 0, 7), + "secp112r2", +) + + +SECP128r1 = Curve( + "SECP128r1", + ecdsa.curve_128r1, + ecdsa.generator_128r1, + (1, 3, 132, 0, 28), + "secp128r1", +) + + +SECP160r1 = Curve( + "SECP160r1", + ecdsa.curve_160r1, + ecdsa.generator_160r1, + (1, 3, 132, 0, 8), + "secp160r1", +) + + +# the NIST curves +NIST192p = Curve( + "NIST192p", + ecdsa.curve_192, + ecdsa.generator_192, + (1, 2, 840, 10045, 3, 1, 1), + "prime192v1", +) + + +NIST224p = Curve( + "NIST224p", + ecdsa.curve_224, + ecdsa.generator_224, + (1, 3, 132, 0, 33), + "secp224r1", +) + + +NIST256p = Curve( + "NIST256p", + ecdsa.curve_256, + ecdsa.generator_256, + (1, 2, 840, 10045, 3, 1, 7), + "prime256v1", +) + + +NIST384p = Curve( + "NIST384p", + ecdsa.curve_384, + ecdsa.generator_384, + (1, 3, 132, 0, 34), + "secp384r1", +) + + +NIST521p = Curve( + "NIST521p", + ecdsa.curve_521, + ecdsa.generator_521, + (1, 3, 132, 0, 35), + "secp521r1", +) + + +SECP256k1 = Curve( + "SECP256k1", + ecdsa.curve_secp256k1, + ecdsa.generator_secp256k1, + (1, 3, 132, 0, 10), + "secp256k1", +) + + +BRAINPOOLP160r1 = Curve( + "BRAINPOOLP160r1", + ecdsa.curve_brainpoolp160r1, + ecdsa.generator_brainpoolp160r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 1), + "brainpoolP160r1", +) + + +BRAINPOOLP192r1 = Curve( + "BRAINPOOLP192r1", + ecdsa.curve_brainpoolp192r1, + ecdsa.generator_brainpoolp192r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 3), + "brainpoolP192r1", +) + + +BRAINPOOLP224r1 = Curve( + "BRAINPOOLP224r1", + ecdsa.curve_brainpoolp224r1, + ecdsa.generator_brainpoolp224r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 5), + "brainpoolP224r1", +) + + +BRAINPOOLP256r1 = Curve( + "BRAINPOOLP256r1", + ecdsa.curve_brainpoolp256r1, + ecdsa.generator_brainpoolp256r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 7), + "brainpoolP256r1", +) + + +BRAINPOOLP320r1 = Curve( + "BRAINPOOLP320r1", + ecdsa.curve_brainpoolp320r1, + ecdsa.generator_brainpoolp320r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 9), + "brainpoolP320r1", +) + + +BRAINPOOLP384r1 = Curve( + "BRAINPOOLP384r1", + ecdsa.curve_brainpoolp384r1, + ecdsa.generator_brainpoolp384r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 11), + "brainpoolP384r1", +) + + +BRAINPOOLP512r1 = Curve( + "BRAINPOOLP512r1", + ecdsa.curve_brainpoolp512r1, + ecdsa.generator_brainpoolp512r1, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 13), + "brainpoolP512r1", +) + + +Ed25519 = Curve( + "Ed25519", + eddsa.curve_ed25519, + eddsa.generator_ed25519, + (1, 3, 101, 112), +) + + +Ed448 = Curve( + "Ed448", + eddsa.curve_ed448, + eddsa.generator_ed448, + (1, 3, 101, 113), +) + + +# no order in particular, but keep previously added curves first +curves = [ + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + SECP256k1, + BRAINPOOLP160r1, + BRAINPOOLP192r1, + BRAINPOOLP224r1, + 
BRAINPOOLP256r1, + BRAINPOOLP320r1, + BRAINPOOLP384r1, + BRAINPOOLP512r1, + SECP112r1, + SECP112r2, + SECP128r1, + SECP160r1, + Ed25519, + Ed448, +] + + +def find_curve(oid_curve): + """Select a curve based on its OID + + :param tuple[int,...] oid_curve: ASN.1 Object Identifier of the + curve to return, like ``(1, 2, 840, 10045, 3, 1, 7)`` for ``NIST256p``. + + :raises UnknownCurveError: When the oid doesn't match any of the supported + curves + + :rtype: ~ecdsa.curves.Curve + """ + for c in curves: + if c.oid == oid_curve: + return c + raise UnknownCurveError( + "I don't know about the curve with oid %s." + "I only know about these: %s" % (oid_curve, [c.name for c in curves]) + ) + + +def curve_by_name(name): + """Select a curve based on its name. + + Returns a :py:class:`~ecdsa.curves.Curve` object with a ``name`` name. + Note that ``name`` is case-sensitve. + + :param str name: Name of the curve to return, like ``NIST256p`` or + ``prime256v1`` + + :raises UnknownCurveError: When the name doesn't match any of the supported + curves + + :rtype: ~ecdsa.curves.Curve + """ + for c in curves: + if name == c.name or (c.openssl_name and name == c.openssl_name): + return c + raise UnknownCurveError( + "Curve with name {0!r} unknown, only curves supported: {1}".format( + name, [c.name for c in curves] + ) + ) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/der.py b/myenv/lib/python3.9/site-packages/ecdsa/der.py new file mode 100644 index 0000000..8b27941 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/der.py @@ -0,0 +1,409 @@ +from __future__ import division + +import binascii +import base64 +import warnings +from itertools import chain +from six import int2byte, b, text_type +from ._compat import str_idx_as_int + + +class UnexpectedDER(Exception): + pass + + +def encode_constructed(tag, value): + return int2byte(0xA0 + tag) + encode_length(len(value)) + value + + +def encode_integer(r): + assert r >= 0 # can't support negative numbers yet + h = ("%x" % r).encode() + if len(h) % 2: + h = b("0") + h + s = binascii.unhexlify(h) + num = str_idx_as_int(s, 0) + if num <= 0x7F: + return b("\x02") + encode_length(len(s)) + s + else: + # DER integers are two's complement, so if the first byte is + # 0x80-0xff then we need an extra 0x00 byte to prevent it from + # looking negative. + return b("\x02") + encode_length(len(s) + 1) + b("\x00") + s + + +# sentry object to check if an argument was specified (used to detect +# deprecated calling convention) +_sentry = object() + + +def encode_bitstring(s, unused=_sentry): + """ + Encode a binary string as a BIT STRING using :term:`DER` encoding. + + Note, because there is no native Python object that can encode an actual + bit string, this function only accepts byte strings as the `s` argument. + The byte string is the actual bit string that will be encoded, padded + on the right (least significant bits, looking from big endian perspective) + to the first full byte. If the bit string has a bit length that is multiple + of 8, then the padding should not be included. For correct DER encoding + the padding bits MUST be set to 0. + + Number of bits of padding need to be provided as the `unused` parameter. + In case they are specified as None, it means the number of unused bits + is already encoded in the string as the first byte. + + The deprecated call convention specifies just the `s` parameters and + encodes the number of unused bits as first parameter (same convention + as with None). 
+ + Empty string must be encoded with `unused` specified as 0. + + Future version of python-ecdsa will make specifying the `unused` argument + mandatory. + + :param s: bytes to encode + :type s: bytes like object + :param unused: number of bits at the end of `s` that are unused, must be + between 0 and 7 (inclusive) + :type unused: int or None + + :raises ValueError: when `unused` is too large or too small + + :return: `s` encoded using DER + :rtype: bytes + """ + encoded_unused = b"" + len_extra = 0 + if unused is _sentry: + warnings.warn( + "Legacy call convention used, unused= needs to be specified", + DeprecationWarning, + ) + elif unused is not None: + if not 0 <= unused <= 7: + raise ValueError("unused must be integer between 0 and 7") + if unused: + if not s: + raise ValueError("unused is non-zero but s is empty") + last = str_idx_as_int(s, -1) + if last & (2**unused - 1): + raise ValueError("unused bits must be zeros in DER") + encoded_unused = int2byte(unused) + len_extra = 1 + return b("\x03") + encode_length(len(s) + len_extra) + encoded_unused + s + + +def encode_octet_string(s): + return b("\x04") + encode_length(len(s)) + s + + +def encode_oid(first, second, *pieces): + assert 0 <= first < 2 and 0 <= second <= 39 or first == 2 and 0 <= second + body = b"".join( + chain( + [encode_number(40 * first + second)], + (encode_number(p) for p in pieces), + ) + ) + return b"\x06" + encode_length(len(body)) + body + + +def encode_sequence(*encoded_pieces): + total_len = sum([len(p) for p in encoded_pieces]) + return b("\x30") + encode_length(total_len) + b("").join(encoded_pieces) + + +def encode_number(n): + b128_digits = [] + while n: + b128_digits.insert(0, (n & 0x7F) | 0x80) + n = n >> 7 + if not b128_digits: + b128_digits.append(0) + b128_digits[-1] &= 0x7F + return b("").join([int2byte(d) for d in b128_digits]) + + +def is_sequence(string): + return string and string[:1] == b"\x30" + + +def remove_constructed(string): + s0 = str_idx_as_int(string, 0) + if (s0 & 0xE0) != 0xA0: + raise UnexpectedDER( + "wanted type 'constructed tag' (0xa0-0xbf), got 0x%02x" % s0 + ) + tag = s0 & 0x1F + length, llen = read_length(string[1:]) + body = string[1 + llen : 1 + llen + length] + rest = string[1 + llen + length :] + return tag, body, rest + + +def remove_sequence(string): + if not string: + raise UnexpectedDER("Empty string does not encode a sequence") + if string[:1] != b"\x30": + n = str_idx_as_int(string, 0) + raise UnexpectedDER("wanted type 'sequence' (0x30), got 0x%02x" % n) + length, lengthlength = read_length(string[1:]) + if length > len(string) - 1 - lengthlength: + raise UnexpectedDER("Length longer than the provided buffer") + endseq = 1 + lengthlength + length + return string[1 + lengthlength : endseq], string[endseq:] + + +def remove_octet_string(string): + if string[:1] != b"\x04": + n = str_idx_as_int(string, 0) + raise UnexpectedDER("wanted type 'octetstring' (0x04), got 0x%02x" % n) + length, llen = read_length(string[1:]) + body = string[1 + llen : 1 + llen + length] + rest = string[1 + llen + length :] + return body, rest + + +def remove_object(string): + if not string: + raise UnexpectedDER( + "Empty string does not encode an object identifier" + ) + if string[:1] != b"\x06": + n = str_idx_as_int(string, 0) + raise UnexpectedDER("wanted type 'object' (0x06), got 0x%02x" % n) + length, lengthlength = read_length(string[1:]) + body = string[1 + lengthlength : 1 + lengthlength + length] + rest = string[1 + lengthlength + length :] + if not body: + raise 
UnexpectedDER("Empty object identifier") + if len(body) != length: + raise UnexpectedDER( + "Length of object identifier longer than the provided buffer" + ) + numbers = [] + while body: + n, ll = read_number(body) + numbers.append(n) + body = body[ll:] + n0 = numbers.pop(0) + if n0 < 80: + first = n0 // 40 + else: + first = 2 + second = n0 - (40 * first) + numbers.insert(0, first) + numbers.insert(1, second) + return tuple(numbers), rest + + +def remove_integer(string): + if not string: + raise UnexpectedDER( + "Empty string is an invalid encoding of an integer" + ) + if string[:1] != b"\x02": + n = str_idx_as_int(string, 0) + raise UnexpectedDER("wanted type 'integer' (0x02), got 0x%02x" % n) + length, llen = read_length(string[1:]) + if length > len(string) - 1 - llen: + raise UnexpectedDER("Length longer than provided buffer") + if length == 0: + raise UnexpectedDER("0-byte long encoding of integer") + numberbytes = string[1 + llen : 1 + llen + length] + rest = string[1 + llen + length :] + msb = str_idx_as_int(numberbytes, 0) + if not msb < 0x80: + raise UnexpectedDER("Negative integers are not supported") + # check if the encoding is the minimal one (DER requirement) + if length > 1 and not msb: + # leading zero byte is allowed if the integer would have been + # considered a negative number otherwise + smsb = str_idx_as_int(numberbytes, 1) + if smsb < 0x80: + raise UnexpectedDER( + "Invalid encoding of integer, unnecessary " + "zero padding bytes" + ) + return int(binascii.hexlify(numberbytes), 16), rest + + +def read_number(string): + number = 0 + llen = 0 + if str_idx_as_int(string, 0) == 0x80: + raise UnexpectedDER("Non minimal encoding of OID subidentifier") + # base-128 big endian, with most significant bit set in all but the last + # byte + while True: + if llen >= len(string): + raise UnexpectedDER("ran out of length bytes") + number = number << 7 + d = str_idx_as_int(string, llen) + number += d & 0x7F + llen += 1 + if not d & 0x80: + break + return number, llen + + +def encode_length(l): + assert l >= 0 + if l < 0x80: + return int2byte(l) + s = ("%x" % l).encode() + if len(s) % 2: + s = b("0") + s + s = binascii.unhexlify(s) + llen = len(s) + return int2byte(0x80 | llen) + s + + +def read_length(string): + if not string: + raise UnexpectedDER("Empty string can't encode valid length value") + num = str_idx_as_int(string, 0) + if not (num & 0x80): + # short form + return (num & 0x7F), 1 + # else long-form: b0&0x7f is number of additional base256 length bytes, + # big-endian + llen = num & 0x7F + if not llen: + raise UnexpectedDER("Invalid length encoding, length of length is 0") + if llen > len(string) - 1: + raise UnexpectedDER("Length of length longer than provided buffer") + # verify that the encoding is minimal possible (DER requirement) + msb = str_idx_as_int(string, 1) + if not msb or llen == 1 and msb < 0x80: + raise UnexpectedDER("Not minimal encoding of length") + return int(binascii.hexlify(string[1 : 1 + llen]), 16), 1 + llen + + +def remove_bitstring(string, expect_unused=_sentry): + """ + Remove a BIT STRING object from `string` following :term:`DER`. + + The `expect_unused` can be used to specify if the bit string should + have the amount of unused bits decoded or not. 
If it's an integer, any
+    decoded BIT STRING that has a number of unused bits different from the
+    specified value will cause an UnexpectedDER exception to be raised (this
+    is especially useful when decoding BIT STRINGs that have a DER-encoded
+    object in them; DER encoding is byte oriented, so the unused bits will
+    always equal 0).
+
+    If `expect_unused` is specified as None, the first element returned
+    will be a tuple, with the first value being the extracted bit string
+    while the second value will be the decoded number of unused bits.
+
+    If `expect_unused` is unspecified, the byte with the number of unused
+    bits will not be decoded and the bit string will be returned as-is;
+    the caller will be required to decode it and verify its correctness.
+
+    A future version of python-ecdsa will require the `expect_unused`
+    parameter to be specified.
+
+    :param string: string of bytes to extract the BIT STRING from
+    :type string: bytes like object
+    :param expect_unused: number of bits that should be unused in the BIT
+        STRING, or None, to return it to the caller
+    :type expect_unused: int or None
+
+    :raises UnexpectedDER: when the encoding does not follow DER.
+
+    :return: a tuple with the first element being the extracted bit string
+        and the second being the remaining bytes in the string (if any); if
+        `expect_unused` is specified as None, the first element of the
+        returned tuple will be a tuple itself, with the first element being
+        the bit string as bytes and the second element being the number of
+        unused bits at the end of the byte array as an integer
+    :rtype: tuple
+    """
+    if not string:
+        raise UnexpectedDER("Empty string does not encode a bitstring")
+    if expect_unused is _sentry:
+        warnings.warn(
+            "Legacy call convention used, expect_unused= needs to be"
+            " specified",
+            DeprecationWarning,
+        )
+    num = str_idx_as_int(string, 0)
+    if string[:1] != b"\x03":
+        raise UnexpectedDER("wanted type 'bitstring' (0x03), got 0x%02x" % num)
+    length, llen = read_length(string[1:])
+    if not length:
+        raise UnexpectedDER("Invalid length of bit string, can't be 0")
+    body = string[1 + llen : 1 + llen + length]
+    rest = string[1 + llen + length :]
+    if expect_unused is not _sentry:
+        unused = str_idx_as_int(body, 0)
+        if not 0 <= unused <= 7:
+            raise UnexpectedDER("Invalid encoding of unused bits")
+        if expect_unused is not None and expect_unused != unused:
+            raise UnexpectedDER("Unexpected number of unused bits")
+        body = body[1:]
+        if unused:
+            if not body:
+                raise UnexpectedDER("Invalid encoding of empty bit string")
+            last = str_idx_as_int(body, -1)
+            # verify that all the unused bits are set to zero (DER requirement)
+            if last & (2**unused - 1):
+                raise UnexpectedDER("Non-zero padding bits in bit string")
+    if expect_unused is None:
+        body = (body, unused)
+    return body, rest
+
+
+# SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING])
+
+
+# signatures: (from RFC3279)
+#   ansi-X9-62  OBJECT IDENTIFIER ::= {
+#       iso(1) member-body(2) us(840) 10045 }
+#
+#   id-ecSigType OBJECT IDENTIFIER ::= {
+#       ansi-X9-62 signatures(4) }
+#   ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+#       id-ecSigType 1 }
+# so 1,2,840,10045,4,1
+# so 0x42, .. ..
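+
+# Illustrative round-trip of the encode/remove helpers above, a sketch kept
+# as comments so nothing executes at import time; it uses only functions
+# defined in this module. The OID is ecdsa-with-SHA1 from the note below:
+#
+#     oid = encode_oid(1, 2, 840, 10045, 4, 1)
+#     assert oid == b"\x06\x07\x2a\x86\x48\xce\x3d\x04\x01"
+#     seq = encode_sequence(oid)
+#     body, rest = remove_sequence(seq)
+#     assert remove_object(body) == ((1, 2, 840, 10045, 4, 1), b"")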
+ +# Ecdsa-Sig-Value ::= SEQUENCE { +# r INTEGER, +# s INTEGER } + +# id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 } +# +# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 } + +# I think the secp224r1 identifier is (t=06,l=05,v=2b81040021) +# secp224r1 OBJECT IDENTIFIER ::= { +# iso(1) identified-organization(3) certicom(132) curve(0) 33 } +# and the secp384r1 is (t=06,l=05,v=2b81040022) +# secp384r1 OBJECT IDENTIFIER ::= { +# iso(1) identified-organization(3) certicom(132) curve(0) 34 } + + +def unpem(pem): + if isinstance(pem, text_type): # pragma: no branch + pem = pem.encode() + + d = b("").join( + [ + l.strip() + for l in pem.split(b("\n")) + if l and not l.startswith(b("-----")) + ] + ) + return base64.b64decode(d) + + +def topem(der, name): + b64 = base64.b64encode(der) + lines = [("-----BEGIN %s-----\n" % name).encode()] + lines.extend( + [b64[start : start + 64] + b("\n") for start in range(0, len(b64), 64)] + ) + lines.append(("-----END %s-----\n" % name).encode()) + return b("").join(lines) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/ecdh.py b/myenv/lib/python3.9/site-packages/ecdsa/ecdh.py new file mode 100644 index 0000000..7f697d9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/ecdh.py @@ -0,0 +1,336 @@ +""" +Class for performing Elliptic-curve Diffie-Hellman (ECDH) operations. +""" + +from .util import number_to_string +from .ellipticcurve import INFINITY +from .keys import SigningKey, VerifyingKey + + +__all__ = [ + "ECDH", + "NoKeyError", + "NoCurveError", + "InvalidCurveError", + "InvalidSharedSecretError", +] + + +class NoKeyError(Exception): + """ECDH. Key not found but it is needed for operation.""" + + pass + + +class NoCurveError(Exception): + """ECDH. Curve not set but it is needed for operation.""" + + pass + + +class InvalidCurveError(Exception): + """ + ECDH. Raised in case the public and private keys use different curves. + """ + + pass + + +class InvalidSharedSecretError(Exception): + """ECDH. Raised in case the shared secret we obtained is an INFINITY.""" + + pass + + +class ECDH(object): + """ + Elliptic-curve Diffie-Hellman (ECDH). A key agreement protocol. + + Allows two parties, each having an elliptic-curve public-private key + pair, to establish a shared secret over an insecure channel + """ + + def __init__(self, curve=None, private_key=None, public_key=None): + """ + ECDH init. + + Call can be initialised without parameters, then the first operation + (loading either key) will set the used curve. + All parameters must be ultimately set before shared secret + calculation will be allowed. + + :param curve: curve for operations + :type curve: Curve + :param private_key: `my` private key for ECDH + :type private_key: SigningKey + :param public_key: `their` public key for ECDH + :type public_key: VerifyingKey + """ + self.curve = curve + self.private_key = None + self.public_key = None + if private_key: + self.load_private_key(private_key) + if public_key: + self.load_received_public_key(public_key) + + def _get_shared_secret(self, remote_public_key): + if not self.private_key: + raise NoKeyError( + "Private key needs to be set to create shared secret" + ) + if not self.public_key: + raise NoKeyError( + "Public key needs to be set to create shared secret" + ) + if not ( + self.private_key.curve == self.curve == remote_public_key.curve + ): + raise InvalidCurveError( + "Curves for public key and private key is not equal." 
+ ) + + # shared secret = PUBKEYtheirs * PRIVATEKEYours + result = ( + remote_public_key.pubkey.point + * self.private_key.privkey.secret_multiplier + ) + if result == INFINITY: + raise InvalidSharedSecretError("Invalid shared secret (INFINITY).") + + return result.x() + + def set_curve(self, key_curve): + """ + Set the working curve for ecdh operations. + + :param key_curve: curve from `curves` module + :type key_curve: Curve + """ + self.curve = key_curve + + def generate_private_key(self): + """ + Generate local private key for ecdh operation with curve that was set. + + :raises NoCurveError: Curve must be set before key generation. + + :return: public (verifying) key from this private key. + :rtype: VerifyingKey + """ + if not self.curve: + raise NoCurveError("Curve must be set prior to key generation.") + return self.load_private_key(SigningKey.generate(curve=self.curve)) + + def load_private_key(self, private_key): + """ + Load private key from SigningKey (keys.py) object. + + Needs to have the same curve as was set with set_curve method. + If curve is not set - it sets from this SigningKey + + :param private_key: Initialised SigningKey class + :type private_key: SigningKey + + :raises InvalidCurveError: private_key curve not the same as self.curve + + :return: public (verifying) key from this private key. + :rtype: VerifyingKey + """ + if not self.curve: + self.curve = private_key.curve + if self.curve != private_key.curve: + raise InvalidCurveError("Curve mismatch.") + self.private_key = private_key + return self.private_key.get_verifying_key() + + def load_private_key_bytes(self, private_key): + """ + Load private key from byte string. + + Uses current curve and checks if the provided key matches + the curve of ECDH key agreement. + Key loads via from_string method of SigningKey class + + :param private_key: private key in bytes string format + :type private_key: :term:`bytes-like object` + + :raises NoCurveError: Curve must be set before loading. + + :return: public (verifying) key from this private key. + :rtype: VerifyingKey + """ + if not self.curve: + raise NoCurveError("Curve must be set prior to key load.") + return self.load_private_key( + SigningKey.from_string(private_key, curve=self.curve) + ) + + def load_private_key_der(self, private_key_der): + """ + Load private key from DER byte string. + + Compares the curve of the DER-encoded key with the ECDH set curve, + uses the former if unset. + + Note, the only DER format supported is the RFC5915 + Look at keys.py:SigningKey.from_der() + + :param private_key_der: string with the DER encoding of private ECDSA + key + :type private_key_der: string + + :raises InvalidCurveError: private_key curve not the same as self.curve + + :return: public (verifying) key from this private key. + :rtype: VerifyingKey + """ + return self.load_private_key(SigningKey.from_der(private_key_der)) + + def load_private_key_pem(self, private_key_pem): + """ + Load private key from PEM string. + + Compares the curve of the DER-encoded key with the ECDH set curve, + uses the former if unset. + + Note, the only PEM format supported is the RFC5915 + Look at keys.py:SigningKey.from_pem() + it needs to have `EC PRIVATE KEY` section + + :param private_key_pem: string with PEM-encoded private ECDSA key + :type private_key_pem: string + + :raises InvalidCurveError: private_key curve not the same as self.curve + + :return: public (verifying) key from this private key. 
+ :rtype: VerifyingKey + """ + return self.load_private_key(SigningKey.from_pem(private_key_pem)) + + def get_public_key(self): + """ + Provides a public key that matches the local private key. + + Needs to be sent to the remote party. + + :return: public (verifying) key from local private key. + :rtype: VerifyingKey + """ + return self.private_key.get_verifying_key() + + def load_received_public_key(self, public_key): + """ + Load public key from VerifyingKey (keys.py) object. + + Needs to have the same curve as set as current for ecdh operation. + If curve is not set - it sets it from VerifyingKey. + + :param public_key: Initialised VerifyingKey class + :type public_key: VerifyingKey + + :raises InvalidCurveError: public_key curve not the same as self.curve + """ + if not self.curve: + self.curve = public_key.curve + if self.curve != public_key.curve: + raise InvalidCurveError("Curve mismatch.") + self.public_key = public_key + + def load_received_public_key_bytes( + self, public_key_str, valid_encodings=None + ): + """ + Load public key from byte string. + + Uses current curve and checks if key length corresponds to + the current curve. + Key loads via from_string method of VerifyingKey class + + :param public_key_str: public key in bytes string format + :type public_key_str: :term:`bytes-like object` + :param valid_encodings: list of acceptable point encoding formats, + supported ones are: :term:`uncompressed`, :term:`compressed`, + :term:`hybrid`, and :term:`raw encoding` (specified with ``raw`` + name). All formats by default (specified with ``None``). + :type valid_encodings: :term:`set-like object` + """ + return self.load_received_public_key( + VerifyingKey.from_string( + public_key_str, self.curve, valid_encodings + ) + ) + + def load_received_public_key_der(self, public_key_der): + """ + Load public key from DER byte string. + + Compares the curve of the DER-encoded key with the ECDH set curve, + uses the former if unset. + + Note, the only DER format supported is the RFC5912 + Look at keys.py:VerifyingKey.from_der() + + :param public_key_der: string with the DER encoding of public ECDSA key + :type public_key_der: string + + :raises InvalidCurveError: public_key curve not the same as self.curve + """ + return self.load_received_public_key( + VerifyingKey.from_der(public_key_der) + ) + + def load_received_public_key_pem(self, public_key_pem): + """ + Load public key from PEM string. + + Compares the curve of the PEM-encoded key with the ECDH set curve, + uses the former if unset. + + Note, the only PEM format supported is the RFC5912 + Look at keys.py:VerifyingKey.from_pem() + + :param public_key_pem: string with PEM-encoded public ECDSA key + :type public_key_pem: string + + :raises InvalidCurveError: public_key curve not the same as self.curve + """ + return self.load_received_public_key( + VerifyingKey.from_pem(public_key_pem) + ) + + def generate_sharedsecret_bytes(self): + """ + Generate shared secret from local private key and remote public key. + + The objects needs to have both private key and received public key + before generation is allowed. + + :raises InvalidCurveError: public_key curve not the same as self.curve + :raises NoKeyError: public_key or private_key is not set + + :return: shared secret + :rtype: bytes + """ + return number_to_string( + self.generate_sharedsecret(), self.private_key.curve.curve.p() + ) + + def generate_sharedsecret(self): + """ + Generate shared secret from local private key and remote public key. 
+
+        The object needs to have both the private key and the received
+        public key before generation is allowed.
+
+        It's the same for the local and the remote party:
+        shared secret(local private key, remote public key) ==
+        shared secret(local public key, remote private key)
+
+        :raises InvalidCurveError: public_key curve not the same as self.curve
+        :raises NoKeyError: public_key or private_key is not set
+
+        :return: shared secret
+        :rtype: int
+        """
+        return self._get_shared_secret(self.public_key)
diff --git a/myenv/lib/python3.9/site-packages/ecdsa/ecdsa.py b/myenv/lib/python3.9/site-packages/ecdsa/ecdsa.py
new file mode 100644
index 0000000..3328281
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/ecdsa/ecdsa.py
@@ -0,0 +1,859 @@
+#! /usr/bin/env python
+
+"""
+Low-level implementation of Elliptic-Curve Digital Signatures.
+
+.. note::
+    You're most likely looking for the :py:mod:`~ecdsa.keys` module.
+    This is a low-level implementation of ECDSA that operates on
+    integers, not byte strings.
+
+NOTE: This is a low-level implementation of ECDSA; for normal applications
+you should be looking at the keys.py module.
+
+Classes and methods for elliptic-curve signatures:
+private keys, public keys, signatures,
+and definitions of prime-modulus curves.
+
+Example:
+
+.. code-block:: python
+
+    # (In real-life applications, you would probably want to
+    # protect against defects in SystemRandom.)
+    from random import SystemRandom
+    randrange = SystemRandom().randrange
+
+    # Generate a public/private key pair using the NIST Curve P-192:
+
+    g = generator_192
+    n = g.order()
+    secret = randrange( 1, n )
+    pubkey = Public_key( g, g * secret )
+    privkey = Private_key( pubkey, secret )
+
+    # Signing a hash value:
+
+    hash = randrange( 1, n )
+    signature = privkey.sign( hash, randrange( 1, n ) )
+
+    # Verifying a signature for a hash value:
+
+    if pubkey.verifies( hash, signature ):
+        print_("Demo verification succeeded.")
+    else:
+        print_("*** Demo verification failed.")
+
+    # Verification fails if the hash value is modified:
+
+    if pubkey.verifies( hash-1, signature ):
+        print_("**** Demo verification failed to reject tampered hash.")
+    else:
+        print_("Demo verification correctly rejected tampered hash.")
+
+Revision history:
+    2005.12.31 - Initial version.
+
+    2008.11.25 - Substantial revisions introducing new classes.
+
+    2009.05.16 - Warn against using random.randrange in real applications.
+
+    2009.05.17 - Use random.SystemRandom by default.
+
+Originally written in 2005 by Peter Pearson and placed in the public domain,
+modified as part of the python-ecdsa package.
+"""
+
+from six import int2byte, b
+from . import ellipticcurve
+from . import numbertheory
+from .util import bit_length
+from ._compat import remove_whitespace
+
+
+class RSZeroError(RuntimeError):
+    pass
+
+
+class InvalidPointError(RuntimeError):
+    pass
+
+
+class Signature(object):
+    """
+    ECDSA signature.
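+
+    Represents the pair of integers (r, s) produced by Private_key.sign()
+    and consumed by Public_key.verifies().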
+ + :ivar int r: the ``r`` element of the ECDSA signature + :ivar int s: the ``s`` element of the ECDSA signature + """ + + def __init__(self, r, s): + self.r = r + self.s = s + + def recover_public_keys(self, hash, generator): + """ + Returns two public keys for which the signature is valid + + :param int hash: signed hash + :param AbstractPoint generator: is the generator used in creation + of the signature + :rtype: tuple(Public_key, Public_key) + :return: a pair of public keys that can validate the signature + """ + curve = generator.curve() + n = generator.order() + r = self.r + s = self.s + e = hash + x = r + + # Compute the curve point with x as x-coordinate + alpha = ( + pow(x, 3, curve.p()) + (curve.a() * x) + curve.b() + ) % curve.p() + beta = numbertheory.square_root_mod_prime(alpha, curve.p()) + y = beta if beta % 2 == 0 else curve.p() - beta + + # Compute the public key + R1 = ellipticcurve.PointJacobi(curve, x, y, 1, n) + Q1 = numbertheory.inverse_mod(r, n) * (s * R1 + (-e % n) * generator) + Pk1 = Public_key(generator, Q1) + + # And the second solution + R2 = ellipticcurve.PointJacobi(curve, x, -y, 1, n) + Q2 = numbertheory.inverse_mod(r, n) * (s * R2 + (-e % n) * generator) + Pk2 = Public_key(generator, Q2) + + return [Pk1, Pk2] + + +class Public_key(object): + """Public key for ECDSA.""" + + def __init__(self, generator, point, verify=True): + """Low level ECDSA public key object. + + :param generator: the Point that generates the group (the base point) + :param point: the Point that defines the public key + :param bool verify: if True check if point is valid point on curve + + :raises InvalidPointError: if the point parameters are invalid or + point does not lay on the curve + """ + + self.curve = generator.curve() + self.generator = generator + self.point = point + n = generator.order() + p = self.curve.p() + if not (0 <= point.x() < p) or not (0 <= point.y() < p): + raise InvalidPointError( + "The public point has x or y out of range." + ) + if verify and not self.curve.contains_point(point.x(), point.y()): + raise InvalidPointError("Point does not lay on the curve") + if not n: + raise InvalidPointError("Generator point must have order.") + # for curve parameters with base point with cofactor 1, all points + # that are on the curve are scalar multiples of the base point, so + # verifying that is not necessary. See Section 3.2.2.1 of SEC 1 v2 + if ( + verify + and self.curve.cofactor() != 1 + and not n * point == ellipticcurve.INFINITY + ): + raise InvalidPointError("Generator point order is bad.") + + def __eq__(self, other): + """Return True if the keys are identical, False otherwise. + + Note: for comparison, only placement on the same curve and point + equality is considered, use of the same generator point is not + considered. + """ + if isinstance(other, Public_key): + return self.curve == other.curve and self.point == other.point + return NotImplemented + + def __ne__(self, other): + """Return False if the keys are identical, True otherwise.""" + return not self == other + + def verifies(self, hash, signature): + """Verify that signature is a valid signature of hash. + Return True if the signature is valid. + """ + + # From X9.62 J.3.1. 
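+        # With c = s^-1 mod n, u1 = hash*c and u2 = r*c, a valid
+        # signature satisfies r == (x coordinate of u1*G + u2*Q) mod n,
+        # which is exactly what is computed below.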
+ + G = self.generator + n = G.order() + r = signature.r + s = signature.s + if r < 1 or r > n - 1: + return False + if s < 1 or s > n - 1: + return False + c = numbertheory.inverse_mod(s, n) + u1 = (hash * c) % n + u2 = (r * c) % n + if hasattr(G, "mul_add"): + xy = G.mul_add(u1, self.point, u2) + else: + xy = u1 * G + u2 * self.point + v = xy.x() % n + return v == r + + +class Private_key(object): + """Private key for ECDSA.""" + + def __init__(self, public_key, secret_multiplier): + """public_key is of class Public_key; + secret_multiplier is a large integer. + """ + + self.public_key = public_key + self.secret_multiplier = secret_multiplier + + def __eq__(self, other): + """Return True if the points are identical, False otherwise.""" + if isinstance(other, Private_key): + return ( + self.public_key == other.public_key + and self.secret_multiplier == other.secret_multiplier + ) + return NotImplemented + + def __ne__(self, other): + """Return False if the points are identical, True otherwise.""" + return not self == other + + def sign(self, hash, random_k): + """Return a signature for the provided hash, using the provided + random nonce. It is absolutely vital that random_k be an unpredictable + number in the range [1, self.public_key.point.order()-1]. If + an attacker can guess random_k, he can compute our private key from a + single signature. Also, if an attacker knows a few high-order + bits (or a few low-order bits) of random_k, he can compute our private + key from many signatures. The generation of nonces with adequate + cryptographic strength is very difficult and far beyond the scope + of this comment. + + May raise RuntimeError, in which case retrying with a new + random value k is in order. + """ + + G = self.public_key.generator + n = G.order() + k = random_k % n + # Fix the bit-length of the random nonce, + # so that it doesn't leak via timing. + # This does not change that ks = k mod n + ks = k + n + kt = ks + n + if bit_length(ks) == bit_length(n): + p1 = kt * G + else: + p1 = ks * G + r = p1.x() % n + if r == 0: + raise RSZeroError("amazingly unlucky random number r") + s = ( + numbertheory.inverse_mod(k, n) + * (hash + (self.secret_multiplier * r) % n) + ) % n + if s == 0: + raise RSZeroError("amazingly unlucky random number s") + return Signature(r, s) + + +def int_to_string(x): + """Convert integer x into a string of bytes, as per X9.62.""" + assert x >= 0 + if x == 0: + return b("\0") + result = [] + while x: + ordinal = x & 0xFF + result.append(int2byte(ordinal)) + x >>= 8 + + result.reverse() + return b("").join(result) + + +def string_to_int(s): + """Convert a string of bytes into an integer, as per X9.62.""" + result = 0 + for c in s: + if not isinstance(c, int): + c = ord(c) + result = 256 * result + c + return result + + +def digest_integer(m): + """Convert an integer into a string of bytes, compute + its SHA-1 hash, and convert the result to an integer.""" + # + # I don't expect this function to be used much. I wrote + # it in order to be able to duplicate the examples + # in ECDSAVS. + # + from hashlib import sha1 + + return string_to_int(sha1(int_to_string(m)).digest()) + + +def point_is_valid(generator, x, y): + """Is (x,y) a valid public key based on the specified generator?""" + + # These are the tests specified in X9.62. 
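+    # Namely: both coordinates must be in [0, p-1], the point must satisfy
+    # the curve equation, and, for curves with a cofactor other than 1,
+    # n*Q must be the point at infinity.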
+ + n = generator.order() + curve = generator.curve() + p = curve.p() + if not (0 <= x < p) or not (0 <= y < p): + return False + if not curve.contains_point(x, y): + return False + if ( + curve.cofactor() != 1 + and not n * ellipticcurve.PointJacobi(curve, x, y, 1) + == ellipticcurve.INFINITY + ): + return False + return True + + +# secp112r1 curve +_p = int(remove_whitespace("DB7C 2ABF62E3 5E668076 BEAD208B"), 16) +# s = 00F50B02 8E4D696E 67687561 51752904 72783FB1 +_a = int(remove_whitespace("DB7C 2ABF62E3 5E668076 BEAD2088"), 16) +_b = int(remove_whitespace("659E F8BA0439 16EEDE89 11702B22"), 16) +_Gx = int(remove_whitespace("09487239 995A5EE7 6B55F9C2 F098"), 16) +_Gy = int(remove_whitespace("A89C E5AF8724 C0A23E0E 0FF77500"), 16) +_r = int(remove_whitespace("DB7C 2ABF62E3 5E7628DF AC6561C5"), 16) +_h = 1 +curve_112r1 = ellipticcurve.CurveFp(_p, _a, _b, _h) +generator_112r1 = ellipticcurve.PointJacobi( + curve_112r1, _Gx, _Gy, 1, _r, generator=True +) + + +# secp112r2 curve +_p = int(remove_whitespace("DB7C 2ABF62E3 5E668076 BEAD208B"), 16) +# s = 022757A1 114D69E 67687561 51755316 C05E0BD4 +_a = int(remove_whitespace("6127 C24C05F3 8A0AAAF6 5C0EF02C"), 16) +_b = int(remove_whitespace("51DE F1815DB5 ED74FCC3 4C85D709"), 16) +_Gx = int(remove_whitespace("4BA30AB5 E892B4E1 649DD092 8643"), 16) +_Gy = int(remove_whitespace("ADCD 46F5882E 3747DEF3 6E956E97"), 16) +_r = int(remove_whitespace("36DF 0AAFD8B8 D7597CA1 0520D04B"), 16) +_h = 4 +curve_112r2 = ellipticcurve.CurveFp(_p, _a, _b, _h) +generator_112r2 = ellipticcurve.PointJacobi( + curve_112r2, _Gx, _Gy, 1, _r, generator=True +) + + +# secp128r1 curve +_p = int(remove_whitespace("FFFFFFFD FFFFFFFF FFFFFFFF FFFFFFFF"), 16) +# S = 000E0D4D 69E6768 75615175 0CC03A44 73D03679 +# a and b are mod p, so a is equal to p-3, or simply -3 +# _a = -3 +_b = int(remove_whitespace("E87579C1 1079F43D D824993C 2CEE5ED3"), 16) +_Gx = int(remove_whitespace("161FF752 8B899B2D 0C28607C A52C5B86"), 16) +_Gy = int(remove_whitespace("CF5AC839 5BAFEB13 C02DA292 DDED7A83"), 16) +_r = int(remove_whitespace("FFFFFFFE 00000000 75A30D1B 9038A115"), 16) +_h = 1 +curve_128r1 = ellipticcurve.CurveFp(_p, -3, _b, _h) +generator_128r1 = ellipticcurve.PointJacobi( + curve_128r1, _Gx, _Gy, 1, _r, generator=True +) + + +# secp160r1 +_p = int(remove_whitespace("FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF 7FFFFFFF"), 16) +# S = 1053CDE4 2C14D696 E6768756 1517533B F3F83345 +# a and b are mod p, so a is equal to p-3, or simply -3 +# _a = -3 +_b = int(remove_whitespace("1C97BEFC 54BD7A8B 65ACF89F 81D4D4AD C565FA45"), 16) +_Gx = int( + remove_whitespace("4A96B568 8EF57328 46646989 68C38BB9 13CBFC82"), + 16, +) +_Gy = int( + remove_whitespace("23A62855 3168947D 59DCC912 04235137 7AC5FB32"), + 16, +) +_r = int( + remove_whitespace("01 00000000 00000000 0001F4C8 F927AED3 CA752257"), + 16, +) +_h = 1 +curve_160r1 = ellipticcurve.CurveFp(_p, -3, _b, _h) +generator_160r1 = ellipticcurve.PointJacobi( + curve_160r1, _Gx, _Gy, 1, _r, generator=True +) + + +# NIST Curve P-192: +_p = 6277101735386680763835789423207666416083908700390324961279 +_r = 6277101735386680763835789423176059013767194773182842284081 +# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L +# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65L +_b = int( + remove_whitespace( + """ + 64210519 E59C80E7 0FA7E9AB 72243049 FEB8DEEC C146B9B1""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + 188DA80E B03090F6 7CBF20EB 43A18800 F4FF0AFD 82FF1012""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 07192B95 FFC8DA78 
631011ED 6B24CDD5 73F977A1 1E794811""" + ), + 16, +) + +curve_192 = ellipticcurve.CurveFp(_p, -3, _b, 1) +generator_192 = ellipticcurve.PointJacobi( + curve_192, _Gx, _Gy, 1, _r, generator=True +) + + +# NIST Curve P-224: +_p = int( + remove_whitespace( + """ + 2695994666715063979466701508701963067355791626002630814351 + 0066298881""" + ) +) +_r = int( + remove_whitespace( + """ + 2695994666715063979466701508701962594045780771442439172168 + 2722368061""" + ) +) +# s = 0xbd71344799d5c7fcdc45b59fa3b9ab8f6a948bc5L +# c = 0x5b056c7e11dd68f40469ee7f3c7a7d74f7d121116506d031218291fbL +_b = int( + remove_whitespace( + """ + B4050A85 0C04B3AB F5413256 5044B0B7 D7BFD8BA 270B3943 + 2355FFB4""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + B70E0CBD 6BB4BF7F 321390B9 4A03C1D3 56C21122 343280D6 + 115C1D21""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + BD376388 B5F723FB 4C22DFE6 CD4375A0 5A074764 44D58199 + 85007E34""" + ), + 16, +) + +curve_224 = ellipticcurve.CurveFp(_p, -3, _b, 1) +generator_224 = ellipticcurve.PointJacobi( + curve_224, _Gx, _Gy, 1, _r, generator=True +) + +# NIST Curve P-256: +_p = int( + remove_whitespace( + """ + 1157920892103562487626974469494075735300861434152903141955 + 33631308867097853951""" + ) +) +_r = int( + remove_whitespace( + """ + 115792089210356248762697446949407573529996955224135760342 + 422259061068512044369""" + ) +) +# s = 0xc49d360886e704936a6678e1139d26b7819f7e90L +# c = 0x7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0dL +_b = int( + remove_whitespace( + """ + 5AC635D8 AA3A93E7 B3EBBD55 769886BC 651D06B0 CC53B0F6 + 3BCE3C3E 27D2604B""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + 6B17D1F2 E12C4247 F8BCE6E5 63A440F2 77037D81 2DEB33A0 + F4A13945 D898C296""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 4FE342E2 FE1A7F9B 8EE7EB4A 7C0F9E16 2BCE3357 6B315ECE + CBB64068 37BF51F5""" + ), + 16, +) + +curve_256 = ellipticcurve.CurveFp(_p, -3, _b, 1) +generator_256 = ellipticcurve.PointJacobi( + curve_256, _Gx, _Gy, 1, _r, generator=True +) + +# NIST Curve P-384: +_p = int( + remove_whitespace( + """ + 3940200619639447921227904010014361380507973927046544666794 + 8293404245721771496870329047266088258938001861606973112319""" + ) +) +_r = int( + remove_whitespace( + """ + 3940200619639447921227904010014361380507973927046544666794 + 6905279627659399113263569398956308152294913554433653942643""" + ) +) +# s = 0xa335926aa319a27a1d00896a6773a4827acdac73L +# c = int(remove_whitespace( +# """ +# 79d1e655 f868f02f ff48dcde e14151dd b80643c1 406d0ca1 +# 0dfe6fc5 2009540a 495e8042 ea5f744f 6e184667 cc722483""" +# ), 16) +_b = int( + remove_whitespace( + """ + B3312FA7 E23EE7E4 988E056B E3F82D19 181D9C6E FE814112 + 0314088F 5013875A C656398D 8A2ED19D 2A85C8ED D3EC2AEF""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + AA87CA22 BE8B0537 8EB1C71E F320AD74 6E1D3B62 8BA79B98 + 59F741E0 82542A38 5502F25D BF55296C 3A545E38 72760AB7""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 3617DE4A 96262C6F 5D9E98BF 9292DC29 F8F41DBD 289A147C + E9DA3113 B5F0B8C0 0A60B1CE 1D7E819D 7A431D7C 90EA0E5F""" + ), + 16, +) + +curve_384 = ellipticcurve.CurveFp(_p, -3, _b, 1) +generator_384 = ellipticcurve.PointJacobi( + curve_384, _Gx, _Gy, 1, _r, generator=True +) + +# NIST Curve P-521: +_p = int( + "686479766013060971498190079908139321726943530014330540939" + "446345918554318339765605212255964066145455497729631139148" + "0858037121987999716643812574028291115057151" +) +_r = int( + "686479766013060971498190079908139321726943530014330540939" 
+ "446345918554318339765539424505774633321719753296399637136" + "3321113864768612440380340372808892707005449" +) +# s = 0xd09e8800291cb85396cc6717393284aaa0da64baL +# c = int(remove_whitespace( +# """ +# 0b4 8bfa5f42 0a349495 39d2bdfc 264eeeeb 077688e4 +# 4fbf0ad8 f6d0edb3 7bd6b533 28100051 8e19f1b9 ffbe0fe9 +# ed8a3c22 00b8f875 e523868c 70c1e5bf 55bad637""" +# ), 16) +_b = int( + remove_whitespace( + """ + 051 953EB961 8E1C9A1F 929A21A0 B68540EE A2DA725B + 99B315F3 B8B48991 8EF109E1 56193951 EC7E937B 1652C0BD + 3BB1BF07 3573DF88 3D2C34F1 EF451FD4 6B503F00""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + C6 858E06B7 0404E9CD 9E3ECB66 2395B442 9C648139 + 053FB521 F828AF60 6B4D3DBA A14B5E77 EFE75928 FE1DC127 + A2FFA8DE 3348B3C1 856A429B F97E7E31 C2E5BD66""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 118 39296A78 9A3BC004 5C8A5FB4 2C7D1BD9 98F54449 + 579B4468 17AFBD17 273E662C 97EE7299 5EF42640 C550B901 + 3FAD0761 353C7086 A272C240 88BE9476 9FD16650""" + ), + 16, +) + +curve_521 = ellipticcurve.CurveFp(_p, -3, _b, 1) +generator_521 = ellipticcurve.PointJacobi( + curve_521, _Gx, _Gy, 1, _r, generator=True +) + +# Certicom secp256-k1 +_a = 0x0000000000000000000000000000000000000000000000000000000000000000 +_b = 0x0000000000000000000000000000000000000000000000000000000000000007 +_p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F +_Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798 +_Gy = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8 +_r = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 + +curve_secp256k1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_secp256k1 = ellipticcurve.PointJacobi( + curve_secp256k1, _Gx, _Gy, 1, _r, generator=True +) + +# Brainpool P-160-r1 +_a = 0x340E7BE2A280EB74E2BE61BADA745D97E8F7C300 +_b = 0x1E589A8595423412134FAA2DBDEC95C8D8675E58 +_p = 0xE95E4A5F737059DC60DFC7AD95B3D8139515620F +_Gx = 0xBED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3 +_Gy = 0x1667CB477A1A8EC338F94741669C976316DA6321 +_q = 0xE95E4A5F737059DC60DF5991D45029409E60FC09 + +curve_brainpoolp160r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp160r1 = ellipticcurve.PointJacobi( + curve_brainpoolp160r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-192-r1 +_a = 0x6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF +_b = 0x469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9 +_p = 0xC302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297 +_Gx = 0xC0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6 +_Gy = 0x14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F +_q = 0xC302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1 + +curve_brainpoolp192r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp192r1 = ellipticcurve.PointJacobi( + curve_brainpoolp192r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-224-r1 +_a = 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43 +_b = 0x2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B +_p = 0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF +_Gx = 0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D +_Gy = 0x58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD +_q = 0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F + +curve_brainpoolp224r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp224r1 = ellipticcurve.PointJacobi( + curve_brainpoolp224r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-256-r1 +_a = 0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9 +_b = 
0x26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6 +_p = 0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377 +_Gx = 0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262 +_Gy = 0x547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997 +_q = 0xA9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7 + +curve_brainpoolp256r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp256r1 = ellipticcurve.PointJacobi( + curve_brainpoolp256r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-320-r1 +_a = int( + remove_whitespace( + """ + 3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9 + F492F375A97D860EB4""" + ), + 16, +) +_b = int( + remove_whitespace( + """ + 520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539 + 816F5EB4AC8FB1F1A6""" + ), + 16, +) +_p = int( + remove_whitespace( + """ + D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC + 28FCD412B1F1B32E27""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + 43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599 + C710AF8D0D39E20611""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6A + C7D35245D1692E8EE1""" + ), + 16, +) +_q = int( + remove_whitespace( + """ + D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658 + E98691555B44C59311""" + ), + 16, +) + +curve_brainpoolp320r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp320r1 = ellipticcurve.PointJacobi( + curve_brainpoolp320r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-384-r1 +_a = int( + remove_whitespace( + """ + 7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F9 + 0F8AA5814A503AD4EB04A8C7DD22CE2826""" + ), + 16, +) +_b = int( + remove_whitespace( + """ + 04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62 + D57CB4390295DBC9943AB78696FA504C11""" + ), + 16, +) +_p = int( + remove_whitespace( + """ + 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB711 + 23ACD3A729901D1A71874700133107EC53""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + 1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10 + E8E826E03436D646AAEF87B2E247D4AF1E""" + ), + 16, +) +_Gy = int( + remove_whitespace( + """ + 8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF991292 + 80E4646217791811142820341263C5315""" + ), + 16, +) +_q = int( + remove_whitespace( + """ + 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425 + A7CF3AB6AF6B7FC3103B883202E9046565""" + ), + 16, +) + +curve_brainpoolp384r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp384r1 = ellipticcurve.PointJacobi( + curve_brainpoolp384r1, _Gx, _Gy, 1, _q, generator=True +) + +# Brainpool P-512-r1 +_a = int( + remove_whitespace( + """ + 7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863 + BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA""" + ), + 16, +) +_b = int( + remove_whitespace( + """ + 3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117 + A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723""" + ), + 16, +) +_p = int( + remove_whitespace( + """ + AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308 + 717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3""" + ), + 16, +) +_Gx = int( + remove_whitespace( + """ + 81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D009 + 8EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822""" + ), + 16, +) +_Gy = int( + remove_whitespace( 
+ """ + 7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F81 + 11B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892""" + ), + 16, +) +_q = int( + remove_whitespace( + """ + AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308 + 70553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069""" + ), + 16, +) + +curve_brainpoolp512r1 = ellipticcurve.CurveFp(_p, _a, _b, 1) +generator_brainpoolp512r1 = ellipticcurve.PointJacobi( + curve_brainpoolp512r1, _Gx, _Gy, 1, _q, generator=True +) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/eddsa.py b/myenv/lib/python3.9/site-packages/ecdsa/eddsa.py new file mode 100644 index 0000000..9769cfd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/eddsa.py @@ -0,0 +1,252 @@ +"""Implementation of Edwards Digital Signature Algorithm.""" + +import hashlib +from ._sha3 import shake_256 +from . import ellipticcurve +from ._compat import ( + remove_whitespace, + bit_length, + bytes_to_int, + int_to_bytes, + compat26_str, +) + +# edwards25519, defined in RFC7748 +_p = 2**255 - 19 +_a = -1 +_d = int( + remove_whitespace( + "370957059346694393431380835087545651895421138798432190163887855330" + "85940283555" + ) +) +_h = 8 + +_Gx = int( + remove_whitespace( + "151122213495354007725011514095885315114540126930418572060461132" + "83949847762202" + ) +) +_Gy = int( + remove_whitespace( + "463168356949264781694283940034751631413079938662562256157830336" + "03165251855960" + ) +) +_r = 2**252 + 0x14DEF9DEA2F79CD65812631A5CF5D3ED + + +def _sha512(data): + return hashlib.new("sha512", compat26_str(data)).digest() + + +curve_ed25519 = ellipticcurve.CurveEdTw(_p, _a, _d, _h, _sha512) +generator_ed25519 = ellipticcurve.PointEdwards( + curve_ed25519, _Gx, _Gy, 1, _Gx * _Gy % _p, _r, generator=True +) + + +# edwards448, defined in RFC7748 +_p = 2**448 - 2**224 - 1 +_a = 1 +_d = -39081 % _p +_h = 4 + +_Gx = int( + remove_whitespace( + "224580040295924300187604334099896036246789641632564134246125461" + "686950415467406032909029192869357953282578032075146446173674602635" + "247710" + ) +) +_Gy = int( + remove_whitespace( + "298819210078481492676017930443930673437544040154080242095928241" + "372331506189835876003536878655418784733982303233503462500531545062" + "832660" + ) +) +_r = 2**446 - 0x8335DC163BB124B65129C96FDE933D8D723A70AADC873D6D54A7BB0D + + +def _shake256(data): + return shake_256(data, 114) + + +curve_ed448 = ellipticcurve.CurveEdTw(_p, _a, _d, _h, _shake256) +generator_ed448 = ellipticcurve.PointEdwards( + curve_ed448, _Gx, _Gy, 1, _Gx * _Gy % _p, _r, generator=True +) + + +class PublicKey(object): + """Public key for the Edwards Digital Signature Algorithm.""" + + def __init__(self, generator, public_key, public_point=None): + self.generator = generator + self.curve = generator.curve() + self.__encoded = public_key + # plus one for the sign bit and round up + self.baselen = (bit_length(self.curve.p()) + 1 + 7) // 8 + if len(public_key) != self.baselen: + raise ValueError( + "Incorrect size of the public key, expected: {0} bytes".format( + self.baselen + ) + ) + if public_point: + self.__point = public_point + else: + self.__point = ellipticcurve.PointEdwards.from_bytes( + self.curve, public_key + ) + + def __eq__(self, other): + if isinstance(other, PublicKey): + return ( + self.curve == other.curve and self.__encoded == other.__encoded + ) + return NotImplemented + + def __ne__(self, other): + return not self == other + + @property + def point(self): + return self.__point + + @point.setter + def point(self, other): 
+ if self.__point != other: + raise ValueError("Can't change the coordinates of the point") + self.__point = other + + def public_point(self): + return self.__point + + def public_key(self): + return self.__encoded + + def verify(self, data, signature): + """Verify a Pure EdDSA signature over data.""" + data = compat26_str(data) + if len(signature) != 2 * self.baselen: + raise ValueError( + "Invalid signature length, expected: {0} bytes".format( + 2 * self.baselen + ) + ) + R = ellipticcurve.PointEdwards.from_bytes( + self.curve, signature[: self.baselen] + ) + S = bytes_to_int(signature[self.baselen :], "little") + if S >= self.generator.order(): + raise ValueError("Invalid signature") + + dom = bytearray() + if self.curve == curve_ed448: + dom = bytearray(b"SigEd448" + b"\x00\x00") + + k = bytes_to_int( + self.curve.hash_func(dom + R.to_bytes() + self.__encoded + data), + "little", + ) + + if self.generator * S != self.__point * k + R: + raise ValueError("Invalid signature") + + return True + + +class PrivateKey(object): + """Private key for the Edwards Digital Signature Algorithm.""" + + def __init__(self, generator, private_key): + self.generator = generator + self.curve = generator.curve() + # plus one for the sign bit and round up + self.baselen = (bit_length(self.curve.p()) + 1 + 7) // 8 + if len(private_key) != self.baselen: + raise ValueError( + "Incorrect size of private key, expected: {0} bytes".format( + self.baselen + ) + ) + self.__private_key = bytes(private_key) + self.__h = bytearray(self.curve.hash_func(private_key)) + self.__public_key = None + + a = self.__h[: self.baselen] + a = self._key_prune(a) + scalar = bytes_to_int(a, "little") + self.__s = scalar + + @property + def private_key(self): + return self.__private_key + + def __eq__(self, other): + if isinstance(other, PrivateKey): + return ( + self.curve == other.curve + and self.__private_key == other.__private_key + ) + return NotImplemented + + def __ne__(self, other): + return not self == other + + def _key_prune(self, key): + # make sure the key is not in a small subgroup + h = self.curve.cofactor() + if h == 4: + h_log = 2 + elif h == 8: + h_log = 3 + else: + raise ValueError("Only cofactor 4 and 8 curves supported") + key[0] &= ~((1 << h_log) - 1) + + # ensure the highest bit is set but no higher + l = bit_length(self.curve.p()) + if l % 8 == 0: + key[-1] = 0 + key[-2] |= 0x80 + else: + key[-1] = key[-1] & (1 << (l % 8)) - 1 | 1 << (l % 8) - 1 + return key + + def public_key(self): + """Generate the public key based on the included private key""" + if self.__public_key: + return self.__public_key + + public_point = self.generator * self.__s + + self.__public_key = PublicKey( + self.generator, public_point.to_bytes(), public_point + ) + + return self.__public_key + + def sign(self, data): + """Perform a Pure EdDSA signature over data.""" + data = compat26_str(data) + A = self.public_key().public_key() + + prefix = self.__h[self.baselen :] + + dom = bytearray() + if self.curve == curve_ed448: + dom = bytearray(b"SigEd448" + b"\x00\x00") + + r = bytes_to_int(self.curve.hash_func(dom + prefix + data), "little") + R = (self.generator * r).to_bytes() + + k = bytes_to_int(self.curve.hash_func(dom + R + A + data), "little") + k %= self.generator.order() + + S = (r + k * self.__s) % self.generator.order() + + return R + int_to_bytes(S, self.baselen, "little") diff --git a/myenv/lib/python3.9/site-packages/ecdsa/ellipticcurve.py b/myenv/lib/python3.9/site-packages/ecdsa/ellipticcurve.py new file mode 100644 index 
0000000..d6f7146 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/ellipticcurve.py @@ -0,0 +1,1584 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- +# +# Implementation of elliptic curves, for cryptographic applications. +# +# This module doesn't provide any way to choose a random elliptic +# curve, nor to verify that an elliptic curve was chosen randomly, +# because one can simply use NIST's standard curves. +# +# Notes from X9.62-1998 (draft): +# Nomenclature: +# - Q is a public key. +# The "Elliptic Curve Domain Parameters" include: +# - q is the "field size", which in our case equals p. +# - p is a big prime. +# - G is a point of prime order (5.1.1.1). +# - n is the order of G (5.1.1.1). +# Public-key validation (5.2.2): +# - Verify that Q is not the point at infinity. +# - Verify that X_Q and Y_Q are in [0,p-1]. +# - Verify that Q is on the curve. +# - Verify that nQ is the point at infinity. +# Signature generation (5.3): +# - Pick random k from [1,n-1]. +# Signature checking (5.4.2): +# - Verify that r and s are in [1,n-1]. +# +# Revision history: +# 2005.12.31 - Initial version. +# 2008.11.25 - Change CurveFp.is_on to contains_point. +# +# Written in 2005 by Peter Pearson and placed in the public domain. +# Modified extensively as part of python-ecdsa. + +from __future__ import division + +try: + from gmpy2 import mpz + + GMPY = True +except ImportError: # pragma: no branch + try: + from gmpy import mpz + + GMPY = True + except ImportError: + GMPY = False + + +from six import python_2_unicode_compatible +from . import numbertheory +from ._compat import normalise_bytes, int_to_bytes, bit_length, bytes_to_int +from .errors import MalformedPointError +from .util import orderlen, string_to_number, number_to_string + + +@python_2_unicode_compatible +class CurveFp(object): + """ + :term:`Short Weierstrass Elliptic Curve ` over a + prime field. + """ + + if GMPY: # pragma: no branch + + def __init__(self, p, a, b, h=None): + """ + The curve of points satisfying y^2 = x^3 + a*x + b (mod p). + + h is an integer that is the cofactor of the elliptic curve domain + parameters; it is the number of points satisfying the elliptic + curve equation divided by the order of the base point. It is used + for selection of efficient algorithm for public point verification. + """ + self.__p = mpz(p) + self.__a = mpz(a) + self.__b = mpz(b) + # h is not used in calculations and it can be None, so don't use + # gmpy with it + self.__h = h + + else: # pragma: no branch + + def __init__(self, p, a, b, h=None): + """ + The curve of points satisfying y^2 = x^3 + a*x + b (mod p). + + h is an integer that is the cofactor of the elliptic curve domain + parameters; it is the number of points satisfying the elliptic + curve equation divided by the order of the base point. It is used + for selection of efficient algorithm for public point verification. + """ + self.__p = p + self.__a = a + self.__b = b + self.__h = h + + def __eq__(self, other): + """Return True if other is an identical curve, False otherwise. + + Note: the value of the cofactor of the curve is not taken into account + when comparing curves, as it's derived from the base point and + intrinsic curve characteristic (but it's complex to compute), + only the prime and curve parameters are considered. 
+ """ + if isinstance(other, CurveFp): + p = self.__p + return ( + self.__p == other.__p + and self.__a % p == other.__a % p + and self.__b % p == other.__b % p + ) + return NotImplemented + + def __ne__(self, other): + """Return False if other is an identical curve, True otherwise.""" + return not self == other + + def __hash__(self): + return hash((self.__p, self.__a, self.__b)) + + def p(self): + return self.__p + + def a(self): + return self.__a + + def b(self): + return self.__b + + def cofactor(self): + return self.__h + + def contains_point(self, x, y): + """Is the point (x,y) on this curve?""" + return (y * y - ((x * x + self.__a) * x + self.__b)) % self.__p == 0 + + def __str__(self): + return "CurveFp(p=%d, a=%d, b=%d, h=%d)" % ( + self.__p, + self.__a, + self.__b, + self.__h, + ) + + +class CurveEdTw(object): + """Parameters for a Twisted Edwards Elliptic Curve""" + + if GMPY: # pragma: no branch + + def __init__(self, p, a, d, h=None, hash_func=None): + """ + The curve of points satisfying a*x^2 + y^2 = 1 + d*x^2*y^2 (mod p). + + h is the cofactor of the curve. + hash_func is the hash function associated with the curve + (like SHA-512 for Ed25519) + """ + self.__p = mpz(p) + self.__a = mpz(a) + self.__d = mpz(d) + self.__h = h + self.__hash_func = hash_func + + else: + + def __init__(self, p, a, d, h=None, hash_func=None): + """ + The curve of points satisfying a*x^2 + y^2 = 1 + d*x^2*y^2 (mod p). + + h is the cofactor of the curve. + hash_func is the hash function associated with the curve + (like SHA-512 for Ed25519) + """ + self.__p = p + self.__a = a + self.__d = d + self.__h = h + self.__hash_func = hash_func + + def __eq__(self, other): + """Returns True if other is an identical curve.""" + if isinstance(other, CurveEdTw): + p = self.__p + return ( + self.__p == other.__p + and self.__a % p == other.__a % p + and self.__d % p == other.__d % p + ) + return NotImplemented + + def __ne__(self, other): + """Return False if the other is an identical curve, True otherwise.""" + return not self == other + + def __hash__(self): + return hash((self.__p, self.__a, self.__d)) + + def contains_point(self, x, y): + """Is the point (x, y) on this curve?""" + return ( + self.__a * x * x + y * y - 1 - self.__d * x * x * y * y + ) % self.__p == 0 + + def p(self): + return self.__p + + def a(self): + return self.__a + + def d(self): + return self.__d + + def hash_func(self, data): + return self.__hash_func(data) + + def cofactor(self): + return self.__h + + def __str__(self): + return "CurveEdTw(p={0}, a={1}, d={2}, h={3})".format( + self.__p, + self.__a, + self.__d, + self.__h, + ) + + +class AbstractPoint(object): + """Class for common methods of elliptic curve points.""" + + @staticmethod + def _from_raw_encoding(data, raw_encoding_length): + """ + Decode public point from :term:`raw encoding`. + + :term:`raw encoding` is the same as the :term:`uncompressed` encoding, + but without the 0x04 byte at the beginning. 
+ """ + # real assert, from_bytes() should not call us with different length + assert len(data) == raw_encoding_length + xs = data[: raw_encoding_length // 2] + ys = data[raw_encoding_length // 2 :] + # real assert, raw_encoding_length is calculated by multiplying an + # integer by two so it will always be even + assert len(xs) == raw_encoding_length // 2 + assert len(ys) == raw_encoding_length // 2 + coord_x = string_to_number(xs) + coord_y = string_to_number(ys) + + return coord_x, coord_y + + @staticmethod + def _from_compressed(data, curve): + """Decode public point from compressed encoding.""" + if data[:1] not in (b"\x02", b"\x03"): + raise MalformedPointError("Malformed compressed point encoding") + + is_even = data[:1] == b"\x02" + x = string_to_number(data[1:]) + p = curve.p() + alpha = (pow(x, 3, p) + (curve.a() * x) + curve.b()) % p + try: + beta = numbertheory.square_root_mod_prime(alpha, p) + except numbertheory.Error as e: + raise MalformedPointError( + "Encoding does not correspond to a point on curve", e + ) + if is_even == bool(beta & 1): + y = p - beta + else: + y = beta + return x, y + + @classmethod + def _from_hybrid(cls, data, raw_encoding_length, validate_encoding): + """Decode public point from hybrid encoding.""" + # real assert, from_bytes() should not call us with different types + assert data[:1] in (b"\x06", b"\x07") + + # primarily use the uncompressed as it's easiest to handle + x, y = cls._from_raw_encoding(data[1:], raw_encoding_length) + + # but validate if it's self-consistent if we're asked to do that + if validate_encoding and ( + y & 1 + and data[:1] != b"\x07" + or (not y & 1) + and data[:1] != b"\x06" + ): + raise MalformedPointError("Inconsistent hybrid point encoding") + + return x, y + + @classmethod + def _from_edwards(cls, curve, data): + """Decode a point on an Edwards curve.""" + data = bytearray(data) + p = curve.p() + # add 1 for the sign bit and then round up + exp_len = (bit_length(p) + 1 + 7) // 8 + if len(data) != exp_len: + raise MalformedPointError("Point length doesn't match the curve.") + x_0 = (data[-1] & 0x80) >> 7 + + data[-1] &= 0x80 - 1 + + y = bytes_to_int(data, "little") + if GMPY: + y = mpz(y) + + x2 = ( + (y * y - 1) + * numbertheory.inverse_mod(curve.d() * y * y - curve.a(), p) + % p + ) + + try: + x = numbertheory.square_root_mod_prime(x2, p) + except numbertheory.Error as e: + raise MalformedPointError( + "Encoding does not correspond to a point on curve", e + ) + + if x % 2 != x_0: + x = -x % p + + return x, y + + @classmethod + def from_bytes( + cls, curve, data, validate_encoding=True, valid_encodings=None + ): + """ + Initialise the object from byte encoding of a point. + + The method does accept and automatically detect the type of point + encoding used. It supports the :term:`raw encoding`, + :term:`uncompressed`, :term:`compressed`, and :term:`hybrid` encodings. + + Note: generally you will want to call the ``from_bytes()`` method of + either a child class, PointJacobi or Point. 
+ + :param data: single point encoding of the public key + :type data: :term:`bytes-like object` + :param curve: the curve on which the public key is expected to lay + :type curve: ~ecdsa.ellipticcurve.CurveFp + :param validate_encoding: whether to verify that the encoding of the + point is self-consistent, defaults to True, has effect only + on ``hybrid`` encoding + :type validate_encoding: bool + :param valid_encodings: list of acceptable point encoding formats, + supported ones are: :term:`uncompressed`, :term:`compressed`, + :term:`hybrid`, and :term:`raw encoding` (specified with ``raw`` + name). All formats by default (specified with ``None``). + :type valid_encodings: :term:`set-like object` + + :raises `~ecdsa.errors.MalformedPointError`: if the public point does + not lay on the curve or the encoding is invalid + + :return: x and y coordinates of the encoded point + :rtype: tuple(int, int) + """ + if not valid_encodings: + valid_encodings = set( + ["uncompressed", "compressed", "hybrid", "raw"] + ) + if not all( + i in set(("uncompressed", "compressed", "hybrid", "raw")) + for i in valid_encodings + ): + raise ValueError( + "Only uncompressed, compressed, hybrid or raw encoding " + "supported." + ) + data = normalise_bytes(data) + + if isinstance(curve, CurveEdTw): + return cls._from_edwards(curve, data) + + key_len = len(data) + raw_encoding_length = 2 * orderlen(curve.p()) + if key_len == raw_encoding_length and "raw" in valid_encodings: + coord_x, coord_y = cls._from_raw_encoding( + data, raw_encoding_length + ) + elif key_len == raw_encoding_length + 1 and ( + "hybrid" in valid_encodings or "uncompressed" in valid_encodings + ): + if data[:1] in (b"\x06", b"\x07") and "hybrid" in valid_encodings: + coord_x, coord_y = cls._from_hybrid( + data, raw_encoding_length, validate_encoding + ) + elif data[:1] == b"\x04" and "uncompressed" in valid_encodings: + coord_x, coord_y = cls._from_raw_encoding( + data[1:], raw_encoding_length + ) + else: + raise MalformedPointError( + "Invalid X9.62 encoding of the public point" + ) + elif ( + key_len == raw_encoding_length // 2 + 1 + and "compressed" in valid_encodings + ): + coord_x, coord_y = cls._from_compressed(data, curve) + else: + raise MalformedPointError( + "Length of string does not match lengths of " + "any of the enabled ({0}) encodings of the " + "curve.".format(", ".join(valid_encodings)) + ) + return coord_x, coord_y + + def _raw_encode(self): + """Convert the point to the :term:`raw encoding`.""" + prime = self.curve().p() + x_str = number_to_string(self.x(), prime) + y_str = number_to_string(self.y(), prime) + return x_str + y_str + + def _compressed_encode(self): + """Encode the point into the compressed form.""" + prime = self.curve().p() + x_str = number_to_string(self.x(), prime) + if self.y() & 1: + return b"\x03" + x_str + return b"\x02" + x_str + + def _hybrid_encode(self): + """Encode the point into the hybrid form.""" + raw_enc = self._raw_encode() + if self.y() & 1: + return b"\x07" + raw_enc + return b"\x06" + raw_enc + + def _edwards_encode(self): + """Encode the point according to RFC8032 encoding.""" + self.scale() + x, y, p = self.x(), self.y(), self.curve().p() + + # add 1 for the sign bit and then round up + enc_len = (bit_length(p) + 1 + 7) // 8 + y_str = int_to_bytes(y, enc_len, "little") + if x % 2: + y_str[-1] |= 0x80 + return y_str + + def to_bytes(self, encoding="raw"): + """ + Convert the point to a byte string. + + The method by default uses the :term:`raw encoding` (specified + by `encoding="raw"`. 
It can also output points in :term:`uncompressed`, + :term:`compressed`, and :term:`hybrid` formats. + + For points on Edwards curves `encoding` is ignored and only the + encoding defined in RFC 8032 is supported. + + :return: :term:`raw encoding` of a public on the curve + :rtype: bytes + """ + assert encoding in ("raw", "uncompressed", "compressed", "hybrid") + curve = self.curve() + if isinstance(curve, CurveEdTw): + return self._edwards_encode() + elif encoding == "raw": + return self._raw_encode() + elif encoding == "uncompressed": + return b"\x04" + self._raw_encode() + elif encoding == "hybrid": + return self._hybrid_encode() + else: + return self._compressed_encode() + + @staticmethod + def _naf(mult): + """Calculate non-adjacent form of number.""" + ret = [] + while mult: + if mult % 2: + nd = mult % 4 + if nd >= 2: + nd -= 4 + ret.append(nd) + mult -= nd + else: + ret.append(0) + mult //= 2 + return ret + + +class PointJacobi(AbstractPoint): + """ + Point on a short Weierstrass elliptic curve. Uses Jacobi coordinates. + + In Jacobian coordinates, there are three parameters, X, Y and Z. + They correspond to affine parameters 'x' and 'y' like so: + + x = X / Z² + y = Y / Z³ + """ + + def __init__(self, curve, x, y, z, order=None, generator=False): + """ + Initialise a point that uses Jacobi representation internally. + + :param CurveFp curve: curve on which the point resides + :param int x: the X parameter of Jacobi representation (equal to x when + converting from affine coordinates + :param int y: the Y parameter of Jacobi representation (equal to y when + converting from affine coordinates + :param int z: the Z parameter of Jacobi representation (equal to 1 when + converting from affine coordinates + :param int order: the point order, must be non zero when using + generator=True + :param bool generator: the point provided is a curve generator, as + such, it will be commonly used with scalar multiplication. This will + cause to precompute multiplication table generation for it + """ + super(PointJacobi, self).__init__() + self.__curve = curve + if GMPY: # pragma: no branch + self.__coords = (mpz(x), mpz(y), mpz(z)) + self.__order = order and mpz(order) + else: # pragma: no branch + self.__coords = (x, y, z) + self.__order = order + self.__generator = generator + self.__precompute = [] + + @classmethod + def from_bytes( + cls, + curve, + data, + validate_encoding=True, + valid_encodings=None, + order=None, + generator=False, + ): + """ + Initialise the object from byte encoding of a point. + + The method does accept and automatically detect the type of point + encoding used. It supports the :term:`raw encoding`, + :term:`uncompressed`, :term:`compressed`, and :term:`hybrid` encodings. + + :param data: single point encoding of the public key + :type data: :term:`bytes-like object` + :param curve: the curve on which the public key is expected to lay + :type curve: ~ecdsa.ellipticcurve.CurveFp + :param validate_encoding: whether to verify that the encoding of the + point is self-consistent, defaults to True, has effect only + on ``hybrid`` encoding + :type validate_encoding: bool + :param valid_encodings: list of acceptable point encoding formats, + supported ones are: :term:`uncompressed`, :term:`compressed`, + :term:`hybrid`, and :term:`raw encoding` (specified with ``raw`` + name). All formats by default (specified with ``None``). 
+ :type valid_encodings: :term:`set-like object` + :param int order: the point order, must be non zero when using + generator=True + :param bool generator: the point provided is a curve generator, as + such, it will be commonly used with scalar multiplication. This + will cause to precompute multiplication table generation for it + + :raises `~ecdsa.errors.MalformedPointError`: if the public point does + not lay on the curve or the encoding is invalid + + :return: Point on curve + :rtype: PointJacobi + """ + coord_x, coord_y = super(PointJacobi, cls).from_bytes( + curve, data, validate_encoding, valid_encodings + ) + return PointJacobi(curve, coord_x, coord_y, 1, order, generator) + + def _maybe_precompute(self): + if not self.__generator or self.__precompute: + return + + # since this code will execute just once, and it's fully deterministic, + # depend on atomicity of the last assignment to switch from empty + # self.__precompute to filled one and just ignore the unlikely + # situation when two threads execute it at the same time (as it won't + # lead to inconsistent __precompute) + order = self.__order + assert order + precompute = [] + i = 1 + order *= 2 + coord_x, coord_y, coord_z = self.__coords + doubler = PointJacobi(self.__curve, coord_x, coord_y, coord_z, order) + order *= 2 + precompute.append((doubler.x(), doubler.y())) + + while i < order: + i *= 2 + doubler = doubler.double().scale() + precompute.append((doubler.x(), doubler.y())) + + self.__precompute = precompute + + def __getstate__(self): + # while this code can execute at the same time as _maybe_precompute() + # is updating the __precompute or scale() is updating the __coords, + # there is no requirement for consistency between __coords and + # __precompute + state = self.__dict__.copy() + return state + + def __setstate__(self, state): + self.__dict__.update(state) + + def __eq__(self, other): + """Compare for equality two points with each-other. + + Note: only points that lay on the same curve can be equal. + """ + x1, y1, z1 = self.__coords + if other is INFINITY: + return not y1 or not z1 + if isinstance(other, Point): + x2, y2, z2 = other.x(), other.y(), 1 + elif isinstance(other, PointJacobi): + x2, y2, z2 = other.__coords + else: + return NotImplemented + if self.__curve != other.curve(): + return False + p = self.__curve.p() + + zz1 = z1 * z1 % p + zz2 = z2 * z2 % p + + # compare the fractions by bringing them to the same denominator + # depend on short-circuit to save 4 multiplications in case of + # inequality + return (x1 * zz2 - x2 * zz1) % p == 0 and ( + y1 * zz2 * z2 - y2 * zz1 * z1 + ) % p == 0 + + def __ne__(self, other): + """Compare for inequality two points with each-other.""" + return not self == other + + def order(self): + """Return the order of the point. + + None if it is undefined. + """ + return self.__order + + def curve(self): + """Return curve over which the point is defined.""" + return self.__curve + + def x(self): + """ + Return affine x coordinate. + + This method should be used only when the 'y' coordinate is not needed. + It's computationally more efficient to use `to_affine()` and then + call x() and y() on the returned instance. Or call `scale()` + and then x() and y() on the returned instance. + """ + x, _, z = self.__coords + if z == 1: + return x + p = self.__curve.p() + z = numbertheory.inverse_mod(z, p) + return x * z**2 % p + + def y(self): + """ + Return affine y coordinate. + + This method should be used only when the 'x' coordinate is not needed. 
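In Jacobian coordinates the affine values are x = X/Z^2 and y = Y/Z^3, so __eq__ above compares the two fractions by cross-multiplying instead of dividing, and x()/y()/to_affine() perform the modular inversion only when it is actually needed. A small consistency check, a sketch under the same import assumptions as the earlier example:

from ecdsa.curves import NIST256p

g = NIST256p.generator        # PointJacobi with Z == 1
d = g.double()                # internally keeps Z != 1
affine = d.to_affine()        # plain Point with affine x, y
assert d == affine            # cross-multiplied comparison of X/Z^2 and Y/Z^3
assert (d.x(), d.y()) == (affine.x(), affine.y())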
+ It's computationally more efficient to use `to_affine()` and then + call x() and y() on the returned instance. Or call `scale()` + and then x() and y() on the returned instance. + """ + _, y, z = self.__coords + if z == 1: + return y + p = self.__curve.p() + z = numbertheory.inverse_mod(z, p) + return y * z**3 % p + + def scale(self): + """ + Return point scaled so that z == 1. + + Modifies point in place, returns self. + """ + x, y, z = self.__coords + if z == 1: + return self + + # scaling is deterministic, so even if two threads execute the below + # code at the same time, they will set __coords to the same value + p = self.__curve.p() + z_inv = numbertheory.inverse_mod(z, p) + zz_inv = z_inv * z_inv % p + x = x * zz_inv % p + y = y * zz_inv * z_inv % p + self.__coords = (x, y, 1) + return self + + def to_affine(self): + """Return point in affine form.""" + _, y, z = self.__coords + if not y or not z: + return INFINITY + self.scale() + x, y, z = self.__coords + return Point(self.__curve, x, y, self.__order) + + @staticmethod + def from_affine(point, generator=False): + """Create from an affine point. + + :param bool generator: set to True to make the point to precalculate + multiplication table - useful for public point when verifying many + signatures (around 100 or so) or for generator points of a curve. + """ + return PointJacobi( + point.curve(), point.x(), point.y(), 1, point.order(), generator + ) + + # please note that all the methods that use the equations from + # hyperelliptic + # are formatted in a way to maximise performance. + # Things that make code faster: multiplying instead of taking to the power + # (`xx = x * x; xxxx = xx * xx % p` is faster than `xxxx = x**4 % p` and + # `pow(x, 4, p)`), + # multiple assignments at the same time (`x1, x2 = self.x1, self.x2` is + # faster than `x1 = self.x1; x2 = self.x2`), + # similarly, sometimes the `% p` is skipped if it makes the calculation + # faster and the result of calculation is later reduced modulo `p` + + def _double_with_z_1(self, X1, Y1, p, a): + """Add a point to itself with z == 1.""" + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-mdbl-2007-bl + XX, YY = X1 * X1 % p, Y1 * Y1 % p + if not YY: + return 0, 0, 1 + YYYY = YY * YY % p + S = 2 * ((X1 + YY) ** 2 - XX - YYYY) % p + M = 3 * XX + a + T = (M * M - 2 * S) % p + # X3 = T + Y3 = (M * (S - T) - 8 * YYYY) % p + Z3 = 2 * Y1 % p + return T, Y3, Z3 + + def _double(self, X1, Y1, Z1, p, a): + """Add a point to itself, arbitrary z.""" + if Z1 == 1: + return self._double_with_z_1(X1, Y1, p, a) + if not Y1 or not Z1: + return 0, 0, 1 + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl + XX, YY = X1 * X1 % p, Y1 * Y1 % p + if not YY: + return 0, 0, 1 + YYYY = YY * YY % p + ZZ = Z1 * Z1 % p + S = 2 * ((X1 + YY) ** 2 - XX - YYYY) % p + M = (3 * XX + a * ZZ * ZZ) % p + T = (M * M - 2 * S) % p + # X3 = T + Y3 = (M * (S - T) - 8 * YYYY) % p + Z3 = ((Y1 + Z1) ** 2 - YY - ZZ) % p + + return T, Y3, Z3 + + def double(self): + """Add a point to itself.""" + X1, Y1, Z1 = self.__coords + + if not Y1: + return INFINITY + + p, a = self.__curve.p(), self.__curve.a() + + X3, Y3, Z3 = self._double(X1, Y1, Z1, p, a) + + if not Y3 or not Z3: + return INFINITY + return PointJacobi(self.__curve, X3, Y3, Z3, self.__order) + + def _add_with_z_1(self, X1, Y1, X2, Y2, p): + """add points when both Z1 and Z2 equal 1""" + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-mmadd-2007-bl + H = X2 
- X1 + HH = H * H + I = 4 * HH % p + J = H * I + r = 2 * (Y2 - Y1) + if not H and not r: + return self._double_with_z_1(X1, Y1, p, self.__curve.a()) + V = X1 * I + X3 = (r**2 - J - 2 * V) % p + Y3 = (r * (V - X3) - 2 * Y1 * J) % p + Z3 = 2 * H % p + return X3, Y3, Z3 + + def _add_with_z_eq(self, X1, Y1, Z1, X2, Y2, p): + """add points when Z1 == Z2""" + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-zadd-2007-m + A = (X2 - X1) ** 2 % p + B = X1 * A % p + C = X2 * A + D = (Y2 - Y1) ** 2 % p + if not A and not D: + return self._double(X1, Y1, Z1, p, self.__curve.a()) + X3 = (D - B - C) % p + Y3 = ((Y2 - Y1) * (B - X3) - Y1 * (C - B)) % p + Z3 = Z1 * (X2 - X1) % p + return X3, Y3, Z3 + + def _add_with_z2_1(self, X1, Y1, Z1, X2, Y2, p): + """add points when Z2 == 1""" + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-madd-2007-bl + Z1Z1 = Z1 * Z1 % p + U2, S2 = X2 * Z1Z1 % p, Y2 * Z1 * Z1Z1 % p + H = (U2 - X1) % p + HH = H * H % p + I = 4 * HH % p + J = H * I + r = 2 * (S2 - Y1) % p + if not r and not H: + return self._double_with_z_1(X2, Y2, p, self.__curve.a()) + V = X1 * I + X3 = (r * r - J - 2 * V) % p + Y3 = (r * (V - X3) - 2 * Y1 * J) % p + Z3 = ((Z1 + H) ** 2 - Z1Z1 - HH) % p + return X3, Y3, Z3 + + def _add_with_z_ne(self, X1, Y1, Z1, X2, Y2, Z2, p): + """add points with arbitrary z""" + # after: + # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-add-2007-bl + Z1Z1 = Z1 * Z1 % p + Z2Z2 = Z2 * Z2 % p + U1 = X1 * Z2Z2 % p + U2 = X2 * Z1Z1 % p + S1 = Y1 * Z2 * Z2Z2 % p + S2 = Y2 * Z1 * Z1Z1 % p + H = U2 - U1 + I = 4 * H * H % p + J = H * I % p + r = 2 * (S2 - S1) % p + if not H and not r: + return self._double(X1, Y1, Z1, p, self.__curve.a()) + V = U1 * I + X3 = (r * r - J - 2 * V) % p + Y3 = (r * (V - X3) - 2 * S1 * J) % p + Z3 = ((Z1 + Z2) ** 2 - Z1Z1 - Z2Z2) * H % p + + return X3, Y3, Z3 + + def __radd__(self, other): + """Add other to self.""" + return self + other + + def _add(self, X1, Y1, Z1, X2, Y2, Z2, p): + """add two points, select fastest method.""" + if not Y1 or not Z1: + return X2, Y2, Z2 + if not Y2 or not Z2: + return X1, Y1, Z1 + if Z1 == Z2: + if Z1 == 1: + return self._add_with_z_1(X1, Y1, X2, Y2, p) + return self._add_with_z_eq(X1, Y1, Z1, X2, Y2, p) + if Z1 == 1: + return self._add_with_z2_1(X2, Y2, Z2, X1, Y1, p) + if Z2 == 1: + return self._add_with_z2_1(X1, Y1, Z1, X2, Y2, p) + return self._add_with_z_ne(X1, Y1, Z1, X2, Y2, Z2, p) + + def __add__(self, other): + """Add two points on elliptic curve.""" + if self == INFINITY: + return other + if other == INFINITY: + return self + if isinstance(other, Point): + other = PointJacobi.from_affine(other) + if self.__curve != other.__curve: + raise ValueError("The other point is on different curve") + + p = self.__curve.p() + X1, Y1, Z1 = self.__coords + X2, Y2, Z2 = other.__coords + + X3, Y3, Z3 = self._add(X1, Y1, Z1, X2, Y2, Z2, p) + + if not Y3 or not Z3: + return INFINITY + return PointJacobi(self.__curve, X3, Y3, Z3, self.__order) + + def __rmul__(self, other): + """Multiply point by an integer.""" + return self * other + + def _mul_precompute(self, other): + """Multiply point by integer with precomputation table.""" + X3, Y3, Z3, p = 0, 0, 1, self.__curve.p() + _add = self._add + for X2, Y2 in self.__precompute: + if other % 2: + if other % 4 >= 2: + other = (other + 1) // 2 + X3, Y3, Z3 = _add(X3, Y3, Z3, X2, -Y2, 1, p) + else: + other = (other - 1) // 2 + X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, 1, p) + else: + other //= 2 + + if 
not Y3 or not Z3: + return INFINITY + return PointJacobi(self.__curve, X3, Y3, Z3, self.__order) + + def __mul__(self, other): + """Multiply point by an integer.""" + if not self.__coords[1] or not other: + return INFINITY + if other == 1: + return self + if self.__order: + # order*2 as a protection for Minerva + other = other % (self.__order * 2) + self._maybe_precompute() + if self.__precompute: + return self._mul_precompute(other) + + self = self.scale() + X2, Y2, _ = self.__coords + X3, Y3, Z3 = 0, 0, 1 + p, a = self.__curve.p(), self.__curve.a() + _double = self._double + _add = self._add + # since adding points when at least one of them is scaled + # is quicker, reverse the NAF order + for i in reversed(self._naf(other)): + X3, Y3, Z3 = _double(X3, Y3, Z3, p, a) + if i < 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, X2, -Y2, 1, p) + elif i > 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, 1, p) + + if not Y3 or not Z3: + return INFINITY + + return PointJacobi(self.__curve, X3, Y3, Z3, self.__order) + + def mul_add(self, self_mul, other, other_mul): + """ + Do two multiplications at the same time, add results. + + calculates self*self_mul + other*other_mul + """ + if other == INFINITY or other_mul == 0: + return self * self_mul + if self_mul == 0: + return other * other_mul + if not isinstance(other, PointJacobi): + other = PointJacobi.from_affine(other) + # when the points have precomputed answers, then multiplying them alone + # is faster (as it uses NAF and no point doublings) + self._maybe_precompute() + other._maybe_precompute() + if self.__precompute and other.__precompute: + return self * self_mul + other * other_mul + + if self.__order: + self_mul = self_mul % self.__order + other_mul = other_mul % self.__order + + # (X3, Y3, Z3) is the accumulator + X3, Y3, Z3 = 0, 0, 1 + p, a = self.__curve.p(), self.__curve.a() + + # as we have 6 unique points to work with, we can't scale all of them, + # but do scale the ones that are used most often + self.scale() + X1, Y1, Z1 = self.__coords + other.scale() + X2, Y2, Z2 = other.__coords + + _double = self._double + _add = self._add + + # with NAF we have 3 options: no add, subtract, add + # so with 2 points, we have 9 combinations: + # 0, -A, +A, -B, -A-B, +A-B, +B, -A+B, +A+B + # so we need 4 combined points: + mAmB_X, mAmB_Y, mAmB_Z = _add(X1, -Y1, Z1, X2, -Y2, Z2, p) + pAmB_X, pAmB_Y, pAmB_Z = _add(X1, Y1, Z1, X2, -Y2, Z2, p) + mApB_X, mApB_Y, mApB_Z = _add(X1, -Y1, Z1, X2, Y2, Z2, p) + pApB_X, pApB_Y, pApB_Z = _add(X1, Y1, Z1, X2, Y2, Z2, p) + # when the self and other sum to infinity, we need to add them + # one by one to get correct result but as that's very unlikely to + # happen in regular operation, we don't need to optimise this case + if not pApB_Y or not pApB_Z: + return self * self_mul + other * other_mul + + # gmp object creation has cumulatively higher overhead than the + # speedup we get from calculating the NAF using gmp so ensure use + # of int() + self_naf = list(reversed(self._naf(int(self_mul)))) + other_naf = list(reversed(self._naf(int(other_mul)))) + # ensure that the lists are the same length (zip() will truncate + # longer one otherwise) + if len(self_naf) < len(other_naf): + self_naf = [0] * (len(other_naf) - len(self_naf)) + self_naf + elif len(self_naf) > len(other_naf): + other_naf = [0] * (len(self_naf) - len(other_naf)) + other_naf + + for A, B in zip(self_naf, other_naf): + X3, Y3, Z3 = _double(X3, Y3, Z3, p, a) + + # conditions ordered from most to least likely + if A == 0: + if B == 0: + pass + elif B < 0: + X3, Y3, Z3 
= _add(X3, Y3, Z3, X2, -Y2, Z2, p) + else: + assert B > 0 + X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, Z2, p) + elif A < 0: + if B == 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, X1, -Y1, Z1, p) + elif B < 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, mAmB_X, mAmB_Y, mAmB_Z, p) + else: + assert B > 0 + X3, Y3, Z3 = _add(X3, Y3, Z3, mApB_X, mApB_Y, mApB_Z, p) + else: + assert A > 0 + if B == 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, X1, Y1, Z1, p) + elif B < 0: + X3, Y3, Z3 = _add(X3, Y3, Z3, pAmB_X, pAmB_Y, pAmB_Z, p) + else: + assert B > 0 + X3, Y3, Z3 = _add(X3, Y3, Z3, pApB_X, pApB_Y, pApB_Z, p) + + if not Y3 or not Z3: + return INFINITY + + return PointJacobi(self.__curve, X3, Y3, Z3, self.__order) + + def __neg__(self): + """Return negated point.""" + x, y, z = self.__coords + return PointJacobi(self.__curve, x, -y, z, self.__order) + + +class Point(AbstractPoint): + """A point on a short Weierstrass elliptic curve. Altering x and y is + forbidden, but they can be read by the x() and y() methods.""" + + def __init__(self, curve, x, y, order=None): + """curve, x, y, order; order (optional) is the order of this point.""" + super(Point, self).__init__() + self.__curve = curve + if GMPY: + self.__x = x and mpz(x) + self.__y = y and mpz(y) + self.__order = order and mpz(order) + else: + self.__x = x + self.__y = y + self.__order = order + # self.curve is allowed to be None only for INFINITY: + if self.__curve: + assert self.__curve.contains_point(x, y) + # for curves with cofactor 1, all points that are on the curve are + # scalar multiples of the base point, so performing multiplication is + # not necessary to verify that. See Section 3.2.2.1 of SEC 1 v2 + if curve and curve.cofactor() != 1 and order: + assert self * order == INFINITY + + @classmethod + def from_bytes( + cls, + curve, + data, + validate_encoding=True, + valid_encodings=None, + order=None, + ): + """ + Initialise the object from byte encoding of a point. + + The method does accept and automatically detect the type of point + encoding used. It supports the :term:`raw encoding`, + :term:`uncompressed`, :term:`compressed`, and :term:`hybrid` encodings. + + :param data: single point encoding of the public key + :type data: :term:`bytes-like object` + :param curve: the curve on which the public key is expected to lay + :type curve: ~ecdsa.ellipticcurve.CurveFp + :param validate_encoding: whether to verify that the encoding of the + point is self-consistent, defaults to True, has effect only + on ``hybrid`` encoding + :type validate_encoding: bool + :param valid_encodings: list of acceptable point encoding formats, + supported ones are: :term:`uncompressed`, :term:`compressed`, + :term:`hybrid`, and :term:`raw encoding` (specified with ``raw`` + name). All formats by default (specified with ``None``). + :type valid_encodings: :term:`set-like object` + :param int order: the point order, must be non zero when using + generator=True + + :raises `~ecdsa.errors.MalformedPointError`: if the public point does + not lay on the curve or the encoding is invalid + + :return: Point on curve + :rtype: Point + """ + coord_x, coord_y = super(Point, cls).from_bytes( + curve, data, validate_encoding, valid_encodings + ) + return Point(curve, coord_x, coord_y, order) + + def __eq__(self, other): + """Return True if the points are identical, False otherwise. + + Note: only points that lay on the same curve can be equal. 
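Scalar multiplication above walks the multiplier in non-adjacent form (roughly one add or subtract per three doublings), and mul_add() interleaves two such walks to evaluate a*P + b*Q in a single pass, which is the shape of computation ECDSA verification needs. A short sketch, assuming the same vendored-package imports:

from ecdsa.curves import NIST256p
from ecdsa.ellipticcurve import AbstractPoint

# 7 in non-adjacent form is 8 - 1: digits listed least significant first
assert AbstractPoint._naf(7) == [-1, 0, 0, 1]

g = NIST256p.generator
q = g * 7
# one combined pass instead of two separate multiplications plus an addition
assert g.mul_add(5, q, 3) == g * 5 + q * 3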
+ """ + if isinstance(other, Point): + return ( + self.__curve == other.__curve + and self.__x == other.__x + and self.__y == other.__y + ) + return NotImplemented + + def __ne__(self, other): + """Returns False if points are identical, True otherwise.""" + return not self == other + + def __neg__(self): + return Point(self.__curve, self.__x, self.__curve.p() - self.__y) + + def __add__(self, other): + """Add one point to another point.""" + + # X9.62 B.3: + + if not isinstance(other, Point): + return NotImplemented + if other == INFINITY: + return self + if self == INFINITY: + return other + assert self.__curve == other.__curve + if self.__x == other.__x: + if (self.__y + other.__y) % self.__curve.p() == 0: + return INFINITY + else: + return self.double() + + p = self.__curve.p() + + l = ( + (other.__y - self.__y) + * numbertheory.inverse_mod(other.__x - self.__x, p) + ) % p + + x3 = (l * l - self.__x - other.__x) % p + y3 = (l * (self.__x - x3) - self.__y) % p + + return Point(self.__curve, x3, y3) + + def __mul__(self, other): + """Multiply a point by an integer.""" + + def leftmost_bit(x): + assert x > 0 + result = 1 + while result <= x: + result = 2 * result + return result // 2 + + e = other + if e == 0 or (self.__order and e % self.__order == 0): + return INFINITY + if self == INFINITY: + return INFINITY + if e < 0: + return (-self) * (-e) + + # From X9.62 D.3.2: + + e3 = 3 * e + negative_self = Point(self.__curve, self.__x, -self.__y, self.__order) + i = leftmost_bit(e3) // 2 + result = self + # print_("Multiplying %s by %d (e3 = %d):" % (self, other, e3)) + while i > 1: + result = result.double() + if (e3 & i) != 0 and (e & i) == 0: + result = result + self + if (e3 & i) == 0 and (e & i) != 0: + result = result + negative_self + # print_(". . . i = %d, result = %s" % ( i, result )) + i = i // 2 + + return result + + def __rmul__(self, other): + """Multiply a point by an integer.""" + + return self * other + + def __str__(self): + if self == INFINITY: + return "infinity" + return "(%d,%d)" % (self.__x, self.__y) + + def double(self): + """Return a new point that is twice the old.""" + + if self == INFINITY: + return INFINITY + + # X9.62 B.3: + + p = self.__curve.p() + a = self.__curve.a() + + l = ( + (3 * self.__x * self.__x + a) + * numbertheory.inverse_mod(2 * self.__y, p) + ) % p + + x3 = (l * l - 2 * self.__x) % p + y3 = (l * (self.__x - x3) - self.__y) % p + + return Point(self.__curve, x3, y3) + + def x(self): + return self.__x + + def y(self): + return self.__y + + def curve(self): + return self.__curve + + def order(self): + return self.__order + + +class PointEdwards(AbstractPoint): + """Point on Twisted Edwards curve. + + Internally represents the coordinates on the curve using four parameters, + X, Y, Z, T. They correspond to affine parameters 'x' and 'y' like so: + + x = X / Z + y = Y / Z + x*y = T / Z + """ + + def __init__(self, curve, x, y, z, t, order=None, generator=False): + """ + Initialise a point that uses the extended coordinates internally. 
+ """ + super(PointEdwards, self).__init__() + self.__curve = curve + if GMPY: # pragma: no branch + self.__coords = (mpz(x), mpz(y), mpz(z), mpz(t)) + self.__order = order and mpz(order) + else: # pragma: no branch + self.__coords = (x, y, z, t) + self.__order = order + self.__generator = generator + self.__precompute = [] + + @classmethod + def from_bytes( + cls, + curve, + data, + validate_encoding=None, + valid_encodings=None, + order=None, + generator=False, + ): + """ + Initialise the object from byte encoding of a point. + + `validate_encoding` and `valid_encodings` are provided for + compatibility with Weierstrass curves, they are ignored for Edwards + points. + + :param data: single point encoding of the public key + :type data: :term:`bytes-like object` + :param curve: the curve on which the public key is expected to lay + :type curve: ecdsa.ellipticcurve.CurveEdTw + :param None validate_encoding: Ignored, encoding is always validated + :param None valid_encodings: Ignored, there is just one encoding + supported + :param int order: the point order, must be non zero when using + generator=True + :param bool generator: Flag to mark the point as a curve generator, + this will cause the library to pre-compute some values to + make repeated usages of the point much faster + + :raises `~ecdsa.errors.MalformedPointError`: if the public point does + not lay on the curve or the encoding is invalid + + :return: Initialised point on an Edwards curve + :rtype: PointEdwards + """ + coord_x, coord_y = super(PointEdwards, cls).from_bytes( + curve, data, validate_encoding, valid_encodings + ) + return PointEdwards( + curve, coord_x, coord_y, 1, coord_x * coord_y, order, generator + ) + + def _maybe_precompute(self): + if not self.__generator or self.__precompute: + return self.__precompute + + # since this code will execute just once, and it's fully deterministic, + # depend on atomicity of the last assignment to switch from empty + # self.__precompute to filled one and just ignore the unlikely + # situation when two threads execute it at the same time (as it won't + # lead to inconsistent __precompute) + order = self.__order + assert order + precompute = [] + i = 1 + order *= 2 + coord_x, coord_y, coord_z, coord_t = self.__coords + prime = self.__curve.p() + + doubler = PointEdwards( + self.__curve, coord_x, coord_y, coord_z, coord_t, order + ) + # for "protection" against Minerva we need 1 or 2 more bits depending + # on order bit size, but it's easier to just calculate one + # point more always + order *= 4 + + while i < order: + doubler = doubler.scale() + coord_x, coord_y = doubler.x(), doubler.y() + coord_t = coord_x * coord_y % prime + precompute.append((coord_x, coord_y, coord_t)) + + i *= 2 + doubler = doubler.double() + + self.__precompute = precompute + return self.__precompute + + def x(self): + """Return affine x coordinate.""" + X1, _, Z1, _ = self.__coords + if Z1 == 1: + return X1 + p = self.__curve.p() + z_inv = numbertheory.inverse_mod(Z1, p) + return X1 * z_inv % p + + def y(self): + """Return affine y coordinate.""" + _, Y1, Z1, _ = self.__coords + if Z1 == 1: + return Y1 + p = self.__curve.p() + z_inv = numbertheory.inverse_mod(Z1, p) + return Y1 * z_inv % p + + def curve(self): + """Return the curve of the point.""" + return self.__curve + + def order(self): + return self.__order + + def scale(self): + """ + Return point scaled so that z == 1. + + Modifies point in place, returns self. 
+ """ + X1, Y1, Z1, _ = self.__coords + if Z1 == 1: + return self + + p = self.__curve.p() + z_inv = numbertheory.inverse_mod(Z1, p) + x = X1 * z_inv % p + y = Y1 * z_inv % p + t = x * y % p + self.__coords = (x, y, 1, t) + return self + + def __eq__(self, other): + """Compare for equality two points with each-other. + + Note: only points on the same curve can be equal. + """ + x1, y1, z1, t1 = self.__coords + if other is INFINITY: + return not x1 or not t1 + if isinstance(other, PointEdwards): + x2, y2, z2, t2 = other.__coords + else: + return NotImplemented + if self.__curve != other.curve(): + return False + p = self.__curve.p() + + # cross multiply to eliminate divisions + xn1 = x1 * z2 % p + xn2 = x2 * z1 % p + yn1 = y1 * z2 % p + yn2 = y2 * z1 % p + return xn1 == xn2 and yn1 == yn2 + + def __ne__(self, other): + """Compare for inequality two points with each-other.""" + return not self == other + + def _add(self, X1, Y1, Z1, T1, X2, Y2, Z2, T2, p, a): + """add two points, assume sane parameters.""" + # after add-2008-hwcd-2 + # from https://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html + # NOTE: there are more efficient formulas for Z1 or Z2 == 1 + A = X1 * X2 % p + B = Y1 * Y2 % p + C = Z1 * T2 % p + D = T1 * Z2 % p + E = D + C + F = ((X1 - Y1) * (X2 + Y2) + B - A) % p + G = B + a * A + H = D - C + if not H: + return self._double(X1, Y1, Z1, T1, p, a) + X3 = E * F % p + Y3 = G * H % p + T3 = E * H % p + Z3 = F * G % p + + return X3, Y3, Z3, T3 + + def __add__(self, other): + """Add point to another.""" + if other == INFINITY: + return self + if ( + not isinstance(other, PointEdwards) + or self.__curve != other.__curve + ): + raise ValueError("The other point is on a different curve.") + + p, a = self.__curve.p(), self.__curve.a() + X1, Y1, Z1, T1 = self.__coords + X2, Y2, Z2, T2 = other.__coords + + X3, Y3, Z3, T3 = self._add(X1, Y1, Z1, T1, X2, Y2, Z2, T2, p, a) + + if not X3 or not T3: + return INFINITY + return PointEdwards(self.__curve, X3, Y3, Z3, T3, self.__order) + + def __radd__(self, other): + """Add other to self.""" + return self + other + + def _double(self, X1, Y1, Z1, T1, p, a): + """Double the point, assume sane parameters.""" + # after "dbl-2008-hwcd" + # from https://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html + # NOTE: there are more efficient formulas for Z1 == 1 + A = X1 * X1 % p + B = Y1 * Y1 % p + C = 2 * Z1 * Z1 % p + D = a * A % p + E = ((X1 + Y1) * (X1 + Y1) - A - B) % p + G = D + B + F = G - C + H = D - B + X3 = E * F % p + Y3 = G * H % p + T3 = E * H % p + Z3 = F * G % p + + return X3, Y3, Z3, T3 + + def double(self): + """Return point added to itself.""" + X1, Y1, Z1, T1 = self.__coords + + if not X1 or not T1: + return INFINITY + + p, a = self.__curve.p(), self.__curve.a() + + X3, Y3, Z3, T3 = self._double(X1, Y1, Z1, T1, p, a) + + if not X3 or not T3: + return INFINITY + return PointEdwards(self.__curve, X3, Y3, Z3, T3, self.__order) + + def __rmul__(self, other): + """Multiply point by an integer.""" + return self * other + + def _mul_precompute(self, other): + """Multiply point by integer with precomputation table.""" + X3, Y3, Z3, T3, p, a = 0, 1, 1, 0, self.__curve.p(), self.__curve.a() + _add = self._add + for X2, Y2, T2 in self.__precompute: + rem = other % 4 + if rem == 0 or rem == 2: + other //= 2 + elif rem == 3: + other = (other + 1) // 2 + X3, Y3, Z3, T3 = _add(X3, Y3, Z3, T3, -X2, Y2, 1, -T2, p, a) + else: + assert rem == 1 + other = (other - 1) // 2 + X3, Y3, Z3, T3 = _add(X3, Y3, Z3, T3, X2, Y2, 1, T2, p, a) + + if not X3 
or not T3: + return INFINITY + + return PointEdwards(self.__curve, X3, Y3, Z3, T3, self.__order) + + def __mul__(self, other): + """Multiply point by an integer.""" + X2, Y2, Z2, T2 = self.__coords + if not X2 or not T2 or not other: + return INFINITY + if other == 1: + return self + if self.__order: + # order*2 as a "protection" for Minerva + other = other % (self.__order * 2) + if self._maybe_precompute(): + return self._mul_precompute(other) + + X3, Y3, Z3, T3 = 0, 1, 1, 0 # INFINITY in extended coordinates + p, a = self.__curve.p(), self.__curve.a() + _double = self._double + _add = self._add + + for i in reversed(self._naf(other)): + X3, Y3, Z3, T3 = _double(X3, Y3, Z3, T3, p, a) + if i < 0: + X3, Y3, Z3, T3 = _add(X3, Y3, Z3, T3, -X2, Y2, Z2, -T2, p, a) + elif i > 0: + X3, Y3, Z3, T3 = _add(X3, Y3, Z3, T3, X2, Y2, Z2, T2, p, a) + + if not X3 or not T3: + return INFINITY + + return PointEdwards(self.__curve, X3, Y3, Z3, T3, self.__order) + + +# This one point is the Point At Infinity for all purposes: +INFINITY = Point(None, None, None) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/errors.py b/myenv/lib/python3.9/site-packages/ecdsa/errors.py new file mode 100644 index 0000000..0184c05 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/errors.py @@ -0,0 +1,4 @@ +class MalformedPointError(AssertionError): + """Raised in case the encoding of private or public key is malformed.""" + + pass diff --git a/myenv/lib/python3.9/site-packages/ecdsa/keys.py b/myenv/lib/python3.9/site-packages/ecdsa/keys.py new file mode 100644 index 0000000..2b7d316 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/keys.py @@ -0,0 +1,1612 @@ +""" +Primary classes for performing signing and verification operations. +""" + +import binascii +from hashlib import sha1 +import os +from six import PY2, b +from . import ecdsa, eddsa +from . import der +from . import rfc6979 +from . import ellipticcurve +from .curves import NIST192p, Curve, Ed25519, Ed448 +from .ecdsa import RSZeroError +from .util import string_to_number, number_to_string, randrange +from .util import sigencode_string, sigdecode_string, bit_length +from .util import ( + oid_ecPublicKey, + encoded_oid_ecPublicKey, + oid_ecDH, + oid_ecMQV, + MalformedSignature, +) +from ._compat import normalise_bytes +from .errors import MalformedPointError +from .ellipticcurve import PointJacobi, CurveEdTw + + +__all__ = [ + "BadSignatureError", + "BadDigestError", + "VerifyingKey", + "SigningKey", + "MalformedPointError", +] + + +class BadSignatureError(Exception): + """ + Raised when verification of signature failed. 
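The PointEdwards arithmetic defined above (unified add/double formulas plus the NAF- and precomputation-driven __mul__) can be spot-checked against the group axioms; a small sketch under the same Ed25519 import assumption:

from ecdsa.curves import Ed25519
from ecdsa.ellipticcurve import INFINITY

g = Ed25519.generator
assert g * 2 == g + g                    # multiplication agrees with addition
assert g * 0 == INFINITY                 # the zero multiple is the neutral element
assert g * Ed25519.order == INFINITY     # the generator has the advertised order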
+ + Will be raised irrespective of reason of the failure: + + * the calculated or provided hash does not match the signature + * the signature does not match the curve/public key + * the encoding of the signature is malformed + * the size of the signature does not match the curve of the VerifyingKey + """ + + pass + + +class BadDigestError(Exception): + """Raised in case the selected hash is too large for the curve.""" + + pass + + +def _truncate_and_convert_digest(digest, curve, allow_truncate): + """Truncates and converts digest to an integer.""" + if not allow_truncate: + if len(digest) > curve.baselen: + raise BadDigestError( + "this curve ({0}) is too short " + "for the length of your digest ({1})".format( + curve.name, 8 * len(digest) + ) + ) + else: + digest = digest[: curve.baselen] + number = string_to_number(digest) + if allow_truncate: + max_length = bit_length(curve.order) + # we don't use bit_length(number) as that truncates leading zeros + length = len(digest) * 8 + + # See NIST FIPS 186-4: + # + # When the length of the output of the hash function is greater + # than N (i.e., the bit length of q), then the leftmost N bits of + # the hash function output block shall be used in any calculation + # using the hash function output during the generation or + # verification of a digital signature. + # + # as such, we need to shift-out the low-order bits: + number >>= max(0, length - max_length) + + return number + + +class VerifyingKey(object): + """ + Class for handling keys that can verify signatures (public keys). + + :ivar `~ecdsa.curves.Curve` ~.curve: The Curve over which all the + cryptographic operations will take place + :ivar default_hashfunc: the function that will be used for hashing the + data. Should implement the same API as hashlib.sha1 + :vartype default_hashfunc: callable + :ivar pubkey: the actual public key + :vartype pubkey: ~ecdsa.ecdsa.Public_key + """ + + def __init__(self, _error__please_use_generate=None): + """Unsupported, please use one of the classmethods to initialise.""" + if not _error__please_use_generate: + raise TypeError( + "Please use VerifyingKey.generate() to construct me" + ) + self.curve = None + self.default_hashfunc = None + self.pubkey = None + + def __repr__(self): + pub_key = self.to_string("compressed") + if self.default_hashfunc: + hash_name = self.default_hashfunc().name + else: + hash_name = "None" + return "VerifyingKey.from_string({0!r}, {1!r}, {2})".format( + pub_key, self.curve, hash_name + ) + + def __eq__(self, other): + """Return True if the points are identical, False otherwise.""" + if isinstance(other, VerifyingKey): + return self.curve == other.curve and self.pubkey == other.pubkey + return NotImplemented + + def __ne__(self, other): + """Return False if the points are identical, True otherwise.""" + return not self == other + + @classmethod + def from_public_point( + cls, point, curve=NIST192p, hashfunc=sha1, validate_point=True + ): + """ + Initialise the object from a Point object. + + This is a low-level method, generally you will not want to use it. 
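With allow_truncate set, _truncate_and_convert_digest() keeps only the leftmost bits of an oversized digest, as FIPS 186-4 prescribes, before treating it as an integer. A sketch with a SHA-384 digest and the 256-bit NIST256p order (the helper is private and imported here only for illustration; import paths are assumed from the rest of this vendored tree):

from hashlib import sha384
from ecdsa.curves import NIST256p
from ecdsa.keys import _truncate_and_convert_digest

digest = sha384(b"example message").digest()     # 48 bytes, longer than the order
n = _truncate_and_convert_digest(digest, NIST256p, allow_truncate=True)
# only the leftmost 32 bytes (256 bits) of the digest are used
assert n == int.from_bytes(digest[:NIST256p.baselen], "big")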
+ + :param point: The point to wrap around, the actual public key + :type point: ~ecdsa.ellipticcurve.AbstractPoint + :param curve: The curve on which the point needs to reside, defaults + to NIST192p + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + verification, needs to implement the same interface + as :py:class:`hashlib.sha1` + :type hashfunc: callable + :type bool validate_point: whether to check if the point lays on curve + should always be used if the public point is not a result + of our own calculation + + :raises MalformedPointError: if the public point does not lay on the + curve + + :return: Initialised VerifyingKey object + :rtype: VerifyingKey + """ + self = cls(_error__please_use_generate=True) + if isinstance(curve.curve, CurveEdTw): + raise ValueError("Method incompatible with Edwards curves") + if not isinstance(point, ellipticcurve.PointJacobi): + point = ellipticcurve.PointJacobi.from_affine(point) + self.curve = curve + self.default_hashfunc = hashfunc + try: + self.pubkey = ecdsa.Public_key( + curve.generator, point, validate_point + ) + except ecdsa.InvalidPointError: + raise MalformedPointError("Point does not lay on the curve") + self.pubkey.order = curve.order + return self + + def precompute(self, lazy=False): + """ + Precompute multiplication tables for faster signature verification. + + Calling this method will cause the library to precompute the + scalar multiplication tables, used in signature verification. + While it's an expensive operation (comparable to performing + as many signatures as the bit size of the curve, i.e. 256 for NIST256p) + it speeds up verification 2 times. You should call this method + if you expect to verify hundreds of signatures (or more) using the same + VerifyingKey object. + + Note: You should call this method only once, this method generates a + new precomputation table every time it's called. + + :param bool lazy: whether to calculate the precomputation table now + (if set to False) or if it should be delayed to the time of first + use (when set to True) + """ + if isinstance(self.curve.curve, CurveEdTw): + pt = self.pubkey.point + self.pubkey.point = ellipticcurve.PointEdwards( + pt.curve(), + pt.x(), + pt.y(), + 1, + pt.x() * pt.y(), + self.curve.order, + generator=True, + ) + else: + self.pubkey.point = ellipticcurve.PointJacobi.from_affine( + self.pubkey.point, True + ) + # as precomputation in now delayed to the time of first use of the + # point and we were asked specifically to precompute now, make + # sure the precomputation is performed now to preserve the behaviour + if not lazy: + self.pubkey.point * 2 + + @classmethod + def from_string( + cls, + string, + curve=NIST192p, + hashfunc=sha1, + validate_point=True, + valid_encodings=None, + ): + """ + Initialise the object from byte encoding of public key. + + The method does accept and automatically detect the type of point + encoding used. It supports the :term:`raw encoding`, + :term:`uncompressed`, :term:`compressed`, and :term:`hybrid` encodings. + It also works with the native encoding of Ed25519 and Ed448 public + keys (technically those are compressed, but encoded differently than + in other signature systems). + + Note, while the method is named "from_string" it's a misnomer from + Python 2 days when there were no binary strings. In Python 3 the + input needs to be a bytes-like object. 
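from_string() auto-detects the raw, uncompressed, compressed and hybrid encodings, so a public key can travel in the 33-byte compressed form and be read back without extra flags. A sketch, assuming the package's top-level exports (SigningKey, VerifyingKey, NIST256p) from elsewhere in this change:

from ecdsa import SigningKey, VerifyingKey, NIST256p

vk = SigningKey.generate(curve=NIST256p).verifying_key
enc = vk.to_string("compressed")                  # 33 bytes for NIST256p
vk2 = VerifyingKey.from_string(enc, curve=NIST256p)
assert vk2.to_string() == vk.to_string()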
+ + :param string: single point encoding of the public key + :type string: :term:`bytes-like object` + :param curve: the curve on which the public key is expected to lay + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + verification, needs to implement the same interface as + hashlib.sha1. Ignored for EdDSA. + :type hashfunc: callable + :param validate_point: whether to verify that the point lays on the + provided curve or not, defaults to True. Ignored for EdDSA. + :type validate_point: bool + :param valid_encodings: list of acceptable point encoding formats, + supported ones are: :term:`uncompressed`, :term:`compressed`, + :term:`hybrid`, and :term:`raw encoding` (specified with ``raw`` + name). All formats by default (specified with ``None``). + Ignored for EdDSA. + :type valid_encodings: :term:`set-like object` + + :raises MalformedPointError: if the public point does not lay on the + curve or the encoding is invalid + + :return: Initialised VerifyingKey object + :rtype: VerifyingKey + """ + if isinstance(curve.curve, CurveEdTw): + self = cls(_error__please_use_generate=True) + self.curve = curve + self.default_hashfunc = None # ignored for EdDSA + try: + self.pubkey = eddsa.PublicKey(curve.generator, string) + except ValueError: + raise MalformedPointError("Malformed point for the curve") + return self + + point = PointJacobi.from_bytes( + curve.curve, + string, + validate_encoding=validate_point, + valid_encodings=valid_encodings, + ) + return cls.from_public_point(point, curve, hashfunc, validate_point) + + @classmethod + def from_pem( + cls, + string, + hashfunc=sha1, + valid_encodings=None, + valid_curve_encodings=None, + ): + """ + Initialise from public key stored in :term:`PEM` format. + + The PEM header of the key should be ``BEGIN PUBLIC KEY``. + + See the :func:`~VerifyingKey.from_der()` method for details of the + format supported. + + Note: only a single PEM object decoding is supported in provided + string. + + :param string: text with PEM-encoded public ECDSA key + :type string: str + :param valid_encodings: list of allowed point encodings. + By default :term:`uncompressed`, :term:`compressed`, and + :term:`hybrid`. To read malformed files, include + :term:`raw encoding` with ``raw`` in the list. + :type valid_encodings: :term:`set-like object` + :param valid_curve_encodings: list of allowed encoding formats + for curve parameters. By default (``None``) all are supported: + ``named_curve`` and ``explicit``. + :type valid_curve_encodings: :term:`set-like object` + + + :return: Initialised VerifyingKey object + :rtype: VerifyingKey + """ + return cls.from_der( + der.unpem(string), + hashfunc=hashfunc, + valid_encodings=valid_encodings, + valid_curve_encodings=valid_curve_encodings, + ) + + @classmethod + def from_der( + cls, + string, + hashfunc=sha1, + valid_encodings=None, + valid_curve_encodings=None, + ): + """ + Initialise the key stored in :term:`DER` format. + + The expected format of the key is the SubjectPublicKeyInfo structure + from RFC5912 (for RSA keys, it's known as the PKCS#1 format):: + + SubjectPublicKeyInfo {PUBLIC-KEY: IOSet} ::= SEQUENCE { + algorithm AlgorithmIdentifier {PUBLIC-KEY, {IOSet}}, + subjectPublicKey BIT STRING + } + + Note: only public EC keys are supported by this method. The + SubjectPublicKeyInfo.algorithm.algorithm field must specify + id-ecPublicKey (see RFC3279). 
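from_pem() and from_der() expect the SubjectPublicKeyInfo structure with a named-curve parameter, which is exactly what to_pem() emits, so a write/read cycle is lossless. A sketch under the same top-level import assumption:

from ecdsa import SigningKey, VerifyingKey, NIST256p

vk = SigningKey.generate(curve=NIST256p).verifying_key
pem = vk.to_pem()                 # "-----BEGIN PUBLIC KEY-----" block
vk2 = VerifyingKey.from_pem(pem)
assert vk2.to_string() == vk.to_string()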
+ + Only the named curve encoding is supported, thus the + SubjectPublicKeyInfo.algorithm.parameters field needs to be an + object identifier. A sequence in that field indicates an explicit + parameter curve encoding, this format is not supported. A NULL object + in that field indicates an "implicitlyCA" encoding, where the curve + parameters come from CA certificate, those, again, are not supported. + + :param string: binary string with the DER encoding of public ECDSA key + :type string: bytes-like object + :param valid_encodings: list of allowed point encodings. + By default :term:`uncompressed`, :term:`compressed`, and + :term:`hybrid`. To read malformed files, include + :term:`raw encoding` with ``raw`` in the list. + :type valid_encodings: :term:`set-like object` + :param valid_curve_encodings: list of allowed encoding formats + for curve parameters. By default (``None``) all are supported: + ``named_curve`` and ``explicit``. + :type valid_curve_encodings: :term:`set-like object` + + :return: Initialised VerifyingKey object + :rtype: VerifyingKey + """ + if valid_encodings is None: + valid_encodings = set(["uncompressed", "compressed", "hybrid"]) + string = normalise_bytes(string) + # [[oid_ecPublicKey,oid_curve], point_str_bitstring] + s1, empty = der.remove_sequence(string) + if empty != b"": + raise der.UnexpectedDER( + "trailing junk after DER pubkey: %s" % binascii.hexlify(empty) + ) + s2, point_str_bitstring = der.remove_sequence(s1) + # s2 = oid_ecPublicKey,oid_curve + oid_pk, rest = der.remove_object(s2) + if oid_pk in (Ed25519.oid, Ed448.oid): + if oid_pk == Ed25519.oid: + curve = Ed25519 + else: + assert oid_pk == Ed448.oid + curve = Ed448 + point_str, empty = der.remove_bitstring(point_str_bitstring, 0) + if empty: + raise der.UnexpectedDER("trailing junk after public key") + return cls.from_string(point_str, curve, None) + if not oid_pk == oid_ecPublicKey: + raise der.UnexpectedDER( + "Unexpected object identifier in DER " + "encoding: {0!r}".format(oid_pk) + ) + curve = Curve.from_der(rest, valid_curve_encodings) + point_str, empty = der.remove_bitstring(point_str_bitstring, 0) + if empty != b"": + raise der.UnexpectedDER( + "trailing junk after pubkey pointstring: %s" + % binascii.hexlify(empty) + ) + # raw encoding of point is invalid in DER files + if len(point_str) == curve.verifying_key_length: + raise der.UnexpectedDER("Malformed encoding of public point") + return cls.from_string( + point_str, + curve, + hashfunc=hashfunc, + valid_encodings=valid_encodings, + ) + + @classmethod + def from_public_key_recovery( + cls, + signature, + data, + curve, + hashfunc=sha1, + sigdecode=sigdecode_string, + allow_truncate=True, + ): + """ + Return keys that can be used as verifiers of the provided signature. + + Tries to recover the public key that can be used to verify the + signature, usually returns two keys like that. 
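from_public_key_recovery() inverts the usual flow: given a signature and the signed data it reconstructs the candidate public keys, which is useful when only the signature travels with the message. A sketch under the same import assumptions; the defaults of sign() and of this method use the same hash and signature encoding:

from ecdsa import SigningKey, VerifyingKey, NIST256p

sk = SigningKey.generate(curve=NIST256p)
sig = sk.sign(b"message to sign")
keys = VerifyingKey.from_public_key_recovery(sig, b"message to sign", NIST256p)
# typically two candidates are returned; the signer's key is among them
assert any(vk.to_string() == sk.verifying_key.to_string() for vk in keys)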
+ + :param signature: the byte string with the encoded signature + :type signature: bytes-like object + :param data: the data to be hashed for signature verification + :type data: bytes-like object + :param curve: the curve over which the signature was performed + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + verification, needs to implement the same interface as hashlib.sha1 + :type hashfunc: callable + :param sigdecode: Callable to define the way the signature needs to + be decoded to an object, needs to handle `signature` as the + first parameter, the curve order (an int) as the second and return + a tuple with two integers, "r" as the first one and "s" as the + second one. See :func:`ecdsa.util.sigdecode_string` and + :func:`ecdsa.util.sigdecode_der` for examples. + :param bool allow_truncate: if True, the provided hashfunc can generate + values larger than the bit size of the order of the curve, the + extra bits (at the end of the digest) will be truncated. + :type sigdecode: callable + + :return: Initialised VerifyingKey objects + :rtype: list of VerifyingKey + """ + if isinstance(curve.curve, CurveEdTw): + raise ValueError("Method unsupported for Edwards curves") + data = normalise_bytes(data) + digest = hashfunc(data).digest() + return cls.from_public_key_recovery_with_digest( + signature, + digest, + curve, + hashfunc=hashfunc, + sigdecode=sigdecode, + allow_truncate=allow_truncate, + ) + + @classmethod + def from_public_key_recovery_with_digest( + cls, + signature, + digest, + curve, + hashfunc=sha1, + sigdecode=sigdecode_string, + allow_truncate=False, + ): + """ + Return keys that can be used as verifiers of the provided signature. + + Tries to recover the public key that can be used to verify the + signature, usually returns two keys like that. + + :param signature: the byte string with the encoded signature + :type signature: bytes-like object + :param digest: the hash value of the message signed by the signature + :type digest: bytes-like object + :param curve: the curve over which the signature was performed + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + verification, needs to implement the same interface as hashlib.sha1 + :type hashfunc: callable + :param sigdecode: Callable to define the way the signature needs to + be decoded to an object, needs to handle `signature` as the + first parameter, the curve order (an int) as the second and return + a tuple with two integers, "r" as the first one and "s" as the + second one. See :func:`ecdsa.util.sigdecode_string` and + :func:`ecdsa.util.sigdecode_der` for examples. + :type sigdecode: callable + :param bool allow_truncate: if True, the provided hashfunc can generate + values larger than the bit size of the order of the curve (and + the length of provided `digest`), the extra bits (at the end of the + digest) will be truncated. 
+ + :return: Initialised VerifyingKey object + :rtype: VerifyingKey + """ + if isinstance(curve.curve, CurveEdTw): + raise ValueError("Method unsupported for Edwards curves") + generator = curve.generator + r, s = sigdecode(signature, generator.order()) + sig = ecdsa.Signature(r, s) + + digest = normalise_bytes(digest) + digest_as_number = _truncate_and_convert_digest( + digest, curve, allow_truncate + ) + pks = sig.recover_public_keys(digest_as_number, generator) + + # Transforms the ecdsa.Public_key object into a VerifyingKey + verifying_keys = [ + cls.from_public_point(pk.point, curve, hashfunc) for pk in pks + ] + return verifying_keys + + def to_string(self, encoding="raw"): + """ + Convert the public key to a byte string. + + The method by default uses the :term:`raw encoding` (specified + by `encoding="raw"`. It can also output keys in :term:`uncompressed`, + :term:`compressed` and :term:`hybrid` formats. + + Remember that the curve identification is not part of the encoding + so to decode the point using :func:`~VerifyingKey.from_string`, curve + needs to be specified. + + Note: while the method is called "to_string", it's a misnomer from + Python 2 days when character strings and byte strings shared type. + On Python 3 the returned type will be `bytes`. + + :return: :term:`raw encoding` of the public key (public point) on the + curve + :rtype: bytes + """ + assert encoding in ("raw", "uncompressed", "compressed", "hybrid") + return self.pubkey.point.to_bytes(encoding) + + def to_pem( + self, point_encoding="uncompressed", curve_parameters_encoding=None + ): + """ + Convert the public key to the :term:`PEM` format. + + The PEM header of the key will be ``BEGIN PUBLIC KEY``. + + The format of the key is described in the + :func:`~VerifyingKey.from_der()` method. + This method supports only "named curve" encoding of keys. + + :param str point_encoding: specification of the encoding format + of public keys. "uncompressed" is most portable, "compressed" is + smallest. "hybrid" is uncommon and unsupported by most + implementations, it is as big as "uncompressed". + :param str curve_parameters_encoding: the encoding for curve parameters + to use, by default tries to use ``named_curve`` encoding, + if that is not possible, falls back to ``explicit`` encoding. + + :return: portable encoding of the public key + :rtype: bytes + + .. warning:: The PEM is encoded to US-ASCII, it needs to be + re-encoded if the system is incompatible (e.g. uses UTF-16) + """ + return der.topem( + self.to_der(point_encoding, curve_parameters_encoding), + "PUBLIC KEY", + ) + + def to_der( + self, point_encoding="uncompressed", curve_parameters_encoding=None + ): + """ + Convert the public key to the :term:`DER` format. + + The format of the key is described in the + :func:`~VerifyingKey.from_der()` method. + This method supports only "named curve" encoding of keys. + + :param str point_encoding: specification of the encoding format + of public keys. "uncompressed" is most portable, "compressed" is + smallest. "hybrid" is uncommon and unsupported by most + implementations, it is as big as "uncompressed". + :param str curve_parameters_encoding: the encoding for curve parameters + to use, by default tries to use ``named_curve`` encoding, + if that is not possible, falls back to ``explicit`` encoding. 
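to_der() can combine the named-curve parameter encoding with a compressed public point, and from_der() accepts that by default, so the compact form round-trips as well. A sketch under the same import assumptions:

from ecdsa import SigningKey, VerifyingKey, NIST256p

vk = SigningKey.generate(curve=NIST256p).verifying_key
encoded = vk.to_der(point_encoding="compressed")
vk2 = VerifyingKey.from_der(encoded)
assert vk2.to_string() == vk.to_string()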
+ + :return: DER encoding of the public key + :rtype: bytes + """ + if point_encoding == "raw": + raise ValueError("raw point_encoding not allowed in DER") + point_str = self.to_string(point_encoding) + if isinstance(self.curve.curve, CurveEdTw): + return der.encode_sequence( + der.encode_sequence(der.encode_oid(*self.curve.oid)), + der.encode_bitstring(bytes(point_str), 0), + ) + return der.encode_sequence( + der.encode_sequence( + encoded_oid_ecPublicKey, + self.curve.to_der(curve_parameters_encoding, point_encoding), + ), + # 0 is the number of unused bits in the + # bit string + der.encode_bitstring(point_str, 0), + ) + + def verify( + self, + signature, + data, + hashfunc=None, + sigdecode=sigdecode_string, + allow_truncate=True, + ): + """ + Verify a signature made over provided data. + + Will hash `data` to verify the signature. + + By default expects signature in :term:`raw encoding`. Can also be used + to verify signatures in ASN.1 DER encoding by using + :func:`ecdsa.util.sigdecode_der` + as the `sigdecode` parameter. + + :param signature: encoding of the signature + :type signature: sigdecode method dependent + :param data: data signed by the `signature`, will be hashed using + `hashfunc`, if specified, or default hash function + :type data: :term:`bytes-like object` + :param hashfunc: The default hash function that will be used for + verification, needs to implement the same interface as hashlib.sha1 + :type hashfunc: callable + :param sigdecode: Callable to define the way the signature needs to + be decoded to an object, needs to handle `signature` as the + first parameter, the curve order (an int) as the second and return + a tuple with two integers, "r" as the first one and "s" as the + second one. See :func:`ecdsa.util.sigdecode_string` and + :func:`ecdsa.util.sigdecode_der` for examples. + :type sigdecode: callable + :param bool allow_truncate: if True, the provided digest can have + bigger bit-size than the order of the curve, the extra bits (at + the end of the digest) will be truncated. Use it when verifying + SHA-384 output using NIST256p or in similar situations. Defaults to + True. + + :raises BadSignatureError: if the signature is invalid or malformed + + :return: True if the verification was successful + :rtype: bool + """ + # signature doesn't have to be a bytes-like-object so don't normalise + # it, the decoders will do that + data = normalise_bytes(data) + if isinstance(self.curve.curve, CurveEdTw): + signature = normalise_bytes(signature) + try: + return self.pubkey.verify(data, signature) + except (ValueError, MalformedPointError) as e: + raise BadSignatureError("Signature verification failed", e) + + hashfunc = hashfunc or self.default_hashfunc + digest = hashfunc(data).digest() + return self.verify_digest(signature, digest, sigdecode, allow_truncate) + + def verify_digest( + self, + signature, + digest, + sigdecode=sigdecode_string, + allow_truncate=False, + ): + """ + Verify a signature made over provided hash value. + + By default expects signature in :term:`raw encoding`. Can also be used + to verify signatures in ASN.1 DER encoding by using + :func:`ecdsa.util.sigdecode_der` + as the `sigdecode` parameter. + + :param signature: encoding of the signature + :type signature: sigdecode method dependent + :param digest: raw hash value that the signature authenticates. 
+ :type digest: :term:`bytes-like object` + :param sigdecode: Callable to define the way the signature needs to + be decoded to an object, needs to handle `signature` as the + first parameter, the curve order (an int) as the second and return + a tuple with two integers, "r" as the first one and "s" as the + second one. See :func:`ecdsa.util.sigdecode_string` and + :func:`ecdsa.util.sigdecode_der` for examples. + :type sigdecode: callable + :param bool allow_truncate: if True, the provided digest can have + bigger bit-size than the order of the curve, the extra bits (at + the end of the digest) will be truncated. Use it when verifying + SHA-384 output using NIST256p or in similar situations. + + :raises BadSignatureError: if the signature is invalid or malformed + :raises BadDigestError: if the provided digest is too big for the curve + associated with this VerifyingKey and allow_truncate was not set + + :return: True if the verification was successful + :rtype: bool + """ + # signature doesn't have to be a bytes-like-object so don't normalise + # it, the decoders will do that + digest = normalise_bytes(digest) + number = _truncate_and_convert_digest( + digest, + self.curve, + allow_truncate, + ) + + try: + r, s = sigdecode(signature, self.pubkey.order) + except (der.UnexpectedDER, MalformedSignature) as e: + raise BadSignatureError("Malformed formatting of signature", e) + sig = ecdsa.Signature(r, s) + if self.pubkey.verifies(number, sig): + return True + raise BadSignatureError("Signature verification failed") + + +class SigningKey(object): + """ + Class for handling keys that can create signatures (private keys). + + :ivar `~ecdsa.curves.Curve` curve: The Curve over which all the + cryptographic operations will take place + :ivar default_hashfunc: the function that will be used for hashing the + data. 
Should implement the same API as :py:class:`hashlib.sha1` + :ivar int baselen: the length of a :term:`raw encoding` of private key + :ivar `~ecdsa.keys.VerifyingKey` verifying_key: the public key + associated with this private key + :ivar `~ecdsa.ecdsa.Private_key` privkey: the actual private key + """ + + def __init__(self, _error__please_use_generate=None): + """Unsupported, please use one of the classmethods to initialise.""" + if not _error__please_use_generate: + raise TypeError("Please use SigningKey.generate() to construct me") + self.curve = None + self.default_hashfunc = None + self.baselen = None + self.verifying_key = None + self.privkey = None + + def __eq__(self, other): + """Return True if the points are identical, False otherwise.""" + if isinstance(other, SigningKey): + return ( + self.curve == other.curve + and self.verifying_key == other.verifying_key + and self.privkey == other.privkey + ) + return NotImplemented + + def __ne__(self, other): + """Return False if the points are identical, True otherwise.""" + return not self == other + + @classmethod + def _twisted_edwards_keygen(cls, curve, entropy): + """Generate a private key on a Twisted Edwards curve.""" + if not entropy: + entropy = os.urandom + random = entropy(curve.baselen) + private_key = eddsa.PrivateKey(curve.generator, random) + public_key = private_key.public_key() + + verifying_key = VerifyingKey.from_string( + public_key.public_key(), curve + ) + + self = cls(_error__please_use_generate=True) + self.curve = curve + self.default_hashfunc = None + self.baselen = curve.baselen + self.privkey = private_key + self.verifying_key = verifying_key + return self + + @classmethod + def _weierstrass_keygen(cls, curve, entropy, hashfunc): + """Generate a private key on a Weierstrass curve.""" + secexp = randrange(curve.order, entropy) + return cls.from_secret_exponent(secexp, curve, hashfunc) + + @classmethod + def generate(cls, curve=NIST192p, entropy=None, hashfunc=sha1): + """ + Generate a random private key. + + :param curve: The curve on which the point needs to reside, defaults + to NIST192p + :type curve: ~ecdsa.curves.Curve + :param entropy: Source of randomness for generating the private keys, + should provide cryptographically secure random numbers if the keys + need to be secure. Uses os.urandom() by default. + :type entropy: callable + :param hashfunc: The default hash function that will be used for + signing, needs to implement the same interface + as hashlib.sha1 + :type hashfunc: callable + + :return: Initialised SigningKey object + :rtype: SigningKey + """ + if isinstance(curve.curve, CurveEdTw): + return cls._twisted_edwards_keygen(curve, entropy) + return cls._weierstrass_keygen(curve, entropy, hashfunc) + + @classmethod + def from_secret_exponent(cls, secexp, curve=NIST192p, hashfunc=sha1): + """ + Create a private key from a random integer. + + Note: it's a low level method, it's recommended to use the + :func:`~SigningKey.generate` method to create private keys. + + :param int secexp: secret multiplier (the actual private key in ECDSA). + Needs to be an integer between 1 and the curve order. 
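
As the keygen helpers above show, `generate()` dispatches on the curve type: Weierstrass curves get a random secret exponent, Twisted Edwards curves an EdDSA private key. A sketch (the `ecdsa.curves.Ed25519` import path is assumed):

    from ecdsa import SigningKey, NIST256p
    from ecdsa.curves import Ed25519

    sk_ec = SigningKey.generate(curve=NIST256p)  # random secexp via randrange()
    sk_ed = SigningKey.generate(curve=Ed25519)   # EdDSA key from os.urandom
    assert sk_ed.default_hashfunc is None        # the hash is fixed by the curve
    assert sk_ec.verifying_key is sk_ec.get_verifying_key()
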
+ :param curve: The curve on which the point needs to reside + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + signing, needs to implement the same interface + as hashlib.sha1 + :type hashfunc: callable + + :raises MalformedPointError: when the provided secexp is too large + or too small for the curve selected + :raises RuntimeError: if the generation of public key from private + key failed + + :return: Initialised SigningKey object + :rtype: SigningKey + """ + if isinstance(curve.curve, CurveEdTw): + raise ValueError( + "Edwards keys don't support setting the secret scalar " + "(exponent) directly" + ) + self = cls(_error__please_use_generate=True) + self.curve = curve + self.default_hashfunc = hashfunc + self.baselen = curve.baselen + n = curve.order + if not 1 <= secexp < n: + raise MalformedPointError( + "Invalid value for secexp, expected integer " + "between 1 and {0}".format(n) + ) + pubkey_point = curve.generator * secexp + if hasattr(pubkey_point, "scale"): + pubkey_point = pubkey_point.scale() + self.verifying_key = VerifyingKey.from_public_point( + pubkey_point, curve, hashfunc, False + ) + pubkey = self.verifying_key.pubkey + self.privkey = ecdsa.Private_key(pubkey, secexp) + self.privkey.order = n + return self + + @classmethod + def from_string(cls, string, curve=NIST192p, hashfunc=sha1): + """ + Decode the private key from :term:`raw encoding`. + + Note: the name of this method is a misnomer coming from days of + Python 2, when binary strings and character strings shared a type. + In Python 3, the expected type is `bytes`. + + :param string: the raw encoding of the private key + :type string: :term:`bytes-like object` + :param curve: The curve on which the point needs to reside + :type curve: ~ecdsa.curves.Curve + :param hashfunc: The default hash function that will be used for + signing, needs to implement the same interface + as hashlib.sha1 + :type hashfunc: callable + + :raises MalformedPointError: if the length of encoding doesn't match + the provided curve or the encoded values is too large + :raises RuntimeError: if the generation of public key from private + key failed + + :return: Initialised SigningKey object + :rtype: SigningKey + """ + string = normalise_bytes(string) + + if len(string) != curve.baselen: + raise MalformedPointError( + "Invalid length of private key, received {0}, " + "expected {1}".format(len(string), curve.baselen) + ) + if isinstance(curve.curve, CurveEdTw): + self = cls(_error__please_use_generate=True) + self.curve = curve + self.default_hashfunc = None # Ignored for EdDSA + self.baselen = curve.baselen + self.privkey = eddsa.PrivateKey(curve.generator, string) + self.verifying_key = VerifyingKey.from_string( + self.privkey.public_key().public_key(), curve + ) + return self + secexp = string_to_number(string) + return cls.from_secret_exponent(secexp, curve, hashfunc) + + @classmethod + def from_pem(cls, string, hashfunc=sha1, valid_curve_encodings=None): + """ + Initialise from key stored in :term:`PEM` format. + + The PEM formats supported are the un-encrypted RFC5915 + (the ssleay format) supported by OpenSSL, and the more common + un-encrypted RFC5958 (the PKCS #8 format). + + The legacy format files have the header with the string + ``BEGIN EC PRIVATE KEY``. + PKCS#8 files have the header ``BEGIN PRIVATE KEY``. + Encrypted files (ones that include the string + ``Proc-Type: 4,ENCRYPTED`` + right after the PEM header) are not supported. 
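
`to_string()` and `from_string()` round-trip the raw fixed-length encoding; a sketch relying on the length check shown above:

    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    raw = sk.to_string()                   # big-endian secret scalar
    assert len(raw) == NIST256p.baselen    # from_string() rejects other lengths
    assert SigningKey.from_string(raw, curve=NIST256p) == sk
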
+ + See :func:`~SigningKey.from_der` for ASN.1 syntax of the objects in + this files. + + :param string: text with PEM-encoded private ECDSA key + :type string: str + :param valid_curve_encodings: list of allowed encoding formats + for curve parameters. By default (``None``) all are supported: + ``named_curve`` and ``explicit``. + :type valid_curve_encodings: :term:`set-like object` + + + :raises MalformedPointError: if the length of encoding doesn't match + the provided curve or the encoded values is too large + :raises RuntimeError: if the generation of public key from private + key failed + :raises UnexpectedDER: if the encoding of the PEM file is incorrect + + :return: Initialised SigningKey object + :rtype: SigningKey + """ + if not PY2 and isinstance(string, str): # pragma: no branch + string = string.encode() + + # The privkey pem may have multiple sections, commonly it also has + # "EC PARAMETERS", we need just "EC PRIVATE KEY". PKCS#8 should not + # have the "EC PARAMETERS" section; it's just "PRIVATE KEY". + private_key_index = string.find(b"-----BEGIN EC PRIVATE KEY-----") + if private_key_index == -1: + private_key_index = string.index(b"-----BEGIN PRIVATE KEY-----") + + return cls.from_der( + der.unpem(string[private_key_index:]), + hashfunc, + valid_curve_encodings, + ) + + @classmethod + def from_der(cls, string, hashfunc=sha1, valid_curve_encodings=None): + """ + Initialise from key stored in :term:`DER` format. + + The DER formats supported are the un-encrypted RFC5915 + (the ssleay format) supported by OpenSSL, and the more common + un-encrypted RFC5958 (the PKCS #8 format). + + Both formats contain an ASN.1 object following the syntax specified + in RFC5915:: + + ECPrivateKey ::= SEQUENCE { + version INTEGER { ecPrivkeyVer1(1) }} (ecPrivkeyVer1), + privateKey OCTET STRING, + parameters [0] ECParameters {{ NamedCurve }} OPTIONAL, + publicKey [1] BIT STRING OPTIONAL + } + + `publicKey` field is ignored completely (errors, if any, in it will + be undetected). + + Two formats are supported for the `parameters` field: the named + curve and the explicit encoding of curve parameters. + In the legacy ssleay format, this implementation requires the optional + `parameters` field to get the curve name. In PKCS #8 format, the curve + is part of the PrivateKeyAlgorithmIdentifier. + + The PKCS #8 format includes an ECPrivateKey object as the `privateKey` + field within a larger structure:: + + OneAsymmetricKey ::= SEQUENCE { + version Version, + privateKeyAlgorithm PrivateKeyAlgorithmIdentifier, + privateKey PrivateKey, + attributes [0] Attributes OPTIONAL, + ..., + [[2: publicKey [1] PublicKey OPTIONAL ]], + ... + } + + The `attributes` and `publicKey` fields are completely ignored; errors + in them will not be detected. + + :param string: binary string with DER-encoded private ECDSA key + :type string: :term:`bytes-like object` + :param valid_curve_encodings: list of allowed encoding formats + for curve parameters. By default (``None``) all are supported: + ``named_curve`` and ``explicit``. + Ignored for EdDSA. 
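
`from_pem()` accepts both header styles described above, so either serialisation round-trips. A sketch (key equality compares the curve, the public key and the private scalar, so the default hashfunc does not affect the comparison):

    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    pem_ssleay = sk.to_pem()               # "BEGIN EC PRIVATE KEY" (RFC 5915)
    pem_pkcs8 = sk.to_pem(format="pkcs8")  # "BEGIN PRIVATE KEY" (RFC 5958)
    assert SigningKey.from_pem(pem_ssleay) == SigningKey.from_pem(pem_pkcs8) == sk
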
+ :type valid_curve_encodings: :term:`set-like object` + + :raises MalformedPointError: if the length of encoding doesn't match + the provided curve or the encoded values is too large + :raises RuntimeError: if the generation of public key from private + key failed + :raises UnexpectedDER: if the encoding of the DER file is incorrect + + :return: Initialised SigningKey object + :rtype: SigningKey + """ + s = normalise_bytes(string) + curve = None + + s, empty = der.remove_sequence(s) + if empty != b(""): + raise der.UnexpectedDER( + "trailing junk after DER privkey: %s" % binascii.hexlify(empty) + ) + + version, s = der.remove_integer(s) + + # At this point, PKCS #8 has a sequence containing the algorithm + # identifier and the curve identifier. The ssleay format instead has + # an octet string containing the key data, so this is how we can + # distinguish the two formats. + if der.is_sequence(s): + if version not in (0, 1): + raise der.UnexpectedDER( + "expected version '0' or '1' at start of privkey, got %d" + % version + ) + + sequence, s = der.remove_sequence(s) + algorithm_oid, algorithm_identifier = der.remove_object(sequence) + + if algorithm_oid in (Ed25519.oid, Ed448.oid): + if algorithm_identifier: + raise der.UnexpectedDER( + "Non NULL parameters for a EdDSA key" + ) + key_str_der, s = der.remove_octet_string(s) + + # As RFC5958 describe, there are may be optional Attributes + # and Publickey. Don't raise error if something after + # Privatekey + + # TODO parse attributes or validate publickey + # if s: + # raise der.UnexpectedDER( + # "trailing junk inside the privateKey" + # ) + key_str, s = der.remove_octet_string(key_str_der) + if s: + raise der.UnexpectedDER( + "trailing junk after the encoded private key" + ) + + if algorithm_oid == Ed25519.oid: + curve = Ed25519 + else: + assert algorithm_oid == Ed448.oid + curve = Ed448 + + return cls.from_string(key_str, curve, None) + + if algorithm_oid not in (oid_ecPublicKey, oid_ecDH, oid_ecMQV): + raise der.UnexpectedDER( + "unexpected algorithm identifier '%s'" % (algorithm_oid,) + ) + + curve = Curve.from_der(algorithm_identifier, valid_curve_encodings) + + if empty != b"": + raise der.UnexpectedDER( + "unexpected data after algorithm identifier: %s" + % binascii.hexlify(empty) + ) + + # Up next is an octet string containing an ECPrivateKey. Ignore + # the optional "attributes" and "publicKey" fields that come after. + s, _ = der.remove_octet_string(s) + + # Unpack the ECPrivateKey to get to the key data octet string, + # and rejoin the ssleay parsing path. + s, empty = der.remove_sequence(s) + if empty != b(""): + raise der.UnexpectedDER( + "trailing junk after DER privkey: %s" + % binascii.hexlify(empty) + ) + + version, s = der.remove_integer(s) + + # The version of the ECPrivateKey must be 1. 
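
The branch above is where the two formats diverge: in PKCS #8 a SEQUENCE (the algorithm identifier) follows the version integer, while the ssleay layout continues with an OCTET STRING of key data. Both DER layouts should round-trip; a sketch:

    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    for fmt in ("ssleay", "pkcs8"):  # RFC 5915 and RFC 5958 layouts
        assert SigningKey.from_der(sk.to_der(format=fmt)) == sk
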
+ if version != 1: + raise der.UnexpectedDER( + "expected version '1' at start of DER privkey, got %d" + % version + ) + + privkey_str, s = der.remove_octet_string(s) + + if not curve: + tag, curve_oid_str, s = der.remove_constructed(s) + if tag != 0: + raise der.UnexpectedDER( + "expected tag 0 in DER privkey, got %d" % tag + ) + curve = Curve.from_der(curve_oid_str, valid_curve_encodings) + + # we don't actually care about the following fields + # + # tag, pubkey_bitstring, s = der.remove_constructed(s) + # if tag != 1: + # raise der.UnexpectedDER("expected tag 1 in DER privkey, got %d" + # % tag) + # pubkey_str = der.remove_bitstring(pubkey_bitstring, 0) + # if empty != "": + # raise der.UnexpectedDER("trailing junk after DER privkey " + # "pubkeystr: %s" + # % binascii.hexlify(empty)) + + # our from_string method likes fixed-length privkey strings + if len(privkey_str) < curve.baselen: + privkey_str = ( + b("\x00") * (curve.baselen - len(privkey_str)) + privkey_str + ) + return cls.from_string(privkey_str, curve, hashfunc) + + def to_string(self): + """ + Convert the private key to :term:`raw encoding`. + + Note: while the method is named "to_string", its name comes from + Python 2 days, when binary and character strings used the same type. + The type used in Python 3 is `bytes`. + + :return: raw encoding of private key + :rtype: bytes + """ + if isinstance(self.curve.curve, CurveEdTw): + return bytes(self.privkey.private_key) + secexp = self.privkey.secret_multiplier + s = number_to_string(secexp, self.privkey.order) + return s + + def to_pem( + self, + point_encoding="uncompressed", + format="ssleay", + curve_parameters_encoding=None, + ): + """ + Convert the private key to the :term:`PEM` format. + + See :func:`~SigningKey.from_pem` method for format description. + + Only the named curve format is supported. + The public key will be included in generated string. + + The PEM header will specify ``BEGIN EC PRIVATE KEY`` or + ``BEGIN PRIVATE KEY``, depending on the desired format. + + :param str point_encoding: format to use for encoding public point + :param str format: either ``ssleay`` (default) or ``pkcs8`` + :param str curve_parameters_encoding: format of encoded curve + parameters, default depends on the curve, if the curve has + an associated OID, ``named_curve`` format will be used, + if no OID is associated with the curve, the fallback of + ``explicit`` parameters will be used. + + :return: PEM encoded private key + :rtype: bytes + + .. warning:: The PEM is encoded to US-ASCII, it needs to be + re-encoded if the system is incompatible (e.g. uses UTF-16) + """ + # TODO: "BEGIN ECPARAMETERS" + assert format in ("ssleay", "pkcs8") + header = "EC PRIVATE KEY" if format == "ssleay" else "PRIVATE KEY" + return der.topem( + self.to_der(point_encoding, format, curve_parameters_encoding), + header, + ) + + def _encode_eddsa(self): + """Create a PKCS#8 encoding of EdDSA keys.""" + ec_private_key = der.encode_octet_string(self.to_string()) + return der.encode_sequence( + der.encode_integer(0), + der.encode_sequence(der.encode_oid(*self.curve.oid)), + der.encode_octet_string(ec_private_key), + ) + + def to_der( + self, + point_encoding="uncompressed", + format="ssleay", + curve_parameters_encoding=None, + ): + """ + Convert the private key to the :term:`DER` format. + + See :func:`~SigningKey.from_der` method for format specification. + + Only the named curve format is supported. + The public key will be included in the generated string. 
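
A sketch of the `to_pem()` options described above (the compressed point encoding only affects the public key embedded in the file):

    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    pem = sk.to_pem(point_encoding="compressed")  # ssleay format by default
    assert pem.startswith(b"-----BEGIN EC PRIVATE KEY-----")
    assert b"BEGIN PRIVATE KEY" in sk.to_pem(format="pkcs8")
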
+ + :param str point_encoding: format to use for encoding public point + Ignored for EdDSA + :param str format: either ``ssleay`` (default) or ``pkcs8``. + EdDSA keys require ``pkcs8``. + :param str curve_parameters_encoding: format of encoded curve + parameters, default depends on the curve, if the curve has + an associated OID, ``named_curve`` format will be used, + if no OID is associated with the curve, the fallback of + ``explicit`` parameters will be used. + Ignored for EdDSA. + + :return: DER encoded private key + :rtype: bytes + """ + # SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1), + # cont[1],bitstring]) + if point_encoding == "raw": + raise ValueError("raw encoding not allowed in DER") + assert format in ("ssleay", "pkcs8") + if isinstance(self.curve.curve, CurveEdTw): + if format != "pkcs8": + raise ValueError("Only PKCS#8 format supported for EdDSA keys") + return self._encode_eddsa() + encoded_vk = self.get_verifying_key().to_string(point_encoding) + priv_key_elems = [ + der.encode_integer(1), + der.encode_octet_string(self.to_string()), + ] + if format == "ssleay": + priv_key_elems.append( + der.encode_constructed( + 0, self.curve.to_der(curve_parameters_encoding) + ) + ) + # the 0 in encode_bitstring specifies the number of unused bits + # in the `encoded_vk` string + priv_key_elems.append( + der.encode_constructed(1, der.encode_bitstring(encoded_vk, 0)) + ) + ec_private_key = der.encode_sequence(*priv_key_elems) + + if format == "ssleay": + return ec_private_key + else: + return der.encode_sequence( + # version = 1 means the public key is not present in the + # top-level structure. + der.encode_integer(1), + der.encode_sequence( + der.encode_oid(*oid_ecPublicKey), + self.curve.to_der(curve_parameters_encoding), + ), + der.encode_octet_string(ec_private_key), + ) + + def get_verifying_key(self): + """ + Return the VerifyingKey associated with this private key. + + Equivalent to reading the `verifying_key` field of an instance. + + :return: a public key that can be used to verify the signatures made + with this SigningKey + :rtype: VerifyingKey + """ + return self.verifying_key + + def sign_deterministic( + self, + data, + hashfunc=None, + sigencode=sigencode_string, + extra_entropy=b"", + ): + """ + Create signature over data. + + For Weierstrass curves it uses the deterministic RFC6979 algorithm. + For Edwards curves it uses the standard EdDSA algorithm. + + For ECDSA the data will be hashed using the `hashfunc` function before + signing. + For EdDSA the data will be hashed with the hash associated with the + curve (SHA-512 for Ed25519 and SHAKE-256 for Ed448). + + This is the recommended method for performing signatures when hashing + of data is necessary. + + :param data: data to be hashed and computed signature over + :type data: :term:`bytes-like object` + :param hashfunc: hash function to use for computing the signature, + if unspecified, the default hash function selected during + object initialisation will be used (see + `VerifyingKey.default_hashfunc`). The object needs to implement + the same interface as hashlib.sha1. + Ignored with EdDSA. + :type hashfunc: callable + :param sigencode: function used to encode the signature. + The function needs to accept three parameters: the two integers + that are the signature and the order of the curve over which the + signature was computed. It needs to return an encoded signature. + See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der` + as examples of such functions. + Ignored with EdDSA. 
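
Because RFC 6979 derives the nonce from the key and the message, `sign_deterministic()` needs no RNG at signing time and produces the same signature for the same input; a sketch:

    import hashlib
    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    sig1 = sk.sign_deterministic(b"msg", hashfunc=hashlib.sha256)
    sig2 = sk.sign_deterministic(b"msg", hashfunc=hashlib.sha256)
    assert sig1 == sig2  # same key + same message => same k => same signature

    # extra_entropy (RFC 6979, section 3.6) perturbs k, and thus the signature
    sig3 = sk.sign_deterministic(b"msg", hashfunc=hashlib.sha256,
                                 extra_entropy=b"v1")
    assert sig3 != sig1
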
+ :type sigencode: callable + :param extra_entropy: additional data that will be fed into the random + number generator used in the RFC6979 process. Entirely optional. + Ignored with EdDSA. + :type extra_entropy: :term:`bytes-like object` + + :return: encoded signature over `data` + :rtype: bytes or sigencode function dependent type + """ + hashfunc = hashfunc or self.default_hashfunc + data = normalise_bytes(data) + + if isinstance(self.curve.curve, CurveEdTw): + return self.privkey.sign(data) + + extra_entropy = normalise_bytes(extra_entropy) + digest = hashfunc(data).digest() + + return self.sign_digest_deterministic( + digest, + hashfunc=hashfunc, + sigencode=sigencode, + extra_entropy=extra_entropy, + allow_truncate=True, + ) + + def sign_digest_deterministic( + self, + digest, + hashfunc=None, + sigencode=sigencode_string, + extra_entropy=b"", + allow_truncate=False, + ): + """ + Create signature for digest using the deterministic RFC6979 algorithm. + + `digest` should be the output of cryptographically secure hash function + like SHA256 or SHA-3-256. + + This is the recommended method for performing signatures when no + hashing of data is necessary. + + :param digest: hash of data that will be signed + :type digest: :term:`bytes-like object` + :param hashfunc: hash function to use for computing the random "k" + value from RFC6979 process, + if unspecified, the default hash function selected during + object initialisation will be used (see + :attr:`.VerifyingKey.default_hashfunc`). The object needs to + implement + the same interface as :func:`~hashlib.sha1` from :py:mod:`hashlib`. + :type hashfunc: callable + :param sigencode: function used to encode the signature. + The function needs to accept three parameters: the two integers + that are the signature and the order of the curve over which the + signature was computed. It needs to return an encoded signature. + See :func:`~ecdsa.util.sigencode_string` and + :func:`~ecdsa.util.sigencode_der` + as examples of such functions. + :type sigencode: callable + :param extra_entropy: additional data that will be fed into the random + number generator used in the RFC6979 process. Entirely optional. + :type extra_entropy: :term:`bytes-like object` + :param bool allow_truncate: if True, the provided digest can have + bigger bit-size than the order of the curve, the extra bits (at + the end of the digest) will be truncated. Use it when signing + SHA-384 output using NIST256p or in similar situations. + + :return: encoded signature for the `digest` hash + :rtype: bytes or sigencode function dependent type + """ + if isinstance(self.curve.curve, CurveEdTw): + raise ValueError("Method unsupported for Edwards curves") + secexp = self.privkey.secret_multiplier + hashfunc = hashfunc or self.default_hashfunc + digest = normalise_bytes(digest) + extra_entropy = normalise_bytes(extra_entropy) + + def simple_r_s(r, s, order): + return r, s, order + + retry_gen = 0 + while True: + k = rfc6979.generate_k( + self.curve.generator.order(), + secexp, + hashfunc, + digest, + retry_gen=retry_gen, + extra_entropy=extra_entropy, + ) + try: + r, s, order = self.sign_digest( + digest, + sigencode=simple_r_s, + k=k, + allow_truncate=allow_truncate, + ) + break + except RSZeroError: + retry_gen += 1 + + return sigencode(r, s, order) + + def sign( + self, + data, + entropy=None, + hashfunc=None, + sigencode=sigencode_string, + k=None, + allow_truncate=True, + ): + """ + Create signature over data. 
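
`sign_digest_deterministic()` with `allow_truncate=True` covers the case the docstrings above keep mentioning, a digest wider than the curve order; a sketch:

    import hashlib
    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    # SHA-384 output (48 bytes) exceeds the NIST256p order (32 bytes), so both
    # the signer and the verifier must opt into truncating the extra bits
    digest = hashlib.sha384(b"message").digest()
    sig = sk.sign_digest_deterministic(digest, allow_truncate=True)
    assert sk.get_verifying_key().verify_digest(sig, digest, allow_truncate=True)
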
+ + Uses the probabilistic ECDSA algorithm for Weierstrass curves + (NIST256p, etc.) and the deterministic EdDSA algorithm for the + Edwards curves (Ed25519, Ed448). + + This method uses the standard ECDSA algorithm that requires a + cryptographically secure random number generator. + + It's recommended to use the :func:`~SigningKey.sign_deterministic` + method instead of this one. + + :param data: data that will be hashed for signing + :type data: :term:`bytes-like object` + :param callable entropy: randomness source, :func:`os.urandom` by + default. Ignored with EdDSA. + :param hashfunc: hash function to use for hashing the provided + ``data``. + If unspecified the default hash function selected during + object initialisation will be used (see + :attr:`.VerifyingKey.default_hashfunc`). + Should behave like :func:`~hashlib.sha1` from :py:mod:`hashlib`. + The output length of the + hash (in bytes) must not be longer than the length of the curve + order (rounded up to the nearest byte), so using SHA256 with + NIST256p is ok, but SHA256 with NIST192p is not. (In the 2**-96ish + unlikely event of a hash output larger than the curve order, the + hash will effectively be wrapped mod n). + If you want to explicitly allow use of large hashes with small + curves set the ``allow_truncate`` to ``True``. + Use ``hashfunc=hashlib.sha1`` to match openssl's + ``-ecdsa-with-SHA1`` mode, + or ``hashfunc=hashlib.sha256`` for openssl-1.0.0's + ``-ecdsa-with-SHA256``. + Ignored for EdDSA + :type hashfunc: callable + :param sigencode: function used to encode the signature. + The function needs to accept three parameters: the two integers + that are the signature and the order of the curve over which the + signature was computed. It needs to return an encoded signature. + See :func:`~ecdsa.util.sigencode_string` and + :func:`~ecdsa.util.sigencode_der` + as examples of such functions. + Ignored for EdDSA + :type sigencode: callable + :param int k: a pre-selected nonce for calculating the signature. + In typical use cases, it should be set to None (the default) to + allow its generation from an entropy source. + Ignored for EdDSA. + :param bool allow_truncate: if ``True``, the provided digest can have + bigger bit-size than the order of the curve, the extra bits (at + the end of the digest) will be truncated. Use it when signing + SHA-384 output using NIST256p or in similar situations. True by + default. + Ignored for EdDSA. + + :raises RSZeroError: in the unlikely event when *r* parameter or + *s* parameter of the created signature is equal 0, as that would + leak the key. Caller should try a better entropy source, retry with + different ``k``, or use the + :func:`~SigningKey.sign_deterministic` in such case. + + :return: encoded signature of the hash of `data` + :rtype: bytes or sigencode function dependent type + """ + hashfunc = hashfunc or self.default_hashfunc + data = normalise_bytes(data) + if isinstance(self.curve.curve, CurveEdTw): + return self.sign_deterministic(data) + h = hashfunc(data).digest() + return self.sign_digest(h, entropy, sigencode, k, allow_truncate) + + def sign_digest( + self, + digest, + entropy=None, + sigencode=sigencode_string, + k=None, + allow_truncate=False, + ): + """ + Create signature over digest using the probabilistic ECDSA algorithm. + + This method uses the standard ECDSA algorithm that requires a + cryptographically secure random number generator. + + This method does not hash the input. 
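
A sketch of the low-level probabilistic path; the comment reflects the RSZeroError caveat above, since nonce handling is the security-critical part of ECDSA:

    import hashlib
    import os
    from ecdsa import SigningKey, NIST256p

    sk = SigningKey.generate(curve=NIST256p)
    digest = hashlib.sha256(b"message").digest()

    # entropy feeds the per-signature nonce k; never reuse an explicit k,
    # since two signatures sharing k reveal the private key
    sig = sk.sign_digest(digest, entropy=os.urandom)
    assert sk.get_verifying_key().verify_digest(sig, digest)
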
+ + It's recommended to use the + :func:`~SigningKey.sign_digest_deterministic` method + instead of this one. + + :param digest: hash value that will be signed + :type digest: :term:`bytes-like object` + :param callable entropy: randomness source, os.urandom by default + :param sigencode: function used to encode the signature. + The function needs to accept three parameters: the two integers + that are the signature and the order of the curve over which the + signature was computed. It needs to return an encoded signature. + See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der` + as examples of such functions. + :type sigencode: callable + :param int k: a pre-selected nonce for calculating the signature. + In typical use cases, it should be set to None (the default) to + allow its generation from an entropy source. + :param bool allow_truncate: if True, the provided digest can have + bigger bit-size than the order of the curve, the extra bits (at + the end of the digest) will be truncated. Use it when signing + SHA-384 output using NIST256p or in similar situations. + + :raises RSZeroError: in the unlikely event when "r" parameter or + "s" parameter of the created signature is equal 0, as that would + leak the key. Caller should try a better entropy source, retry with + different 'k', or use the + :func:`~SigningKey.sign_digest_deterministic` in such case. + + :return: encoded signature for the `digest` hash + :rtype: bytes or sigencode function dependent type + """ + if isinstance(self.curve.curve, CurveEdTw): + raise ValueError("Method unsupported for Edwards curves") + digest = normalise_bytes(digest) + number = _truncate_and_convert_digest( + digest, + self.curve, + allow_truncate, + ) + r, s = self.sign_number(number, entropy, k) + return sigencode(r, s, self.privkey.order) + + def sign_number(self, number, entropy=None, k=None): + """ + Sign an integer directly. + + Note, this is a low level method, usually you will want to use + :func:`~SigningKey.sign_deterministic` or + :func:`~SigningKey.sign_digest_deterministic`. + + :param int number: number to sign using the probabilistic ECDSA + algorithm. + :param callable entropy: entropy source, os.urandom by default + :param int k: pre-selected nonce for signature operation. If unset + it will be selected at random using the entropy source. + + :raises RSZeroError: in the unlikely event when "r" parameter or + "s" parameter of the created signature is equal 0, as that would + leak the key. Caller should try a better entropy source, retry with + different 'k', or use the + :func:`~SigningKey.sign_digest_deterministic` in such case. + + :return: the "r" and "s" parameters of the signature + :rtype: tuple of ints + """ + if isinstance(self.curve.curve, CurveEdTw): + raise ValueError("Method unsupported for Edwards curves") + order = self.privkey.order + + if k is not None: + _k = k + else: + _k = randrange(order, entropy) + + assert 1 <= _k < order + sig = self.privkey.sign(number, _k) + return sig.r, sig.s diff --git a/myenv/lib/python3.9/site-packages/ecdsa/numbertheory.py b/myenv/lib/python3.9/site-packages/ecdsa/numbertheory.py new file mode 100644 index 0000000..d3500c7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/numbertheory.py @@ -0,0 +1,825 @@ +#! /usr/bin/env python +# +# Provide some simple capabilities from number theory. +# +# Version of 2008.11.14. +# +# Written in 2005 and 2006 by Peter Pearson and placed in the public domain. 
+# Revision history: +# 2008.11.14: Use pow(base, exponent, modulus) for modular_exp. +# Make gcd and lcm accept arbitrarily many arguments. + +from __future__ import division + +import sys +from six import integer_types, PY2 +from six.moves import reduce + +try: + xrange +except NameError: + xrange = range +try: + from gmpy2 import powmod + + GMPY2 = True + GMPY = False +except ImportError: + GMPY2 = False + try: + from gmpy import mpz + + GMPY = True + except ImportError: + GMPY = False + +import math +import warnings + + +class Error(Exception): + """Base class for exceptions in this module.""" + + pass + + +class JacobiError(Error): + pass + + +class SquareRootError(Error): + pass + + +class NegativeExponentError(Error): + pass + + +def modular_exp(base, exponent, modulus): # pragma: no cover + """Raise base to exponent, reducing by modulus""" + # deprecated in 0.14 + warnings.warn( + "Function is unused in library code. If you use this code, " + "change to pow() builtin.", + DeprecationWarning, + ) + if exponent < 0: + raise NegativeExponentError( + "Negative exponents (%d) not allowed" % exponent + ) + return pow(base, exponent, modulus) + + +def polynomial_reduce_mod(poly, polymod, p): + """Reduce poly by polymod, integer arithmetic modulo p. + + Polynomials are represented as lists of coefficients + of increasing powers of x.""" + + # This module has been tested only by extensive use + # in calculating modular square roots. + + # Just to make this easy, require a monic polynomial: + assert polymod[-1] == 1 + + assert len(polymod) > 1 + + while len(poly) >= len(polymod): + if poly[-1] != 0: + for i in xrange(2, len(polymod) + 1): + poly[-i] = (poly[-i] - poly[-1] * polymod[-i]) % p + poly = poly[0:-1] + + return poly + + +def polynomial_multiply_mod(m1, m2, polymod, p): + """Polynomial multiplication modulo a polynomial over ints mod p. + + Polynomials are represented as lists of coefficients + of increasing powers of x.""" + + # This is just a seat-of-the-pants implementation. + + # This module has been tested only by extensive use + # in calculating modular square roots. + + # Initialize the product to zero: + + prod = (len(m1) + len(m2) - 1) * [0] + + # Add together all the cross-terms: + + for i in xrange(len(m1)): + for j in xrange(len(m2)): + prod[i + j] = (prod[i + j] + m1[i] * m2[j]) % p + + return polynomial_reduce_mod(prod, polymod, p) + + +def polynomial_exp_mod(base, exponent, polymod, p): + """Polynomial exponentiation modulo a polynomial over ints mod p. + + Polynomials are represented as lists of coefficients + of increasing powers of x.""" + + # Based on the Handbook of Applied Cryptography, algorithm 2.227. + + # This module has been tested only by extensive use + # in calculating modular square roots. + + assert exponent < p + + if exponent == 0: + return [1] + + G = base + k = exponent + if k % 2 == 1: + s = G + else: + s = [1] + + while k > 1: + k = k // 2 + G = polynomial_multiply_mod(G, G, polymod, p) + if k % 2 == 1: + s = polynomial_multiply_mod(G, s, polymod, p) + + return s + + +def jacobi(a, n): + """Jacobi symbol""" + + # Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149. + + # This function has been tested by comparison with a small + # table printed in HAC, and by extensive use in calculating + # modular square roots. 
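
For readers skimming the number theory helpers: the Jacobi symbol generalises the Legendre symbol, and `square_root_mod_prime()` below uses it to reject quadratic non-residues. A tiny sketch (assuming the module imports as `ecdsa.numbertheory`):

    from ecdsa.numbertheory import (jacobi, square_root_mod_prime,
                                    SquareRootError)

    p = 1009                      # an odd prime
    assert jacobi(4, p) == 1      # 4 is a square, hence a residue mod any prime
    r = square_root_mod_prime(4, p)
    assert (r * r) % p == 4       # either root (2 or p - 2) satisfies this

    try:
        square_root_mod_prime(11, p)  # jacobi(11, 1009) == -1: no root exists
    except SquareRootError:
        pass
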
+ + if not n >= 3: + raise JacobiError("n must be larger than 2") + if not n % 2 == 1: + raise JacobiError("n must be odd") + a = a % n + if a == 0: + return 0 + if a == 1: + return 1 + a1, e = a, 0 + while a1 % 2 == 0: + a1, e = a1 // 2, e + 1 + if e % 2 == 0 or n % 8 == 1 or n % 8 == 7: + s = 1 + else: + s = -1 + if a1 == 1: + return s + if n % 4 == 3 and a1 % 4 == 3: + s = -s + return s * jacobi(n % a1, a1) + + +def square_root_mod_prime(a, p): + """Modular square root of a, mod p, p prime.""" + + # Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39. + + # This module has been tested for all values in [0,p-1] for + # every prime p from 3 to 1229. + + assert 0 <= a < p + assert 1 < p + + if a == 0: + return 0 + if p == 2: + return a + + jac = jacobi(a, p) + if jac == -1: + raise SquareRootError("%d has no square root modulo %d" % (a, p)) + + if p % 4 == 3: + return pow(a, (p + 1) // 4, p) + + if p % 8 == 5: + d = pow(a, (p - 1) // 4, p) + if d == 1: + return pow(a, (p + 3) // 8, p) + assert d == p - 1 + return (2 * a * pow(4 * a, (p - 5) // 8, p)) % p + + if PY2: + # xrange on python2 can take integers representable as C long only + range_top = min(0x7FFFFFFF, p) + else: + range_top = p + for b in xrange(2, range_top): + if jacobi(b * b - 4 * a, p) == -1: + f = (a, -b, 1) + ff = polynomial_exp_mod((0, 1), (p + 1) // 2, f, p) + if ff[1]: + raise SquareRootError("p is not prime") + return ff[0] + raise RuntimeError("No b found.") + + +# because all the inverse_mod code is arch/environment specific, and coveralls +# expects it to execute equal number of times, we need to waive it by +# adding the "no branch" pragma to all branches +if GMPY2: # pragma: no branch + + def inverse_mod(a, m): + """Inverse of a mod m.""" + if a == 0: # pragma: no branch + return 0 + return powmod(a, -1, m) + +elif GMPY: # pragma: no branch + + def inverse_mod(a, m): + """Inverse of a mod m.""" + # while libgmp does support inverses modulo, it is accessible + # only using the native `pow()` function, and `pow()` in gmpy sanity + # checks the parameters before passing them on to underlying + # implementation + if a == 0: # pragma: no branch + return 0 + a = mpz(a) + m = mpz(m) + + lm, hm = mpz(1), mpz(0) + low, high = a % m, m + while low > 1: # pragma: no branch + r = high // low + lm, low, hm, high = hm - lm * r, high - low * r, lm, low + + return lm % m + +elif sys.version_info >= (3, 8): # pragma: no branch + + def inverse_mod(a, m): + """Inverse of a mod m.""" + if a == 0: # pragma: no branch + return 0 + return pow(a, -1, m) + +else: # pragma: no branch + + def inverse_mod(a, m): + """Inverse of a mod m.""" + + if a == 0: # pragma: no branch + return 0 + + lm, hm = 1, 0 + low, high = a % m, m + while low > 1: # pragma: no branch + r = high // low + lm, low, hm, high = hm - lm * r, high - low * r, lm, low + + return lm % m + + +try: + gcd2 = math.gcd +except AttributeError: + + def gcd2(a, b): + """Greatest common divisor using Euclid's algorithm.""" + while a: + a, b = b % a, a + return b + + +def gcd(*a): + """Greatest common divisor. + + Usage: gcd([ 2, 4, 6 ]) + or: gcd(2, 4, 6) + """ + + if len(a) > 1: + return reduce(gcd2, a) + if hasattr(a[0], "__iter__"): + return reduce(gcd2, a[0]) + return a[0] + + +def lcm2(a, b): + """Least common multiple of two integers.""" + + return (a * b) // gcd(a, b) + + +def lcm(*a): + """Least common multiple. 
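
The arithmetic helpers in this file accept both variadic arguments and a single iterable; a sketch:

    from ecdsa.numbertheory import gcd, lcm, inverse_mod

    assert gcd(12, 18, 30) == 6    # variadic form
    assert gcd([12, 18, 30]) == 6  # single-iterable form
    assert lcm(3, 4, 5) == 60
    assert (7 * inverse_mod(7, 101)) % 101 == 1  # modular inverse
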
+ + Usage: lcm([ 3, 4, 5 ]) + or: lcm(3, 4, 5) + """ + + if len(a) > 1: + return reduce(lcm2, a) + if hasattr(a[0], "__iter__"): + return reduce(lcm2, a[0]) + return a[0] + + +def factorization(n): + """Decompose n into a list of (prime,exponent) pairs.""" + + assert isinstance(n, integer_types) + + if n < 2: + return [] + + result = [] + + # Test the small primes: + + for d in smallprimes: + if d > n: + break + q, r = divmod(n, d) + if r == 0: + count = 1 + while d <= n: + n = q + q, r = divmod(n, d) + if r != 0: + break + count = count + 1 + result.append((d, count)) + + # If n is still greater than the last of our small primes, + # it may require further work: + + if n > smallprimes[-1]: + if is_prime(n): # If what's left is prime, it's easy: + result.append((n, 1)) + else: # Ugh. Search stupidly for a divisor: + d = smallprimes[-1] + while 1: + d = d + 2 # Try the next divisor. + q, r = divmod(n, d) + if q < d: # n < d*d means we're done, n = 1 or prime. + break + if r == 0: # d divides n. How many times? + count = 1 + n = q + while d <= n: # As long as d might still divide n, + q, r = divmod(n, d) # see if it does. + if r != 0: + break + n = q # It does. Reduce n, increase count. + count = count + 1 + result.append((d, count)) + if n > 1: + result.append((n, 1)) + + return result + + +def phi(n): # pragma: no cover + """Return the Euler totient function of n.""" + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + assert isinstance(n, integer_types) + + if n < 3: + return 1 + + result = 1 + ff = factorization(n) + for f in ff: + e = f[1] + if e > 1: + result = result * f[0] ** (e - 1) * (f[0] - 1) + else: + result = result * (f[0] - 1) + return result + + +def carmichael(n): # pragma: no cover + """Return Carmichael function of n. + + Carmichael(n) is the smallest integer x such that + m**x = 1 mod n for all m relatively prime to n. + """ + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + return carmichael_of_factorized(factorization(n)) + + +def carmichael_of_factorized(f_list): # pragma: no cover + """Return the Carmichael function of a number that is + represented as a list of (prime,exponent) pairs. + """ + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + if len(f_list) < 1: + return 1 + + result = carmichael_of_ppower(f_list[0]) + for i in xrange(1, len(f_list)): + result = lcm(result, carmichael_of_ppower(f_list[i])) + + return result + + +def carmichael_of_ppower(pp): # pragma: no cover + """Carmichael function of the given power of the given prime.""" + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + p, a = pp + if p == 2 and a > 2: + return 2 ** (a - 2) + else: + return (p - 1) * p ** (a - 1) + + +def order_mod(x, m): # pragma: no cover + """Return the order of x in the multiplicative group mod m.""" + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. 
If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + # Warning: this implementation is not very clever, and will + # take a long time if m is very large. + + if m <= 1: + return 0 + + assert gcd(x, m) == 1 + + z = x + result = 1 + while z != 1: + z = (z * x) % m + result = result + 1 + return result + + +def largest_factor_relatively_prime(a, b): # pragma: no cover + """Return the largest factor of a relatively prime to b.""" + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + while 1: + d = gcd(a, b) + if d <= 1: + break + b = d + while 1: + q, r = divmod(a, d) + if r > 0: + break + a = q + return a + + +def kinda_order_mod(x, m): # pragma: no cover + """Return the order of x in the multiplicative group mod m', + where m' is the largest factor of m relatively prime to x. + """ + # deprecated in 0.14 + warnings.warn( + "Function is unused by library code. If you use this code, " + "please open an issue in " + "https://github.com/tlsfuzzer/python-ecdsa", + DeprecationWarning, + ) + + return order_mod(x, largest_factor_relatively_prime(m, x)) + + +def is_prime(n): + """Return True if x is prime, False otherwise. + + We use the Miller-Rabin test, as given in Menezes et al. p. 138. + This test is not exact: there are composite values n for which + it returns True. + + In testing the odd numbers from 10000001 to 19999999, + about 66 composites got past the first test, + 5 got past the second test, and none got past the third. + Since factors of 2, 3, 5, 7, and 11 were detected during + preliminary screening, the number of numbers tested by + Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7) + = 4.57 million. + """ + + # (This is used to study the risk of false positives:) + global miller_rabin_test_count + + miller_rabin_test_count = 0 + + if n <= smallprimes[-1]: + if n in smallprimes: + return True + else: + return False + + if gcd(n, 2 * 3 * 5 * 7 * 11) != 1: + return False + + # Choose a number of iterations sufficient to reduce the + # probability of accepting a composite below 2**-80 + # (from Menezes et al. 
Table 4.4): + + t = 40 + n_bits = 1 + int(math.log(n, 2)) + for k, tt in ( + (100, 27), + (150, 18), + (200, 15), + (250, 12), + (300, 9), + (350, 8), + (400, 7), + (450, 6), + (550, 5), + (650, 4), + (850, 3), + (1300, 2), + ): + if n_bits < k: + break + t = tt + + # Run the test t times: + + s = 0 + r = n - 1 + while (r % 2) == 0: + s = s + 1 + r = r // 2 + for i in xrange(t): + a = smallprimes[i] + y = pow(a, r, n) + if y != 1 and y != n - 1: + j = 1 + while j <= s - 1 and y != n - 1: + y = pow(y, 2, n) + if y == 1: + miller_rabin_test_count = i + 1 + return False + j = j + 1 + if y != n - 1: + miller_rabin_test_count = i + 1 + return False + return True + + +def next_prime(starting_value): + """Return the smallest prime larger than the starting value.""" + + if starting_value < 2: + return 2 + result = (starting_value + 1) | 1 + while not is_prime(result): + result = result + 2 + return result + + +smallprimes = [ + 2, + 3, + 5, + 7, + 11, + 13, + 17, + 19, + 23, + 29, + 31, + 37, + 41, + 43, + 47, + 53, + 59, + 61, + 67, + 71, + 73, + 79, + 83, + 89, + 97, + 101, + 103, + 107, + 109, + 113, + 127, + 131, + 137, + 139, + 149, + 151, + 157, + 163, + 167, + 173, + 179, + 181, + 191, + 193, + 197, + 199, + 211, + 223, + 227, + 229, + 233, + 239, + 241, + 251, + 257, + 263, + 269, + 271, + 277, + 281, + 283, + 293, + 307, + 311, + 313, + 317, + 331, + 337, + 347, + 349, + 353, + 359, + 367, + 373, + 379, + 383, + 389, + 397, + 401, + 409, + 419, + 421, + 431, + 433, + 439, + 443, + 449, + 457, + 461, + 463, + 467, + 479, + 487, + 491, + 499, + 503, + 509, + 521, + 523, + 541, + 547, + 557, + 563, + 569, + 571, + 577, + 587, + 593, + 599, + 601, + 607, + 613, + 617, + 619, + 631, + 641, + 643, + 647, + 653, + 659, + 661, + 673, + 677, + 683, + 691, + 701, + 709, + 719, + 727, + 733, + 739, + 743, + 751, + 757, + 761, + 769, + 773, + 787, + 797, + 809, + 811, + 821, + 823, + 827, + 829, + 839, + 853, + 857, + 859, + 863, + 877, + 881, + 883, + 887, + 907, + 911, + 919, + 929, + 937, + 941, + 947, + 953, + 967, + 971, + 977, + 983, + 991, + 997, + 1009, + 1013, + 1019, + 1021, + 1031, + 1033, + 1039, + 1049, + 1051, + 1061, + 1063, + 1069, + 1087, + 1091, + 1093, + 1097, + 1103, + 1109, + 1117, + 1123, + 1129, + 1151, + 1153, + 1163, + 1171, + 1181, + 1187, + 1193, + 1201, + 1213, + 1217, + 1223, + 1229, +] + +miller_rabin_test_count = 0 diff --git a/myenv/lib/python3.9/site-packages/ecdsa/rfc6979.py b/myenv/lib/python3.9/site-packages/ecdsa/rfc6979.py new file mode 100644 index 0000000..0728b5a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/rfc6979.py @@ -0,0 +1,113 @@ +""" +RFC 6979: + Deterministic Usage of the Digital Signature Algorithm (DSA) and + Elliptic Curve Digital Signature Algorithm (ECDSA) + + http://tools.ietf.org/html/rfc6979 + +Many thanks to Coda Hale for his implementation in Go language: + https://github.com/codahale/rfc6979 +""" + +import hmac +from binascii import hexlify +from .util import number_to_string, number_to_string_crop, bit_length +from ._compat import hmac_compat + + +# bit_length was defined in this module previously so keep it for backwards +# compatibility, will need to deprecate and remove it later +__all__ = ["bit_length", "bits2int", "bits2octets", "generate_k"] + + +def bits2int(data, qlen): + x = int(hexlify(data), 16) + l = len(data) * 8 + + if l > qlen: + return x >> (l - qlen) + return x + + +def bits2octets(data, order): + z1 = bits2int(data, bit_length(order)) + z2 = z1 - order + + if z2 < 0: + z2 = z1 + + return 
number_to_string_crop(z2, order) + + +# https://tools.ietf.org/html/rfc6979#section-3.2 +def generate_k(order, secexp, hash_func, data, retry_gen=0, extra_entropy=b""): + """ + Generate the ``k`` value - the nonce for DSA. + + :param int order: order of the DSA generator used in the signature + :param int secexp: secure exponent (private key) in numeric form + :param hash_func: reference to the same hash function used for generating + hash, like :py:class:`hashlib.sha1` + :param bytes data: hash in binary form of the signing data + :param int retry_gen: how many good 'k' values to skip before returning + :param bytes extra_entropy: additional added data in binary form as per + section-3.6 of rfc6979 + :rtype: int + """ + + qlen = bit_length(order) + holen = hash_func().digest_size + rolen = (qlen + 7) // 8 + bx = ( + hmac_compat(number_to_string(secexp, order)), + hmac_compat(bits2octets(data, order)), + hmac_compat(extra_entropy), + ) + + # Step B + v = b"\x01" * holen + + # Step C + k = b"\x00" * holen + + # Step D + + k = hmac.new(k, digestmod=hash_func) + k.update(v + b"\x00") + for i in bx: + k.update(i) + k = k.digest() + + # Step E + v = hmac.new(k, v, hash_func).digest() + + # Step F + k = hmac.new(k, digestmod=hash_func) + k.update(v + b"\x01") + for i in bx: + k.update(i) + k = k.digest() + + # Step G + v = hmac.new(k, v, hash_func).digest() + + # Step H + while True: + # Step H1 + t = b"" + + # Step H2 + while len(t) < rolen: + v = hmac.new(k, v, hash_func).digest() + t += v + + # Step H3 + secret = bits2int(t, qlen) + + if 1 <= secret < order: + if retry_gen <= 0: + return secret + retry_gen -= 1 + + k = hmac.new(k, v + b"\x00", hash_func).digest() + v = hmac.new(k, v, hash_func).digest() diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_curves.py b/myenv/lib/python3.9/site-packages/ecdsa/test_curves.py new file mode 100644 index 0000000..93b6c9b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_curves.py @@ -0,0 +1,361 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest + +import base64 +import pytest +from .curves import ( + Curve, + NIST256p, + curves, + UnknownCurveError, + PRIME_FIELD_OID, + curve_by_name, +) +from .ellipticcurve import CurveFp, PointJacobi, CurveEdTw +from . 
import der +from .util import number_to_string + + +class TestParameterEncoding(unittest.TestCase): + @classmethod + def setUpClass(cls): + # minimal, but with cofactor (excludes seed when compared to + # OpenSSL output) + cls.base64_params = ( + "MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP/////////" + "//////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12K" + "o6k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDyd" + "wN9gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1" + "AiEA/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=" + ) + + def test_from_pem(self): + pem_params = ( + "-----BEGIN EC PARAMETERS-----\n" + "MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP/////////\n" + "//////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12K\n" + "o6k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDyd\n" + "wN9gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1\n" + "AiEA/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=\n" + "-----END EC PARAMETERS-----\n" + ) + curve = Curve.from_pem(pem_params) + + self.assertIs(curve, NIST256p) + + def test_from_pem_with_explicit_when_explicit_disabled(self): + pem_params = ( + "-----BEGIN EC PARAMETERS-----\n" + "MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP/////////\n" + "//////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12K\n" + "o6k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDyd\n" + "wN9gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1\n" + "AiEA/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=\n" + "-----END EC PARAMETERS-----\n" + ) + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_pem(pem_params, ["named_curve"]) + + self.assertIn("explicit curve parameters not", str(e.exception)) + + def test_from_pem_with_named_curve_with_named_curve_disabled(self): + pem_params = ( + "-----BEGIN EC PARAMETERS-----\n" + "BggqhkjOPQMBBw==\n" + "-----END EC PARAMETERS-----\n" + ) + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_pem(pem_params, ["explicit"]) + + self.assertIn("named_curve curve parameters not", str(e.exception)) + + def test_from_pem_with_wrong_header(self): + pem_params = ( + "-----BEGIN PARAMETERS-----\n" + "MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP/////////\n" + "//////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12K\n" + "o6k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDyd\n" + "wN9gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1\n" + "AiEA/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=\n" + "-----END PARAMETERS-----\n" + ) + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_pem(pem_params) + + self.assertIn("PARAMETERS PEM header", str(e.exception)) + + def test_to_pem(self): + pem_params = ( + b"-----BEGIN EC PARAMETERS-----\n" + b"BggqhkjOPQMBBw==\n" + b"-----END EC PARAMETERS-----\n" + ) + encoding = NIST256p.to_pem() + + self.assertEqual(pem_params, encoding) + + def test_compare_with_different_object(self): + self.assertNotEqual(NIST256p, 256) + + def test_named_curve_params_der(self): + encoded = NIST256p.to_der() + + # just the encoding of the NIST256p OID (prime256v1) + self.assertEqual(b"\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07", encoded) + + def test_verify_that_default_is_named_curve_der(self): + encoded_default = NIST256p.to_der() + encoded_named = NIST256p.to_der("named_curve") + + self.assertEqual(encoded_default, encoded_named) + + def test_encoding_to_explicit_params(self): + encoded = NIST256p.to_der("explicit") + + 
self.assertEqual(encoded, bytes(base64.b64decode(self.base64_params))) + + def test_encoding_to_unsupported_type(self): + with self.assertRaises(ValueError) as e: + NIST256p.to_der("unsupported") + + self.assertIn("Only 'named_curve'", str(e.exception)) + + def test_encoding_to_explicit_compressed_params(self): + encoded = NIST256p.to_der("explicit", "compressed") + + compressed_base_point = ( + "MIHAAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP//////////" + "/////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6" + "k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEIQNrF9Hy4SxCR/i85uVjpEDydwN9" + "gS3rM6D0oTlF2JjClgIhAP////8AAAAA//////////+85vqtpxeehPO5ysL8YyVR" + "AgEB" + ) + + self.assertEqual( + encoded, bytes(base64.b64decode(compressed_base_point)) + ) + + def test_decoding_explicit_from_openssl(self): + # generated with openssl 1.1.1k using + # openssl ecparam -name P-256 -param_enc explicit -out /tmp/file.pem + p256_explicit = ( + "MIH3AgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP//////////" + "/////zBbBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6" + "k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsDFQDEnTYIhucEk2pmeOETnSa3gZ9+" + "kARBBGsX0fLhLEJH+Lzm5WOkQPJ3A32BLeszoPShOUXYmMKWT+NC4v4af5uO5+tK" + "fA+eFivOM1drMV7Oy7ZAaDe/UfUCIQD/////AAAAAP//////////vOb6racXnoTz" + "ucrC/GMlUQIBAQ==" + ) + + decoded = Curve.from_der(bytes(base64.b64decode(p256_explicit))) + + self.assertEqual(NIST256p, decoded) + + def test_decoding_well_known_from_explicit_params(self): + curve = Curve.from_der(bytes(base64.b64decode(self.base64_params))) + + self.assertIs(curve, NIST256p) + + def test_decoding_with_incorrect_valid_encodings(self): + with self.assertRaises(ValueError) as e: + Curve.from_der(b"", ["explicitCA"]) + + self.assertIn("Only named_curve", str(e.exception)) + + def test_compare_curves_with_different_generators(self): + curve_fp = CurveFp(23, 1, 7) + base_a = PointJacobi(curve_fp, 13, 3, 1, 9, generator=True) + base_b = PointJacobi(curve_fp, 1, 20, 1, 9, generator=True) + + curve_a = Curve("unknown", curve_fp, base_a, None) + curve_b = Curve("unknown", curve_fp, base_b, None) + + self.assertNotEqual(curve_a, curve_b) + + def test_default_encode_for_custom_curve(self): + curve_fp = CurveFp(23, 1, 7) + base_point = PointJacobi(curve_fp, 13, 3, 1, 9, generator=True) + + curve = Curve("unknown", curve_fp, base_point, None) + + encoded = curve.to_der() + + decoded = Curve.from_der(encoded) + + self.assertEqual(curve, decoded) + + expected = "MCECAQEwDAYHKoZIzj0BAQIBFzAGBAEBBAEHBAMEDQMCAQk=" + + self.assertEqual(encoded, bytes(base64.b64decode(expected))) + + def test_named_curve_encode_for_custom_curve(self): + curve_fp = CurveFp(23, 1, 7) + base_point = PointJacobi(curve_fp, 13, 3, 1, 9, generator=True) + + curve = Curve("unknown", curve_fp, base_point, None) + + with self.assertRaises(UnknownCurveError) as e: + curve.to_der("named_curve") + + self.assertIn("Can't encode curve", str(e.exception)) + + def test_try_decoding_binary_explicit(self): + sect113r1_explicit = ( + "MIGRAgEBMBwGByqGSM49AQIwEQIBcQYJKoZIzj0BAgMCAgEJMDkEDwAwiCUMpufH" + "/mSc6Fgg9wQPAOi+5NPiJgdEGIvg6ccjAxUAEOcjqxTWluZ2h1YVF1b+v4/LSakE" + "HwQAnXNhbzX0qxQH1zViwQ8ApSgwJ3lY7oTRMV7TGIYCDwEAAAAAAAAA2czsijnl" + "bwIBAg==" + ) + + with self.assertRaises(UnknownCurveError) as e: + Curve.from_der(base64.b64decode(sect113r1_explicit)) + + self.assertIn("Characteristic 2 curves unsupported", str(e.exception)) + + def test_decode_malformed_named_curve(self): + bad_der = der.encode_oid(*NIST256p.oid) + der.encode_integer(1) + + with 
self.assertRaises(der.UnexpectedDER) as e: + Curve.from_der(bad_der) + + self.assertIn("Unexpected data after OID", str(e.exception)) + + def test_decode_malformed_explicit_garbage_after_ECParam(self): + bad_der = bytes( + base64.b64decode(self.base64_params) + ) + der.encode_integer(1) + + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_der(bad_der) + + self.assertIn("Unexpected data after ECParameters", str(e.exception)) + + def test_decode_malformed_unknown_version_number(self): + bad_der = der.encode_sequence(der.encode_integer(2)) + + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_der(bad_der) + + self.assertIn("Unknown parameter encoding format", str(e.exception)) + + def test_decode_malformed_unknown_field_type(self): + curve_p = NIST256p.curve.p() + bad_der = der.encode_sequence( + der.encode_integer(1), + der.encode_sequence( + der.encode_oid(1, 2, 3), der.encode_integer(curve_p) + ), + der.encode_sequence( + der.encode_octet_string( + number_to_string(NIST256p.curve.a() % curve_p, curve_p) + ), + der.encode_octet_string( + number_to_string(NIST256p.curve.b(), curve_p) + ), + ), + der.encode_octet_string( + NIST256p.generator.to_bytes("uncompressed") + ), + der.encode_integer(NIST256p.generator.order()), + ) + + with self.assertRaises(UnknownCurveError) as e: + Curve.from_der(bad_der) + + self.assertIn("Unknown field type: (1, 2, 3)", str(e.exception)) + + def test_decode_malformed_garbage_after_prime(self): + curve_p = NIST256p.curve.p() + bad_der = der.encode_sequence( + der.encode_integer(1), + der.encode_sequence( + der.encode_oid(*PRIME_FIELD_OID), + der.encode_integer(curve_p), + der.encode_integer(1), + ), + der.encode_sequence( + der.encode_octet_string( + number_to_string(NIST256p.curve.a() % curve_p, curve_p) + ), + der.encode_octet_string( + number_to_string(NIST256p.curve.b(), curve_p) + ), + ), + der.encode_octet_string( + NIST256p.generator.to_bytes("uncompressed") + ), + der.encode_integer(NIST256p.generator.order()), + ) + + with self.assertRaises(der.UnexpectedDER) as e: + Curve.from_der(bad_der) + + self.assertIn("Prime-p element", str(e.exception)) + + +class TestCurveSearching(unittest.TestCase): + def test_correct_name(self): + c = curve_by_name("NIST256p") + self.assertIs(c, NIST256p) + + def test_openssl_name(self): + c = curve_by_name("prime256v1") + self.assertIs(c, NIST256p) + + def test_unknown_curve(self): + with self.assertRaises(UnknownCurveError) as e: + curve_by_name("foo bar") + + self.assertIn( + "name 'foo bar' unknown, only curves supported: " + "['NIST192p', 'NIST224p'", + str(e.exception), + ) + + def test_with_None_as_parameter(self): + with self.assertRaises(UnknownCurveError) as e: + curve_by_name(None) + + self.assertIn( + "name None unknown, only curves supported: " + "['NIST192p', 'NIST224p'", + str(e.exception), + ) + + +@pytest.mark.parametrize("curve", curves, ids=[i.name for i in curves]) +def test_curve_params_encode_decode_named(curve): + ret = Curve.from_der(curve.to_der("named_curve")) + + assert curve == ret + + +@pytest.mark.parametrize("curve", curves, ids=[i.name for i in curves]) +def test_curve_params_encode_decode_explicit(curve): + if isinstance(curve.curve, CurveEdTw): + with pytest.raises(UnknownCurveError): + curve.to_der("explicit") + else: + ret = Curve.from_der(curve.to_der("explicit")) + + assert curve == ret + + +@pytest.mark.parametrize("curve", curves, ids=[i.name for i in curves]) +def test_curve_params_encode_decode_default(curve): + ret = Curve.from_der(curve.to_der()) + + assert 
curve == ret
+
+
+@pytest.mark.parametrize("curve", curves, ids=[i.name for i in curves])
+def test_curve_params_encode_decode_explicit_compressed(curve):
+    if isinstance(curve.curve, CurveEdTw):
+        with pytest.raises(UnknownCurveError):
+            curve.to_der("explicit", "compressed")
+    else:
+        ret = Curve.from_der(curve.to_der("explicit", "compressed"))
+
+        assert curve == ret
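
The curve-parameter tests above all reduce to one round-trip property; a
minimal standalone sketch of it, assuming the vendored `ecdsa` package is
importable (the variable names below are the illustration's own):

    from ecdsa.curves import Curve, NIST256p

    # "named_curve" stores only the curve's OID; "explicit" stores the full
    # ECParameters structure (prime, coefficients, base point, order)
    named = NIST256p.to_der("named_curve")
    explicit = NIST256p.to_der("explicit")

    # both forms decode back to the same well-known curve object
    assert Curve.from_der(named) == NIST256p
    assert Curve.from_der(explicit) == NIST256p

diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_der.py b/myenv/lib/python3.9/site-packages/ecdsa/test_der.py
new file mode 100644
index 0000000..0ca5bd7
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/ecdsa/test_der.py
@@ -0,0 +1,476 @@
+# compatibility with Python 2.6, for that we need unittest2 package,
+# which is not available on 3.3 or 3.4
+import warnings
+from binascii import hexlify
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+from six import b
+import hypothesis.strategies as st
+from hypothesis import given
+import pytest
+from ._compat import str_idx_as_int
+from .curves import NIST256p, NIST224p
+from .der import (
+    remove_integer,
+    UnexpectedDER,
+    read_length,
+    encode_bitstring,
+    remove_bitstring,
+    remove_object,
+    encode_oid,
+    remove_constructed,
+    remove_octet_string,
+    remove_sequence,
+)
+
+
+class TestRemoveInteger(unittest.TestCase):
+    # DER requires the integers to be 0-padded only if they would be
+    # interpreted as negative, check if those errors are detected
+    def test_non_minimal_encoding(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_integer(b("\x02\x02\x00\x01"))
+
+    def test_negative_with_high_bit_set(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_integer(b("\x02\x01\x80"))
+
+    def test_minimal_with_high_bit_set(self):
+        val, rem = remove_integer(b("\x02\x02\x00\x80"))
+
+        self.assertEqual(val, 0x80)
+        self.assertEqual(rem, b"")
+
+    def test_two_zero_bytes_with_high_bit_set(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_integer(b("\x02\x03\x00\x00\xff"))
+
+    def test_zero_length_integer(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_integer(b("\x02\x00"))
+
+    def test_empty_string(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_integer(b(""))
+
+    def test_encoding_of_zero(self):
+        val, rem = remove_integer(b("\x02\x01\x00"))
+
+        self.assertEqual(val, 0)
+        self.assertEqual(rem, b"")
+
+    def test_encoding_of_127(self):
+        val, rem = remove_integer(b("\x02\x01\x7f"))
+
+        self.assertEqual(val, 127)
+        self.assertEqual(rem, b"")
+
+    def test_encoding_of_128(self):
+        val, rem = remove_integer(b("\x02\x02\x00\x80"))
+
+        self.assertEqual(val, 128)
+        self.assertEqual(rem, b"")
+
+    def test_wrong_tag(self):
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_integer(b"\x01\x02\x00\x80")
+
+        self.assertIn("wanted type 'integer'", str(e.exception))
+
+    def test_wrong_length(self):
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_integer(b"\x02\x03\x00\x80")
+
+        self.assertIn("Length longer", str(e.exception))
+
+
+class TestReadLength(unittest.TestCase):
+    # DER requires the lengths between 0 and 127 to be encoded using the short
+    # form and lengths above that encoded with minimal number of bytes
+    # necessary
+    def test_zero_length(self):
+        self.assertEqual((0, 1), read_length(b("\x00")))
+
+    def test_two_byte_zero_length(self):
+        with self.assertRaises(UnexpectedDER):
+            read_length(b("\x81\x00"))
+
+    def test_two_byte_small_length(self):
+        with self.assertRaises(UnexpectedDER):
+            read_length(b("\x81\x7f"))
+
+    def test_long_form_with_zero_length(self):
+        with self.assertRaises(UnexpectedDER):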
read_length(b("\x80")) + + def test_smallest_two_byte_length(self): + self.assertEqual((128, 2), read_length(b("\x81\x80"))) + + def test_zero_padded_length(self): + with self.assertRaises(UnexpectedDER): + read_length(b("\x82\x00\x80")) + + def test_two_three_byte_length(self): + self.assertEqual((256, 3), read_length(b"\x82\x01\x00")) + + def test_empty_string(self): + with self.assertRaises(UnexpectedDER): + read_length(b("")) + + def test_length_overflow(self): + with self.assertRaises(UnexpectedDER): + read_length(b("\x83\x01\x00")) + + +class TestEncodeBitstring(unittest.TestCase): + # DER requires BIT STRINGS to include a number of padding bits in the + # encoded byte string, that padding must be between 0 and 7 + + def test_old_call_convention(self): + """This is the old way to use the function.""" + warnings.simplefilter("always") + with pytest.warns(DeprecationWarning) as warns: + der = encode_bitstring(b"\x00\xff") + + self.assertEqual(len(warns), 1) + self.assertIn( + "unused= needs to be specified", warns[0].message.args[0] + ) + + self.assertEqual(der, b"\x03\x02\x00\xff") + + def test_new_call_convention(self): + """This is how it should be called now.""" + warnings.simplefilter("always") + with pytest.warns(None) as warns: + der = encode_bitstring(b"\xff", 0) + + # verify that new call convention doesn't raise Warnings + self.assertEqual(len(warns), 0) + + self.assertEqual(der, b"\x03\x02\x00\xff") + + def test_implicit_unused_bits(self): + """ + Writing bit string with already included the number of unused bits. + """ + warnings.simplefilter("always") + with pytest.warns(None) as warns: + der = encode_bitstring(b"\x00\xff", None) + + # verify that new call convention doesn't raise Warnings + self.assertEqual(len(warns), 0) + + self.assertEqual(der, b"\x03\x02\x00\xff") + + def test_explicit_unused_bits(self): + der = encode_bitstring(b"\xff\xf0", 4) + + self.assertEqual(der, b"\x03\x03\x04\xff\xf0") + + def test_empty_string(self): + self.assertEqual(encode_bitstring(b"", 0), b"\x03\x01\x00") + + def test_invalid_unused_count(self): + with self.assertRaises(ValueError): + encode_bitstring(b"\xff\x00", 8) + + def test_invalid_unused_with_empty_string(self): + with self.assertRaises(ValueError): + encode_bitstring(b"", 1) + + def test_non_zero_padding_bits(self): + with self.assertRaises(ValueError): + encode_bitstring(b"\xff", 2) + + +class TestRemoveBitstring(unittest.TestCase): + def test_old_call_convention(self): + """This is the old way to call the function.""" + warnings.simplefilter("always") + with pytest.warns(DeprecationWarning) as warns: + bits, rest = remove_bitstring(b"\x03\x02\x00\xff") + + self.assertEqual(len(warns), 1) + self.assertIn( + "expect_unused= needs to be specified", warns[0].message.args[0] + ) + + self.assertEqual(bits, b"\x00\xff") + self.assertEqual(rest, b"") + + def test_new_call_convention(self): + warnings.simplefilter("always") + with pytest.warns(None) as warns: + bits, rest = remove_bitstring(b"\x03\x02\x00\xff", 0) + + self.assertEqual(len(warns), 0) + + self.assertEqual(bits, b"\xff") + self.assertEqual(rest, b"") + + def test_implicit_unexpected_unused(self): + warnings.simplefilter("always") + with pytest.warns(None) as warns: + bits, rest = remove_bitstring(b"\x03\x02\x00\xff", None) + + self.assertEqual(len(warns), 0) + + self.assertEqual(bits, (b"\xff", 0)) + self.assertEqual(rest, b"") + + def test_with_padding(self): + ret, rest = remove_bitstring(b"\x03\x02\x04\xf0", None) + + self.assertEqual(ret, (b"\xf0", 4)) + 
self.assertEqual(rest, b"") + + def test_not_a_bitstring(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x02\x02\x00\xff", None) + + def test_empty_encoding(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03\x00", None) + + def test_empty_string(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"", None) + + def test_no_length(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03", None) + + def test_unexpected_number_of_unused_bits(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03\x02\x00\xff", 1) + + def test_invalid_encoding_of_unused_bits(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03\x03\x08\xff\x00", None) + + def test_invalid_encoding_of_empty_string(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03\x01\x01", None) + + def test_invalid_padding_bits(self): + with self.assertRaises(UnexpectedDER): + remove_bitstring(b"\x03\x02\x01\xff", None) + + +class TestStrIdxAsInt(unittest.TestCase): + def test_str(self): + self.assertEqual(115, str_idx_as_int("str", 0)) + + def test_bytes(self): + self.assertEqual(115, str_idx_as_int(b"str", 0)) + + def test_bytearray(self): + self.assertEqual(115, str_idx_as_int(bytearray(b"str"), 0)) + + +class TestEncodeOid(unittest.TestCase): + def test_pub_key_oid(self): + oid_ecPublicKey = encode_oid(1, 2, 840, 10045, 2, 1) + self.assertEqual(hexlify(oid_ecPublicKey), b("06072a8648ce3d0201")) + + def test_nist224p_oid(self): + self.assertEqual(hexlify(NIST224p.encoded_oid), b("06052b81040021")) + + def test_nist256p_oid(self): + self.assertEqual( + hexlify(NIST256p.encoded_oid), b"06082a8648ce3d030107" + ) + + def test_large_second_subid(self): + # from X.690, section 8.19.5 + oid = encode_oid(2, 999, 3) + self.assertEqual(oid, b"\x06\x03\x88\x37\x03") + + def test_with_two_subids(self): + oid = encode_oid(2, 999) + self.assertEqual(oid, b"\x06\x02\x88\x37") + + def test_zero_zero(self): + oid = encode_oid(0, 0) + self.assertEqual(oid, b"\x06\x01\x00") + + def test_with_wrong_types(self): + with self.assertRaises((TypeError, AssertionError)): + encode_oid(0, None) + + def test_with_small_first_large_second(self): + with self.assertRaises(AssertionError): + encode_oid(1, 40) + + def test_small_first_max_second(self): + oid = encode_oid(1, 39) + self.assertEqual(oid, b"\x06\x01\x4f") + + def test_with_invalid_first(self): + with self.assertRaises(AssertionError): + encode_oid(3, 39) + + +class TestRemoveObject(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.oid_ecPublicKey = encode_oid(1, 2, 840, 10045, 2, 1) + + def test_pub_key_oid(self): + oid, rest = remove_object(self.oid_ecPublicKey) + self.assertEqual(rest, b"") + self.assertEqual(oid, (1, 2, 840, 10045, 2, 1)) + + def test_with_extra_bytes(self): + oid, rest = remove_object(self.oid_ecPublicKey + b"more") + self.assertEqual(rest, b"more") + self.assertEqual(oid, (1, 2, 840, 10045, 2, 1)) + + def test_with_large_second_subid(self): + # from X.690, section 8.19.5 + oid, rest = remove_object(b"\x06\x03\x88\x37\x03") + self.assertEqual(rest, b"") + self.assertEqual(oid, (2, 999, 3)) + + def test_with_padded_first_subid(self): + with self.assertRaises(UnexpectedDER): + remove_object(b"\x06\x02\x80\x00") + + def test_with_padded_second_subid(self): + with self.assertRaises(UnexpectedDER): + remove_object(b"\x06\x04\x88\x37\x80\x01") + + def test_with_missing_last_byte_of_multi_byte(self): + with self.assertRaises(UnexpectedDER): + 
remove_object(b"\x06\x03\x88\x37\x83")
+
+    def test_with_two_subids(self):
+        oid, rest = remove_object(b"\x06\x02\x88\x37")
+        self.assertEqual(rest, b"")
+        self.assertEqual(oid, (2, 999))
+
+    def test_zero_zero(self):
+        oid, rest = remove_object(b"\x06\x01\x00")
+        self.assertEqual(rest, b"")
+        self.assertEqual(oid, (0, 0))
+
+    def test_empty_string(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"")
+
+    def test_missing_length(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"\x06")
+
+    def test_empty_oid(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"\x06\x00")
+
+    def test_empty_oid_overflow(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"\x06\x01")
+
+    def test_with_wrong_type(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"\x04\x02\x88\x37")
+
+    def test_with_too_long_length(self):
+        with self.assertRaises(UnexpectedDER):
+            remove_object(b"\x06\x03\x88\x37")
+
+
+class TestRemoveConstructed(unittest.TestCase):
+    def test_simple(self):
+        data = b"\xa1\x02\xff\xaa"
+
+        tag, body, rest = remove_constructed(data)
+
+        self.assertEqual(tag, 0x01)
+        self.assertEqual(body, b"\xff\xaa")
+        self.assertEqual(rest, b"")
+
+    def test_with_malformed_tag(self):
+        data = b"\x01\x02\xff\xaa"
+
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_constructed(data)
+
+        self.assertIn("constructed tag", str(e.exception))
+
+
+class TestRemoveOctetString(unittest.TestCase):
+    def test_simple(self):
+        data = b"\x04\x03\xaa\xbb\xcc"
+        body, rest = remove_octet_string(data)
+        self.assertEqual(body, b"\xaa\xbb\xcc")
+        self.assertEqual(rest, b"")
+
+    def test_with_malformed_tag(self):
+        data = b"\x03\x03\xaa\xbb\xcc"
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_octet_string(data)
+
+        self.assertIn("octetstring", str(e.exception))
+
+
+class TestRemoveSequence(unittest.TestCase):
+    def test_simple(self):
+        data = b"\x30\x02\xff\xaa"
+        body, rest = remove_sequence(data)
+        self.assertEqual(body, b"\xff\xaa")
+        self.assertEqual(rest, b"")
+
+    def test_with_empty_string(self):
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_sequence(b"")
+
+        self.assertIn("Empty string", str(e.exception))
+
+    def test_with_wrong_tag(self):
+        data = b"\x20\x02\xff\xaa"
+
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_sequence(data)
+
+        self.assertIn("wanted type 'sequence'", str(e.exception))
+
+    def test_with_wrong_length(self):
+        data = b"\x30\x03\xff\xaa"
+
+        with self.assertRaises(UnexpectedDER) as e:
+            remove_sequence(data)
+
+        self.assertIn("Length longer", str(e.exception))
+
+
+@st.composite
+def st_oid(draw, max_value=2**512, max_size=50):
+    """
+    Hypothesis strategy that returns valid OBJECT IDENTIFIERs as tuples
+
+    :param max_value: maximum value of any single sub-identifier
+    :param max_size: maximum length of the generated OID
+    """
+    first = draw(st.integers(min_value=0, max_value=2))
+    if first < 2:
+        second = draw(st.integers(min_value=0, max_value=39))
+    else:
+        second = draw(st.integers(min_value=0, max_value=max_value))
+    rest = draw(
+        st.lists(
+            st.integers(min_value=0, max_value=max_value), max_size=max_size
+        )
+    )
+    return (first, second) + tuple(rest)
+
+
+@given(st_oid())
+def test_oids(ids):
+    encoded_oid = encode_oid(*ids)
+    decoded_oid, rest = remove_object(encoded_oid)
+    assert rest == b""
+    assert decoded_oid == ids
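
The OBJECT IDENTIFIER tests above follow X.690 section 8.19: the first two
sub-identifiers are packed into a single value of 40 * first + second, and
values that don't fit in 7 bits are written base-128 with the high bit set
on every byte but the last. A small sketch of that rule, assuming the
vendored `ecdsa` package is importable:

    from ecdsa.der import encode_oid, remove_object

    # 40 * 2 + 999 = 1079 = 8 * 128 + 55, so it encodes as 0x88 0x37
    encoded = encode_oid(2, 999, 3)
    assert encoded == b"\x06\x03\x88\x37\x03"

    # decoding recovers the tuple and returns any trailing bytes
    assert remove_object(encoded) == ((2, 999, 3), b"")

diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_ecdh.py b/myenv/lib/python3.9/site-packages/ecdsa/test_ecdh.py
new file mode 100644
index 0000000..872d4d1
--- 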
/dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_ecdh.py @@ -0,0 +1,441 @@ +import os +import shutil +import subprocess +import pytest +from binascii import unhexlify + +try: + import unittest2 as unittest +except ImportError: + import unittest + +from .curves import ( + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + BRAINPOOLP160r1, +) +from .curves import curves +from .ecdh import ( + ECDH, + InvalidCurveError, + InvalidSharedSecretError, + NoKeyError, + NoCurveError, +) +from .keys import SigningKey, VerifyingKey +from .ellipticcurve import CurveEdTw + + +@pytest.mark.parametrize( + "vcurve", + curves, + ids=[curve.name for curve in curves], +) +def test_ecdh_each(vcurve): + if isinstance(vcurve.curve, CurveEdTw): + pytest.skip("ECDH is not supported for Edwards curves") + ecdh1 = ECDH(curve=vcurve) + ecdh2 = ECDH(curve=vcurve) + + ecdh2.generate_private_key() + ecdh1.load_received_public_key(ecdh2.get_public_key()) + ecdh2.load_received_public_key(ecdh1.generate_private_key()) + + secret1 = ecdh1.generate_sharedsecret_bytes() + secret2 = ecdh2.generate_sharedsecret_bytes() + assert secret1 == secret2 + + +def test_ecdh_both_keys_present(): + key1 = SigningKey.generate(BRAINPOOLP160r1) + key2 = SigningKey.generate(BRAINPOOLP160r1) + + ecdh1 = ECDH(BRAINPOOLP160r1, key1, key2.verifying_key) + ecdh2 = ECDH(private_key=key2, public_key=key1.verifying_key) + + secret1 = ecdh1.generate_sharedsecret_bytes() + secret2 = ecdh2.generate_sharedsecret_bytes() + + assert secret1 == secret2 + + +def test_ecdh_no_public_key(): + ecdh1 = ECDH(curve=NIST192p) + + with pytest.raises(NoKeyError): + ecdh1.generate_sharedsecret_bytes() + + ecdh1.generate_private_key() + + with pytest.raises(NoKeyError): + ecdh1.generate_sharedsecret_bytes() + + +class TestECDH(unittest.TestCase): + def test_load_key_from_wrong_curve(self): + ecdh1 = ECDH() + ecdh1.set_curve(NIST192p) + + key1 = SigningKey.generate(BRAINPOOLP160r1) + + with self.assertRaises(InvalidCurveError) as e: + ecdh1.load_private_key(key1) + + self.assertIn("Curve mismatch", str(e.exception)) + + def test_generate_without_curve(self): + ecdh1 = ECDH() + + with self.assertRaises(NoCurveError) as e: + ecdh1.generate_private_key() + + self.assertIn("Curve must be set", str(e.exception)) + + def test_load_bytes_without_curve_set(self): + ecdh1 = ECDH() + + with self.assertRaises(NoCurveError) as e: + ecdh1.load_private_key_bytes(b"\x01" * 32) + + self.assertIn("Curve must be set", str(e.exception)) + + def test_set_curve_from_received_public_key(self): + ecdh1 = ECDH() + + key1 = SigningKey.generate(BRAINPOOLP160r1) + + ecdh1.load_received_public_key(key1.verifying_key) + + self.assertEqual(ecdh1.curve, BRAINPOOLP160r1) + + +def test_ecdh_wrong_public_key_curve(): + ecdh1 = ECDH(curve=NIST192p) + ecdh1.generate_private_key() + ecdh2 = ECDH(curve=NIST256p) + ecdh2.generate_private_key() + + with pytest.raises(InvalidCurveError): + ecdh1.load_received_public_key(ecdh2.get_public_key()) + + with pytest.raises(InvalidCurveError): + ecdh2.load_received_public_key(ecdh1.get_public_key()) + + ecdh1.public_key = ecdh2.get_public_key() + ecdh2.public_key = ecdh1.get_public_key() + + with pytest.raises(InvalidCurveError): + ecdh1.generate_sharedsecret_bytes() + + with pytest.raises(InvalidCurveError): + ecdh2.generate_sharedsecret_bytes() + + +def test_ecdh_invalid_shared_secret_curve(): + ecdh1 = ECDH(curve=NIST256p) + ecdh1.generate_private_key() + + ecdh1.load_received_public_key( + SigningKey.generate(NIST256p).get_verifying_key() + ) 
+ + ecdh1.private_key.privkey.secret_multiplier = ecdh1.private_key.curve.order + + with pytest.raises(InvalidSharedSecretError): + ecdh1.generate_sharedsecret_bytes() + + +# https://github.com/scogliani/ecc-test-vectors/blob/master/ecdh_kat/secp192r1.txt +# https://github.com/scogliani/ecc-test-vectors/blob/master/ecdh_kat/secp256r1.txt +# https://github.com/coruus/nist-testvectors/blob/master/csrc.nist.gov/groups/STM/cavp/documents/components/ecccdhtestvectors/KAS_ECC_CDH_PrimitiveTest.txt +@pytest.mark.parametrize( + "curve,privatekey,pubkey,secret", + [ + pytest.param( + NIST192p, + "f17d3fea367b74d340851ca4270dcb24c271f445bed9d527", + "42ea6dd9969dd2a61fea1aac7f8e98edcc896c6e55857cc0" + "dfbe5d7c61fac88b11811bde328e8a0d12bf01a9d204b523", + "803d8ab2e5b6e6fca715737c3a82f7ce3c783124f6d51cd0", + id="NIST192p-1", + ), + pytest.param( + NIST192p, + "56e853349d96fe4c442448dacb7cf92bb7a95dcf574a9bd5", + "deb5712fa027ac8d2f22c455ccb73a91e17b6512b5e030e7" + "7e2690a02cc9b28708431a29fb54b87b1f0c14e011ac2125", + "c208847568b98835d7312cef1f97f7aa298283152313c29d", + id="NIST192p-2", + ), + pytest.param( + NIST192p, + "c6ef61fe12e80bf56f2d3f7d0bb757394519906d55500949", + "4edaa8efc5a0f40f843663ec5815e7762dddc008e663c20f" + "0a9f8dc67a3e60ef6d64b522185d03df1fc0adfd42478279", + "87229107047a3b611920d6e3b2c0c89bea4f49412260b8dd", + id="NIST192p-3", + ), + pytest.param( + NIST192p, + "e6747b9c23ba7044f38ff7e62c35e4038920f5a0163d3cda", + "8887c276edeed3e9e866b46d58d895c73fbd80b63e382e88" + "04c5097ba6645e16206cfb70f7052655947dd44a17f1f9d5", + "eec0bed8fc55e1feddc82158fd6dc0d48a4d796aaf47d46c", + id="NIST192p-4", + ), + pytest.param( + NIST192p, + "beabedd0154a1afcfc85d52181c10f5eb47adc51f655047d", + "0d045f30254adc1fcefa8a5b1f31bf4e739dd327cd18d594" + "542c314e41427c08278a08ce8d7305f3b5b849c72d8aff73", + "716e743b1b37a2cd8479f0a3d5a74c10ba2599be18d7e2f4", + id="NIST192p-5", + ), + pytest.param( + NIST192p, + "cf70354226667321d6e2baf40999e2fd74c7a0f793fa8699", + "fb35ca20d2e96665c51b98e8f6eb3d79113508d8bccd4516" + "368eec0d5bfb847721df6aaff0e5d48c444f74bf9cd8a5a7", + "f67053b934459985a315cb017bf0302891798d45d0e19508", + id="NIST192p-6", + ), + pytest.param( + NIST224p, + "8346a60fc6f293ca5a0d2af68ba71d1dd389e5e40837942df3e43cbd", + "af33cd0629bc7e996320a3f40368f74de8704fa37b8fab69abaae280" + "882092ccbba7930f419a8a4f9bb16978bbc3838729992559a6f2e2d7", + "7d96f9a3bd3c05cf5cc37feb8b9d5209d5c2597464dec3e9983743e8", + id="NIST224p", + ), + pytest.param( + NIST256p, + "7d7dc5f71eb29ddaf80d6214632eeae03d9058af1fb6d22ed80badb62bc1a534", + "700c48f77f56584c5cc632ca65640db91b6bacce3a4df6b42ce7cc838833d287" + "db71e509e3fd9b060ddb20ba5c51dcc5948d46fbf640dfe0441782cab85fa4ac", + "46fc62106420ff012e54a434fbdd2d25ccc5852060561e68040dd7778997bd7b", + id="NIST256p-1", + ), + pytest.param( + NIST256p, + "38f65d6dce47676044d58ce5139582d568f64bb16098d179dbab07741dd5caf5", + "809f04289c64348c01515eb03d5ce7ac1a8cb9498f5caa50197e58d43a86a7ae" + "b29d84e811197f25eba8f5194092cb6ff440e26d4421011372461f579271cda3", + "057d636096cb80b67a8c038c890e887d1adfa4195e9b3ce241c8a778c59cda67", + id="NIST256p-2", + ), + pytest.param( + NIST256p, + "1accfaf1b97712b85a6f54b148985a1bdc4c9bec0bd258cad4b3d603f49f32c8", + "a2339c12d4a03c33546de533268b4ad667debf458b464d77443636440ee7fec3" + "ef48a3ab26e20220bcda2c1851076839dae88eae962869a497bf73cb66faf536", + "2d457b78b4614132477618a5b077965ec90730a8c81a1c75d6d4ec68005d67ec", + id="NIST256p-3", + ), + pytest.param( + NIST256p, + "207c43a79bfee03db6f4b944f53d2fb76cc49ef1c9c4d34d51b6c65c4db6932d", 
+ "df3989b9fa55495719b3cf46dccd28b5153f7808191dd518eff0c3cff2b705ed" + "422294ff46003429d739a33206c8752552c8ba54a270defc06e221e0feaf6ac4", + "96441259534b80f6aee3d287a6bb17b5094dd4277d9e294f8fe73e48bf2a0024", + id="NIST256p-4", + ), + pytest.param( + NIST256p, + "59137e38152350b195c9718d39673d519838055ad908dd4757152fd8255c09bf", + "41192d2813e79561e6a1d6f53c8bc1a433a199c835e141b05a74a97b0faeb922" + "1af98cc45e98a7e041b01cf35f462b7562281351c8ebf3ffa02e33a0722a1328", + "19d44c8d63e8e8dd12c22a87b8cd4ece27acdde04dbf47f7f27537a6999a8e62", + id="NIST256p-5", + ), + pytest.param( + NIST256p, + "f5f8e0174610a661277979b58ce5c90fee6c9b3bb346a90a7196255e40b132ef", + "33e82092a0f1fb38f5649d5867fba28b503172b7035574bf8e5b7100a3052792" + "f2cf6b601e0a05945e335550bf648d782f46186c772c0f20d3cd0d6b8ca14b2f", + "664e45d5bba4ac931cd65d52017e4be9b19a515f669bea4703542a2c525cd3d3", + id="NIST256p-6", + ), + pytest.param( + NIST384p, + "3cc3122a68f0d95027ad38c067916ba0eb8c38894d22e1b1" + "5618b6818a661774ad463b205da88cf699ab4d43c9cf98a1", + "a7c76b970c3b5fe8b05d2838ae04ab47697b9eaf52e76459" + "2efda27fe7513272734466b400091adbf2d68c58e0c50066" + "ac68f19f2e1cb879aed43a9969b91a0839c4c38a49749b66" + "1efedf243451915ed0905a32b060992b468c64766fc8437a", + "5f9d29dc5e31a163060356213669c8ce132e22f57c9a04f4" + "0ba7fcead493b457e5621e766c40a2e3d4d6a04b25e533f1", + id="NIST384p", + ), + pytest.param( + NIST521p, + "017eecc07ab4b329068fba65e56a1f8890aa935e57134ae0ffcce802735151f4ea" + "c6564f6ee9974c5e6887a1fefee5743ae2241bfeb95d5ce31ddcb6f9edb4d6fc47", + "00685a48e86c79f0f0875f7bc18d25eb5fc8c0b07e5da4f4370f3a949034085433" + "4b1e1b87fa395464c60626124a4e70d0f785601d37c09870ebf176666877a2046d" + "01ba52c56fc8776d9e8f5db4f0cc27636d0b741bbe05400697942e80b739884a83" + "bde99e0f6716939e632bc8986fa18dccd443a348b6c3e522497955a4f3c302f676", + "005fc70477c3e63bc3954bd0df3ea0d1f41ee21746ed95fc5e1fdf90930d5e1366" + "72d72cc770742d1711c3c3a4c334a0ad9759436a4d3c5bf6e74b9578fac148c831", + id="NIST521p", + ), + ], +) +def test_ecdh_NIST(curve, privatekey, pubkey, secret): + ecdh = ECDH(curve=curve) + ecdh.load_private_key_bytes(unhexlify(privatekey)) + ecdh.load_received_public_key_bytes(unhexlify(pubkey)) + + sharedsecret = ecdh.generate_sharedsecret_bytes() + + assert sharedsecret == unhexlify(secret) + + +pem_local_private_key = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n" + "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n" + "bA==\n" + "-----END EC PRIVATE KEY-----\n" +) +der_local_private_key = ( + "305f02010104185ec8420bd6ef9252a942e989043ca29f561fa525770eb1c5a00a06082a864" + "8ce3d030101a13403320004b88177d084ef17f5e45639408028360f9f59b4a4d7264e62da06" + "51dce47a35a4c5b45cf51593423a8b557b9c2099f36c" +) +pem_remote_public_key = ( + "-----BEGIN PUBLIC KEY-----\n" + "MEkwEwYHKoZIzj0CAQYIKoZIzj0DAQEDMgAEuIF30ITvF/XkVjlAgCg2D59ZtKTX\n" + "Jk5i2gZR3OR6NaTFtFz1FZNCOotVe5wgmfNs\n" + "-----END PUBLIC KEY-----\n" +) +der_remote_public_key = ( + "3049301306072a8648ce3d020106082a8648ce3d03010103320004b88177d084ef17f5e4563" + "9408028360f9f59b4a4d7264e62da0651dce47a35a4c5b45cf51593423a8b557b9c2099f36c" +) +gshared_secret = "8f457e34982478d1c34b9cd2d0c15911b72dd60d869e2cea" + + +def test_ecdh_pem(): + ecdh = ECDH() + ecdh.load_private_key_pem(pem_local_private_key) + ecdh.load_received_public_key_pem(pem_remote_public_key) + + sharedsecret = ecdh.generate_sharedsecret_bytes() + + assert sharedsecret == unhexlify(gshared_secret) + + +def test_ecdh_der(): + ecdh = 
ECDH()
+    ecdh.load_private_key_der(unhexlify(der_local_private_key))
+    ecdh.load_received_public_key_der(unhexlify(der_remote_public_key))
+
+    sharedsecret = ecdh.generate_sharedsecret_bytes()
+
+    assert sharedsecret == unhexlify(gshared_secret)
+
+
+# Exception classes used by run_openssl.
+class RunOpenSslError(Exception):
+    pass
+
+
+def run_openssl(cmd):
+    OPENSSL = "openssl"
+    p = subprocess.Popen(
+        [OPENSSL] + cmd.split(),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+    )
+    stdout, ignored = p.communicate()
+    if p.returncode != 0:
+        raise RunOpenSslError(
+            "cmd '%s %s' failed: rc=%s, stdout/err was %s"
+            % (OPENSSL, cmd, p.returncode, stdout)
+        )
+    return stdout.decode()
+
+
+OPENSSL_SUPPORTED_CURVES = set(
+    c.split(":")[0].strip()
+    for c in run_openssl("ecparam -list_curves").split("\n")
+)
+
+
+@pytest.mark.parametrize(
+    "vcurve",
+    curves,
+    ids=[curve.name for curve in curves],
+)
+def test_ecdh_with_openssl(vcurve):
+    if isinstance(vcurve.curve, CurveEdTw):
+        pytest.skip("Edwards curves are not supported for ECDH")
+
+    assert vcurve.openssl_name
+
+    if vcurve.openssl_name not in OPENSSL_SUPPORTED_CURVES:
+        pytest.skip("system openssl does not support " + vcurve.openssl_name)
+
+    try:
+        hlp = run_openssl("pkeyutl -help")
+        # str.find() returns -1 when the option is absent from the help text
+        if hlp.find("-derive") == -1:  # pragma: no cover
+            pytest.skip("system openssl does not support `pkeyutl -derive`")
+    except RunOpenSslError:  # pragma: no cover
+        pytest.skip("system openssl could not be executed")
+
+    if os.path.isdir("t"):  # pragma: no branch
+        shutil.rmtree("t")
+    os.mkdir("t")
+    run_openssl(
+        "ecparam -name %s -genkey -out t/privkey1.pem" % vcurve.openssl_name
+    )
+    run_openssl(
+        "ecparam -name %s -genkey -out t/privkey2.pem" % vcurve.openssl_name
+    )
+    run_openssl("ec -in t/privkey1.pem -pubout -out t/pubkey1.pem")
+
+    ecdh1 = ECDH(curve=vcurve)
+    ecdh2 = ECDH(curve=vcurve)
+    with open("t/privkey1.pem") as e:
+        key = e.read()
+    ecdh1.load_private_key_pem(key)
+    with open("t/privkey2.pem") as e:
+        key = e.read()
+    ecdh2.load_private_key_pem(key)
+
+    with open("t/pubkey1.pem") as e:
+        key = e.read()
+    vk1 = VerifyingKey.from_pem(key)
+    assert vk1.to_string() == ecdh1.get_public_key().to_string()
+    vk2 = ecdh2.get_public_key()
+    with open("t/pubkey2.pem", "wb") as e:
+        e.write(vk2.to_pem())
+
+    ecdh1.load_received_public_key(vk2)
+    ecdh2.load_received_public_key(vk1)
+    secret1 = ecdh1.generate_sharedsecret_bytes()
+    secret2 = ecdh2.generate_sharedsecret_bytes()
+
+    assert secret1 == secret2
+
+    run_openssl(
+        "pkeyutl -derive -inkey t/privkey1.pem -peerkey t/pubkey2.pem -out t/secret1"
+    )
+    run_openssl(
+        "pkeyutl -derive -inkey t/privkey2.pem -peerkey t/pubkey1.pem -out t/secret2"
+    )
+
+    with open("t/secret1", "rb") as e:
+        ssl_secret1 = e.read()
+    with open("t/secret2", "rb") as e:
+        ssl_secret2 = e.read()
+
+    assert len(ssl_secret1) == vk1.curve.verifying_key_length // 2
+    assert len(secret1) == vk1.curve.verifying_key_length // 2
+
+    assert ssl_secret1 == ssl_secret2
+    assert secret1 == ssl_secret1
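
Stripped of the pytest and openssl plumbing, the ECDH flow these tests cover
is: generate a key pair on each side, exchange public keys, derive the same
shared secret. A minimal sketch, assuming the vendored `ecdsa` package is
importable (`alice` and `bob` are the illustration's own names):

    from ecdsa import ECDH, NIST256p

    alice = ECDH(curve=NIST256p)
    bob = ECDH(curve=NIST256p)

    # generate_private_key() also returns the matching public key,
    # which is what gets sent to the peer
    bob.load_received_public_key(alice.generate_private_key())
    alice.load_received_public_key(bob.generate_private_key())

    # both sides now compute the same shared secret
    assert (
        alice.generate_sharedsecret_bytes()
        == bob.generate_sharedsecret_bytes()
    )

diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_ecdsa.py b/myenv/lib/python3.9/site-packages/ecdsa/test_ecdsa.py
new file mode 100644
index 0000000..dbc4a6e
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/ecdsa/test_ecdsa.py
@@ -0,0 +1,661 @@
+from __future__ import print_function
+import sys
+import hypothesis.strategies as st
+from hypothesis import given, settings, note, example
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+import pytest
+from .ecdsa import (
+    Private_key,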
Public_key, + Signature, + generator_192, + digest_integer, + ellipticcurve, + point_is_valid, + generator_224, + generator_256, + generator_384, + generator_521, + generator_secp256k1, + curve_192, + InvalidPointError, + curve_112r2, + generator_112r2, + int_to_string, +) + + +HYP_SETTINGS = {} +# old hypothesis doesn't have the "deadline" setting +if sys.version_info > (2, 7): # pragma: no branch + # SEC521p is slow, allow long execution for it + HYP_SETTINGS["deadline"] = 5000 + + +class TestP192FromX9_62(unittest.TestCase): + """Check test vectors from X9.62""" + + @classmethod + def setUpClass(cls): + cls.d = 651056770906015076056810763456358567190100156695615665659 + cls.Q = cls.d * generator_192 + cls.k = 6140507067065001063065065565667405560006161556565665656654 + cls.R = cls.k * generator_192 + + cls.msg = 968236873715988614170569073515315707566766479517 + cls.pubk = Public_key(generator_192, generator_192 * cls.d) + cls.privk = Private_key(cls.pubk, cls.d) + cls.sig = cls.privk.sign(cls.msg, cls.k) + + def test_point_multiplication(self): + assert self.Q.x() == 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5 + + def test_point_multiplication_2(self): + assert self.R.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD + assert self.R.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835 + + def test_mult_and_addition(self): + u1 = 2563697409189434185194736134579731015366492496392189760599 + u2 = 6266643813348617967186477710235785849136406323338782220568 + temp = u1 * generator_192 + u2 * self.Q + assert temp.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD + assert temp.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835 + + def test_signature(self): + r, s = self.sig.r, self.sig.s + assert r == 3342403536405981729393488334694600415596881826869351677613 + assert s == 5735822328888155254683894997897571951568553642892029982342 + + def test_verification(self): + assert self.pubk.verifies(self.msg, self.sig) + + def test_rejection(self): + assert not self.pubk.verifies(self.msg - 1, self.sig) + + +class TestPublicKey(unittest.TestCase): + def test_equality_public_keys(self): + gen = generator_192 + x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point = ellipticcurve.Point(gen.curve(), x, y) + pub_key1 = Public_key(gen, point) + pub_key2 = Public_key(gen, point) + self.assertEqual(pub_key1, pub_key2) + + def test_inequality_public_key(self): + gen = generator_192 + x1 = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y1 = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point1 = ellipticcurve.Point(gen.curve(), x1, y1) + + x2 = 0x6A223D00BD22C52833409A163E057E5B5DA1DEF2A197DD15 + y2 = 0x7B482604199367F1F303F9EF627F922F97023E90EAE08ABF + point2 = ellipticcurve.Point(gen.curve(), x2, y2) + + pub_key1 = Public_key(gen, point1) + pub_key2 = Public_key(gen, point2) + self.assertNotEqual(pub_key1, pub_key2) + + def test_inequality_different_curves(self): + gen = generator_192 + x1 = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y1 = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point1 = ellipticcurve.Point(gen.curve(), x1, y1) + + x2 = 0x722BA0FB6B8FC8898A4C6AB49E66 + y2 = 0x2B7344BB57A7ABC8CA0F1A398C7D + point2 = ellipticcurve.Point(generator_112r2.curve(), x2, y2) + + pub_key1 = Public_key(gen, point1) + pub_key2 = Public_key(generator_112r2, point2) + self.assertNotEqual(pub_key1, pub_key2) + + def test_inequality_public_key_not_implemented(self): + gen = generator_192 + x 
= 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point = ellipticcurve.Point(gen.curve(), x, y) + pub_key = Public_key(gen, point) + self.assertNotEqual(pub_key, None) + + def test_public_key_with_generator_without_order(self): + gen = ellipticcurve.PointJacobi( + generator_192.curve(), generator_192.x(), generator_192.y(), 1 + ) + + x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point = ellipticcurve.Point(gen.curve(), x, y) + + with self.assertRaises(InvalidPointError) as e: + Public_key(gen, point) + + self.assertIn("Generator point must have order", str(e.exception)) + + def test_public_point_on_curve_not_scalar_multiple_of_base_point(self): + x = 2 + y = 0xBE6AA4938EF7CFE6FE29595B6B00 + # we need a curve with cofactor != 1 + point = ellipticcurve.PointJacobi(curve_112r2, x, y, 1) + + self.assertTrue(curve_112r2.contains_point(x, y)) + + with self.assertRaises(InvalidPointError) as e: + Public_key(generator_112r2, point) + + self.assertIn("Generator point order", str(e.exception)) + + def test_point_is_valid_with_not_scalar_multiple_of_base_point(self): + x = 2 + y = 0xBE6AA4938EF7CFE6FE29595B6B00 + + self.assertFalse(point_is_valid(generator_112r2, x, y)) + + # the tests to verify the extensiveness of tests in ecdsa.ecdsa + # if PointJacobi gets modified to calculate the x and y mod p the tests + # below will need to use a fake/mock object + def test_invalid_point_x_negative(self): + pt = ellipticcurve.PointJacobi(curve_192, -1, 0, 1) + + with self.assertRaises(InvalidPointError) as e: + Public_key(generator_192, pt) + + self.assertIn("The public point has x or y", str(e.exception)) + + def test_invalid_point_x_equal_p(self): + pt = ellipticcurve.PointJacobi(curve_192, curve_192.p(), 0, 1) + + with self.assertRaises(InvalidPointError) as e: + Public_key(generator_192, pt) + + self.assertIn("The public point has x or y", str(e.exception)) + + def test_invalid_point_y_negative(self): + pt = ellipticcurve.PointJacobi(curve_192, 0, -1, 1) + + with self.assertRaises(InvalidPointError) as e: + Public_key(generator_192, pt) + + self.assertIn("The public point has x or y", str(e.exception)) + + def test_invalid_point_y_equal_p(self): + pt = ellipticcurve.PointJacobi(curve_192, 0, curve_192.p(), 1) + + with self.assertRaises(InvalidPointError) as e: + Public_key(generator_192, pt) + + self.assertIn("The public point has x or y", str(e.exception)) + + +class TestPublicKeyVerifies(unittest.TestCase): + # test all the different ways that a signature can be publicly invalid + @classmethod + def setUpClass(cls): + gen = generator_192 + x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point = ellipticcurve.Point(gen.curve(), x, y) + + cls.pub_key = Public_key(gen, point) + + def test_sig_with_r_zero(self): + sig = Signature(0, 1) + + self.assertFalse(self.pub_key.verifies(1, sig)) + + def test_sig_with_r_order(self): + sig = Signature(generator_192.order(), 1) + + self.assertFalse(self.pub_key.verifies(1, sig)) + + def test_sig_with_s_zero(self): + sig = Signature(1, 0) + + self.assertFalse(self.pub_key.verifies(1, sig)) + + def test_sig_with_s_order(self): + sig = Signature(1, generator_192.order()) + + self.assertFalse(self.pub_key.verifies(1, sig)) + + +class TestPrivateKey(unittest.TestCase): + @classmethod + def setUpClass(cls): + gen = generator_192 + x = 
0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 + y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F + point = ellipticcurve.Point(gen.curve(), x, y) + cls.pub_key = Public_key(gen, point) + + def test_equality_private_keys(self): + pr_key1 = Private_key(self.pub_key, 100) + pr_key2 = Private_key(self.pub_key, 100) + self.assertEqual(pr_key1, pr_key2) + + def test_inequality_private_keys(self): + pr_key1 = Private_key(self.pub_key, 100) + pr_key2 = Private_key(self.pub_key, 200) + self.assertNotEqual(pr_key1, pr_key2) + + def test_inequality_private_keys_not_implemented(self): + pr_key = Private_key(self.pub_key, 100) + self.assertNotEqual(pr_key, None) + + +# Testing point validity, as per ECDSAVS.pdf B.2.2: +P192_POINTS = [ + ( + generator_192, + 0xCD6D0F029A023E9AACA429615B8F577ABEE685D8257CC83A, + 0x00019C410987680E9FB6C0B6ECC01D9A2647C8BAE27721BACDFC, + False, + ), + ( + generator_192, + 0x00017F2FCE203639E9EAF9FB50B81FC32776B30E3B02AF16C73B, + 0x95DA95C5E72DD48E229D4748D4EEE658A9A54111B23B2ADB, + False, + ), + ( + generator_192, + 0x4F77F8BC7FCCBADD5760F4938746D5F253EE2168C1CF2792, + 0x000147156FF824D131629739817EDB197717C41AAB5C2A70F0F6, + False, + ), + ( + generator_192, + 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6, + 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F, + True, + ), + ( + generator_192, + 0xCDF56C1AA3D8AFC53C521ADF3FFB96734A6A630A4A5B5A70, + 0x97C1C44A5FB229007B5EC5D25F7413D170068FFD023CAA4E, + True, + ), + ( + generator_192, + 0x89009C0DC361C81E99280C8E91DF578DF88CDF4B0CDEDCED, + 0x27BE44A529B7513E727251F128B34262A0FD4D8EC82377B9, + True, + ), + ( + generator_192, + 0x6A223D00BD22C52833409A163E057E5B5DA1DEF2A197DD15, + 0x7B482604199367F1F303F9EF627F922F97023E90EAE08ABF, + True, + ), + ( + generator_192, + 0x6DCCBDE75C0948C98DAB32EA0BC59FE125CF0FB1A3798EDA, + 0x0001171A3E0FA60CF3096F4E116B556198DE430E1FBD330C8835, + False, + ), + ( + generator_192, + 0xD266B39E1F491FC4ACBBBC7D098430931CFA66D55015AF12, + 0x193782EB909E391A3148B7764E6B234AA94E48D30A16DBB2, + False, + ), + ( + generator_192, + 0x9D6DDBCD439BAA0C6B80A654091680E462A7D1D3F1FFEB43, + 0x6AD8EFC4D133CCF167C44EB4691C80ABFFB9F82B932B8CAA, + False, + ), + ( + generator_192, + 0x146479D944E6BDA87E5B35818AA666A4C998A71F4E95EDBC, + 0xA86D6FE62BC8FBD88139693F842635F687F132255858E7F6, + False, + ), + ( + generator_192, + 0xE594D4A598046F3598243F50FD2C7BD7D380EDB055802253, + 0x509014C0C4D6B536E3CA750EC09066AF39B4C8616A53A923, + False, + ), +] + + +@pytest.mark.parametrize("generator,x,y,expected", P192_POINTS) +def test_point_validity(generator, x, y, expected): + """ + `generator` defines the curve; is `(x, y)` a point on + this curve? `expected` is True if the right answer is Yes. 
+ """ + assert point_is_valid(generator, x, y) == expected + + +# Trying signature-verification tests from ECDSAVS.pdf B.2.4: +CURVE_192_KATS = [ + ( + generator_192, + int( + "0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee" + "425dfbc13a5f6d408471b054f2655617cbbaf7937b7c80cd8865cf02c8487d30" + "d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff79" + "8cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d1" + "58", + 16, + ), + 0xD9DBFB332AA8E5FF091E8CE535857C37C73F6250FFB2E7AC, + 0x282102E364FEDED3AD15DDF968F88D8321AA268DD483EBC4, + 0x64DCA58A20787C488D11D6DD96313F1B766F2D8EFE122916, + 0x1ECBA28141E84AB4ECAD92F56720E2CC83EB3D22DEC72479, + True, + ), + ( + generator_192, + int( + "0x94bb5bacd5f8ea765810024db87f4224ad71362a3c28284b2b9f39fab86db1" + "2e8beb94aae899768229be8fdb6c4f12f28912bb604703a79ccff769c1607f5a" + "91450f30ba0460d359d9126cbd6296be6d9c4bb96c0ee74cbb44197c207f6db3" + "26ab6f5a659113a9034e54be7b041ced9dcf6458d7fb9cbfb2744d999f7dfd63" + "f4", + 16, + ), + 0x3E53EF8D3112AF3285C0E74842090712CD324832D4277AE7, + 0xCC75F8952D30AEC2CBB719FC6AA9934590B5D0FF5A83ADB7, + 0x8285261607283BA18F335026130BAB31840DCFD9C3E555AF, + 0x356D89E1B04541AFC9704A45E9C535CE4A50929E33D7E06C, + True, + ), + ( + generator_192, + int( + "0xf6227a8eeb34afed1621dcc89a91d72ea212cb2f476839d9b4243c66877911" + "b37b4ad6f4448792a7bbba76c63bdd63414b6facab7dc71c3396a73bd7ee14cd" + "d41a659c61c99b779cecf07bc51ab391aa3252386242b9853ea7da67fd768d30" + "3f1b9b513d401565b6f1eb722dfdb96b519fe4f9bd5de67ae131e64b40e78c42" + "dd", + 16, + ), + 0x16335DBE95F8E8254A4E04575D736BEFB258B8657F773CB7, + 0x421B13379C59BC9DCE38A1099CA79BBD06D647C7F6242336, + 0x4141BD5D64EA36C5B0BD21EF28C02DA216ED9D04522B1E91, + 0x159A6AA852BCC579E821B7BB0994C0861FB08280C38DAA09, + False, + ), + ( + generator_192, + int( + "0x16b5f93afd0d02246f662761ed8e0dd9504681ed02a253006eb36736b56309" + "7ba39f81c8e1bce7a16c1339e345efabbc6baa3efb0612948ae51103382a8ee8" + "bc448e3ef71e9f6f7a9676694831d7f5dd0db5446f179bcb737d4a526367a447" + "bfe2c857521c7f40b6d7d7e01a180d92431fb0bbd29c04a0c420a57b3ed26ccd" + "8a", + 16, + ), + 0xFD14CDF1607F5EFB7B1793037B15BDF4BAA6F7C16341AB0B, + 0x83FA0795CC6C4795B9016DAC928FD6BAC32F3229A96312C4, + 0x8DFDB832951E0167C5D762A473C0416C5C15BC1195667DC1, + 0x1720288A2DC13FA1EC78F763F8FE2FF7354A7E6FDDE44520, + False, + ), + ( + generator_192, + int( + "0x08a2024b61b79d260e3bb43ef15659aec89e5b560199bc82cf7c65c77d3919" + "2e03b9a895d766655105edd9188242b91fbde4167f7862d4ddd61e5d4ab55196" + "683d4f13ceb90d87aea6e07eb50a874e33086c4a7cb0273a8e1c4408f4b846bc" + "eae1ebaac1b2b2ea851a9b09de322efe34cebe601653efd6ddc876ce8c2f2072" + "fb", + 16, + ), + 0x674F941DC1A1F8B763C9334D726172D527B90CA324DB8828, + 0x65ADFA32E8B236CB33A3E84CF59BFB9417AE7E8EDE57A7FF, + 0x9508B9FDD7DAF0D8126F9E2BC5A35E4C6D800B5B804D7796, + 0x36F2BF6B21B987C77B53BB801B3435A577E3D493744BFAB0, + False, + ), + ( + generator_192, + int( + "0x1843aba74b0789d4ac6b0b8923848023a644a7b70afa23b1191829bbe4397c" + "e15b629bf21a8838298653ed0c19222b95fa4f7390d1b4c844d96e645537e0aa" + "e98afb5c0ac3bd0e4c37f8daaff25556c64e98c319c52687c904c4de7240a1cc" + "55cd9756b7edaef184e6e23b385726e9ffcba8001b8f574987c1a3fedaaa83ca" + "6d", + 16, + ), + 0x10ECCA1AAD7220B56A62008B35170BFD5E35885C4014A19F, + 0x04EB61984C6C12ADE3BC47F3C629ECE7AA0A033B9948D686, + 0x82BFA4E82C0DFE9274169B86694E76CE993FD83B5C60F325, + 0xA97685676C59A65DBDE002FE9D613431FB183E8006D05633, + False, + ), + ( + generator_192, + int( + 
"0x5a478f4084ddd1a7fea038aa9732a822106385797d02311aeef4d0264f824f" + "698df7a48cfb6b578cf3da416bc0799425bb491be5b5ecc37995b85b03420a98" + "f2c4dc5c31a69a379e9e322fbe706bbcaf0f77175e05cbb4fa162e0da82010a2" + "78461e3e974d137bc746d1880d6eb02aa95216014b37480d84b87f717bb13f76" + "e1", + 16, + ), + 0x6636653CB5B894CA65C448277B29DA3AD101C4C2300F7C04, + 0xFDF1CBB3FC3FD6A4F890B59E554544175FA77DBDBEB656C1, + 0xEAC2DDECDDFB79931A9C3D49C08DE0645C783A24CB365E1C, + 0x3549FEE3CFA7E5F93BC47D92D8BA100E881A2A93C22F8D50, + False, + ), + ( + generator_192, + int( + "0xc598774259a058fa65212ac57eaa4f52240e629ef4c310722088292d1d4af6" + "c39b49ce06ba77e4247b20637174d0bd67c9723feb57b5ead232b47ea452d5d7" + "a089f17c00b8b6767e434a5e16c231ba0efa718a340bf41d67ea2d295812ff1b" + "9277daacb8bc27b50ea5e6443bcf95ef4e9f5468fe78485236313d53d1c68f6b" + "a2", + 16, + ), + 0xA82BD718D01D354001148CD5F69B9EBF38FF6F21898F8AAA, + 0xE67CEEDE07FC2EBFAFD62462A51E4B6C6B3D5B537B7CAF3E, + 0x4D292486C620C3DE20856E57D3BB72FCDE4A73AD26376955, + 0xA85289591A6081D5728825520E62FF1C64F94235C04C7F95, + False, + ), + ( + generator_192, + int( + "0xca98ed9db081a07b7557f24ced6c7b9891269a95d2026747add9e9eb80638a" + "961cf9c71a1b9f2c29744180bd4c3d3db60f2243c5c0b7cc8a8d40a3f9a7fc91" + "0250f2187136ee6413ffc67f1a25e1c4c204fa9635312252ac0e0481d89b6d53" + "808f0c496ba87631803f6c572c1f61fa049737fdacce4adff757afed4f05beb6" + "58", + 16, + ), + 0x7D3B016B57758B160C4FCA73D48DF07AE3B6B30225126C2F, + 0x4AF3790D9775742BDE46F8DA876711BE1B65244B2B39E7EC, + 0x95F778F5F656511A5AB49A5D69DDD0929563C29CBC3A9E62, + 0x75C87FC358C251B4C83D2DD979FAAD496B539F9F2EE7A289, + False, + ), + ( + generator_192, + int( + "0x31dd9a54c8338bea06b87eca813d555ad1850fac9742ef0bbe40dad400e102" + "88acc9c11ea7dac79eb16378ebea9490e09536099f1b993e2653cd50240014c9" + "0a9c987f64545abc6a536b9bd2435eb5e911fdfde2f13be96ea36ad38df4ae9e" + "a387b29cced599af777338af2794820c9cce43b51d2112380a35802ab7e396c9" + "7a", + 16, + ), + 0x9362F28C4EF96453D8A2F849F21E881CD7566887DA8BEB4A, + 0xE64D26D8D74C48A024AE85D982EE74CD16046F4EE5333905, + 0xF3923476A296C88287E8DE914B0B324AD5A963319A4FE73B, + 0xF0BAEED7624ED00D15244D8BA2AEDE085517DBDEC8AC65F5, + True, + ), + ( + generator_192, + int( + "0xb2b94e4432267c92f9fdb9dc6040c95ffa477652761290d3c7de312283f645" + "0d89cc4aabe748554dfb6056b2d8e99c7aeaad9cdddebdee9dbc099839562d90" + "64e68e7bb5f3a6bba0749ca9a538181fc785553a4000785d73cc207922f63e8c" + "e1112768cb1de7b673aed83a1e4a74592f1268d8e2a4e9e63d414b5d442bd045" + "6d", + 16, + ), + 0xCC6FC032A846AAAC25533EB033522824F94E670FA997ECEF, + 0xE25463EF77A029ECCDA8B294FD63DD694E38D223D30862F1, + 0x066B1D07F3A40E679B620EDA7F550842A35C18B80C5EBE06, + 0xA0B0FB201E8F2DF65E2C4508EF303BDC90D934016F16B2DC, + False, + ), + ( + generator_192, + int( + "0x4366fcadf10d30d086911de30143da6f579527036937007b337f7282460eae" + "5678b15cccda853193ea5fc4bc0a6b9d7a31128f27e1214988592827520b214e" + "ed5052f7775b750b0c6b15f145453ba3fee24a085d65287e10509eb5d5f602c4" + "40341376b95c24e5c4727d4b859bfe1483d20538acdd92c7997fa9c614f0f839" + "d7", + 16, + ), + 0x955C908FE900A996F7E2089BEE2F6376830F76A19135E753, + 0xBA0C42A91D3847DE4A592A46DC3FDAF45A7CC709B90DE520, + 0x1F58AD77FC04C782815A1405B0925E72095D906CBF52A668, + 0xF2E93758B3AF75EDF784F05A6761C9B9A6043C66B845B599, + False, + ), + ( + generator_192, + int( + "0x543f8af57d750e33aa8565e0cae92bfa7a1ff78833093421c2942cadf99866" + "70a5ff3244c02a8225e790fbf30ea84c74720abf99cfd10d02d34377c3d3b412" + "69bea763384f372bb786b5846f58932defa68023136cd571863b304886e95e52" + 
"e7877f445b9364b3f06f3c28da12707673fecb4b8071de06b6e0a3c87da160ce" + "f3", + 16, + ), + 0x31F7FA05576D78A949B24812D4383107A9A45BB5FCCDD835, + 0x8DC0EB65994A90F02B5E19BD18B32D61150746C09107E76B, + 0xBE26D59E4E883DDE7C286614A767B31E49AD88789D3A78FF, + 0x8762CA831C1CE42DF77893C9B03119428E7A9B819B619068, + False, + ), + ( + generator_192, + int( + "0xd2e8454143ce281e609a9d748014dcebb9d0bc53adb02443a6aac2ffe6cb009f" + "387c346ecb051791404f79e902ee333ad65e5c8cb38dc0d1d39a8dc90add502357" + "2720e5b94b190d43dd0d7873397504c0c7aef2727e628eb6a74411f2e400c65670" + "716cb4a815dc91cbbfeb7cfe8c929e93184c938af2c078584da045e8f8d1", + 16, + ), + 0x66AA8EDBBDB5CF8E28CEB51B5BDA891CAE2DF84819FE25C0, + 0x0C6BC2F69030A7CE58D4A00E3B3349844784A13B8936F8DA, + 0xA4661E69B1734F4A71B788410A464B71E7FFE42334484F23, + 0x738421CF5E049159D69C57A915143E226CAC8355E149AFE9, + False, + ), + ( + generator_192, + int( + "0x6660717144040f3e2f95a4e25b08a7079c702a8b29babad5a19a87654bc5c5af" + "a261512a11b998a4fb36b5d8fe8bd942792ff0324b108120de86d63f65855e5461" + "184fc96a0a8ffd2ce6d5dfb0230cbbdd98f8543e361b3205f5da3d500fdc8bac6d" + "b377d75ebef3cb8f4d1ff738071ad0938917889250b41dd1d98896ca06fb", + 16, + ), + 0xBCFACF45139B6F5F690A4C35A5FFFA498794136A2353FC77, + 0x6F4A6C906316A6AFC6D98FE1F0399D056F128FE0270B0F22, + 0x9DB679A3DAFE48F7CCAD122933ACFE9DA0970B71C94C21C1, + 0x984C2DB99827576C0A41A5DA41E07D8CC768BC82F18C9DA9, + False, + ), +] + + +@pytest.mark.parametrize("gen,msg,qx,qy,r,s,expected", CURVE_192_KATS) +def test_signature_validity(gen, msg, qx, qy, r, s, expected): + """ + `msg` = message, `qx` and `qy` represent the base point on + elliptic curve of `gen`, `r` and `s` are the signature, and + `expected` is True iff the signature is expected to be valid.""" + pubk = Public_key(gen, ellipticcurve.Point(gen.curve(), qx, qy)) + assert expected == pubk.verifies(digest_integer(msg), Signature(r, s)) + + +@pytest.mark.parametrize( + "gen,msg,qx,qy,r,s,expected", [x for x in CURVE_192_KATS if x[6]] +) +def test_pk_recovery(gen, msg, r, s, qx, qy, expected): + del expected + sign = Signature(r, s) + pks = sign.recover_public_keys(digest_integer(msg), gen) + + assert pks + + # Test if the signature is valid for all found public keys + for pk in pks: + q = pk.point + test_signature_validity(gen, msg, q.x(), q.y(), r, s, True) + + # Test if the original public key is in the set of found keys + original_q = ellipticcurve.Point(gen.curve(), qx, qy) + points = [pk.point for pk in pks] + assert original_q in points + + +@st.composite +def st_random_gen_key_msg_nonce(draw): + """Hypothesis strategy for test_sig_verify().""" + name_gen = { + "generator_192": generator_192, + "generator_224": generator_224, + "generator_256": generator_256, + "generator_secp256k1": generator_secp256k1, + "generator_384": generator_384, + "generator_521": generator_521, + } + name = draw(st.sampled_from(sorted(name_gen.keys()))) + note("Generator used: {0}".format(name)) + generator = name_gen[name] + order = int(generator.order()) + + key = draw(st.integers(min_value=1, max_value=order)) + msg = draw(st.integers(min_value=1, max_value=order)) + nonce = draw( + st.integers(min_value=1, max_value=order + 1) + | st.integers(min_value=order >> 1, max_value=order) + ) + return generator, key, msg, nonce + + +SIG_VER_SETTINGS = dict(HYP_SETTINGS) +SIG_VER_SETTINGS["max_examples"] = 10 + + +@settings(**SIG_VER_SETTINGS) +@example((generator_224, 4, 1, 1)) +@given(st_random_gen_key_msg_nonce()) +def test_sig_verify(args): + """ + Check if signing and verification 
works for arbitrary messages and + that signatures for other messages are rejected. + """ + generator, sec_mult, msg, nonce = args + + pubkey = Public_key(generator, generator * sec_mult) + privkey = Private_key(pubkey, sec_mult) + + signature = privkey.sign(msg, nonce) + + assert pubkey.verifies(msg, signature) + + assert not pubkey.verifies(msg - 1, signature) + + +def test_int_to_string_with_zero(): + assert int_to_string(0) == b"\x00" diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_eddsa.py b/myenv/lib/python3.9/site-packages/ecdsa/test_eddsa.py new file mode 100644 index 0000000..7a09ad7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_eddsa.py @@ -0,0 +1,1079 @@ +import pickle +import hashlib +import pytest + +try: + import unittest2 as unittest +except ImportError: + import unittest +from hypothesis import given, settings, example +import hypothesis.strategies as st +from .ellipticcurve import PointEdwards, INFINITY, CurveEdTw +from .eddsa import ( + generator_ed25519, + curve_ed25519, + generator_ed448, + curve_ed448, + PrivateKey, + PublicKey, +) +from .ecdsa import generator_256, curve_256 +from .errors import MalformedPointError +from ._compat import a2b_hex, compat26_str + + +class TestA2B_Hex(unittest.TestCase): + def test_invalid_input(self): + with self.assertRaises(ValueError): + a2b_hex("abcdefghi") + + +def test_ed25519_curve_compare(): + assert curve_ed25519 != curve_256 + + +def test_ed25519_and_ed448_compare(): + assert curve_ed448 != curve_ed25519 + + +def test_ed25519_and_custom_curve_compare(): + a = CurveEdTw(curve_ed25519.p(), -curve_ed25519.a(), 1) + + assert curve_ed25519 != a + + +def test_ed25519_and_almost_exact_curve_compare(): + a = CurveEdTw(curve_ed25519.p(), curve_ed25519.a(), 1) + + assert curve_ed25519 != a + + +def test_ed25519_and_same_curve_params(): + a = CurveEdTw(curve_ed25519.p(), curve_ed25519.a(), curve_ed25519.d()) + + assert curve_ed25519 == a + assert not (curve_ed25519 != a) + + +def test_ed25519_contains_point(): + g = generator_ed25519 + assert curve_ed25519.contains_point(g.x(), g.y()) + + +def test_ed25519_contains_point_bad(): + assert not curve_ed25519.contains_point(1, 1) + + +def test_ed25519_double(): + a = generator_ed25519 + + z = a.double() + + assert isinstance(z, PointEdwards) + + x2 = int( + "24727413235106541002554574571675588834622768167397638456726423" + "682521233608206" + ) + y2 = int( + "15549675580280190176352668710449542251549572066445060580507079" + "593062643049417" + ) + + b = PointEdwards(curve_ed25519, x2, y2, 1, x2 * y2) + + assert z == b + assert a != b + + +def test_ed25519_add_as_double(): + a = generator_ed25519 + + z = a + a + + assert isinstance(z, PointEdwards) + + b = generator_ed25519.double() + + assert z == b + + +def test_ed25519_double_infinity(): + a = PointEdwards(curve_ed25519, 0, 1, 1, 0) + + z = a.double() + + assert z is INFINITY + + +def test_ed25519_double_badly_encoded_infinity(): + # invalid point, mostly to make instrumental happy + a = PointEdwards(curve_ed25519, 1, 1, 1, 0) + + z = a.double() + + assert z is INFINITY + + +def test_ed25519_eq_with_different_z(): + x = generator_ed25519.x() + y = generator_ed25519.y() + p = curve_ed25519.p() + + a = PointEdwards(curve_ed25519, x * 2 % p, y * 2 % p, 2, x * y * 2 % p) + b = PointEdwards(curve_ed25519, x * 3 % p, y * 3 % p, 3, x * y * 3 % p) + + assert a == b + + assert not (a != b) + + +def test_ed25519_eq_against_infinity(): + assert generator_ed25519 != INFINITY + + +def 
test_ed25519_eq_encoded_infinity_against_infinity(): + a = PointEdwards(curve_ed25519, 0, 1, 1, 0) + assert a == INFINITY + + +def test_ed25519_eq_bad_encode_of_infinity_against_infinity(): + # technically incorrect encoding of the point at infinity, but we check + # both X and T, so verify that just T==0 works + a = PointEdwards(curve_ed25519, 1, 1, 1, 0) + assert a == INFINITY + + +def test_ed25519_eq_against_non_Edwards_point(): + assert generator_ed25519 != generator_256 + + +def test_ed25519_eq_against_negated_point(): + g = generator_ed25519 + neg = PointEdwards(curve_ed25519, -g.x(), g.y(), 1, -g.x() * g.y()) + assert g != neg + + +def test_ed25519_eq_x_different_y(): + # not points on the curve, but __eq__ doesn't care + a = PointEdwards(curve_ed25519, 1, 1, 1, 1) + b = PointEdwards(curve_ed25519, 1, 2, 1, 2) + + assert a != b + + +def test_ed25519_test_normalisation_and_scaling(): + x = generator_ed25519.x() + y = generator_ed25519.y() + p = curve_ed25519.p() + + a = PointEdwards(curve_ed25519, x * 11 % p, y * 11 % p, 11, x * y * 11 % p) + + assert a.x() == x + assert a.y() == y + + a.scale() + + assert a.x() == x + assert a.y() == y + + a.scale() # second execution should be a noop + + assert a.x() == x + assert a.y() == y + + +def test_ed25519_add_three_times(): + a = generator_ed25519 + + z = a + a + a + + x3 = int( + "468967334644549386571235445953867877890461982801326656862413" + "21779790909858396" + ) + y3 = int( + "832484377853344397649037712036920113830141722629755531674120" + "2210403726505172" + ) + + b = PointEdwards(curve_ed25519, x3, y3, 1, x3 * y3) + + assert z == b + + +def test_ed25519_add_to_infinity(): + # generator * (order-1) + x1 = int( + "427838232691226969392843410947554224151809796397784248136826" + "78720006717057747" + ) + y1 = int( + "463168356949264781694283940034751631413079938662562256157830" + "33603165251855960" + ) + inf_m_1 = PointEdwards(curve_ed25519, x1, y1, 1, x1 * y1) + + inf = inf_m_1 + generator_ed25519 + + assert inf is INFINITY + + +def test_ed25519_add_and_mul_equivalence(): + g = generator_ed25519 + + assert g + g == g * 2 + assert g + g + g == g * 3 + + +def test_ed25519_add_literal_infinity(): + g = generator_ed25519 + z = g + INFINITY + + assert z == g + + +def test_ed25519_add_infinity(): + inf = PointEdwards(curve_ed25519, 0, 1, 1, 0) + g = generator_ed25519 + z = g + inf + + assert z == g + + z = inf + g + + assert z == g + + +class TestEd25519(unittest.TestCase): + def test_add_wrong_curves(self): + with self.assertRaises(ValueError) as e: + generator_ed25519 + generator_ed448 + + self.assertIn("different curve", str(e.exception)) + + def test_add_wrong_point_type(self): + with self.assertRaises(ValueError) as e: + generator_ed25519 + generator_256 + + self.assertIn("different curve", str(e.exception)) + + +def test_ed25519_mul_to_order_min_1(): + x1 = int( + "427838232691226969392843410947554224151809796397784248136826" + "78720006717057747" + ) + y1 = int( + "463168356949264781694283940034751631413079938662562256157830" + "33603165251855960" + ) + inf_m_1 = PointEdwards(curve_ed25519, x1, y1, 1, x1 * y1) + + assert generator_ed25519 * (generator_ed25519.order() - 1) == inf_m_1 + + +def test_ed25519_mul_to_infinity(): + assert generator_ed25519 * generator_ed25519.order() == INFINITY + + +def test_ed25519_mul_to_infinity_plus_1(): + g = generator_ed25519 + assert g * (g.order() + 1) == g + + +def test_ed25519_mul_and_add(): + g = generator_ed25519 + a = g * 128 + b = g * 64 + g * 64 + + assert a == b + + +def 
test_ed25519_mul_and_add_2(): + g = generator_ed25519 + + a = g * 123 + b = g * 120 + g * 3 + + assert a == b + + +def test_ed25519_mul_infinity(): + inf = PointEdwards(curve_ed25519, 0, 1, 1, 0) + + z = inf * 11 + + assert z == INFINITY + + +def test_ed25519_mul_by_zero(): + z = generator_ed25519 * 0 + + assert z == INFINITY + + +def test_ed25519_mul_by_one(): + z = generator_ed25519 * 1 + + assert z == generator_ed25519 + + +def test_ed25519_mul_custom_point(): + # verify that multiplication without order set works + + g = generator_ed25519 + + a = PointEdwards(curve_ed25519, g.x(), g.y(), 1, g.x() * g.y()) + + z = a * 11 + + assert z == g * 11 + + +def test_ed25519_pickle(): + g = generator_ed25519 + assert pickle.loads(pickle.dumps(g)) == g + + +def test_ed448_eq_against_different_curve(): + assert generator_ed25519 != generator_ed448 + + +def test_ed448_double(): + g = generator_ed448 + z = g.double() + + assert isinstance(z, PointEdwards) + + x2 = int( + "4845591495304045936995492052586696895690942404582120401876" + "6013278705691214670908136440114445572635086627683154494739" + "7859048262938744149" + ) + y2 = int( + "4940887598674337276743026725267350893505445523037277237461" + "2648447308771911703729389009346215770388834286503647778745" + "3078312060500281069" + ) + + b = PointEdwards(curve_ed448, x2, y2, 1, x2 * y2) + + assert z == b + assert g != b + + +def test_ed448_add_as_double(): + g = generator_ed448 + z = g + g + + b = g.double() + + assert z == b + + +def test_ed448_mul_as_double(): + g = generator_ed448 + z = g * 2 + b = g.double() + + assert z == b + + +def test_ed448_add_to_infinity(): + # generator * (order - 1) + x1 = int( + "5022586839996825903617194737881084981068517190547539260353" + "6473749366191269932473977736719082931859264751085238669719" + "1187378895383117729" + ) + y1 = int( + "2988192100784814926760179304439306734375440401540802420959" + "2824137233150618983587600353687865541878473398230323350346" + "2500531545062832660" + ) + inf_m_1 = PointEdwards(curve_ed448, x1, y1, 1, x1 * y1) + + inf = inf_m_1 + generator_ed448 + + assert inf is INFINITY + + +def test_ed448_mul_to_infinity(): + g = generator_ed448 + inf = g * g.order() + + assert inf is INFINITY + + +def test_ed448_mul_to_infinity_plus_1(): + g = generator_ed448 + + z = g * (g.order() + 1) + + assert z == g + + +def test_ed448_add_and_mul_equivalence(): + g = generator_ed448 + + assert g + g == g * 2 + assert g + g + g == g * 3 + + +def test_ed25519_encode(): + g = generator_ed25519 + g_bytes = g.to_bytes() + assert len(g_bytes) == 32 + exp_bytes = ( + b"\x58\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + b"\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + ) + assert g_bytes == exp_bytes + + +def test_ed25519_decode(): + exp_bytes = ( + b"\x58\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + b"\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + ) + a = PointEdwards.from_bytes(curve_ed25519, exp_bytes) + + assert a == generator_ed25519 + + +class TestEdwardsMalformed(unittest.TestCase): + def test_invalid_point(self): + exp_bytes = ( + b"\x78\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + b"\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + ) + with self.assertRaises(MalformedPointError): + PointEdwards.from_bytes(curve_ed25519, exp_bytes) + + def test_invalid_length(self): + exp_bytes = ( + b"\x58\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + 
b"\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66\x66" + b"\x66" + ) + with self.assertRaises(MalformedPointError) as e: + PointEdwards.from_bytes(curve_ed25519, exp_bytes) + + self.assertIn("length", str(e.exception)) + + def test_ed448_invalid(self): + exp_bytes = b"\xff" * 57 + with self.assertRaises(MalformedPointError): + PointEdwards.from_bytes(curve_ed448, exp_bytes) + + +def test_ed448_encode(): + g = generator_ed448 + g_bytes = g.to_bytes() + assert len(g_bytes) == 57 + exp_bytes = ( + b"\x14\xfa\x30\xf2\x5b\x79\x08\x98\xad\xc8\xd7\x4e\x2c\x13\xbd" + b"\xfd\xc4\x39\x7c\xe6\x1c\xff\xd3\x3a\xd7\xc2\xa0\x05\x1e\x9c" + b"\x78\x87\x40\x98\xa3\x6c\x73\x73\xea\x4b\x62\xc7\xc9\x56\x37" + b"\x20\x76\x88\x24\xbc\xb6\x6e\x71\x46\x3f\x69\x00" + ) + assert g_bytes == exp_bytes + + +def test_ed448_decode(): + exp_bytes = ( + b"\x14\xfa\x30\xf2\x5b\x79\x08\x98\xad\xc8\xd7\x4e\x2c\x13\xbd" + b"\xfd\xc4\x39\x7c\xe6\x1c\xff\xd3\x3a\xd7\xc2\xa0\x05\x1e\x9c" + b"\x78\x87\x40\x98\xa3\x6c\x73\x73\xea\x4b\x62\xc7\xc9\x56\x37" + b"\x20\x76\x88\x24\xbc\xb6\x6e\x71\x46\x3f\x69\x00" + ) + + a = PointEdwards.from_bytes(curve_ed448, exp_bytes) + + assert a == generator_ed448 + + +class TestEdDSAEquality(unittest.TestCase): + def test_equal_public_points(self): + key1 = PublicKey(generator_ed25519, b"\x01" * 32) + key2 = PublicKey(generator_ed25519, b"\x01" * 32) + + self.assertEqual(key1, key2) + self.assertFalse(key1 != key2) + + def test_unequal_public_points(self): + key1 = PublicKey(generator_ed25519, b"\x01" * 32) + key2 = PublicKey(generator_ed25519, b"\x03" * 32) + + self.assertNotEqual(key1, key2) + + def test_unequal_to_string(self): + key1 = PublicKey(generator_ed25519, b"\x01" * 32) + key2 = b"\x01" * 32 + + self.assertNotEqual(key1, key2) + + def test_unequal_publickey_curves(self): + key1 = PublicKey(generator_ed25519, b"\x01" * 32) + key2 = PublicKey(generator_ed448, b"\x03" * 56 + b"\x00") + + self.assertNotEqual(key1, key2) + self.assertTrue(key1 != key2) + + def test_equal_private_keys(self): + key1 = PrivateKey(generator_ed25519, b"\x01" * 32) + key2 = PrivateKey(generator_ed25519, b"\x01" * 32) + + self.assertEqual(key1, key2) + self.assertFalse(key1 != key2) + + def test_unequal_private_keys(self): + key1 = PrivateKey(generator_ed25519, b"\x01" * 32) + key2 = PrivateKey(generator_ed25519, b"\x02" * 32) + + self.assertNotEqual(key1, key2) + self.assertTrue(key1 != key2) + + def test_unequal_privatekey_to_string(self): + key1 = PrivateKey(generator_ed25519, b"\x01" * 32) + key2 = b"\x01" * 32 + + self.assertNotEqual(key1, key2) + + def test_unequal_privatekey_curves(self): + key1 = PrivateKey(generator_ed25519, b"\x01" * 32) + key2 = PrivateKey(generator_ed448, b"\x01" * 57) + + self.assertNotEqual(key1, key2) + + +class TestInvalidEdDSAInputs(unittest.TestCase): + def test_wrong_length_of_private_key(self): + with self.assertRaises(ValueError): + PrivateKey(generator_ed25519, b"\x01" * 31) + + def test_wrong_length_of_public_key(self): + with self.assertRaises(ValueError): + PublicKey(generator_ed25519, b"\x01" * 33) + + def test_wrong_cofactor_curve(self): + ed_c = curve_ed25519 + + def _hash(data): + return hashlib.new("sha512", compat26_str(data)).digest() + + curve = CurveEdTw(ed_c.p(), ed_c.a(), ed_c.d(), 1, _hash) + g = generator_ed25519 + fake_gen = PointEdwards(curve, g.x(), g.y(), 1, g.x() * g.y()) + + with self.assertRaises(ValueError) as e: + PrivateKey(fake_gen, g.to_bytes()) + + self.assertIn("cofactor", str(e.exception)) + + def 
test_invalid_signature_length(self): + key = PublicKey(generator_ed25519, b"\x01" * 32) + + with self.assertRaises(ValueError) as e: + key.verify(b"", b"\x01" * 65) + + self.assertIn("length", str(e.exception)) + + def test_changing_public_key(self): + key = PublicKey(generator_ed25519, b"\x01" * 32) + + g = key.point + + new_g = PointEdwards(curve_ed25519, g.x(), g.y(), 1, g.x() * g.y()) + + key.point = new_g + + self.assertEqual(g, key.point) + + def test_changing_public_key_to_different_point(self): + key = PublicKey(generator_ed25519, b"\x01" * 32) + + with self.assertRaises(ValueError) as e: + key.point = generator_ed25519 + + self.assertIn("coordinates", str(e.exception)) + + def test_invalid_s_value(self): + key = PublicKey( + generator_ed25519, + b"\xd7\x5a\x98\x01\x82\xb1\x0a\xb7\xd5\x4b\xfe\xd3\xc9\x64\x07\x3a" + b"\x0e\xe1\x72\xf3\xda\xa6\x23\x25\xaf\x02\x1a\x68\xf7\x07\x51\x1a", + ) + sig_valid = bytearray( + b"\xe5\x56\x43\x00\xc3\x60\xac\x72\x90\x86\xe2\xcc\x80\x6e\x82\x8a" + b"\x84\x87\x7f\x1e\xb8\xe5\xd9\x74\xd8\x73\xe0\x65\x22\x49\x01\x55" + b"\x5f\xb8\x82\x15\x90\xa3\x3b\xac\xc6\x1e\x39\x70\x1c\xf9\xb4\x6b" + b"\xd2\x5b\xf5\xf0\x59\x5b\xbe\x24\x65\x51\x41\x43\x8e\x7a\x10\x0b" + ) + + self.assertTrue(key.verify(b"", sig_valid)) + + sig_invalid = bytearray(sig_valid) + sig_invalid[-1] = 0xFF + + with self.assertRaises(ValueError): + key.verify(b"", sig_invalid) + + def test_invalid_r_value(self): + key = PublicKey( + generator_ed25519, + b"\xd7\x5a\x98\x01\x82\xb1\x0a\xb7\xd5\x4b\xfe\xd3\xc9\x64\x07\x3a" + b"\x0e\xe1\x72\xf3\xda\xa6\x23\x25\xaf\x02\x1a\x68\xf7\x07\x51\x1a", + ) + sig_valid = bytearray( + b"\xe5\x56\x43\x00\xc3\x60\xac\x72\x90\x86\xe2\xcc\x80\x6e\x82\x8a" + b"\x84\x87\x7f\x1e\xb8\xe5\xd9\x74\xd8\x73\xe0\x65\x22\x49\x01\x55" + b"\x5f\xb8\x82\x15\x90\xa3\x3b\xac\xc6\x1e\x39\x70\x1c\xf9\xb4\x6b" + b"\xd2\x5b\xf5\xf0\x59\x5b\xbe\x24\x65\x51\x41\x43\x8e\x7a\x10\x0b" + ) + + self.assertTrue(key.verify(b"", sig_valid)) + + sig_invalid = bytearray(sig_valid) + sig_invalid[0] = 0xE0 + + with self.assertRaises(ValueError): + key.verify(b"", sig_invalid) + + +HYP_SETTINGS = dict() +HYP_SETTINGS["max_examples"] = 10 + + +@settings(**HYP_SETTINGS) +@example(1) +@example(5) # smallest multiple that requires changing sign of x +@given(st.integers(min_value=1, max_value=int(generator_ed25519.order() - 1))) +def test_ed25519_encode_decode(multiple): + a = generator_ed25519 * multiple + + b = PointEdwards.from_bytes(curve_ed25519, a.to_bytes()) + + assert a == b + + +@settings(**HYP_SETTINGS) +@example(1) +@example(2) # smallest multiple that requires changing the sign of x +@given(st.integers(min_value=1, max_value=int(generator_ed448.order() - 1))) +def test_ed448_encode_decode(multiple): + a = generator_ed448 * multiple + + b = PointEdwards.from_bytes(curve_ed448, a.to_bytes()) + + assert a == b + + +@settings(**HYP_SETTINGS) +@example(1) +@example(2) +@given(st.integers(min_value=1, max_value=int(generator_ed25519.order()) - 1)) +def test_ed25519_mul_precompute_vs_naf(multiple): + """Compare multiplication with and without precomputation.""" + g = generator_ed25519 + new_g = PointEdwards(curve_ed25519, g.x(), g.y(), 1, g.x() * g.y()) + + assert g * multiple == multiple * new_g + + +# Test vectors from RFC 8032 +TEST_VECTORS = [ + # TEST 1 + ( + generator_ed25519, + "9d61b19deffd5a60ba844af492ec2cc4" "4449c5697b326919703bac031cae7f60", + "d75a980182b10ab7d54bfed3c964073a" "0ee172f3daa62325af021a68f707511a", + "", + "e5564300c360ac729086e2cc806e828a" + 
"84877f1eb8e5d974d873e06522490155" + "5fb8821590a33bacc61e39701cf9b46b" + "d25bf5f0595bbe24655141438e7a100b", + ), + # TEST 2 + ( + generator_ed25519, + "4ccd089b28ff96da9db6c346ec114e0f" "5b8a319f35aba624da8cf6ed4fb8a6fb", + "3d4017c3e843895a92b70aa74d1b7ebc" "9c982ccf2ec4968cc0cd55f12af4660c", + "72", + "92a009a9f0d4cab8720e820b5f642540" + "a2b27b5416503f8fb3762223ebdb69da" + "085ac1e43e15996e458f3613d0f11d8c" + "387b2eaeb4302aeeb00d291612bb0c00", + ), + # TEST 3 + ( + generator_ed25519, + "c5aa8df43f9f837bedb7442f31dcb7b1" "66d38535076f094b85ce3a2e0b4458f7", + "fc51cd8e6218a1a38da47ed00230f058" "0816ed13ba3303ac5deb911548908025", + "af82", + "6291d657deec24024827e69c3abe01a3" + "0ce548a284743a445e3680d7db5ac3ac" + "18ff9b538d16f290ae67f760984dc659" + "4a7c15e9716ed28dc027beceea1ec40a", + ), + # TEST 1024 + ( + generator_ed25519, + "f5e5767cf153319517630f226876b86c" "8160cc583bc013744c6bf255f5cc0ee5", + "278117fc144c72340f67d0f2316e8386" "ceffbf2b2428c9c51fef7c597f1d426e", + "08b8b2b733424243760fe426a4b54908" + "632110a66c2f6591eabd3345e3e4eb98" + "fa6e264bf09efe12ee50f8f54e9f77b1" + "e355f6c50544e23fb1433ddf73be84d8" + "79de7c0046dc4996d9e773f4bc9efe57" + "38829adb26c81b37c93a1b270b20329d" + "658675fc6ea534e0810a4432826bf58c" + "941efb65d57a338bbd2e26640f89ffbc" + "1a858efcb8550ee3a5e1998bd177e93a" + "7363c344fe6b199ee5d02e82d522c4fe" + "ba15452f80288a821a579116ec6dad2b" + "3b310da903401aa62100ab5d1a36553e" + "06203b33890cc9b832f79ef80560ccb9" + "a39ce767967ed628c6ad573cb116dbef" + "efd75499da96bd68a8a97b928a8bbc10" + "3b6621fcde2beca1231d206be6cd9ec7" + "aff6f6c94fcd7204ed3455c68c83f4a4" + "1da4af2b74ef5c53f1d8ac70bdcb7ed1" + "85ce81bd84359d44254d95629e9855a9" + "4a7c1958d1f8ada5d0532ed8a5aa3fb2" + "d17ba70eb6248e594e1a2297acbbb39d" + "502f1a8c6eb6f1ce22b3de1a1f40cc24" + "554119a831a9aad6079cad88425de6bd" + "e1a9187ebb6092cf67bf2b13fd65f270" + "88d78b7e883c8759d2c4f5c65adb7553" + "878ad575f9fad878e80a0c9ba63bcbcc" + "2732e69485bbc9c90bfbd62481d9089b" + "eccf80cfe2df16a2cf65bd92dd597b07" + "07e0917af48bbb75fed413d238f5555a" + "7a569d80c3414a8d0859dc65a46128ba" + "b27af87a71314f318c782b23ebfe808b" + "82b0ce26401d2e22f04d83d1255dc51a" + "ddd3b75a2b1ae0784504df543af8969b" + "e3ea7082ff7fc9888c144da2af58429e" + "c96031dbcad3dad9af0dcbaaaf268cb8" + "fcffead94f3c7ca495e056a9b47acdb7" + "51fb73e666c6c655ade8297297d07ad1" + "ba5e43f1bca32301651339e22904cc8c" + "42f58c30c04aafdb038dda0847dd988d" + "cda6f3bfd15c4b4c4525004aa06eeff8" + "ca61783aacec57fb3d1f92b0fe2fd1a8" + "5f6724517b65e614ad6808d6f6ee34df" + "f7310fdc82aebfd904b01e1dc54b2927" + "094b2db68d6f903b68401adebf5a7e08" + "d78ff4ef5d63653a65040cf9bfd4aca7" + "984a74d37145986780fc0b16ac451649" + "de6188a7dbdf191f64b5fc5e2ab47b57" + "f7f7276cd419c17a3ca8e1b939ae49e4" + "88acba6b965610b5480109c8b17b80e1" + "b7b750dfc7598d5d5011fd2dcc5600a3" + "2ef5b52a1ecc820e308aa342721aac09" + "43bf6686b64b2579376504ccc493d97e" + "6aed3fb0f9cd71a43dd497f01f17c0e2" + "cb3797aa2a2f256656168e6c496afc5f" + "b93246f6b1116398a346f1a641f3b041" + "e989f7914f90cc2c7fff357876e506b5" + "0d334ba77c225bc307ba537152f3f161" + "0e4eafe595f6d9d90d11faa933a15ef1" + "369546868a7f3a45a96768d40fd9d034" + "12c091c6315cf4fde7cb68606937380d" + "b2eaaa707b4c4185c32eddcdd306705e" + "4dc1ffc872eeee475a64dfac86aba41c" + "0618983f8741c5ef68d3a101e8a3b8ca" + "c60c905c15fc910840b94c00a0b9d0", + "0aab4c900501b3e24d7cdf4663326a3a" + "87df5e4843b2cbdb67cbf6e460fec350" + "aa5371b1508f9f4528ecea23c436d94b" + "5e8fcd4f681e30a6ac00a9704a188a03", + ), + # TEST SHA(abc) + ( + generator_ed25519, 
+ "833fe62409237b9d62ec77587520911e" "9a759cec1d19755b7da901b96dca3d42", + "ec172b93ad5e563bf4932c70e1245034" "c35467ef2efd4d64ebf819683467e2bf", + "ddaf35a193617abacc417349ae204131" + "12e6fa4e89a97ea20a9eeee64b55d39a" + "2192992a274fc1a836ba3c23a3feebbd" + "454d4423643ce80e2a9ac94fa54ca49f", + "dc2a4459e7369633a52b1bf277839a00" + "201009a3efbf3ecb69bea2186c26b589" + "09351fc9ac90b3ecfdfbc7c66431e030" + "3dca179c138ac17ad9bef1177331a704", + ), + # Blank + ( + generator_ed448, + "6c82a562cb808d10d632be89c8513ebf" + "6c929f34ddfa8c9f63c9960ef6e348a3" + "528c8a3fcc2f044e39a3fc5b94492f8f" + "032e7549a20098f95b", + "5fd7449b59b461fd2ce787ec616ad46a" + "1da1342485a70e1f8a0ea75d80e96778" + "edf124769b46c7061bd6783df1e50f6c" + "d1fa1abeafe8256180", + "", + "533a37f6bbe457251f023c0d88f976ae" + "2dfb504a843e34d2074fd823d41a591f" + "2b233f034f628281f2fd7a22ddd47d78" + "28c59bd0a21bfd3980ff0d2028d4b18a" + "9df63e006c5d1c2d345b925d8dc00b41" + "04852db99ac5c7cdda8530a113a0f4db" + "b61149f05a7363268c71d95808ff2e65" + "2600", + ), + # 1 octet + ( + generator_ed448, + "c4eab05d357007c632f3dbb48489924d" + "552b08fe0c353a0d4a1f00acda2c463a" + "fbea67c5e8d2877c5e3bc397a659949e" + "f8021e954e0a12274e", + "43ba28f430cdff456ae531545f7ecd0a" + "c834a55d9358c0372bfa0c6c6798c086" + "6aea01eb00742802b8438ea4cb82169c" + "235160627b4c3a9480", + "03", + "26b8f91727bd62897af15e41eb43c377" + "efb9c610d48f2335cb0bd0087810f435" + "2541b143c4b981b7e18f62de8ccdf633" + "fc1bf037ab7cd779805e0dbcc0aae1cb" + "cee1afb2e027df36bc04dcecbf154336" + "c19f0af7e0a6472905e799f1953d2a0f" + "f3348ab21aa4adafd1d234441cf807c0" + "3a00", + ), + # 11 octets + ( + generator_ed448, + "cd23d24f714274e744343237b93290f5" + "11f6425f98e64459ff203e8985083ffd" + "f60500553abc0e05cd02184bdb89c4cc" + "d67e187951267eb328", + "dcea9e78f35a1bf3499a831b10b86c90" + "aac01cd84b67a0109b55a36e9328b1e3" + "65fce161d71ce7131a543ea4cb5f7e9f" + "1d8b00696447001400", + "0c3e544074ec63b0265e0c", + "1f0a8888ce25e8d458a21130879b840a" + "9089d999aaba039eaf3e3afa090a09d3" + "89dba82c4ff2ae8ac5cdfb7c55e94d5d" + "961a29fe0109941e00b8dbdeea6d3b05" + "1068df7254c0cdc129cbe62db2dc957d" + "bb47b51fd3f213fb8698f064774250a5" + "028961c9bf8ffd973fe5d5c206492b14" + "0e00", + ), + # 12 octets + ( + generator_ed448, + "258cdd4ada32ed9c9ff54e63756ae582" + "fb8fab2ac721f2c8e676a72768513d93" + "9f63dddb55609133f29adf86ec9929dc" + "cb52c1c5fd2ff7e21b", + "3ba16da0c6f2cc1f30187740756f5e79" + "8d6bc5fc015d7c63cc9510ee3fd44adc" + "24d8e968b6e46e6f94d19b945361726b" + "d75e149ef09817f580", + "64a65f3cdedcdd66811e2915", + "7eeeab7c4e50fb799b418ee5e3197ff6" + "bf15d43a14c34389b59dd1a7b1b85b4a" + "e90438aca634bea45e3a2695f1270f07" + "fdcdf7c62b8efeaf00b45c2c96ba457e" + "b1a8bf075a3db28e5c24f6b923ed4ad7" + "47c3c9e03c7079efb87cb110d3a99861" + "e72003cbae6d6b8b827e4e6c143064ff" + "3c00", + ), + # 13 octets + ( + generator_ed448, + "7ef4e84544236752fbb56b8f31a23a10" + "e42814f5f55ca037cdcc11c64c9a3b29" + "49c1bb60700314611732a6c2fea98eeb" + "c0266a11a93970100e", + "b3da079b0aa493a5772029f0467baebe" + "e5a8112d9d3a22532361da294f7bb381" + "5c5dc59e176b4d9f381ca0938e13c6c0" + "7b174be65dfa578e80", + "64a65f3cdedcdd66811e2915e7", + "6a12066f55331b6c22acd5d5bfc5d712" + "28fbda80ae8dec26bdd306743c5027cb" + "4890810c162c027468675ecf645a8317" + "6c0d7323a2ccde2d80efe5a1268e8aca" + "1d6fbc194d3f77c44986eb4ab4177919" + "ad8bec33eb47bbb5fc6e28196fd1caf5" + "6b4e7e0ba5519234d047155ac727a105" + "3100", + ), + # 64 octets + ( + generator_ed448, + "d65df341ad13e008567688baedda8e9d" + 
"cdc17dc024974ea5b4227b6530e339bf" + "f21f99e68ca6968f3cca6dfe0fb9f4fa" + "b4fa135d5542ea3f01", + "df9705f58edbab802c7f8363cfe5560a" + "b1c6132c20a9f1dd163483a26f8ac53a" + "39d6808bf4a1dfbd261b099bb03b3fb5" + "0906cb28bd8a081f00", + "bd0f6a3747cd561bdddf4640a332461a" + "4a30a12a434cd0bf40d766d9c6d458e5" + "512204a30c17d1f50b5079631f64eb31" + "12182da3005835461113718d1a5ef944", + "554bc2480860b49eab8532d2a533b7d5" + "78ef473eeb58c98bb2d0e1ce488a98b1" + "8dfde9b9b90775e67f47d4a1c3482058" + "efc9f40d2ca033a0801b63d45b3b722e" + "f552bad3b4ccb667da350192b61c508c" + "f7b6b5adadc2c8d9a446ef003fb05cba" + "5f30e88e36ec2703b349ca229c267083" + "3900", + ), + # 256 octets + ( + generator_ed448, + "2ec5fe3c17045abdb136a5e6a913e32a" + "b75ae68b53d2fc149b77e504132d3756" + "9b7e766ba74a19bd6162343a21c8590a" + "a9cebca9014c636df5", + "79756f014dcfe2079f5dd9e718be4171" + "e2ef2486a08f25186f6bff43a9936b9b" + "fe12402b08ae65798a3d81e22e9ec80e" + "7690862ef3d4ed3a00", + "15777532b0bdd0d1389f636c5f6b9ba7" + "34c90af572877e2d272dd078aa1e567c" + "fa80e12928bb542330e8409f31745041" + "07ecd5efac61ae7504dabe2a602ede89" + "e5cca6257a7c77e27a702b3ae39fc769" + "fc54f2395ae6a1178cab4738e543072f" + "c1c177fe71e92e25bf03e4ecb72f47b6" + "4d0465aaea4c7fad372536c8ba516a60" + "39c3c2a39f0e4d832be432dfa9a706a6" + "e5c7e19f397964ca4258002f7c0541b5" + "90316dbc5622b6b2a6fe7a4abffd9610" + "5eca76ea7b98816af0748c10df048ce0" + "12d901015a51f189f3888145c03650aa" + "23ce894c3bd889e030d565071c59f409" + "a9981b51878fd6fc110624dcbcde0bf7" + "a69ccce38fabdf86f3bef6044819de11", + "c650ddbb0601c19ca11439e1640dd931" + "f43c518ea5bea70d3dcde5f4191fe53f" + "00cf966546b72bcc7d58be2b9badef28" + "743954e3a44a23f880e8d4f1cfce2d7a" + "61452d26da05896f0a50da66a239a8a1" + "88b6d825b3305ad77b73fbac0836ecc6" + "0987fd08527c1a8e80d5823e65cafe2a" + "3d00", + ), + # 1023 octets + ( + generator_ed448, + "872d093780f5d3730df7c212664b37b8" + "a0f24f56810daa8382cd4fa3f77634ec" + "44dc54f1c2ed9bea86fafb7632d8be19" + "9ea165f5ad55dd9ce8", + "a81b2e8a70a5ac94ffdbcc9badfc3feb" + "0801f258578bb114ad44ece1ec0e799d" + "a08effb81c5d685c0c56f64eecaef8cd" + "f11cc38737838cf400", + "6ddf802e1aae4986935f7f981ba3f035" + "1d6273c0a0c22c9c0e8339168e675412" + "a3debfaf435ed651558007db4384b650" + "fcc07e3b586a27a4f7a00ac8a6fec2cd" + "86ae4bf1570c41e6a40c931db27b2faa" + "15a8cedd52cff7362c4e6e23daec0fbc" + "3a79b6806e316efcc7b68119bf46bc76" + "a26067a53f296dafdbdc11c77f7777e9" + "72660cf4b6a9b369a6665f02e0cc9b6e" + "dfad136b4fabe723d2813db3136cfde9" + "b6d044322fee2947952e031b73ab5c60" + "3349b307bdc27bc6cb8b8bbd7bd32321" + "9b8033a581b59eadebb09b3c4f3d2277" + "d4f0343624acc817804728b25ab79717" + "2b4c5c21a22f9c7839d64300232eb66e" + "53f31c723fa37fe387c7d3e50bdf9813" + "a30e5bb12cf4cd930c40cfb4e1fc6225" + "92a49588794494d56d24ea4b40c89fc0" + "596cc9ebb961c8cb10adde976a5d602b" + "1c3f85b9b9a001ed3c6a4d3b1437f520" + "96cd1956d042a597d561a596ecd3d173" + "5a8d570ea0ec27225a2c4aaff26306d1" + "526c1af3ca6d9cf5a2c98f47e1c46db9" + "a33234cfd4d81f2c98538a09ebe76998" + "d0d8fd25997c7d255c6d66ece6fa56f1" + "1144950f027795e653008f4bd7ca2dee" + "85d8e90f3dc315130ce2a00375a318c7" + "c3d97be2c8ce5b6db41a6254ff264fa6" + "155baee3b0773c0f497c573f19bb4f42" + "40281f0b1f4f7be857a4e59d416c06b4" + "c50fa09e1810ddc6b1467baeac5a3668" + "d11b6ecaa901440016f389f80acc4db9" + "77025e7f5924388c7e340a732e554440" + "e76570f8dd71b7d640b3450d1fd5f041" + "0a18f9a3494f707c717b79b4bf75c984" + "00b096b21653b5d217cf3565c9597456" + "f70703497a078763829bc01bb1cbc8fa" + "04eadc9a6e3f6699587a9e75c94e5bab" + 
"0036e0b2e711392cff0047d0d6b05bd2" + "a588bc109718954259f1d86678a579a3" + "120f19cfb2963f177aeb70f2d4844826" + "262e51b80271272068ef5b3856fa8535" + "aa2a88b2d41f2a0e2fda7624c2850272" + "ac4a2f561f8f2f7a318bfd5caf969614" + "9e4ac824ad3460538fdc25421beec2cc" + "6818162d06bbed0c40a387192349db67" + "a118bada6cd5ab0140ee273204f628aa" + "d1c135f770279a651e24d8c14d75a605" + "9d76b96a6fd857def5e0b354b27ab937" + "a5815d16b5fae407ff18222c6d1ed263" + "be68c95f32d908bd895cd76207ae7264" + "87567f9a67dad79abec316f683b17f2d" + "02bf07e0ac8b5bc6162cf94697b3c27c" + "d1fea49b27f23ba2901871962506520c" + "392da8b6ad0d99f7013fbc06c2c17a56" + "9500c8a7696481c1cd33e9b14e40b82e" + "79a5f5db82571ba97bae3ad3e0479515" + "bb0e2b0f3bfcd1fd33034efc6245eddd" + "7ee2086ddae2600d8ca73e214e8c2b0b" + "db2b047c6a464a562ed77b73d2d841c4" + "b34973551257713b753632efba348169" + "abc90a68f42611a40126d7cb21b58695" + "568186f7e569d2ff0f9e745d0487dd2e" + "b997cafc5abf9dd102e62ff66cba87", + "e301345a41a39a4d72fff8df69c98075" + "a0cc082b802fc9b2b6bc503f926b65bd" + "df7f4c8f1cb49f6396afc8a70abe6d8a" + "ef0db478d4c6b2970076c6a0484fe76d" + "76b3a97625d79f1ce240e7c576750d29" + "5528286f719b413de9ada3e8eb78ed57" + "3603ce30d8bb761785dc30dbc320869e" + "1a00", + ), +] + + +@pytest.mark.parametrize( + "generator,private_key,public_key,message,signature", + TEST_VECTORS, +) +def test_vectors(generator, private_key, public_key, message, signature): + private_key = a2b_hex(private_key) + public_key = a2b_hex(public_key) + message = a2b_hex(message) + signature = a2b_hex(signature) + + sig_key = PrivateKey(generator, private_key) + ver_key = PublicKey(generator, public_key) + + assert sig_key.public_key().public_key() == ver_key.public_key() + + gen_sig = sig_key.sign(message) + + assert gen_sig == signature + + assert ver_key.verify(message, signature) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_ellipticcurve.py b/myenv/lib/python3.9/site-packages/ecdsa/test_ellipticcurve.py new file mode 100644 index 0000000..85faef4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_ellipticcurve.py @@ -0,0 +1,199 @@ +import pytest + +try: + import unittest2 as unittest +except ImportError: + import unittest +from hypothesis import given, settings +import hypothesis.strategies as st + +try: + from hypothesis import HealthCheck + + HC_PRESENT = True +except ImportError: # pragma: no cover + HC_PRESENT = False +from .numbertheory import inverse_mod +from .ellipticcurve import CurveFp, INFINITY, Point + + +HYP_SETTINGS = {} +if HC_PRESENT: # pragma: no branch + HYP_SETTINGS["suppress_health_check"] = [HealthCheck.too_slow] + HYP_SETTINGS["deadline"] = 5000 + + +# NIST Curve P-192: +p = 6277101735386680763835789423207666416083908700390324961279 +r = 6277101735386680763835789423176059013767194773182842284081 +# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5 +# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65 +b = 0x64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1 +Gx = 0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012 +Gy = 0x07192B95FFC8DA78631011ED6B24CDD573F977A11E794811 + +c192 = CurveFp(p, -3, b) +p192 = Point(c192, Gx, Gy, r) + +c_23 = CurveFp(23, 1, 1) +g_23 = Point(c_23, 13, 7, 7) + + +HYP_SLOW_SETTINGS = dict(HYP_SETTINGS) +HYP_SLOW_SETTINGS["max_examples"] = 10 + + +@settings(**HYP_SLOW_SETTINGS) +@given(st.integers(min_value=1, max_value=r + 1)) +def test_p192_mult_tests(multiple): + inv_m = inverse_mod(multiple, r) + + p1 = p192 * multiple + assert p1 * inv_m == p192 + + +def add_n_times(point, n): + ret = INFINITY + i 
= 0
+    while i <= n:
+        yield ret
+        ret = ret + point
+        i += 1
+
+
+# From X9.62 I.1 (p. 96):
+@pytest.mark.parametrize(
+    "p, m, check",
+    [(g_23, n, exp) for n, exp in enumerate(add_n_times(g_23, 8))],
+    ids=["g_23 test with mult {0}".format(i) for i in range(9)],
+)
+def test_add_and_mult_equivalence(p, m, check):
+    assert p * m == check
+
+
+class TestCurve(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.c_23 = CurveFp(23, 1, 1)
+
+    def test_equality_curves(self):
+        self.assertEqual(self.c_23, CurveFp(23, 1, 1))
+
+    def test_inequality_curves(self):
+        c192 = CurveFp(p, -3, b)
+        self.assertNotEqual(self.c_23, c192)
+
+    def test_usability_in_a_hashed_collection_curves(self):
+        {self.c_23: None}
+
+    def test_hashability_curves(self):
+        hash(self.c_23)
+
+    def test_conflation_curves(self):
+        ne1, ne2, ne3 = CurveFp(24, 1, 1), CurveFp(23, 2, 1), CurveFp(23, 1, 2)
+        eq1, eq2, eq3 = CurveFp(23, 1, 1), CurveFp(23, 1, 1), self.c_23
+        self.assertEqual(len(set((c_23, eq1, eq2, eq3))), 1)
+        self.assertEqual(len(set((c_23, ne1, ne2, ne3))), 4)
+        self.assertDictEqual({c_23: None}, {eq1: None})
+        self.assertIn(eq2, {eq3: None})
+
+
+class TestPoint(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.c_23 = CurveFp(23, 1, 1)
+        cls.g_23 = Point(cls.c_23, 13, 7, 7)
+
+        p = 6277101735386680763835789423207666416083908700390324961279
+        r = 6277101735386680763835789423176059013767194773182842284081
+        # s = 0x3045ae6fc8422f64ed579528d38120eae12196d5
+        # c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
+        b = 0x64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1
+        Gx = 0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012
+        Gy = 0x07192B95FFC8DA78631011ED6B24CDD573F977A11E794811
+
+        cls.c192 = CurveFp(p, -3, b)
+        cls.p192 = Point(cls.c192, Gx, Gy, r)
+
+    def test_p192(self):
+        # Checking against some sample computations presented
+        # in X9.62:
+        d = 651056770906015076056810763456358567190100156695615665659
+        Q = d * self.p192
+        self.assertEqual(
+            Q.x(), 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5
+        )
+
+        k = 6140507067065001063065065565667405560006161556565665656654
+        R = k * self.p192
+        self.assertEqual(
+            R.x(), 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD
+        )
+        self.assertEqual(
+            R.y(), 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835
+        )
+
+        u1 = 2563697409189434185194736134579731015366492496392189760599
+        u2 = 6266643813348617967186477710235785849136406323338782220568
+        temp = u1 * self.p192 + u2 * Q
+        self.assertEqual(
+            temp.x(), 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD
+        )
+        self.assertEqual(
+            temp.y(), 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835
+        )
+
+    def test_double_infinity(self):
+        p1 = INFINITY
+        p3 = p1.double()
+        self.assertEqual(p1, p3)
+        self.assertEqual(p3.x(), p1.x())
+        self.assertEqual(p3.y(), p1.y())
+
+    def test_double(self):
+        x1, y1, x3, y3 = (3, 10, 7, 12)
+
+        p1 = Point(self.c_23, x1, y1)
+        p3 = p1.double()
+        self.assertEqual(p3.x(), x3)
+        self.assertEqual(p3.y(), y3)
+
+    def test_multiply(self):
+        x1, y1, m, x3, y3 = (3, 10, 2, 7, 12)
+        p1 = Point(self.c_23, x1, y1)
+        p3 = p1 * m
+        self.assertEqual(p3.x(), x3)
+        self.assertEqual(p3.y(), y3)
+
+    # Trivial tests from X9.62 B.3:
+    def test_add(self):
+        """We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
+
+        x1, y1, x2, y2, x3, y3 = (3, 10, 9, 7, 17, 20)
+        p1 = Point(self.c_23, x1, y1)
+        p2 = Point(self.c_23, x2, y2)
+        p3 = p1 + p2
+        self.assertEqual(p3.x(), x3)
+        self.assertEqual(p3.y(), y3)
+
+    def test_add_as_double(self):
+        """We expect that on curve c,
(x1,y1) + (x2, y2 ) = (x3, y3).""" + + x1, y1, x2, y2, x3, y3 = (3, 10, 3, 10, 7, 12) + p1 = Point(self.c_23, x1, y1) + p2 = Point(self.c_23, x2, y2) + p3 = p1 + p2 + self.assertEqual(p3.x(), x3) + self.assertEqual(p3.y(), y3) + + def test_equality_points(self): + self.assertEqual(self.g_23, Point(self.c_23, 13, 7, 7)) + + def test_inequality_points(self): + c = CurveFp(100, -3, 100) + p = Point(c, 100, 100, 100) + self.assertNotEqual(self.g_23, p) + + def test_inequality_points_diff_types(self): + c = CurveFp(100, -3, 100) + self.assertNotEqual(self.g_23, c) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_jacobi.py b/myenv/lib/python3.9/site-packages/ecdsa/test_jacobi.py new file mode 100644 index 0000000..1f52804 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_jacobi.py @@ -0,0 +1,657 @@ +import pickle + +try: + import unittest2 as unittest +except ImportError: + import unittest + +import os +import sys +import signal +import pytest +import threading +import platform +import hypothesis.strategies as st +from hypothesis import given, assume, settings, example + +from .ellipticcurve import CurveFp, PointJacobi, INFINITY +from .ecdsa import ( + generator_256, + curve_256, + generator_224, + generator_brainpoolp160r1, + curve_brainpoolp160r1, + generator_112r2, +) +from .numbertheory import inverse_mod +from .util import randrange + + +NO_OLD_SETTINGS = {} +if sys.version_info > (2, 7): # pragma: no branch + NO_OLD_SETTINGS["deadline"] = 5000 + + +class TestJacobi(unittest.TestCase): + def test___init__(self): + curve = object() + x = 2 + y = 3 + z = 1 + order = 4 + pj = PointJacobi(curve, x, y, z, order) + + self.assertEqual(pj.order(), order) + self.assertIs(pj.curve(), curve) + self.assertEqual(pj.x(), x) + self.assertEqual(pj.y(), y) + + def test_add_with_different_curves(self): + p_a = PointJacobi.from_affine(generator_256) + p_b = PointJacobi.from_affine(generator_224) + + with self.assertRaises(ValueError): + p_a + p_b + + def test_compare_different_curves(self): + self.assertNotEqual(generator_256, generator_224) + + def test_equality_with_non_point(self): + pj = PointJacobi.from_affine(generator_256) + + self.assertNotEqual(pj, "value") + + def test_conversion(self): + pj = PointJacobi.from_affine(generator_256) + pw = pj.to_affine() + + self.assertEqual(generator_256, pw) + + def test_single_double(self): + pj = PointJacobi.from_affine(generator_256) + pw = generator_256.double() + + pj = pj.double() + + self.assertEqual(pj.x(), pw.x()) + self.assertEqual(pj.y(), pw.y()) + + def test_double_with_zero_point(self): + pj = PointJacobi(curve_256, 0, 0, 1) + + pj = pj.double() + + self.assertIs(pj, INFINITY) + + def test_double_with_zero_equivalent_point(self): + pj = PointJacobi(curve_256, 0, curve_256.p(), 1) + + pj = pj.double() + + self.assertIs(pj, INFINITY) + + def test_double_with_zero_equivalent_point_non_1_z(self): + pj = PointJacobi(curve_256, 0, curve_256.p(), 2) + + pj = pj.double() + + self.assertIs(pj, INFINITY) + + def test_compare_with_affine_point(self): + pj = PointJacobi.from_affine(generator_256) + pa = pj.to_affine() + + self.assertEqual(pj, pa) + self.assertEqual(pa, pj) + + def test_to_affine_with_zero_point(self): + pj = PointJacobi(curve_256, 0, 0, 1) + + pa = pj.to_affine() + + self.assertIs(pa, INFINITY) + + def test_add_with_affine_point(self): + pj = PointJacobi.from_affine(generator_256) + pa = pj.to_affine() + + s = pj + pa + + self.assertEqual(s, pj.double()) + + def test_radd_with_affine_point(self): + pj = 
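+    # Background for these conversion tests (illustrative, not library
+    # code): a Jacobian triple (X, Y, Z) denotes the affine point
+    # (X / Z**2, Y / Z**3) mod p, so a point whose Y is 0 mod p has a
+    # vertical tangent and doubles straight to INFINITY, as asserted
+    # above. Sketch of the mapping:
+    #
+    #     def jacobi_to_affine_sketch(x, y, z, p):
+    #         iz = pow(z, -1, p)
+    #         return x * iz * iz % p, y * iz * iz * iz % p
+    #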
PointJacobi.from_affine(generator_256) + pa = pj.to_affine() + + s = pa + pj + + self.assertEqual(s, pj.double()) + + def test_add_with_infinity(self): + pj = PointJacobi.from_affine(generator_256) + + s = pj + INFINITY + + self.assertEqual(s, pj) + + def test_add_zero_point_to_affine(self): + pa = PointJacobi.from_affine(generator_256).to_affine() + pj = PointJacobi(curve_256, 0, 0, 1) + + s = pj + pa + + self.assertIs(s, pa) + + def test_multiply_by_zero(self): + pj = PointJacobi.from_affine(generator_256) + + pj = pj * 0 + + self.assertIs(pj, INFINITY) + + def test_zero_point_multiply_by_one(self): + pj = PointJacobi(curve_256, 0, 0, 1) + + pj = pj * 1 + + self.assertIs(pj, INFINITY) + + def test_multiply_by_one(self): + pj = PointJacobi.from_affine(generator_256) + pw = generator_256 * 1 + + pj = pj * 1 + + self.assertEqual(pj.x(), pw.x()) + self.assertEqual(pj.y(), pw.y()) + + def test_multiply_by_two(self): + pj = PointJacobi.from_affine(generator_256) + pw = generator_256 * 2 + + pj = pj * 2 + + self.assertEqual(pj.x(), pw.x()) + self.assertEqual(pj.y(), pw.y()) + + def test_rmul_by_two(self): + pj = PointJacobi.from_affine(generator_256) + pw = generator_256 * 2 + + pj = 2 * pj + + self.assertEqual(pj, pw) + + def test_compare_non_zero_with_infinity(self): + pj = PointJacobi.from_affine(generator_256) + + self.assertNotEqual(pj, INFINITY) + + def test_compare_zero_point_with_infinity(self): + pj = PointJacobi(curve_256, 0, 0, 1) + + self.assertEqual(pj, INFINITY) + + def test_compare_double_with_multiply(self): + pj = PointJacobi.from_affine(generator_256) + dbl = pj.double() + mlpl = pj * 2 + + self.assertEqual(dbl, mlpl) + + @settings(max_examples=10) + @given( + st.integers( + min_value=0, max_value=int(generator_brainpoolp160r1.order()) + ) + ) + def test_multiplications(self, mul): + pj = PointJacobi.from_affine(generator_brainpoolp160r1) + pw = pj.to_affine() * mul + + pj = pj * mul + + self.assertEqual((pj.x(), pj.y()), (pw.x(), pw.y())) + self.assertEqual(pj, pw) + + @settings(max_examples=10) + @given( + st.integers( + min_value=0, max_value=int(generator_brainpoolp160r1.order()) + ) + ) + @example(0) + @example(int(generator_brainpoolp160r1.order())) + def test_precompute(self, mul): + precomp = generator_brainpoolp160r1 + self.assertTrue(precomp._PointJacobi__precompute) + pj = PointJacobi.from_affine(generator_brainpoolp160r1) + + a = precomp * mul + b = pj * mul + + self.assertEqual(a, b) + + @settings(max_examples=10) + @given( + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + ) + @example(3, 3) + def test_add_scaled_points(self, a_mul, b_mul): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1) + a = PointJacobi.from_affine(j_g * a_mul) + b = PointJacobi.from_affine(j_g * b_mul) + + c = a + b + + self.assertEqual(c, j_g * (a_mul + b_mul)) + + @settings(max_examples=10) + @given( + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers(min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1)), + ) + def test_add_one_scaled_point(self, a_mul, b_mul, new_z): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1) + a = PointJacobi.from_affine(j_g * a_mul) + b = PointJacobi.from_affine(j_g * b_mul) + + p = curve_brainpoolp160r1.p() + + assume(inverse_mod(new_z, p)) + + new_zz = new_z * new_z % p + + b = PointJacobi( + 
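+            # (Illustrative note: multiplying X by new_z**2 and Y by
+            # new_z**3 while setting Z = new_z leaves the denoted affine
+            # point unchanged, so the rebuilt operand must still sum to
+            # j_g * (a_mul + b_mul).)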
curve_brainpoolp160r1, + b.x() * new_zz % p, + b.y() * new_zz * new_z % p, + new_z, + ) + + c = a + b + + self.assertEqual(c, j_g * (a_mul + b_mul)) + + @settings(max_examples=10) + @given( + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers(min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1)), + ) + @example(1, 1, 1) + @example(3, 3, 3) + @example(2, int(generator_brainpoolp160r1.order() - 2), 1) + @example(2, int(generator_brainpoolp160r1.order() - 2), 3) + def test_add_same_scale_points(self, a_mul, b_mul, new_z): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1) + a = PointJacobi.from_affine(j_g * a_mul) + b = PointJacobi.from_affine(j_g * b_mul) + + p = curve_brainpoolp160r1.p() + + assume(inverse_mod(new_z, p)) + + new_zz = new_z * new_z % p + + a = PointJacobi( + curve_brainpoolp160r1, + a.x() * new_zz % p, + a.y() * new_zz * new_z % p, + new_z, + ) + b = PointJacobi( + curve_brainpoolp160r1, + b.x() * new_zz % p, + b.y() * new_zz * new_z % p, + new_z, + ) + + c = a + b + + self.assertEqual(c, j_g * (a_mul + b_mul)) + + def test_add_same_scale_points_static(self): + j_g = generator_brainpoolp160r1 + p = curve_brainpoolp160r1.p() + a = j_g * 11 + a.scale() + z1 = 13 + x = PointJacobi( + curve_brainpoolp160r1, + a.x() * z1**2 % p, + a.y() * z1**3 % p, + z1, + ) + y = PointJacobi( + curve_brainpoolp160r1, + a.x() * z1**2 % p, + a.y() * z1**3 % p, + z1, + ) + + c = a + a + + self.assertEqual(c, x + y) + + @settings(max_examples=14) + @given( + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.integers( + min_value=1, max_value=int(generator_brainpoolp160r1.order()) + ), + st.lists( + st.integers( + min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1) + ), + min_size=2, + max_size=2, + unique=True, + ), + ) + @example(2, 2, [2, 1]) + @example(2, 2, [2, 3]) + @example(2, int(generator_brainpoolp160r1.order() - 2), [2, 3]) + @example(2, int(generator_brainpoolp160r1.order() - 2), [2, 1]) + def test_add_different_scale_points(self, a_mul, b_mul, new_z): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1) + a = PointJacobi.from_affine(j_g * a_mul) + b = PointJacobi.from_affine(j_g * b_mul) + + p = curve_brainpoolp160r1.p() + + assume(inverse_mod(new_z[0], p)) + assume(inverse_mod(new_z[1], p)) + + new_zz0 = new_z[0] * new_z[0] % p + new_zz1 = new_z[1] * new_z[1] % p + + a = PointJacobi( + curve_brainpoolp160r1, + a.x() * new_zz0 % p, + a.y() * new_zz0 * new_z[0] % p, + new_z[0], + ) + b = PointJacobi( + curve_brainpoolp160r1, + b.x() * new_zz1 % p, + b.y() * new_zz1 * new_z[1] % p, + new_z[1], + ) + + c = a + b + + self.assertEqual(c, j_g * (a_mul + b_mul)) + + def test_add_different_scale_points_static(self): + j_g = generator_brainpoolp160r1 + p = curve_brainpoolp160r1.p() + a = j_g * 11 + a.scale() + z1 = 13 + x = PointJacobi( + curve_brainpoolp160r1, + a.x() * z1**2 % p, + a.y() * z1**3 % p, + z1, + ) + z2 = 29 + y = PointJacobi( + curve_brainpoolp160r1, + a.x() * z2**2 % p, + a.y() * z2**3 % p, + z2, + ) + + c = a + a + + self.assertEqual(c, x + y) + + def test_add_point_3_times(self): + j_g = PointJacobi.from_affine(generator_256) + + self.assertEqual(j_g * 3, j_g + j_g + j_g) + + def test_mul_without_order(self): + j_g = PointJacobi(curve_256, generator_256.x(), generator_256.y(), 1) + + self.assertEqual(j_g * generator_256.order(), INFINITY) + + def test_mul_add_inf(self): + j_g = 
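+    # The mul_add tests that follow exercise a fused double-scalar
+    # multiplication (the Shamir/Strauss trick). Its reference semantics,
+    # spelled out as plain code:
+    #
+    #     def naive_mul_add(P, a, Q, b):
+    #         return P * a + Q * b  # what P.mul_add(a, Q, b) must equal
+    #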
PointJacobi.from_affine(generator_256) + + self.assertEqual(j_g, j_g.mul_add(1, INFINITY, 1)) + + def test_mul_add_same(self): + j_g = PointJacobi.from_affine(generator_256) + + self.assertEqual(j_g * 2, j_g.mul_add(1, j_g, 1)) + + def test_mul_add_precompute(self): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1, True) + b = PointJacobi.from_affine(j_g * 255, True) + + self.assertEqual(j_g * 256, j_g + b) + self.assertEqual(j_g * (5 + 255 * 7), j_g * 5 + b * 7) + self.assertEqual(j_g * (5 + 255 * 7), j_g.mul_add(5, b, 7)) + + def test_mul_add_precompute_large(self): + j_g = PointJacobi.from_affine(generator_brainpoolp160r1, True) + b = PointJacobi.from_affine(j_g * 255, True) + + self.assertEqual(j_g * 256, j_g + b) + self.assertEqual( + j_g * (0xFF00 + 255 * 0xF0F0), j_g * 0xFF00 + b * 0xF0F0 + ) + self.assertEqual( + j_g * (0xFF00 + 255 * 0xF0F0), j_g.mul_add(0xFF00, b, 0xF0F0) + ) + + def test_mul_add_to_mul(self): + j_g = PointJacobi.from_affine(generator_256) + + a = j_g * 3 + b = j_g.mul_add(2, j_g, 1) + + self.assertEqual(a, b) + + def test_mul_add_differnt(self): + j_g = PointJacobi.from_affine(generator_256) + + w_a = j_g * 2 + + self.assertEqual(j_g.mul_add(1, w_a, 1), j_g * 3) + + def test_mul_add_slightly_different(self): + j_g = PointJacobi.from_affine(generator_256) + + w_a = j_g * 2 + w_b = j_g * 3 + + self.assertEqual(w_a.mul_add(1, w_b, 3), w_a * 1 + w_b * 3) + + def test_mul_add(self): + j_g = PointJacobi.from_affine(generator_256) + + w_a = generator_256 * 255 + w_b = generator_256 * (0xA8 * 0xF0) + j_b = j_g * 0xA8 + + ret = j_g.mul_add(255, j_b, 0xF0) + + self.assertEqual(ret.to_affine(), w_a + w_b) + + def test_mul_add_large(self): + j_g = PointJacobi.from_affine(generator_256) + b = PointJacobi.from_affine(j_g * 255) + + self.assertEqual(j_g * 256, j_g + b) + self.assertEqual( + j_g * (0xFF00 + 255 * 0xF0F0), j_g * 0xFF00 + b * 0xF0F0 + ) + self.assertEqual( + j_g * (0xFF00 + 255 * 0xF0F0), j_g.mul_add(0xFF00, b, 0xF0F0) + ) + + def test_mul_add_with_infinity_as_result(self): + j_g = PointJacobi.from_affine(generator_256) + + order = generator_256.order() + + b = PointJacobi.from_affine(generator_256 * 256) + + self.assertEqual(j_g.mul_add(order % 256, b, order // 256), INFINITY) + + def test_mul_add_without_order(self): + j_g = PointJacobi(curve_256, generator_256.x(), generator_256.y(), 1) + + order = generator_256.order() + + w_b = generator_256 * 34 + w_b.scale() + + b = PointJacobi(curve_256, w_b.x(), w_b.y(), 1) + + self.assertEqual(j_g.mul_add(order % 34, b, order // 34), INFINITY) + + def test_mul_add_with_doubled_negation_of_itself(self): + j_g = PointJacobi.from_affine(generator_256 * 17) + + dbl_neg = 2 * (-j_g) + + self.assertEqual(j_g.mul_add(4, dbl_neg, 2), INFINITY) + + def test_equality(self): + pj1 = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1) + pj2 = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1) + self.assertEqual(pj1, pj2) + + def test_equality_with_invalid_object(self): + j_g = PointJacobi.from_affine(generator_256) + + self.assertNotEqual(j_g, 12) + + def test_equality_with_wrong_curves(self): + p_a = PointJacobi.from_affine(generator_256) + p_b = PointJacobi.from_affine(generator_224) + + self.assertNotEqual(p_a, p_b) + + def test_pickle(self): + pj = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1) + self.assertEqual(pickle.loads(pickle.dumps(pj)), pj) + + @settings(**NO_OLD_SETTINGS) + @given(st.integers(min_value=1, max_value=10)) + def test_multithreading(self, thread_num): + # 
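+        # (Reader's note: _PointJacobi__precompute is Python's
+        # name-mangled spelling of PointJacobi's private __precompute
+        # attribute; the threading tests peek at it to confirm the table
+        # is built exactly once and shared across threads.)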
ensure that generator's precomputation table is filled + generator_112r2 * 2 + + # create a fresh point that doesn't have a filled precomputation table + gen = generator_112r2 + gen = PointJacobi(gen.curve(), gen.x(), gen.y(), 1, gen.order(), True) + + self.assertEqual(gen._PointJacobi__precompute, []) + + def runner(generator): + order = generator.order() + for _ in range(10): + generator * randrange(order) + + threads = [] + for _ in range(thread_num): + threads.append(threading.Thread(target=runner, args=(gen,))) + + for t in threads: + t.start() + + runner(gen) + + for t in threads: + t.join() + + self.assertEqual( + gen._PointJacobi__precompute, + generator_112r2._PointJacobi__precompute, + ) + + @pytest.mark.skipif( + platform.system() == "Windows", + reason="there are no signals on Windows", + ) + def test_multithreading_with_interrupts(self): + thread_num = 10 + # ensure that generator's precomputation table is filled + generator_112r2 * 2 + + # create a fresh point that doesn't have a filled precomputation table + gen = generator_112r2 + gen = PointJacobi(gen.curve(), gen.x(), gen.y(), 1, gen.order(), True) + + self.assertEqual(gen._PointJacobi__precompute, []) + + def runner(generator): + order = generator.order() + for _ in range(50): + generator * randrange(order) + + def interrupter(barrier_start, barrier_end, lock_exit): + # wait until MainThread can handle KeyboardInterrupt + barrier_start.release() + barrier_end.acquire() + os.kill(os.getpid(), signal.SIGINT) + lock_exit.release() + + threads = [] + for _ in range(thread_num): + threads.append(threading.Thread(target=runner, args=(gen,))) + + barrier_start = threading.Lock() + barrier_start.acquire() + barrier_end = threading.Lock() + barrier_end.acquire() + lock_exit = threading.Lock() + lock_exit.acquire() + + threads.append( + threading.Thread( + target=interrupter, + args=(barrier_start, barrier_end, lock_exit), + ) + ) + + for t in threads: + t.start() + + with self.assertRaises(KeyboardInterrupt): + # signal to interrupter that we can now handle the signal + barrier_start.acquire() + barrier_end.release() + runner(gen) + # use the lock to ensure we never go past the scope of + # assertRaises before the os.kill is called + lock_exit.acquire() + + for t in threads: + t.join() + + self.assertEqual( + gen._PointJacobi__precompute, + generator_112r2._PointJacobi__precompute, + ) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_keys.py b/myenv/lib/python3.9/site-packages/ecdsa/test_keys.py new file mode 100644 index 0000000..25386b1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_keys.py @@ -0,0 +1,959 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest + +try: + buffer +except NameError: + buffer = memoryview + +import os +import array +import pytest +import hashlib + +from .keys import VerifyingKey, SigningKey, MalformedPointError +from .der import ( + unpem, + UnexpectedDER, + encode_sequence, + encode_oid, + encode_bitstring, +) +from .util import ( + sigencode_string, + sigencode_der, + sigencode_strings, + sigdecode_string, + sigdecode_der, + sigdecode_strings, +) +from .curves import NIST256p, Curve, BRAINPOOLP160r1, Ed25519, Ed448 +from .ellipticcurve import Point, PointJacobi, CurveFp, INFINITY +from .ecdsa import generator_brainpoolp160r1 + + +class TestVerifyingKeyFromString(unittest.TestCase): + """ + Verify that ecdsa.keys.VerifyingKey.from_string() can be used with + bytes-like objects + """ + + @classmethod + def setUpClass(cls): + cls.key_bytes = ( + 
b"\x04L\xa2\x95\xdb\xc7Z\xd7\x1f\x93\nz\xcf\x97\xcf" + b"\xd7\xc2\xd9o\xfe8}X!\xae\xd4\xfah\xfa^\rpI\xba\xd1" + b"Y\xfb\x92xa\xebo+\x9cG\xfav\xca" + ) + cls.vk = VerifyingKey.from_string(cls.key_bytes) + + def test_bytes(self): + self.assertIsNotNone(self.vk) + self.assertIsInstance(self.vk, VerifyingKey) + self.assertEqual( + self.vk.pubkey.point.x(), + 105419898848891948935835657980914000059957975659675736097, + ) + self.assertEqual( + self.vk.pubkey.point.y(), + 4286866841217412202667522375431381222214611213481632495306, + ) + + def test_bytes_memoryview(self): + vk = VerifyingKey.from_string(buffer(self.key_bytes)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytearray(self): + vk = VerifyingKey.from_string(bytearray(self.key_bytes)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytesarray_memoryview(self): + vk = VerifyingKey.from_string(buffer(bytearray(self.key_bytes))) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_bytes(self): + arr = array.array("B", self.key_bytes) + vk = VerifyingKey.from_string(arr) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_bytes_memoryview(self): + arr = array.array("B", self.key_bytes) + vk = VerifyingKey.from_string(buffer(arr)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_ints(self): + arr = array.array("I", self.key_bytes) + vk = VerifyingKey.from_string(arr) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_ints_memoryview(self): + arr = array.array("I", self.key_bytes) + vk = VerifyingKey.from_string(buffer(arr)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytes_uncompressed(self): + vk = VerifyingKey.from_string(b"\x04" + self.key_bytes) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytearray_uncompressed(self): + vk = VerifyingKey.from_string(bytearray(b"\x04" + self.key_bytes)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytes_compressed(self): + vk = VerifyingKey.from_string(b"\x02" + self.key_bytes[:24]) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytearray_compressed(self): + vk = VerifyingKey.from_string(bytearray(b"\x02" + self.key_bytes[:24])) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + +class TestVerifyingKeyFromDer(unittest.TestCase): + """ + Verify that ecdsa.keys.VerifyingKey.from_der() can be used with + bytes-like objects. 
+ """ + + @classmethod + def setUpClass(cls): + prv_key_str = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n" + "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n" + "bA==\n" + "-----END EC PRIVATE KEY-----\n" + ) + key_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MEkwEwYHKoZIzj0CAQYIKoZIzj0DAQEDMgAEuIF30ITvF/XkVjlAgCg2D59ZtKTX\n" + "Jk5i2gZR3OR6NaTFtFz1FZNCOotVe5wgmfNs\n" + "-----END PUBLIC KEY-----\n" + ) + cls.key_pem = key_str + + cls.key_bytes = unpem(key_str) + assert isinstance(cls.key_bytes, bytes) + cls.vk = VerifyingKey.from_pem(key_str) + cls.sk = SigningKey.from_pem(prv_key_str) + + key_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE4H3iRbG4TSrsSRb/gusPQB/4YcN8\n" + "Poqzgjau4kfxBPyZimeRfuY/9g/wMmPuhGl4BUve51DsnKJFRr8psk0ieA==\n" + "-----END PUBLIC KEY-----\n" + ) + cls.vk2 = VerifyingKey.from_pem(key_str) + + cls.sk2 = SigningKey.generate(vk.curve) + + def test_load_key_with_explicit_parameters(self): + pub_key_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MIIBSzCCAQMGByqGSM49AgEwgfcCAQEwLAYHKoZIzj0BAQIhAP////8AAAABAAAA\n" + "AAAAAAAAAAAA////////////////MFsEIP////8AAAABAAAAAAAAAAAAAAAA////\n" + "///////////8BCBaxjXYqjqT57PrvVV2mIa8ZR0GsMxTsPY7zjw+J9JgSwMVAMSd\n" + "NgiG5wSTamZ44ROdJreBn36QBEEEaxfR8uEsQkf4vOblY6RA8ncDfYEt6zOg9KE5\n" + "RdiYwpZP40Li/hp/m47n60p8D54WK84zV2sxXs7LtkBoN79R9QIhAP////8AAAAA\n" + "//////////+85vqtpxeehPO5ysL8YyVRAgEBA0IABIr1UkgYs5jmbFc7it1/YI2X\n" + "T//IlaEjMNZft1owjqpBYH2ErJHk4U5Pp4WvWq1xmHwIZlsH7Ig4KmefCfR6SmU=\n" + "-----END PUBLIC KEY-----" + ) + pk = VerifyingKey.from_pem(pub_key_str) + + pk_exp = VerifyingKey.from_string( + b"\x04\x8a\xf5\x52\x48\x18\xb3\x98\xe6\x6c\x57\x3b\x8a\xdd\x7f" + b"\x60\x8d\x97\x4f\xff\xc8\x95\xa1\x23\x30\xd6\x5f\xb7\x5a\x30" + b"\x8e\xaa\x41\x60\x7d\x84\xac\x91\xe4\xe1\x4e\x4f\xa7\x85\xaf" + b"\x5a\xad\x71\x98\x7c\x08\x66\x5b\x07\xec\x88\x38\x2a\x67\x9f" + b"\x09\xf4\x7a\x4a\x65", + curve=NIST256p, + ) + self.assertEqual(pk, pk_exp) + + def test_load_key_with_explicit_with_explicit_disabled(self): + pub_key_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MIIBSzCCAQMGByqGSM49AgEwgfcCAQEwLAYHKoZIzj0BAQIhAP////8AAAABAAAA\n" + "AAAAAAAAAAAA////////////////MFsEIP////8AAAABAAAAAAAAAAAAAAAA////\n" + "///////////8BCBaxjXYqjqT57PrvVV2mIa8ZR0GsMxTsPY7zjw+J9JgSwMVAMSd\n" + "NgiG5wSTamZ44ROdJreBn36QBEEEaxfR8uEsQkf4vOblY6RA8ncDfYEt6zOg9KE5\n" + "RdiYwpZP40Li/hp/m47n60p8D54WK84zV2sxXs7LtkBoN79R9QIhAP////8AAAAA\n" + "//////////+85vqtpxeehPO5ysL8YyVRAgEBA0IABIr1UkgYs5jmbFc7it1/YI2X\n" + "T//IlaEjMNZft1owjqpBYH2ErJHk4U5Pp4WvWq1xmHwIZlsH7Ig4KmefCfR6SmU=\n" + "-----END PUBLIC KEY-----" + ) + with self.assertRaises(UnexpectedDER): + VerifyingKey.from_pem( + pub_key_str, valid_curve_encodings=["named_curve"] + ) + + def test_load_key_with_disabled_format(self): + with self.assertRaises(MalformedPointError) as e: + VerifyingKey.from_der(self.key_bytes, valid_encodings=["raw"]) + + self.assertIn("enabled (raw) encodings", str(e.exception)) + + def test_custom_hashfunc(self): + vk = VerifyingKey.from_der(self.key_bytes, hashlib.sha256) + + self.assertIs(vk.default_hashfunc, hashlib.sha256) + + def test_from_pem_with_custom_hashfunc(self): + vk = VerifyingKey.from_pem(self.key_pem, hashlib.sha256) + + self.assertIs(vk.default_hashfunc, hashlib.sha256) + + def test_bytes(self): + vk = VerifyingKey.from_der(self.key_bytes) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytes_memoryview(self): + vk = 
VerifyingKey.from_der(buffer(self.key_bytes)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytearray(self): + vk = VerifyingKey.from_der(bytearray(self.key_bytes)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_bytesarray_memoryview(self): + vk = VerifyingKey.from_der(buffer(bytearray(self.key_bytes))) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_bytes(self): + arr = array.array("B", self.key_bytes) + vk = VerifyingKey.from_der(arr) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_array_array_of_bytes_memoryview(self): + arr = array.array("B", self.key_bytes) + vk = VerifyingKey.from_der(buffer(arr)) + + self.assertEqual(self.vk.to_string(), vk.to_string()) + + def test_equality_on_verifying_keys(self): + self.assertEqual(self.vk, self.sk.get_verifying_key()) + + def test_inequality_on_verifying_keys(self): + self.assertNotEqual(self.vk, self.vk2) + + def test_inequality_on_verifying_keys_not_implemented(self): + self.assertNotEqual(self.vk, None) + + def test_VerifyingKey_inequality_on_same_curve(self): + self.assertNotEqual(self.vk, self.sk2.verifying_key) + + def test_SigningKey_inequality_on_same_curve(self): + self.assertNotEqual(self.sk, self.sk2) + + def test_inequality_on_wrong_types(self): + self.assertNotEqual(self.vk, self.sk) + + def test_from_public_point_old(self): + pj = self.vk.pubkey.point + point = Point(pj.curve(), pj.x(), pj.y()) + + vk = VerifyingKey.from_public_point(point, self.vk.curve) + + self.assertEqual(vk, self.vk) + + def test_ed25519_VerifyingKey_repr__(self): + sk = SigningKey.from_string(Ed25519.generator.to_bytes(), Ed25519) + string = repr(sk.verifying_key) + + self.assertEqual( + "VerifyingKey.from_string(" + "bytearray(b'K\\x0c\\xfbZH\\x8e\\x8c\\x8c\\x07\\xee\\xda\\xfb" + "\\xe1\\x97\\xcd\\x90\\x18\\x02\\x15h]\\xfe\\xbe\\xcbB\\xba\\xe6r" + "\\x10\\xae\\xf1P'), Ed25519, None)", + string, + ) + + def test_edwards_from_public_point(self): + point = Ed25519.generator + with self.assertRaises(ValueError) as e: + VerifyingKey.from_public_point(point, Ed25519) + + self.assertIn("incompatible with Edwards", str(e.exception)) + + def test_edwards_precompute_no_side_effect(self): + sk = SigningKey.from_string(Ed25519.generator.to_bytes(), Ed25519) + vk = sk.verifying_key + vk2 = VerifyingKey.from_string(vk.to_string(), Ed25519) + vk.precompute() + + self.assertEqual(vk, vk2) + + def test_parse_malfomed_eddsa_der_pubkey(self): + der_str = encode_sequence( + encode_sequence(encode_oid(*Ed25519.oid)), + encode_bitstring(bytes(Ed25519.generator.to_bytes()), 0), + encode_bitstring(b"\x00", 0), + ) + + with self.assertRaises(UnexpectedDER) as e: + VerifyingKey.from_der(der_str) + + self.assertIn("trailing junk after public key", str(e.exception)) + + def test_edwards_from_public_key_recovery(self): + with self.assertRaises(ValueError) as e: + VerifyingKey.from_public_key_recovery(b"", b"", Ed25519) + + self.assertIn("unsupported for Edwards", str(e.exception)) + + def test_edwards_from_public_key_recovery_with_digest(self): + with self.assertRaises(ValueError) as e: + VerifyingKey.from_public_key_recovery_with_digest( + b"", b"", Ed25519 + ) + + self.assertIn("unsupported for Edwards", str(e.exception)) + + def test_load_ed25519_from_pem(self): + vk_pem = ( + "-----BEGIN PUBLIC KEY-----\n" + "MCowBQYDK2VwAyEAIwBQ0NZkIiiO41WJfm5BV42u3kQm7lYnvIXmCy8qy2U=\n" + "-----END PUBLIC KEY-----\n" + ) + + vk = VerifyingKey.from_pem(vk_pem) + + 
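+        # Illustrative contrast with test_parse_malfomed_eddsa_der_pubkey
+        # above: a well-formed EdDSA SubjectPublicKeyInfo is a SEQUENCE
+        # of exactly one AlgorithmIdentifier plus one BIT STRING, so the
+        # second BIT STRING in that test is the "trailing junk" rejected.
+        good_spki = encode_sequence(
+            encode_sequence(encode_oid(*Ed25519.oid)),
+            encode_bitstring(bytes(Ed25519.generator.to_bytes()), 0),
+        )
+        VerifyingKey.from_der(good_spki)  # decodes without error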
self.assertIsInstance(vk.curve, Curve) + self.assertIs(vk.curve, Ed25519) + + vk_str = ( + b"\x23\x00\x50\xd0\xd6\x64\x22\x28\x8e\xe3\x55\x89\x7e\x6e\x41\x57" + b"\x8d\xae\xde\x44\x26\xee\x56\x27\xbc\x85\xe6\x0b\x2f\x2a\xcb\x65" + ) + + vk_2 = VerifyingKey.from_string(vk_str, Ed25519) + + self.assertEqual(vk, vk_2) + + def test_export_ed255_to_pem(self): + vk_str = ( + b"\x23\x00\x50\xd0\xd6\x64\x22\x28\x8e\xe3\x55\x89\x7e\x6e\x41\x57" + b"\x8d\xae\xde\x44\x26\xee\x56\x27\xbc\x85\xe6\x0b\x2f\x2a\xcb\x65" + ) + + vk = VerifyingKey.from_string(vk_str, Ed25519) + + vk_pem = ( + b"-----BEGIN PUBLIC KEY-----\n" + b"MCowBQYDK2VwAyEAIwBQ0NZkIiiO41WJfm5BV42u3kQm7lYnvIXmCy8qy2U=\n" + b"-----END PUBLIC KEY-----\n" + ) + + self.assertEqual(vk_pem, vk.to_pem()) + + def test_ed25519_export_import(self): + sk = SigningKey.generate(Ed25519) + vk = sk.verifying_key + + vk2 = VerifyingKey.from_pem(vk.to_pem()) + + self.assertEqual(vk, vk2) + + def test_ed25519_sig_verify(self): + vk_pem = ( + "-----BEGIN PUBLIC KEY-----\n" + "MCowBQYDK2VwAyEAIwBQ0NZkIiiO41WJfm5BV42u3kQm7lYnvIXmCy8qy2U=\n" + "-----END PUBLIC KEY-----\n" + ) + + vk = VerifyingKey.from_pem(vk_pem) + + data = b"data\n" + + # signature created by OpenSSL 3.0.0 beta1 + sig = ( + b"\x64\x47\xab\x6a\x33\xcd\x79\x45\xad\x98\x11\x6c\xb9\xf2\x20\xeb" + b"\x90\xd6\x50\xe3\xc7\x8f\x9f\x60\x10\xec\x75\xe0\x2f\x27\xd3\x96" + b"\xda\xe8\x58\x7f\xe0\xfe\x46\x5c\x81\xef\x50\xec\x29\x9f\xae\xd5" + b"\xad\x46\x3c\x91\x68\x83\x4d\xea\x8d\xa8\x19\x04\x04\x79\x03\x0b" + ) + + self.assertTrue(vk.verify(sig, data)) + + def test_ed448_from_pem(self): + pem_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MEMwBQYDK2VxAzoAeQtetSu7CMEzE+XWB10Bg47LCA0giNikOxHzdp+tZ/eK/En0\n" + "dTdYD2ll94g58MhSnBiBQB9A1MMA\n" + "-----END PUBLIC KEY-----\n" + ) + + vk = VerifyingKey.from_pem(pem_str) + + self.assertIsInstance(vk.curve, Curve) + self.assertIs(vk.curve, Ed448) + + vk_str = ( + b"\x79\x0b\x5e\xb5\x2b\xbb\x08\xc1\x33\x13\xe5\xd6\x07\x5d\x01\x83" + b"\x8e\xcb\x08\x0d\x20\x88\xd8\xa4\x3b\x11\xf3\x76\x9f\xad\x67\xf7" + b"\x8a\xfc\x49\xf4\x75\x37\x58\x0f\x69\x65\xf7\x88\x39\xf0\xc8\x52" + b"\x9c\x18\x81\x40\x1f\x40\xd4\xc3\x00" + ) + + vk2 = VerifyingKey.from_string(vk_str, Ed448) + + self.assertEqual(vk, vk2) + + def test_ed448_to_pem(self): + vk_str = ( + b"\x79\x0b\x5e\xb5\x2b\xbb\x08\xc1\x33\x13\xe5\xd6\x07\x5d\x01\x83" + b"\x8e\xcb\x08\x0d\x20\x88\xd8\xa4\x3b\x11\xf3\x76\x9f\xad\x67\xf7" + b"\x8a\xfc\x49\xf4\x75\x37\x58\x0f\x69\x65\xf7\x88\x39\xf0\xc8\x52" + b"\x9c\x18\x81\x40\x1f\x40\xd4\xc3\x00" + ) + vk = VerifyingKey.from_string(vk_str, Ed448) + + vk_pem = ( + b"-----BEGIN PUBLIC KEY-----\n" + b"MEMwBQYDK2VxAzoAeQtetSu7CMEzE+XWB10Bg47LCA0giNikOxHzdp+tZ/eK/En0\n" + b"dTdYD2ll94g58MhSnBiBQB9A1MMA\n" + b"-----END PUBLIC KEY-----\n" + ) + + self.assertEqual(vk_pem, vk.to_pem()) + + def test_ed448_export_import(self): + sk = SigningKey.generate(Ed448) + vk = sk.verifying_key + + vk2 = VerifyingKey.from_pem(vk.to_pem()) + + self.assertEqual(vk, vk2) + + def test_ed448_sig_verify(self): + pem_str = ( + "-----BEGIN PUBLIC KEY-----\n" + "MEMwBQYDK2VxAzoAeQtetSu7CMEzE+XWB10Bg47LCA0giNikOxHzdp+tZ/eK/En0\n" + "dTdYD2ll94g58MhSnBiBQB9A1MMA\n" + "-----END PUBLIC KEY-----\n" + ) + + vk = VerifyingKey.from_pem(pem_str) + + data = b"data\n" + + # signature created by OpenSSL 3.0.0 beta1 + sig = ( + b"\x68\xed\x2c\x70\x35\x22\xca\x1c\x35\x03\xf3\xaa\x51\x33\x3d\x00" + b"\xc0\xae\xb0\x54\xc5\xdc\x7f\x6f\x30\x57\xb4\x1d\xcb\xe9\xec\xfa" + 
b"\xc8\x45\x3e\x51\xc1\xcb\x60\x02\x6a\xd0\x43\x11\x0b\x5f\x9b\xfa" + b"\x32\x88\xb2\x38\x6b\xed\xac\x09\x00\x78\xb1\x7b\x5d\x7e\xf8\x16" + b"\x31\xdd\x1b\x3f\x98\xa0\xce\x19\xe7\xd8\x1c\x9f\x30\xac\x2f\xd4" + b"\x1e\x55\xbf\x21\x98\xf6\x4c\x8c\xbe\x81\xa5\x2d\x80\x4c\x62\x53" + b"\x91\xd5\xee\x03\x30\xc6\x17\x66\x4b\x9e\x0c\x8d\x40\xd0\xad\xae" + b"\x0a\x00" + ) + + self.assertTrue(vk.verify(sig, data)) + + +class TestSigningKey(unittest.TestCase): + """ + Verify that ecdsa.keys.SigningKey.from_der() can be used with + bytes-like objects. + """ + + @classmethod + def setUpClass(cls): + prv_key_str = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n" + "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n" + "bA==\n" + "-----END EC PRIVATE KEY-----\n" + ) + cls.sk1 = SigningKey.from_pem(prv_key_str) + + prv_key_str = ( + "-----BEGIN PRIVATE KEY-----\n" + "MG8CAQAwEwYHKoZIzj0CAQYIKoZIzj0DAQEEVTBTAgEBBBheyEIL1u+SUqlC6YkE\n" + "PKKfVh+lJXcOscWhNAMyAAS4gXfQhO8X9eRWOUCAKDYPn1m0pNcmTmLaBlHc5Ho1\n" + "pMW0XPUVk0I6i1V7nCCZ82w=\n" + "-----END PRIVATE KEY-----\n" + ) + cls.sk1_pkcs8 = SigningKey.from_pem(prv_key_str) + + prv_key_str = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MHcCAQEEIKlL2EAm5NPPZuXwxRf4nXMk0A80y6UUbiQ17be/qFhRoAoGCCqGSM49\n" + "AwEHoUQDQgAE4H3iRbG4TSrsSRb/gusPQB/4YcN8Poqzgjau4kfxBPyZimeRfuY/\n" + "9g/wMmPuhGl4BUve51DsnKJFRr8psk0ieA==\n" + "-----END EC PRIVATE KEY-----\n" + ) + cls.sk2 = SigningKey.from_pem(prv_key_str) + + def test_decoding_explicit_curve_parameters(self): + prv_key_str = ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIBeQIBADCCAQMGByqGSM49AgEwgfcCAQEwLAYHKoZIzj0BAQIhAP////8AAAAB\n" + "AAAAAAAAAAAAAAAA////////////////MFsEIP////8AAAABAAAAAAAAAAAAAAAA\n" + "///////////////8BCBaxjXYqjqT57PrvVV2mIa8ZR0GsMxTsPY7zjw+J9JgSwMV\n" + "AMSdNgiG5wSTamZ44ROdJreBn36QBEEEaxfR8uEsQkf4vOblY6RA8ncDfYEt6zOg\n" + "9KE5RdiYwpZP40Li/hp/m47n60p8D54WK84zV2sxXs7LtkBoN79R9QIhAP////8A\n" + "AAAA//////////+85vqtpxeehPO5ysL8YyVRAgEBBG0wawIBAQQgIXtREfUmR16r\n" + "ZbmvDGD2lAEFPZa2DLPyz0czSja58yChRANCAASK9VJIGLOY5mxXO4rdf2CNl0//\n" + "yJWhIzDWX7daMI6qQWB9hKyR5OFOT6eFr1qtcZh8CGZbB+yIOCpnnwn0ekpl\n" + "-----END PRIVATE KEY-----\n" + ) + + sk = SigningKey.from_pem(prv_key_str) + + sk2 = SigningKey.from_string( + b"\x21\x7b\x51\x11\xf5\x26\x47\x5e\xab\x65\xb9\xaf\x0c\x60\xf6" + b"\x94\x01\x05\x3d\x96\xb6\x0c\xb3\xf2\xcf\x47\x33\x4a\x36\xb9" + b"\xf3\x20", + curve=NIST256p, + ) + + self.assertEqual(sk, sk2) + + def test_decoding_explicit_curve_parameters_with_explicit_disabled(self): + prv_key_str = ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIBeQIBADCCAQMGByqGSM49AgEwgfcCAQEwLAYHKoZIzj0BAQIhAP////8AAAAB\n" + "AAAAAAAAAAAAAAAA////////////////MFsEIP////8AAAABAAAAAAAAAAAAAAAA\n" + "///////////////8BCBaxjXYqjqT57PrvVV2mIa8ZR0GsMxTsPY7zjw+J9JgSwMV\n" + "AMSdNgiG5wSTamZ44ROdJreBn36QBEEEaxfR8uEsQkf4vOblY6RA8ncDfYEt6zOg\n" + "9KE5RdiYwpZP40Li/hp/m47n60p8D54WK84zV2sxXs7LtkBoN79R9QIhAP////8A\n" + "AAAA//////////+85vqtpxeehPO5ysL8YyVRAgEBBG0wawIBAQQgIXtREfUmR16r\n" + "ZbmvDGD2lAEFPZa2DLPyz0czSja58yChRANCAASK9VJIGLOY5mxXO4rdf2CNl0//\n" + "yJWhIzDWX7daMI6qQWB9hKyR5OFOT6eFr1qtcZh8CGZbB+yIOCpnnwn0ekpl\n" + "-----END PRIVATE KEY-----\n" + ) + + with self.assertRaises(UnexpectedDER): + SigningKey.from_pem( + prv_key_str, valid_curve_encodings=["named_curve"] + ) + + def test_equality_on_signing_keys(self): + sk = SigningKey.from_secret_exponent( + self.sk1.privkey.secret_multiplier, self.sk1.curve + ) + self.assertEqual(self.sk1, sk) + 
self.assertEqual(self.sk1_pkcs8, sk) + + def test_verify_with_empty_message(self): + sig = self.sk1.sign(b"") + + self.assertTrue(sig) + + vk = self.sk1.verifying_key + + self.assertTrue(vk.verify(sig, b"")) + + def test_verify_with_precompute(self): + sig = self.sk1.sign(b"message") + + vk = self.sk1.verifying_key + + vk.precompute() + + self.assertTrue(vk.verify(sig, b"message")) + + def test_compare_verifying_key_with_precompute(self): + vk1 = self.sk1.verifying_key + vk1.precompute() + + vk2 = self.sk1_pkcs8.verifying_key + + self.assertEqual(vk1, vk2) + + def test_verify_with_lazy_precompute(self): + sig = self.sk2.sign(b"other message") + + vk = self.sk2.verifying_key + + vk.precompute(lazy=True) + + self.assertTrue(vk.verify(sig, b"other message")) + + def test_inequality_on_signing_keys(self): + self.assertNotEqual(self.sk1, self.sk2) + + def test_inequality_on_signing_keys_not_implemented(self): + self.assertNotEqual(self.sk1, None) + + def test_ed25519_from_pem(self): + pem_str = ( + "-----BEGIN PRIVATE KEY-----\n" + "MC4CAQAwBQYDK2VwBCIEIDS6x9FO1PG8T4xIPg8Zd0z8uL6sVGZFEZrX17gHC/XU\n" + "-----END PRIVATE KEY-----\n" + ) + + sk = SigningKey.from_pem(pem_str) + + sk_str = SigningKey.from_string( + b"\x34\xBA\xC7\xD1\x4E\xD4\xF1\xBC\x4F\x8C\x48\x3E\x0F\x19\x77\x4C" + b"\xFC\xB8\xBE\xAC\x54\x66\x45\x11\x9A\xD7\xD7\xB8\x07\x0B\xF5\xD4", + Ed25519, + ) + + self.assertEqual(sk, sk_str) + + def test_ed25519_to_pem(self): + sk = SigningKey.from_string( + b"\x34\xBA\xC7\xD1\x4E\xD4\xF1\xBC\x4F\x8C\x48\x3E\x0F\x19\x77\x4C" + b"\xFC\xB8\xBE\xAC\x54\x66\x45\x11\x9A\xD7\xD7\xB8\x07\x0B\xF5\xD4", + Ed25519, + ) + + pem_str = ( + b"-----BEGIN PRIVATE KEY-----\n" + b"MC4CAQAwBQYDK2VwBCIEIDS6x9FO1PG8T4xIPg8Zd0z8uL6sVGZFEZrX17gHC/XU\n" + b"-----END PRIVATE KEY-----\n" + ) + + self.assertEqual(sk.to_pem(format="pkcs8"), pem_str) + + def test_ed25519_to_and_from_pem(self): + sk = SigningKey.generate(Ed25519) + + decoded = SigningKey.from_pem(sk.to_pem(format="pkcs8")) + + self.assertEqual(sk, decoded) + + def test_ed448_from_pem(self): + pem_str = ( + "-----BEGIN PRIVATE KEY-----\n" + "MEcCAQAwBQYDK2VxBDsEOTyFuXqFLXgJlV8uDqcOw9nG4IqzLiZ/i5NfBDoHPzmP\n" + "OP0JMYaLGlTzwovmvCDJ2zLaezu9NLz9aQ==\n" + "-----END PRIVATE KEY-----\n" + ) + sk = SigningKey.from_pem(pem_str) + + sk_str = SigningKey.from_string( + b"\x3C\x85\xB9\x7A\x85\x2D\x78\x09\x95\x5F\x2E\x0E\xA7\x0E\xC3\xD9" + b"\xC6\xE0\x8A\xB3\x2E\x26\x7F\x8B\x93\x5F\x04\x3A\x07\x3F\x39\x8F" + b"\x38\xFD\x09\x31\x86\x8B\x1A\x54\xF3\xC2\x8B\xE6\xBC\x20\xC9\xDB" + b"\x32\xDA\x7B\x3B\xBD\x34\xBC\xFD\x69", + Ed448, + ) + + self.assertEqual(sk, sk_str) + + def test_ed448_to_pem(self): + sk = SigningKey.from_string( + b"\x3C\x85\xB9\x7A\x85\x2D\x78\x09\x95\x5F\x2E\x0E\xA7\x0E\xC3\xD9" + b"\xC6\xE0\x8A\xB3\x2E\x26\x7F\x8B\x93\x5F\x04\x3A\x07\x3F\x39\x8F" + b"\x38\xFD\x09\x31\x86\x8B\x1A\x54\xF3\xC2\x8B\xE6\xBC\x20\xC9\xDB" + b"\x32\xDA\x7B\x3B\xBD\x34\xBC\xFD\x69", + Ed448, + ) + pem_str = ( + b"-----BEGIN PRIVATE KEY-----\n" + b"MEcCAQAwBQYDK2VxBDsEOTyFuXqFLXgJlV8uDqcOw9nG4IqzLiZ/i5NfBDoHPzmP\n" + b"OP0JMYaLGlTzwovmvCDJ2zLaezu9NLz9aQ==\n" + b"-----END PRIVATE KEY-----\n" + ) + + self.assertEqual(sk.to_pem(format="pkcs8"), pem_str) + + def test_ed448_encode_decode(self): + sk = SigningKey.generate(Ed448) + + decoded = SigningKey.from_pem(sk.to_pem(format="pkcs8")) + + self.assertEqual(decoded, sk) + + +class TestTrivialCurve(unittest.TestCase): + @classmethod + def setUpClass(cls): + # To test what happens with r or s in signing happens to be zero we + # need 
to find a scalar that creates one of the points on a curve that + # has x coordinate equal to zero. + # Even for secp112r2 curve that's non trivial so use this toy + # curve, for which we can iterate over all points quickly + curve = CurveFp(163, 84, 58) + gen = PointJacobi(curve, 2, 87, 1, 167, generator=True) + + cls.toy_curve = Curve("toy_p8", curve, gen, (1, 2, 0)) + + cls.sk = SigningKey.from_secret_exponent( + 140, + cls.toy_curve, + hashfunc=hashlib.sha1, + ) + + def test_generator_sanity(self): + gen = self.toy_curve.generator + + self.assertEqual(gen * gen.order(), INFINITY) + + def test_public_key_sanity(self): + self.assertEqual(self.sk.verifying_key.to_string(), b"\x98\x1e") + + def test_deterministic_sign(self): + sig = self.sk.sign_deterministic(b"message") + + self.assertEqual(sig, b"-.") + + self.assertTrue(self.sk.verifying_key.verify(sig, b"message")) + + def test_deterministic_sign_random_message(self): + msg = os.urandom(32) + sig = self.sk.sign_deterministic(msg) + self.assertEqual(len(sig), 2) + self.assertTrue(self.sk.verifying_key.verify(sig, msg)) + + def test_deterministic_sign_that_rises_R_zero_error(self): + # the raised RSZeroError is caught and handled internally by + # sign_deterministic methods + msg = b"\x00\x4f" + sig = self.sk.sign_deterministic(msg) + self.assertEqual(sig, b"\x36\x9e") + self.assertTrue(self.sk.verifying_key.verify(sig, msg)) + + def test_deterministic_sign_that_rises_S_zero_error(self): + msg = b"\x01\x6d" + sig = self.sk.sign_deterministic(msg) + self.assertEqual(sig, b"\x49\x6c") + self.assertTrue(self.sk.verifying_key.verify(sig, msg)) + + +# test VerifyingKey.verify() +prv_key_str = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n" + "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n" + "bA==\n" + "-----END EC PRIVATE KEY-----\n" +) +key_bytes = unpem(prv_key_str) +assert isinstance(key_bytes, bytes) +sk = SigningKey.from_der(key_bytes) +vk = sk.verifying_key + +data = ( + b"some string for signing" + b"contents don't really matter" + b"but do include also some crazy values: " + b"\x00\x01\t\r\n\x00\x00\x00\xff\xf0" +) +assert len(data) % 4 == 0 +sha1 = hashlib.sha1() +sha1.update(data) +data_hash = sha1.digest() +assert isinstance(data_hash, bytes) +sig_raw = sk.sign(data, sigencode=sigencode_string) +assert isinstance(sig_raw, bytes) +sig_der = sk.sign(data, sigencode=sigencode_der) +assert isinstance(sig_der, bytes) +sig_strings = sk.sign(data, sigencode=sigencode_strings) +assert isinstance(sig_strings[0], bytes) + +verifiers = [] +for modifier, fun in [ + ("bytes", lambda x: x), + ("bytes memoryview", lambda x: buffer(x)), + ("bytearray", lambda x: bytearray(x)), + ("bytearray memoryview", lambda x: buffer(bytearray(x))), + ("array.array of bytes", lambda x: array.array("B", x)), + ("array.array of bytes memoryview", lambda x: buffer(array.array("B", x))), + ("array.array of ints", lambda x: array.array("I", x)), + ("array.array of ints memoryview", lambda x: buffer(array.array("I", x))), +]: + if "ints" in modifier: + conv = lambda x: x + else: + conv = fun + for sig_format, signature, decoder, mod_apply in [ + ("raw", sig_raw, sigdecode_string, lambda x: conv(x)), + ("der", sig_der, sigdecode_der, lambda x: conv(x)), + ( + "strings", + sig_strings, + sigdecode_strings, + lambda x: tuple(conv(i) for i in x), + ), + ]: + for method_name, vrf_mthd, vrf_data in [ + ("verify", vk.verify, data), + ("verify_digest", vk.verify_digest, data_hash), + ]: + 
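# These nested loops build one pytest param per (container type, signature
# format, verify method) combination. A standalone sketch of the property
# being parametrized, assuming Python 3 (memoryview standing in for the
# py2 buffer() used above); the key and message are illustrative:
import array
from ecdsa import SigningKey
from ecdsa.util import sigencode_der, sigdecode_der

demo_sk = SigningKey.generate()
demo_sig = demo_sk.sign(b"data", sigencode=sigencode_der)
for wrap in (bytes, bytearray, memoryview, lambda x: array.array("B", x)):
    # verify() should accept any bytes-like container for the signature
    assert demo_sk.verifying_key.verify(
        wrap(demo_sig), b"data", sigdecode=sigdecode_der
    )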
verifiers.append( + pytest.param( + signature, + decoder, + mod_apply, + fun, + vrf_mthd, + vrf_data, + id="{2}-{0}-{1}".format(modifier, sig_format, method_name), + ) + ) + + +@pytest.mark.parametrize( + "signature,decoder,mod_apply,fun,vrf_mthd,vrf_data", verifiers +) +def test_VerifyingKey_verify( + signature, decoder, mod_apply, fun, vrf_mthd, vrf_data +): + sig = mod_apply(signature) + + assert vrf_mthd(sig, fun(vrf_data), sigdecode=decoder) + + +# test SigningKey.from_string() +prv_key_bytes = ( + b"^\xc8B\x0b\xd6\xef\x92R\xa9B\xe9\x89\x04<\xa2" + b"\x9fV\x1f\xa5%w\x0e\xb1\xc5" +) +assert len(prv_key_bytes) == 24 +converters = [] +for modifier, convert in [ + ("bytes", lambda x: x), + ("bytes memoryview", buffer), + ("bytearray", bytearray), + ("bytearray memoryview", lambda x: buffer(bytearray(x))), + ("array.array of bytes", lambda x: array.array("B", x)), + ("array.array of bytes memoryview", lambda x: buffer(array.array("B", x))), + ("array.array of ints", lambda x: array.array("I", x)), + ("array.array of ints memoryview", lambda x: buffer(array.array("I", x))), +]: + converters.append(pytest.param(convert, id=modifier)) + + +@pytest.mark.parametrize("convert", converters) +def test_SigningKey_from_string(convert): + key = convert(prv_key_bytes) + sk = SigningKey.from_string(key) + + assert sk.to_string() == prv_key_bytes + + +# test SigningKey.from_der() +prv_key_str = ( + "-----BEGIN EC PRIVATE KEY-----\n" + "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n" + "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n" + "bA==\n" + "-----END EC PRIVATE KEY-----\n" +) +key_bytes = unpem(prv_key_str) +assert isinstance(key_bytes, bytes) + +# last two converters are for array.array of ints, those require input +# that's multiple of 4, which no curve we support produces +@pytest.mark.parametrize("convert", converters[:-2]) +def test_SigningKey_from_der(convert): + key = convert(key_bytes) + sk = SigningKey.from_der(key) + + assert sk.to_string() == prv_key_bytes + + +# test SigningKey.sign_deterministic() +extra_entropy = b"\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11" + + +@pytest.mark.parametrize("convert", converters) +def test_SigningKey_sign_deterministic(convert): + sig = sk.sign_deterministic( + convert(data), extra_entropy=convert(extra_entropy) + ) + + vk.verify(sig, data) + + +# test SigningKey.sign_digest_deterministic() +@pytest.mark.parametrize("convert", converters) +def test_SigningKey_sign_digest_deterministic(convert): + sig = sk.sign_digest_deterministic( + convert(data_hash), extra_entropy=convert(extra_entropy) + ) + + vk.verify(sig, data) + + +@pytest.mark.parametrize("convert", converters) +def test_SigningKey_sign(convert): + sig = sk.sign(convert(data)) + + vk.verify(sig, data) + + +@pytest.mark.parametrize("convert", converters) +def test_SigningKey_sign_digest(convert): + sig = sk.sign_digest(convert(data_hash)) + + vk.verify(sig, data) + + +def test_SigningKey_with_unlikely_value(): + sk = SigningKey.from_secret_exponent(NIST256p.order - 1, curve=NIST256p) + vk = sk.verifying_key + sig = sk.sign(b"hello") + assert vk.verify(sig, b"hello") + + +def test_SigningKey_with_custom_curve_old_point(): + generator = generator_brainpoolp160r1 + generator = Point( + generator.curve(), + generator.x(), + generator.y(), + generator.order(), + ) + + curve = Curve( + "BRAINPOOLP160r1", + generator.curve(), + generator, + (1, 3, 36, 3, 3, 2, 8, 1, 1, 1), + ) + + sk = SigningKey.from_secret_exponent(12, curve) + + sk2 = SigningKey.from_secret_exponent(12, 
BRAINPOOLP160r1) + + assert sk.privkey == sk2.privkey + + +def test_VerifyingKey_inequality_with_different_curves(): + sk1 = SigningKey.from_secret_exponent(2, BRAINPOOLP160r1) + sk2 = SigningKey.from_secret_exponent(2, NIST256p) + + assert sk1.verifying_key != sk2.verifying_key + + +def test_VerifyingKey_inequality_with_different_secret_points(): + sk1 = SigningKey.from_secret_exponent(2, BRAINPOOLP160r1) + sk2 = SigningKey.from_secret_exponent(3, BRAINPOOLP160r1) + + assert sk1.verifying_key != sk2.verifying_key + + +def test_SigningKey_from_pem_pkcs8v2_EdDSA(): + pem = """-----BEGIN PRIVATE KEY----- + MFMCAQEwBQYDK2VwBCIEICc2F2ag1n1QP0jY+g9qWx5sDkx0s/HdNi3cSRHw+zsI + oSMDIQA+HQ2xCif8a/LMWR2m5HaCm5I2pKe/cc8OiRANMHxjKQ== + -----END PRIVATE KEY-----""" + + sk = SigningKey.from_pem(pem) + assert sk.curve == Ed25519 diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_malformed_sigs.py b/myenv/lib/python3.9/site-packages/ecdsa/test_malformed_sigs.py new file mode 100644 index 0000000..8e1b611 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_malformed_sigs.py @@ -0,0 +1,370 @@ +from __future__ import with_statement, division + +import hashlib + +try: + from hashlib import algorithms_available +except ImportError: # pragma: no cover + algorithms_available = [ + "md5", + "sha1", + "sha224", + "sha256", + "sha384", + "sha512", + ] +# skip algorithms broken by change to OpenSSL 3.0 and early versions +# of hashlib that list algorithms that require the legacy provider to work +# https://bugs.python.org/issue38820 +algorithms_available = [ + i + for i in algorithms_available + if i not in ("mdc2", "md2", "md4", "whirlpool", "ripemd160") +] +from functools import partial +import pytest +import sys +import hypothesis.strategies as st +from hypothesis import note, assume, given, settings, example + +from .keys import SigningKey +from .keys import BadSignatureError +from .util import sigencode_der, sigencode_string +from .util import sigdecode_der, sigdecode_string +from .curves import curves +from .der import ( + encode_integer, + encode_bitstring, + encode_octet_string, + encode_oid, + encode_sequence, + encode_constructed, +) +from .ellipticcurve import CurveEdTw + + +example_data = b"some data to sign" +"""Since the data is hashed for processing, really any string will do.""" + + +hash_and_size = [ + (name, hashlib.new(name).digest_size) for name in algorithms_available +] +"""Pairs of hash names and their output sizes. 
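# A quick illustrative check of the pairing rule described below, assuming a
# standard hashlib: a hash can only be matched with a curve when its digest
# fits within the curve's order size (curve.baselen). The fixed hash list is
# illustrative.
import hashlib
from ecdsa.curves import NIST256p

for name in ("sha1", "sha224", "sha256", "sha384", "sha512"):
    size = hashlib.new(name).digest_size
    print(name, "pairs with NIST256p:", 0 < size <= NIST256p.baselen)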
+Needed for pairing with curves as we don't support hashes +bigger than order sizes of curves.""" + + +keys_and_sigs = [] +"""Name of the curve+hash combination, VerifyingKey and DER signature.""" + + +# for hypothesis strategy shrinking we want smallest curves and hashes first +for curve in sorted(curves, key=lambda x: x.baselen): + for hash_alg in [ + name + for name, size in sorted(hash_and_size, key=lambda x: x[1]) + if 0 < size <= curve.baselen + ]: + sk = SigningKey.generate( + curve, hashfunc=partial(hashlib.new, hash_alg) + ) + + keys_and_sigs.append( + ( + "{0} {1}".format(curve, hash_alg), + sk.verifying_key, + sk.sign(example_data, sigencode=sigencode_der), + ) + ) + + +# first make sure that the signatures can be verified +@pytest.mark.parametrize( + "verifying_key,signature", + [pytest.param(vk, sig, id=name) for name, vk, sig in keys_and_sigs], +) +def test_signatures(verifying_key, signature): + assert verifying_key.verify( + signature, example_data, sigdecode=sigdecode_der + ) + + +@st.composite +def st_fuzzed_sig(draw, keys_and_sigs): + """ + Hypothesis strategy that generates pairs of VerifyingKey and malformed + signatures created by fuzzing of a valid signature. + """ + name, verifying_key, old_sig = draw(st.sampled_from(keys_and_sigs)) + note("Configuration: {0}".format(name)) + + sig = bytearray(old_sig) + + # decide which bytes should be removed + to_remove = draw( + st.lists(st.integers(min_value=0, max_value=len(sig) - 1), unique=True) + ) + to_remove.sort() + for i in reversed(to_remove): + del sig[i] + note("Remove bytes: {0}".format(to_remove)) + + # decide which bytes of the original signature should be changed + if sig: # pragma: no branch + xors = draw( + st.dictionaries( + st.integers(min_value=0, max_value=len(sig) - 1), + st.integers(min_value=1, max_value=255), + ) + ) + for i, val in xors.items(): + sig[i] ^= val + note("xors: {0}".format(xors)) + + # decide where new data should be inserted + insert_pos = draw(st.integers(min_value=0, max_value=len(sig))) + # NIST521p signature is about 140 bytes long, test slightly longer + insert_data = draw(st.binary(max_size=256)) + + sig = sig[:insert_pos] + insert_data + sig[insert_pos:] + note( + "Inserted at position {0} bytes: {1!r}".format(insert_pos, insert_data) + ) + + sig = bytes(sig) + # make sure that there was performed at least one mutation on the data + assume(to_remove or xors or insert_data) + # and that the mutations didn't cancel each-other out + assume(sig != old_sig) + + return verifying_key, sig + + +params = {} +# not supported in hypothesis 2.0.0 +if sys.version_info >= (2, 7): # pragma: no branch + from hypothesis import HealthCheck + + # deadline=5s because NIST521p are slow to verify + params["deadline"] = 5000 + params["suppress_health_check"] = [ + HealthCheck.data_too_large, + HealthCheck.filter_too_much, + HealthCheck.too_slow, + ] + +slow_params = dict(params) +slow_params["max_examples"] = 10 + + +@settings(**params) +@given(st_fuzzed_sig(keys_and_sigs)) +def test_fuzzed_der_signatures(args): + verifying_key, sig = args + + with pytest.raises(BadSignatureError): + verifying_key.verify(sig, example_data, sigdecode=sigdecode_der) + + +@st.composite +def st_random_der_ecdsa_sig_value(draw): + """ + Hypothesis strategy for selecting random values and encoding them + to ECDSA-Sig-Value object:: + + ECDSA-Sig-Value ::= SEQUENCE { + r INTEGER, + s INTEGER + } + """ + name, verifying_key, _ = draw(st.sampled_from(keys_and_sigs)) + note("Configuration: {0}".format(name)) + order = 
int(verifying_key.curve.order) + + # the encode_integer doesn't support negative numbers, would be nice + # to generate them too, but we have coverage for remove_integer() + # verifying that it doesn't accept them, so meh. + # Test all numbers around the ones that can show up (around order) + # way smaller and slightly bigger + r = draw( + st.integers(min_value=0, max_value=order << 4) + | st.integers(min_value=order >> 2, max_value=order + 1) + ) + s = draw( + st.integers(min_value=0, max_value=order << 4) + | st.integers(min_value=order >> 2, max_value=order + 1) + ) + + sig = encode_sequence(encode_integer(r), encode_integer(s)) + + return verifying_key, sig + + +@settings(**slow_params) +@given(st_random_der_ecdsa_sig_value()) +def test_random_der_ecdsa_sig_value(params): + """ + Check if random values encoded in ECDSA-Sig-Value structure are rejected + as signature. + """ + verifying_key, sig = params + + with pytest.raises(BadSignatureError): + verifying_key.verify(sig, example_data, sigdecode=sigdecode_der) + + +def st_der_integer(*args, **kwargs): + """ + Hypothesis strategy that returns a random positive integer as DER + INTEGER. + Parameters are passed to hypothesis.strategy.integer. + """ + if "min_value" not in kwargs: # pragma: no branch + kwargs["min_value"] = 0 + return st.builds(encode_integer, st.integers(*args, **kwargs)) + + +@st.composite +def st_der_bit_string(draw, *args, **kwargs): + """ + Hypothesis strategy that returns a random DER BIT STRING. + Parameters are passed to hypothesis.strategy.binary. + """ + data = draw(st.binary(*args, **kwargs)) + if data: + unused = draw(st.integers(min_value=0, max_value=7)) + data = bytearray(data) + data[-1] &= -(2**unused) + data = bytes(data) + else: + unused = 0 + return encode_bitstring(data, unused) + + +def st_der_octet_string(*args, **kwargs): + """ + Hypothesis strategy that returns a random DER OCTET STRING object. + Parameters are passed to hypothesis.strategy.binary + """ + return st.builds(encode_octet_string, st.binary(*args, **kwargs)) + + +def st_der_null(): + """ + Hypothesis strategy that returns DER NULL object. + """ + return st.just(b"\x05\x00") + + +@st.composite +def st_der_oid(draw): + """ + Hypothesis strategy that returns DER OBJECT IDENTIFIER objects. + """ + first = draw(st.integers(min_value=0, max_value=2)) + if first < 2: + second = draw(st.integers(min_value=0, max_value=39)) + else: + second = draw(st.integers(min_value=0, max_value=2**512)) + rest = draw( + st.lists(st.integers(min_value=0, max_value=2**512), max_size=50) + ) + return encode_oid(first, second, *rest) + + +def st_der(): + """ + Hypothesis strategy that returns random DER structures. + + A valid DER structure is any primitive object, an octet encoding + of a valid DER structure, sequence of valid DER objects or a constructed + encoding of any of the above. 
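# For reference, a minimal sketch of hand-building one such structure with
# the same der helpers (the integer values are arbitrary): an
# ECDSA-Sig-Value is just a SEQUENCE of two INTEGERs, and decoding is
# expected to consume it exactly.
from ecdsa.der import encode_integer, encode_sequence, remove_sequence

demo = encode_sequence(encode_integer(12345), encode_integer(67890))
body, rest = remove_sequence(demo)
assert rest == b""  # a well-formed encoding leaves no trailing bytes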
+ """ + return st.recursive( + st.just(b"") + | st_der_integer(max_value=2**4096) + | st_der_bit_string(max_size=1024**2) + | st_der_octet_string(max_size=1024**2) + | st_der_null() + | st_der_oid(), + lambda children: st.builds( + lambda x: encode_octet_string(x), st.one_of(children) + ) + | st.builds(lambda x: encode_bitstring(x, 0), st.one_of(children)) + | st.builds( + lambda x: encode_sequence(*x), st.lists(children, max_size=200) + ) + | st.builds( + lambda tag, x: encode_constructed(tag, x), + st.integers(min_value=0, max_value=0x3F), + st.one_of(children), + ), + max_leaves=40, + ) + + +@settings(**params) +@given(st.sampled_from(keys_and_sigs), st_der()) +def test_random_der_as_signature(params, der): + """Check if random DER structures are rejected as signature""" + name, verifying_key, _ = params + + with pytest.raises(BadSignatureError): + verifying_key.verify(der, example_data, sigdecode=sigdecode_der) + + +@settings(**params) +@given(st.sampled_from(keys_and_sigs), st.binary(max_size=1024**2)) +@example( + keys_and_sigs[0], encode_sequence(encode_integer(0), encode_integer(0)) +) +@example( + keys_and_sigs[0], + encode_sequence(encode_integer(1), encode_integer(1)) + b"\x00", +) +@example(keys_and_sigs[0], encode_sequence(*[encode_integer(1)] * 3)) +def test_random_bytes_as_signature(params, der): + """Check if random bytes are rejected as signature""" + name, verifying_key, _ = params + + with pytest.raises(BadSignatureError): + verifying_key.verify(der, example_data, sigdecode=sigdecode_der) + + +keys_and_string_sigs = [ + ( + name, + verifying_key, + sigencode_string( + *sigdecode_der(sig, verifying_key.curve.order), + order=verifying_key.curve.order + ), + ) + for name, verifying_key, sig in keys_and_sigs + if not isinstance(verifying_key.curve.curve, CurveEdTw) +] +""" +Name of the curve+hash combination, VerifyingKey and signature as a +byte string. 
+""" + + +keys_and_string_sigs += [ + ( + name, + verifying_key, + sig, + ) + for name, verifying_key, sig in keys_and_sigs + if isinstance(verifying_key.curve.curve, CurveEdTw) +] + + +@settings(**params) +@given(st_fuzzed_sig(keys_and_string_sigs)) +def test_fuzzed_string_signatures(params): + verifying_key, sig = params + + with pytest.raises(BadSignatureError): + verifying_key.verify(sig, example_data, sigdecode=sigdecode_string) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_numbertheory.py b/myenv/lib/python3.9/site-packages/ecdsa/test_numbertheory.py new file mode 100644 index 0000000..8bc787f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_numbertheory.py @@ -0,0 +1,433 @@ +import operator +from functools import reduce + +try: + import unittest2 as unittest +except ImportError: + import unittest +import hypothesis.strategies as st +import pytest +from hypothesis import given, settings, example + +try: + from hypothesis import HealthCheck + + HC_PRESENT = True +except ImportError: # pragma: no cover + HC_PRESENT = False +from .numbertheory import ( + SquareRootError, + JacobiError, + factorization, + gcd, + lcm, + jacobi, + inverse_mod, + is_prime, + next_prime, + smallprimes, + square_root_mod_prime, +) + + +BIGPRIMES = ( + 999671, + 999683, + 999721, + 999727, + 999749, + 999763, + 999769, + 999773, + 999809, + 999853, + 999863, + 999883, + 999907, + 999917, + 999931, + 999953, + 999959, + 999961, + 999979, + 999983, +) + + +@pytest.mark.parametrize( + "prime, next_p", [(p, q) for p, q in zip(BIGPRIMES[:-1], BIGPRIMES[1:])] +) +def test_next_prime(prime, next_p): + assert next_prime(prime) == next_p + + +@pytest.mark.parametrize("val", [-1, 0, 1]) +def test_next_prime_with_nums_less_2(val): + assert next_prime(val) == 2 + + +@pytest.mark.parametrize("prime", smallprimes) +def test_square_root_mod_prime_for_small_primes(prime): + squares = set() + for num in range(0, 1 + prime // 2): + sq = num * num % prime + squares.add(sq) + root = square_root_mod_prime(sq, prime) + # tested for real with TestNumbertheory.test_square_root_mod_prime + assert root * root % prime == sq + + for nonsquare in range(0, prime): + if nonsquare in squares: + continue + with pytest.raises(SquareRootError): + square_root_mod_prime(nonsquare, prime) + + +def test_square_root_mod_prime_for_2(): + a = square_root_mod_prime(1, 2) + assert a == 1 + + +def test_square_root_mod_prime_for_small_prime(): + root = square_root_mod_prime(98**2 % 101, 101) + assert root * root % 101 == 9 + + +def test_square_root_mod_prime_for_p_congruent_5(): + p = 13 + assert p % 8 == 5 + + root = square_root_mod_prime(3, p) + assert root * root % p == 3 + + +def test_square_root_mod_prime_for_p_congruent_5_large_d(): + p = 29 + assert p % 8 == 5 + + root = square_root_mod_prime(4, p) + assert root * root % p == 4 + + +class TestSquareRootModPrime(unittest.TestCase): + def test_power_of_2_p(self): + with self.assertRaises(JacobiError): + square_root_mod_prime(12, 32) + + def test_no_square(self): + with self.assertRaises(SquareRootError) as e: + square_root_mod_prime(12, 31) + + self.assertIn("no square root", str(e.exception)) + + def test_non_prime(self): + with self.assertRaises(SquareRootError) as e: + square_root_mod_prime(12, 33) + + self.assertIn("p is not prime", str(e.exception)) + + def test_non_prime_with_negative(self): + with self.assertRaises(SquareRootError) as e: + square_root_mod_prime(697 - 1, 697) + + self.assertIn("p is not prime", str(e.exception)) + + +@st.composite +def 
st_two_nums_rel_prime(draw): + # 521-bit is the biggest curve we operate on, use 1024 for a bit + # of breathing space + mod = draw(st.integers(min_value=2, max_value=2**1024)) + num = draw( + st.integers(min_value=1, max_value=mod - 1).filter( + lambda x: gcd(x, mod) == 1 + ) + ) + return num, mod + + +@st.composite +def st_primes(draw, *args, **kwargs): + if "min_value" not in kwargs: # pragma: no branch + kwargs["min_value"] = 1 + prime = draw( + st.sampled_from(smallprimes) + | st.integers(*args, **kwargs).filter(is_prime) + ) + return prime + + +@st.composite +def st_num_square_prime(draw): + prime = draw(st_primes(max_value=2**1024)) + num = draw(st.integers(min_value=0, max_value=1 + prime // 2)) + sq = num * num % prime + return sq, prime + + +@st.composite +def st_comp_with_com_fac(draw): + """ + Strategy that returns lists of numbers, all having a common factor. + """ + primes = draw( + st.lists(st_primes(max_value=2**512), min_size=1, max_size=10) + ) + # select random prime(s) that will make the common factor of composites + com_fac_primes = draw( + st.lists(st.sampled_from(primes), min_size=1, max_size=20) + ) + com_fac = reduce(operator.mul, com_fac_primes, 1) + + # select at most 20 lists (returned numbers), + # each having at most 30 primes (factors) including none (then the number + # will be 1) + comp_primes = draw( + st.integers(min_value=1, max_value=20).flatmap( + lambda n: st.lists( + st.lists(st.sampled_from(primes), max_size=30), + min_size=1, + max_size=n, + ) + ) + ) + + return [reduce(operator.mul, nums, 1) * com_fac for nums in comp_primes] + + +@st.composite +def st_comp_no_com_fac(draw): + """ + Strategy that returns lists of numbers that don't have a common factor. + """ + primes = draw( + st.lists( + st_primes(max_value=2**512), min_size=2, max_size=10, unique=True + ) + ) + # first select the primes that will create the uncommon factor + # between returned numbers + uncom_fac_primes = draw( + st.lists( + st.sampled_from(primes), + min_size=1, + max_size=len(primes) - 1, + unique=True, + ) + ) + uncom_fac = reduce(operator.mul, uncom_fac_primes, 1) + + # then build composites from leftover primes + leftover_primes = [i for i in primes if i not in uncom_fac_primes] + + assert leftover_primes + assert uncom_fac_primes + + # select at most 20 lists, each having at most 30 primes + # selected from the leftover_primes list + number_primes = draw( + st.integers(min_value=1, max_value=20).flatmap( + lambda n: st.lists( + st.lists(st.sampled_from(leftover_primes), max_size=30), + min_size=1, + max_size=n, + ) + ) + ) + + numbers = [reduce(operator.mul, nums, 1) for nums in number_primes] + + insert_at = draw(st.integers(min_value=0, max_value=len(numbers))) + numbers.insert(insert_at, uncom_fac) + return numbers + + +HYP_SETTINGS = {} +if HC_PRESENT: # pragma: no branch + HYP_SETTINGS["suppress_health_check"] = [ + HealthCheck.filter_too_much, + HealthCheck.too_slow, + ] + # the factorization() sometimes takes a long time to finish + HYP_SETTINGS["deadline"] = 5000 + + +HYP_SLOW_SETTINGS = dict(HYP_SETTINGS) +HYP_SLOW_SETTINGS["max_examples"] = 10 + + +class TestIsPrime(unittest.TestCase): + def test_very_small_prime(self): + assert is_prime(23) + + def test_very_small_composite(self): + assert not is_prime(22) + + def test_small_prime(self): + assert is_prime(123456791) + + def test_special_composite(self): + assert not is_prime(10261) + + def test_medium_prime_1(self): + # nextPrime[2^256] + assert is_prime(2**256 + 0x129) + + def test_medium_prime_2(self): + # 
nextPrime(2^256+0x129) + assert is_prime(2**256 + 0x12D) + + def test_medium_trivial_composite(self): + assert not is_prime(2**256 + 0x130) + + def test_medium_non_trivial_composite(self): + assert not is_prime(2**256 + 0x12F) + + def test_large_prime(self): + # nextPrime[2^2048] + assert is_prime(2**2048 + 0x3D5) + + +class TestNumbertheory(unittest.TestCase): + def test_gcd(self): + assert gcd(3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13) == 3 * 5 + assert gcd([3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13]) == 3 * 5 + assert gcd(3) == 3 + + @unittest.skipUnless( + HC_PRESENT, + "Hypothesis 2.0.0 can't be made tolerant of hard to " + "meet requirements (like `is_prime()`), the test " + "case times-out on it", + ) + @settings(**HYP_SLOW_SETTINGS) + @given(st_comp_with_com_fac()) + def test_gcd_with_com_factor(self, numbers): + n = gcd(numbers) + assert 1 in numbers or n != 1 + for i in numbers: + assert i % n == 0 + + @unittest.skipUnless( + HC_PRESENT, + "Hypothesis 2.0.0 can't be made tolerant of hard to " + "meet requirements (like `is_prime()`), the test " + "case times-out on it", + ) + @settings(**HYP_SLOW_SETTINGS) + @given(st_comp_no_com_fac()) + def test_gcd_with_uncom_factor(self, numbers): + n = gcd(numbers) + assert n == 1 + + @given( + st.lists( + st.integers(min_value=1, max_value=2**8192), + min_size=1, + max_size=20, + ) + ) + def test_gcd_with_random_numbers(self, numbers): + n = gcd(numbers) + for i in numbers: + # check that at least it's a divider + assert i % n == 0 + + def test_lcm(self): + assert lcm(3, 5 * 3, 7 * 3) == 3 * 5 * 7 + assert lcm([3, 5 * 3, 7 * 3]) == 3 * 5 * 7 + assert lcm(3) == 3 + + @given( + st.lists( + st.integers(min_value=1, max_value=2**8192), + min_size=1, + max_size=20, + ) + ) + def test_lcm_with_random_numbers(self, numbers): + n = lcm(numbers) + for i in numbers: + assert n % i == 0 + + @unittest.skipUnless( + HC_PRESENT, + "Hypothesis 2.0.0 can't be made tolerant of hard to " + "meet requirements (like `is_prime()`), the test " + "case times-out on it", + ) + @settings(**HYP_SETTINGS) + @given(st_num_square_prime()) + def test_square_root_mod_prime(self, vals): + square, prime = vals + + calc = square_root_mod_prime(square, prime) + assert calc * calc % prime == square + + @settings(**HYP_SETTINGS) + @given(st.integers(min_value=1, max_value=10**12)) + @example(265399 * 1526929) + @example(373297**2 * 553991) + def test_factorization(self, num): + factors = factorization(num) + mult = 1 + for i in factors: + mult *= i[0] ** i[1] + assert mult == num + + def test_factorisation_smallprimes(self): + exp = 101 * 103 + assert 101 in smallprimes + assert 103 in smallprimes + factors = factorization(exp) + mult = 1 + for i in factors: + mult *= i[0] ** i[1] + assert mult == exp + + def test_factorisation_not_smallprimes(self): + exp = 1231 * 1237 + assert 1231 not in smallprimes + assert 1237 not in smallprimes + factors = factorization(exp) + mult = 1 + for i in factors: + mult *= i[0] ** i[1] + assert mult == exp + + def test_jacobi_with_zero(self): + assert jacobi(0, 3) == 0 + + def test_jacobi_with_one(self): + assert jacobi(1, 3) == 1 + + @settings(**HYP_SETTINGS) + @given(st.integers(min_value=3, max_value=1000).filter(lambda x: x % 2)) + def test_jacobi(self, mod): + if is_prime(mod): + squares = set() + for root in range(1, mod): + assert jacobi(root * root, mod) == 1 + squares.add(root * root % mod) + for i in range(1, mod): + if i not in squares: + assert jacobi(i, mod) == -1 + else: + factors = factorization(mod) + for a in range(1, mod): + c = 1 + for i in 
factors: + c *= jacobi(a, i[0]) ** i[1] + assert c == jacobi(a, mod) + + @given(st_two_nums_rel_prime()) + def test_inverse_mod(self, nums): + num, mod = nums + + inv = inverse_mod(num, mod) + + assert 0 < inv < mod + assert num * inv % mod == 1 + + def test_inverse_mod_with_zero(self): + assert 0 == inverse_mod(0, 11) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_pyecdsa.py b/myenv/lib/python3.9/site-packages/ecdsa/test_pyecdsa.py new file mode 100644 index 0000000..d61f508 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_pyecdsa.py @@ -0,0 +1,2267 @@ +from __future__ import with_statement, division + +try: + import unittest2 as unittest +except ImportError: + import unittest +import os +import sys +import shutil +import subprocess +import pytest +from binascii import hexlify, unhexlify +from hashlib import sha1, sha256, sha384, sha512 +import hashlib +from functools import partial + +from hypothesis import given +import hypothesis.strategies as st + +from six import b, print_, binary_type +from .keys import SigningKey, VerifyingKey +from .keys import BadSignatureError, MalformedPointError, BadDigestError +from . import util +from .util import sigencode_der, sigencode_strings +from .util import sigdecode_der, sigdecode_strings +from .util import number_to_string, encoded_oid_ecPublicKey, MalformedSignature +from .curves import Curve, UnknownCurveError +from .curves import ( + SECP112r1, + SECP112r2, + SECP128r1, + SECP160r1, + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + SECP256k1, + BRAINPOOLP160r1, + BRAINPOOLP192r1, + BRAINPOOLP224r1, + BRAINPOOLP256r1, + BRAINPOOLP320r1, + BRAINPOOLP384r1, + BRAINPOOLP512r1, + Ed25519, + Ed448, + curves, +) +from .ecdsa import ( + curve_brainpoolp224r1, + curve_brainpoolp256r1, + curve_brainpoolp384r1, + curve_brainpoolp512r1, +) +from .ellipticcurve import Point +from . import der +from . import rfc6979 +from . 
import ecdsa + + +class SubprocessError(Exception): + pass + + +def run_openssl(cmd): + OPENSSL = "openssl" + p = subprocess.Popen( + [OPENSSL] + cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + stdout, ignored = p.communicate() + if p.returncode != 0: + raise SubprocessError( + "cmd '%s %s' failed: rc=%s, stdout/err was %s" + % (OPENSSL, cmd, p.returncode, stdout) + ) + return stdout.decode() + + +class ECDSA(unittest.TestCase): + def test_basic(self): + priv = SigningKey.generate() + pub = priv.get_verifying_key() + + data = b("blahblah") + sig = priv.sign(data) + + self.assertTrue(pub.verify(sig, data)) + self.assertRaises(BadSignatureError, pub.verify, sig, data + b("bad")) + + pub2 = VerifyingKey.from_string(pub.to_string()) + self.assertTrue(pub2.verify(sig, data)) + + def test_deterministic(self): + data = b("blahblah") + secexp = int("9d0219792467d7d37b4d43298a7d0c05", 16) + + priv = SigningKey.from_secret_exponent(secexp, SECP256k1, sha256) + pub = priv.get_verifying_key() + + k = rfc6979.generate_k( + SECP256k1.generator.order(), secexp, sha256, sha256(data).digest() + ) + + sig1 = priv.sign(data, k=k) + self.assertTrue(pub.verify(sig1, data)) + + sig2 = priv.sign(data, k=k) + self.assertTrue(pub.verify(sig2, data)) + + sig3 = priv.sign_deterministic(data, sha256) + self.assertTrue(pub.verify(sig3, data)) + + self.assertEqual(sig1, sig2) + self.assertEqual(sig1, sig3) + + def test_bad_usage(self): + # sk=SigningKey() is wrong + self.assertRaises(TypeError, SigningKey) + self.assertRaises(TypeError, VerifyingKey) + + def test_lengths(self): + default = NIST192p + priv = SigningKey.generate() + pub = priv.get_verifying_key() + self.assertEqual(len(pub.to_string()), default.verifying_key_length) + sig = priv.sign(b("data")) + self.assertEqual(len(sig), default.signature_length) + for curve in ( + NIST192p, + NIST224p, + NIST256p, + NIST384p, + NIST521p, + BRAINPOOLP160r1, + BRAINPOOLP192r1, + BRAINPOOLP224r1, + BRAINPOOLP256r1, + BRAINPOOLP320r1, + BRAINPOOLP384r1, + BRAINPOOLP512r1, + ): + priv = SigningKey.generate(curve=curve) + pub1 = priv.get_verifying_key() + pub2 = VerifyingKey.from_string(pub1.to_string(), curve) + self.assertEqual(pub1.to_string(), pub2.to_string()) + self.assertEqual(len(pub1.to_string()), curve.verifying_key_length) + sig = priv.sign(b("data")) + self.assertEqual(len(sig), curve.signature_length) + + def test_serialize(self): + seed = b("secret") + curve = NIST192p + secexp1 = util.randrange_from_seed__trytryagain(seed, curve.order) + secexp2 = util.randrange_from_seed__trytryagain(seed, curve.order) + self.assertEqual(secexp1, secexp2) + priv1 = SigningKey.from_secret_exponent(secexp1, curve) + priv2 = SigningKey.from_secret_exponent(secexp2, curve) + self.assertEqual( + hexlify(priv1.to_string()), hexlify(priv2.to_string()) + ) + self.assertEqual(priv1.to_pem(), priv2.to_pem()) + pub1 = priv1.get_verifying_key() + pub2 = priv2.get_verifying_key() + data = b("data") + sig1 = priv1.sign(data) + sig2 = priv2.sign(data) + self.assertTrue(pub1.verify(sig1, data)) + self.assertTrue(pub2.verify(sig1, data)) + self.assertTrue(pub1.verify(sig2, data)) + self.assertTrue(pub2.verify(sig2, data)) + self.assertEqual(hexlify(pub1.to_string()), hexlify(pub2.to_string())) + + def test_nonrandom(self): + s = b("all the entropy in the entire world, compressed into one line") + + def not_much_entropy(numbytes): + return s[:numbytes] + + # we control the entropy source, these two keys should be identical: + priv1 = 
SigningKey.generate(entropy=not_much_entropy) + priv2 = SigningKey.generate(entropy=not_much_entropy) + self.assertEqual( + hexlify(priv1.get_verifying_key().to_string()), + hexlify(priv2.get_verifying_key().to_string()), + ) + # likewise, signatures should be identical. Obviously you'd never + # want to do this with keys you care about, because the secrecy of + # the private key depends upon using different random numbers for + # each signature + sig1 = priv1.sign(b("data"), entropy=not_much_entropy) + sig2 = priv2.sign(b("data"), entropy=not_much_entropy) + self.assertEqual(hexlify(sig1), hexlify(sig2)) + + def assertTruePrivkeysEqual(self, priv1, priv2): + self.assertEqual( + priv1.privkey.secret_multiplier, priv2.privkey.secret_multiplier + ) + self.assertEqual( + priv1.privkey.public_key.generator, + priv2.privkey.public_key.generator, + ) + + def test_privkey_creation(self): + s = b("all the entropy in the entire world, compressed into one line") + + def not_much_entropy(numbytes): + return s[:numbytes] + + priv1 = SigningKey.generate() + self.assertEqual(priv1.baselen, NIST192p.baselen) + + priv1 = SigningKey.generate(curve=NIST224p) + self.assertEqual(priv1.baselen, NIST224p.baselen) + + priv1 = SigningKey.generate(entropy=not_much_entropy) + self.assertEqual(priv1.baselen, NIST192p.baselen) + priv2 = SigningKey.generate(entropy=not_much_entropy) + self.assertEqual(priv2.baselen, NIST192p.baselen) + self.assertTruePrivkeysEqual(priv1, priv2) + + priv1 = SigningKey.from_secret_exponent(secexp=3) + self.assertEqual(priv1.baselen, NIST192p.baselen) + priv2 = SigningKey.from_secret_exponent(secexp=3) + self.assertTruePrivkeysEqual(priv1, priv2) + + priv1 = SigningKey.from_secret_exponent(secexp=4, curve=NIST224p) + self.assertEqual(priv1.baselen, NIST224p.baselen) + + def test_privkey_strings(self): + priv1 = SigningKey.generate() + s1 = priv1.to_string() + self.assertEqual(type(s1), binary_type) + self.assertEqual(len(s1), NIST192p.baselen) + priv2 = SigningKey.from_string(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + s1 = priv1.to_pem() + self.assertEqual(type(s1), binary_type) + self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----"))) + self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----"))) + priv2 = SigningKey.from_pem(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + s1 = priv1.to_der() + self.assertEqual(type(s1), binary_type) + priv2 = SigningKey.from_der(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + priv1 = SigningKey.generate(curve=NIST256p) + s1 = priv1.to_pem() + self.assertEqual(type(s1), binary_type) + self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----"))) + self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----"))) + priv2 = SigningKey.from_pem(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + s1 = priv1.to_der() + self.assertEqual(type(s1), binary_type) + priv2 = SigningKey.from_der(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + def test_privkey_strings_brainpool(self): + priv1 = SigningKey.generate(curve=BRAINPOOLP512r1) + s1 = priv1.to_pem() + self.assertEqual(type(s1), binary_type) + self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----"))) + self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----"))) + priv2 = SigningKey.from_pem(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + s1 = priv1.to_der() + self.assertEqual(type(s1), binary_type) + priv2 = SigningKey.from_der(s1) + self.assertTruePrivkeysEqual(priv1, priv2) + + def assertTruePubkeysEqual(self, pub1, 
pub2): + self.assertEqual(pub1.pubkey.point, pub2.pubkey.point) + self.assertEqual(pub1.pubkey.generator, pub2.pubkey.generator) + self.assertEqual(pub1.curve, pub2.curve) + + def test_pubkey_strings(self): + priv1 = SigningKey.generate() + pub1 = priv1.get_verifying_key() + s1 = pub1.to_string() + self.assertEqual(type(s1), binary_type) + self.assertEqual(len(s1), NIST192p.verifying_key_length) + pub2 = VerifyingKey.from_string(s1) + self.assertTruePubkeysEqual(pub1, pub2) + + priv1 = SigningKey.generate(curve=NIST256p) + pub1 = priv1.get_verifying_key() + s1 = pub1.to_string() + self.assertEqual(type(s1), binary_type) + self.assertEqual(len(s1), NIST256p.verifying_key_length) + pub2 = VerifyingKey.from_string(s1, curve=NIST256p) + self.assertTruePubkeysEqual(pub1, pub2) + + pub1_der = pub1.to_der() + self.assertEqual(type(pub1_der), binary_type) + pub2 = VerifyingKey.from_der(pub1_der) + self.assertTruePubkeysEqual(pub1, pub2) + + self.assertRaises( + der.UnexpectedDER, VerifyingKey.from_der, pub1_der + b("junk") + ) + badpub = VerifyingKey.from_der(pub1_der) + + class FakeGenerator: + def order(self): + return 123456789 + + class FakeCurveFp: + def p(self): + return int( + "6525534529039240705020950546962731340" + "4541085228058844382513856749047873406763" + ) + + badcurve = Curve( + "unknown", FakeCurveFp(), FakeGenerator(), (1, 2, 3, 4, 5, 6), None + ) + badpub.curve = badcurve + badder = badpub.to_der() + self.assertRaises(UnknownCurveError, VerifyingKey.from_der, badder) + + pem = pub1.to_pem() + self.assertEqual(type(pem), binary_type) + self.assertTrue(pem.startswith(b("-----BEGIN PUBLIC KEY-----")), pem) + self.assertTrue( + pem.strip().endswith(b("-----END PUBLIC KEY-----")), pem + ) + pub2 = VerifyingKey.from_pem(pem) + self.assertTruePubkeysEqual(pub1, pub2) + + def test_pubkey_strings_brainpool(self): + priv1 = SigningKey.generate(curve=BRAINPOOLP512r1) + pub1 = priv1.get_verifying_key() + s1 = pub1.to_string() + self.assertEqual(type(s1), binary_type) + self.assertEqual(len(s1), BRAINPOOLP512r1.verifying_key_length) + pub2 = VerifyingKey.from_string(s1, curve=BRAINPOOLP512r1) + self.assertTruePubkeysEqual(pub1, pub2) + + pub1_der = pub1.to_der() + self.assertEqual(type(pub1_der), binary_type) + pub2 = VerifyingKey.from_der(pub1_der) + self.assertTruePubkeysEqual(pub1, pub2) + + def test_vk_to_der_with_invalid_point_encoding(self): + sk = SigningKey.generate() + vk = sk.verifying_key + + with self.assertRaises(ValueError): + vk.to_der("raw") + + def test_sk_to_der_with_invalid_point_encoding(self): + sk = SigningKey.generate() + + with self.assertRaises(ValueError): + sk.to_der("raw") + + def test_vk_from_der_garbage_after_curve_oid(self): + type_oid_der = encoded_oid_ecPublicKey + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + b( + "garbage" + ) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\x00\xff", None) + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(der.UnexpectedDER): + VerifyingKey.from_der(to_decode) + + def test_vk_from_der_invalid_key_type(self): + type_oid_der = der.encode_oid(*(1, 2, 3)) + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\x00\xff", None) + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(der.UnexpectedDER): + VerifyingKey.from_der(to_decode) + + def 
test_vk_from_der_garbage_after_point_string(self): + type_oid_der = encoded_oid_ecPublicKey + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\x00\xff", None) + b("garbage") + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(der.UnexpectedDER): + VerifyingKey.from_der(to_decode) + + def test_vk_from_der_invalid_bitstring(self): + type_oid_der = encoded_oid_ecPublicKey + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\x08\xff", None) + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(der.UnexpectedDER): + VerifyingKey.from_der(to_decode) + + def test_vk_from_der_with_invalid_length_of_encoding(self): + type_oid_der = encoded_oid_ecPublicKey + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\xff" * 64, 0) + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_der(to_decode) + + def test_vk_from_der_with_raw_encoding(self): + type_oid_der = encoded_oid_ecPublicKey + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der) + point_der = der.encode_bitstring(b"\xff" * 48, 0) + to_decode = der.encode_sequence(enc_type_der, point_der) + + with self.assertRaises(der.UnexpectedDER): + VerifyingKey.from_der(to_decode) + + def test_signature_strings(self): + priv1 = SigningKey.generate() + pub1 = priv1.get_verifying_key() + data = b("data") + + sig = priv1.sign(data) + self.assertEqual(type(sig), binary_type) + self.assertEqual(len(sig), NIST192p.signature_length) + self.assertTrue(pub1.verify(sig, data)) + + sig = priv1.sign(data, sigencode=sigencode_strings) + self.assertEqual(type(sig), tuple) + self.assertEqual(len(sig), 2) + self.assertEqual(type(sig[0]), binary_type) + self.assertEqual(type(sig[1]), binary_type) + self.assertEqual(len(sig[0]), NIST192p.baselen) + self.assertEqual(len(sig[1]), NIST192p.baselen) + self.assertTrue(pub1.verify(sig, data, sigdecode=sigdecode_strings)) + + sig_der = priv1.sign(data, sigencode=sigencode_der) + self.assertEqual(type(sig_der), binary_type) + self.assertTrue(pub1.verify(sig_der, data, sigdecode=sigdecode_der)) + + def test_sig_decode_strings_with_invalid_count(self): + with self.assertRaises(MalformedSignature): + sigdecode_strings([b("one"), b("two"), b("three")], 0xFF) + + def test_sig_decode_strings_with_wrong_r_len(self): + with self.assertRaises(MalformedSignature): + sigdecode_strings([b("one"), b("two")], 0xFF) + + def test_sig_decode_strings_with_wrong_s_len(self): + with self.assertRaises(MalformedSignature): + sigdecode_strings([b("\xa0"), b("\xb0\xff")], 0xFF) + + def test_verify_with_too_long_input(self): + sk = SigningKey.generate() + vk = sk.verifying_key + + with self.assertRaises(BadDigestError): + vk.verify_digest(None, b("\x00") * 128) + + def test_sk_from_secret_exponent_with_wrong_sec_exponent(self): + with self.assertRaises(MalformedPointError): + SigningKey.from_secret_exponent(0) + + def test_sk_from_string_with_wrong_len_string(self): + with self.assertRaises(MalformedPointError): + SigningKey.from_string(b("\x01")) + + def test_sk_from_der_with_junk_after_sequence(self): + 
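# The property this test targets, as a standalone sketch using the same der
# helpers (the b"garbage" suffix is illustrative): DER decoding must consume
# its input exactly, so trailing bytes surface in the remainder and callers
# are expected to reject them.
from ecdsa import der

good = der.encode_sequence(der.encode_integer(1))
body, rest = der.remove_sequence(good + b"garbage")
assert rest == b"garbage"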
ver_der = der.encode_integer(1) + to_decode = der.encode_sequence(ver_der) + b("garbage") + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_der_with_wrong_version(self): + ver_der = der.encode_integer(0) + to_decode = der.encode_sequence(ver_der) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_der_invalid_const_tag(self): + ver_der = der.encode_integer(1) + privkey_der = der.encode_octet_string(b("\x00\xff")) + curve_oid_der = der.encode_oid(*(1, 2, 3)) + const_der = der.encode_constructed(1, curve_oid_der) + to_decode = der.encode_sequence( + ver_der, privkey_der, const_der, curve_oid_der + ) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_der_garbage_after_privkey_oid(self): + ver_der = der.encode_integer(1) + privkey_der = der.encode_octet_string(b("\x00\xff")) + curve_oid_der = der.encode_oid(*(1, 2, 3)) + b("garbage") + const_der = der.encode_constructed(0, curve_oid_der) + to_decode = der.encode_sequence( + ver_der, privkey_der, const_der, curve_oid_der + ) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_der_with_short_privkey(self): + ver_der = der.encode_integer(1) + privkey_der = der.encode_octet_string(b("\x00\xff")) + curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + const_der = der.encode_constructed(0, curve_oid_der) + to_decode = der.encode_sequence( + ver_der, privkey_der, const_der, curve_oid_der + ) + + sk = SigningKey.from_der(to_decode) + self.assertEqual(sk.privkey.secret_multiplier, 255) + + def test_sk_from_p8_der_with_wrong_version(self): + ver_der = der.encode_integer(2) + algorithm_der = der.encode_sequence( + der.encode_oid(1, 2, 840, 10045, 2, 1), + der.encode_oid(1, 2, 840, 10045, 3, 1, 1), + ) + privkey_der = der.encode_octet_string( + der.encode_sequence( + der.encode_integer(1), der.encode_octet_string(b"\x00\xff") + ) + ) + to_decode = der.encode_sequence(ver_der, algorithm_der, privkey_der) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_p8_der_with_wrong_algorithm(self): + ver_der = der.encode_integer(1) + algorithm_der = der.encode_sequence( + der.encode_oid(1, 2, 3), der.encode_oid(1, 2, 840, 10045, 3, 1, 1) + ) + privkey_der = der.encode_octet_string( + der.encode_sequence( + der.encode_integer(1), der.encode_octet_string(b"\x00\xff") + ) + ) + to_decode = der.encode_sequence(ver_der, algorithm_der, privkey_der) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_p8_der_with_trailing_junk_after_algorithm(self): + ver_der = der.encode_integer(1) + algorithm_der = der.encode_sequence( + der.encode_oid(1, 2, 840, 10045, 2, 1), + der.encode_oid(1, 2, 840, 10045, 3, 1, 1), + der.encode_octet_string(b"junk"), + ) + privkey_der = der.encode_octet_string( + der.encode_sequence( + der.encode_integer(1), der.encode_octet_string(b"\x00\xff") + ) + ) + to_decode = der.encode_sequence(ver_der, algorithm_der, privkey_der) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sk_from_p8_der_with_trailing_junk_after_key(self): + ver_der = der.encode_integer(1) + algorithm_der = der.encode_sequence( + der.encode_oid(1, 2, 840, 10045, 2, 1), + der.encode_oid(1, 2, 840, 10045, 3, 1, 1), + ) + privkey_der = der.encode_octet_string( + der.encode_sequence( + der.encode_integer(1), der.encode_octet_string(b"\x00\xff") 
+ ) + + der.encode_integer(999) + ) + to_decode = der.encode_sequence( + ver_der, + algorithm_der, + privkey_der, + der.encode_octet_string(b"junk"), + ) + + with self.assertRaises(der.UnexpectedDER): + SigningKey.from_der(to_decode) + + def test_sign_with_too_long_hash(self): + sk = SigningKey.from_secret_exponent(12) + + with self.assertRaises(BadDigestError): + sk.sign_digest(b("\xff") * 64) + + def test_hashfunc(self): + sk = SigningKey.generate(curve=NIST256p, hashfunc=sha256) + data = b("security level is 128 bits") + sig = sk.sign(data) + vk = VerifyingKey.from_string( + sk.get_verifying_key().to_string(), curve=NIST256p, hashfunc=sha256 + ) + self.assertTrue(vk.verify(sig, data)) + + sk2 = SigningKey.generate(curve=NIST256p) + sig2 = sk2.sign(data, hashfunc=sha256) + vk2 = VerifyingKey.from_string( + sk2.get_verifying_key().to_string(), + curve=NIST256p, + hashfunc=sha256, + ) + self.assertTrue(vk2.verify(sig2, data)) + + vk3 = VerifyingKey.from_string( + sk.get_verifying_key().to_string(), curve=NIST256p + ) + self.assertTrue(vk3.verify(sig, data, hashfunc=sha256)) + + def test_public_key_recovery(self): + # Create keys + curve = BRAINPOOLP160r1 + + sk = SigningKey.generate(curve=curve) + vk = sk.get_verifying_key() + + # Sign a message + data = b("blahblah") + signature = sk.sign(data) + + # Recover verifying keys + recovered_vks = VerifyingKey.from_public_key_recovery( + signature, data, curve + ) + + # Test if each pk is valid + for recovered_vk in recovered_vks: + # Test if recovered vk is valid for the data + self.assertTrue(recovered_vk.verify(signature, data)) + + # Test if properties are equal + self.assertEqual(vk.curve, recovered_vk.curve) + self.assertEqual( + vk.default_hashfunc, recovered_vk.default_hashfunc + ) + + # Test if original vk is the list of recovered keys + self.assertIn( + vk.pubkey.point, + [recovered_vk.pubkey.point for recovered_vk in recovered_vks], + ) + + def test_public_key_recovery_with_custom_hash(self): + # Create keys + curve = BRAINPOOLP160r1 + + sk = SigningKey.generate(curve=curve, hashfunc=sha256) + vk = sk.get_verifying_key() + + # Sign a message + data = b("blahblah") + signature = sk.sign(data) + + # Recover verifying keys + recovered_vks = VerifyingKey.from_public_key_recovery( + signature, data, curve, hashfunc=sha256, allow_truncate=True + ) + + # Test if each pk is valid + for recovered_vk in recovered_vks: + # Test if recovered vk is valid for the data + self.assertTrue(recovered_vk.verify(signature, data)) + + # Test if properties are equal + self.assertEqual(vk.curve, recovered_vk.curve) + self.assertEqual(sha256, recovered_vk.default_hashfunc) + + # Test if original vk is the list of recovered keys + self.assertIn( + vk.pubkey.point, + [recovered_vk.pubkey.point for recovered_vk in recovered_vks], + ) + + def test_encoding(self): + sk = SigningKey.from_secret_exponent(123456789) + vk = sk.verifying_key + + exp = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + self.assertEqual(vk.to_string(), exp) + self.assertEqual(vk.to_string("raw"), exp) + self.assertEqual(vk.to_string("uncompressed"), b("\x04") + exp) + self.assertEqual(vk.to_string("compressed"), b("\x02") + exp[:24]) + self.assertEqual(vk.to_string("hybrid"), b("\x06") + exp) + + def test_decoding(self): + sk = SigningKey.from_secret_exponent(123456789) + vk = sk.verifying_key + + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x 
!\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + + from_raw = VerifyingKey.from_string(enc) + self.assertEqual(from_raw.pubkey.point, vk.pubkey.point) + + from_uncompressed = VerifyingKey.from_string(b("\x04") + enc) + self.assertEqual(from_uncompressed.pubkey.point, vk.pubkey.point) + + from_compressed = VerifyingKey.from_string(b("\x02") + enc[:24]) + self.assertEqual(from_compressed.pubkey.point, vk.pubkey.point) + + from_uncompressed = VerifyingKey.from_string(b("\x06") + enc) + self.assertEqual(from_uncompressed.pubkey.point, vk.pubkey.point) + + def test_uncompressed_decoding_as_only_alowed(self): + enc = b( + "\x04" + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + vk = VerifyingKey.from_string(enc, valid_encodings=("uncompressed",)) + sk = SigningKey.from_secret_exponent(123456789) + + self.assertEqual(vk, sk.verifying_key) + + def test_raw_decoding_with_blocked_format(self): + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + with self.assertRaises(MalformedPointError) as exp: + VerifyingKey.from_string(enc, valid_encodings=("hybrid",)) + + self.assertIn("hybrid", str(exp.exception)) + + def test_decoding_with_unknown_format(self): + with self.assertRaises(ValueError) as e: + VerifyingKey.from_string(b"", valid_encodings=("raw", "foobar")) + + self.assertIn("Only uncompressed, compressed", str(e.exception)) + + def test_uncompressed_decoding_with_blocked_format(self): + enc = b( + "\x04" + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + with self.assertRaises(MalformedPointError) as exp: + VerifyingKey.from_string(enc, valid_encodings=("hybrid",)) + + self.assertIn("Invalid X9.62 encoding", str(exp.exception)) + + def test_hybrid_decoding_with_blocked_format(self): + enc = b( + "\x06" + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + with self.assertRaises(MalformedPointError) as exp: + VerifyingKey.from_string(enc, valid_encodings=("uncompressed",)) + + self.assertIn("Invalid X9.62 encoding", str(exp.exception)) + + def test_compressed_decoding_with_blocked_format(self): + enc = b( + "\x02" + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + )[:25] + with self.assertRaises(MalformedPointError) as exp: + VerifyingKey.from_string(enc, valid_encodings=("hybrid", "raw")) + + self.assertIn("(hybrid, raw)", str(exp.exception)) + + def test_decoding_with_malformed_uncompressed(self): + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x02") + enc) + + def test_decoding_with_malformed_compressed(self): + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + + with 
self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x01") + enc[:24]) + + def test_decoding_with_inconsistent_hybrid(self): + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x07") + enc) + + def test_decoding_with_point_not_on_curve(self): + enc = b( + "\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3" + "\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4" + "z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*" + ) + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(enc[:47] + b("\x00")) + + def test_decoding_with_point_at_infinity(self): + # decoding it is unsupported, as it's not necessary to encode it + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x00")) + + def test_not_lying_on_curve(self): + enc = number_to_string(NIST192p.curve.p(), NIST192p.curve.p() + 1) + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x02") + enc) + + def test_from_string_with_invalid_curve_too_short_ver_key_len(self): + # both verifying_key_length and baselen are calculated internally + # by the Curve constructor, but since we depend on them verify + # that inconsistent values are detected + curve = Curve("test", ecdsa.curve_192, ecdsa.generator_192, (1, 2)) + curve.verifying_key_length = 16 + curve.baselen = 32 + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x00") * 16, curve) + + def test_from_string_with_invalid_curve_too_long_ver_key_len(self): + # both verifying_key_length and baselen are calculated internally + # by the Curve constructor, but since we depend on them verify + # that inconsistent values are detected + curve = Curve("test", ecdsa.curve_192, ecdsa.generator_192, (1, 2)) + curve.verifying_key_length = 16 + curve.baselen = 16 + + with self.assertRaises(MalformedPointError): + VerifyingKey.from_string(b("\x00") * 16, curve) + + +@pytest.mark.parametrize( + "val,even", [(i, j) for i in range(256) for j in [True, False]] +) +def test_VerifyingKey_decode_with_small_values(val, even): + enc = number_to_string(val, NIST192p.order) + + if even: + enc = b("\x02") + enc + else: + enc = b("\x03") + enc + + # small values can both be actual valid public keys and not, verify that + # only expected exceptions are raised if they are not + try: + vk = VerifyingKey.from_string(enc) + assert isinstance(vk, VerifyingKey) + except MalformedPointError: + assert True + + +params = [] +for curve in curves: + for enc in ["raw", "uncompressed", "compressed", "hybrid"]: + params.append( + pytest.param(curve, enc, id="{0}-{1}".format(curve.name, enc)) + ) + + +@pytest.mark.parametrize("curve,encoding", params) +def test_VerifyingKey_encode_decode(curve, encoding): + sk = SigningKey.generate(curve=curve) + vk = sk.verifying_key + + encoded = vk.to_string(encoding) + + from_enc = VerifyingKey.from_string(encoded, curve=curve) + + assert vk.pubkey.point == from_enc.pubkey.point + + +class OpenSSL(unittest.TestCase): + # test interoperability with OpenSSL tools. 
Note that openssl's ECDSA + # sign/verify arguments changed between 0.9.8 and 1.0.0: the early + # versions require "-ecdsa-with-SHA1", the later versions want just + # "-SHA1" (or to leave out that argument entirely, which means the + # signature will use some default digest algorithm, probably determined + # by the key, probably always SHA1). + # + # openssl ecparam -name secp224r1 -genkey -out privkey.pem + # openssl ec -in privkey.pem -text -noout # get the priv/pub keys + # openssl dgst -ecdsa-with-SHA1 -sign privkey.pem -out data.sig data.txt + # openssl asn1parse -in data.sig -inform DER + # data.sig is 64 bytes, probably 56b plus ASN1 overhead + # openssl dgst -ecdsa-with-SHA1 -prverify privkey.pem -signature data.sig data.txt ; echo $? + # openssl ec -in privkey.pem -pubout -out pubkey.pem + # openssl ec -in privkey.pem -pubout -outform DER -out pubkey.der + + OPENSSL_SUPPORTED_CURVES = set( + c.split(":")[0].strip() + for c in run_openssl("ecparam -list_curves").split("\n") + ) + + def get_openssl_messagedigest_arg(self, hash_name): + v = run_openssl("version") + # e.g. "OpenSSL 1.0.0 29 Mar 2010", or "OpenSSL 1.0.0a 1 Jun 2010", + # or "OpenSSL 0.9.8o 01 Jun 2010" + vs = v.split()[1].split(".") + if vs >= ["1", "0", "0"]: # pragma: no cover + return "-{0}".format(hash_name) + else: # pragma: no cover + return "-ecdsa-with-{0}".format(hash_name) + + # sk: 1:OpenSSL->python 2:python->OpenSSL + # vk: 3:OpenSSL->python 4:python->OpenSSL + # sig: 5:OpenSSL->python 6:python->OpenSSL + + @pytest.mark.skipif( + "secp112r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp112r1", + ) + def test_from_openssl_secp112r1(self): + return self.do_test_from_openssl(SECP112r1) + + @pytest.mark.skipif( + "secp112r2" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp112r2", + ) + def test_from_openssl_secp112r2(self): + return self.do_test_from_openssl(SECP112r2) + + @pytest.mark.skipif( + "secp128r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp128r1", + ) + def test_from_openssl_secp128r1(self): + return self.do_test_from_openssl(SECP128r1) + + @pytest.mark.skipif( + "secp160r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp160r1", + ) + def test_from_openssl_secp160r1(self): + return self.do_test_from_openssl(SECP160r1) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_from_openssl_nist192p(self): + return self.do_test_from_openssl(NIST192p) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_from_openssl_nist192p_sha256(self): + return self.do_test_from_openssl(NIST192p, "SHA256") + + @pytest.mark.skipif( + "secp224r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp224r1", + ) + def test_from_openssl_nist224p(self): + return self.do_test_from_openssl(NIST224p) + + @pytest.mark.skipif( + "prime256v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_from_openssl_nist256p(self): + return self.do_test_from_openssl(NIST256p) + + @pytest.mark.skipif( + "prime256v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_from_openssl_nist256p_sha384(self): + return self.do_test_from_openssl(NIST256p, "SHA384") + + @pytest.mark.skipif( + "prime256v1" not in 
OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_from_openssl_nist256p_sha512(self): + return self.do_test_from_openssl(NIST256p, "SHA512") + + @pytest.mark.skipif( + "secp384r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp384r1", + ) + def test_from_openssl_nist384p(self): + return self.do_test_from_openssl(NIST384p) + + @pytest.mark.skipif( + "secp521r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp521r1", + ) + def test_from_openssl_nist521p(self): + return self.do_test_from_openssl(NIST521p) + + @pytest.mark.skipif( + "secp256k1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp256k1", + ) + def test_from_openssl_secp256k1(self): + return self.do_test_from_openssl(SECP256k1) + + @pytest.mark.skipif( + "brainpoolP160r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP160r1", + ) + def test_from_openssl_brainpoolp160r1(self): + return self.do_test_from_openssl(BRAINPOOLP160r1) + + @pytest.mark.skipif( + "brainpoolP192r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP192r1", + ) + def test_from_openssl_brainpoolp192r1(self): + return self.do_test_from_openssl(BRAINPOOLP192r1) + + @pytest.mark.skipif( + "brainpoolP224r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP224r1", + ) + def test_from_openssl_brainpoolp224r1(self): + return self.do_test_from_openssl(BRAINPOOLP224r1) + + @pytest.mark.skipif( + "brainpoolP256r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP256r1", + ) + def test_from_openssl_brainpoolp256r1(self): + return self.do_test_from_openssl(BRAINPOOLP256r1) + + @pytest.mark.skipif( + "brainpoolP320r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP320r1", + ) + def test_from_openssl_brainpoolp320r1(self): + return self.do_test_from_openssl(BRAINPOOLP320r1) + + @pytest.mark.skipif( + "brainpoolP384r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP384r1", + ) + def test_from_openssl_brainpoolp384r1(self): + return self.do_test_from_openssl(BRAINPOOLP384r1) + + @pytest.mark.skipif( + "brainpoolP512r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP512r1", + ) + def test_from_openssl_brainpoolp512r1(self): + return self.do_test_from_openssl(BRAINPOOLP512r1) + + def do_test_from_openssl(self, curve, hash_name="SHA1"): + curvename = curve.openssl_name + assert curvename + # OpenSSL: create sk, vk, sign. 
+ # Python: read vk(3), checksig(5), read sk(1), sign, check + mdarg = self.get_openssl_messagedigest_arg(hash_name) + if os.path.isdir("t"): # pragma: no cover + shutil.rmtree("t") + os.mkdir("t") + run_openssl("ecparam -name %s -genkey -out t/privkey.pem" % curvename) + run_openssl("ec -in t/privkey.pem -pubout -out t/pubkey.pem") + data = b("data") + with open("t/data.txt", "wb") as e: + e.write(data) + run_openssl( + "dgst %s -sign t/privkey.pem -out t/data.sig t/data.txt" % mdarg + ) + run_openssl( + "dgst %s -verify t/pubkey.pem -signature t/data.sig t/data.txt" + % mdarg + ) + with open("t/pubkey.pem", "rb") as e: + pubkey_pem = e.read() + vk = VerifyingKey.from_pem(pubkey_pem) # 3 + with open("t/data.sig", "rb") as e: + sig_der = e.read() + self.assertTrue( + vk.verify( + sig_der, + data, # 5 + hashfunc=partial(hashlib.new, hash_name), + sigdecode=sigdecode_der, + ) + ) + + with open("t/privkey.pem") as e: + fp = e.read() + sk = SigningKey.from_pem(fp) # 1 + sig = sk.sign(data, hashfunc=partial(hashlib.new, hash_name)) + self.assertTrue( + vk.verify(sig, data, hashfunc=partial(hashlib.new, hash_name)) + ) + + run_openssl( + "pkcs8 -topk8 -nocrypt " + "-in t/privkey.pem -outform pem -out t/privkey-p8.pem" + ) + with open("t/privkey-p8.pem", "rb") as e: + privkey_p8_pem = e.read() + sk_from_p8 = SigningKey.from_pem(privkey_p8_pem) + self.assertEqual(sk, sk_from_p8) + + @pytest.mark.skipif( + "secp112r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp112r1", + ) + def test_to_openssl_secp112r1(self): + self.do_test_to_openssl(SECP112r1) + + @pytest.mark.skipif( + "secp112r2" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp112r2", + ) + def test_to_openssl_secp112r2(self): + self.do_test_to_openssl(SECP112r2) + + @pytest.mark.skipif( + "secp128r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp128r1", + ) + def test_to_openssl_secp128r1(self): + self.do_test_to_openssl(SECP128r1) + + @pytest.mark.skipif( + "secp160r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp160r1", + ) + def test_to_openssl_secp160r1(self): + self.do_test_to_openssl(SECP160r1) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_to_openssl_nist192p(self): + self.do_test_to_openssl(NIST192p) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_to_openssl_nist192p_sha256(self): + self.do_test_to_openssl(NIST192p, "SHA256") + + @pytest.mark.skipif( + "secp224r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp224r1", + ) + def test_to_openssl_nist224p(self): + self.do_test_to_openssl(NIST224p) + + @pytest.mark.skipif( + "prime256v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_to_openssl_nist256p(self): + self.do_test_to_openssl(NIST256p) + + @pytest.mark.skipif( + "prime256v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_to_openssl_nist256p_sha384(self): + self.do_test_to_openssl(NIST256p, "SHA384") + + @pytest.mark.skipif( + "prime256v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime256v1", + ) + def test_to_openssl_nist256p_sha512(self): + self.do_test_to_openssl(NIST256p, "SHA512") + + @pytest.mark.skipif( + "secp384r1" not in 
OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp384r1", + ) + def test_to_openssl_nist384p(self): + self.do_test_to_openssl(NIST384p) + + @pytest.mark.skipif( + "secp521r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp521r1", + ) + def test_to_openssl_nist521p(self): + self.do_test_to_openssl(NIST521p) + + @pytest.mark.skipif( + "secp256k1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support secp256k1", + ) + def test_to_openssl_secp256k1(self): + self.do_test_to_openssl(SECP256k1) + + @pytest.mark.skipif( + "brainpoolP160r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP160r1", + ) + def test_to_openssl_brainpoolp160r1(self): + self.do_test_to_openssl(BRAINPOOLP160r1) + + @pytest.mark.skipif( + "brainpoolP192r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP192r1", + ) + def test_to_openssl_brainpoolp192r1(self): + self.do_test_to_openssl(BRAINPOOLP192r1) + + @pytest.mark.skipif( + "brainpoolP224r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP224r1", + ) + def test_to_openssl_brainpoolp224r1(self): + self.do_test_to_openssl(BRAINPOOLP224r1) + + @pytest.mark.skipif( + "brainpoolP256r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP256r1", + ) + def test_to_openssl_brainpoolp256r1(self): + self.do_test_to_openssl(BRAINPOOLP256r1) + + @pytest.mark.skipif( + "brainpoolP320r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP320r1", + ) + def test_to_openssl_brainpoolp320r1(self): + self.do_test_to_openssl(BRAINPOOLP320r1) + + @pytest.mark.skipif( + "brainpoolP384r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP384r1", + ) + def test_to_openssl_brainpoolp384r1(self): + self.do_test_to_openssl(BRAINPOOLP384r1) + + @pytest.mark.skipif( + "brainpoolP512r1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support brainpoolP512r1", + ) + def test_to_openssl_brainpoolp512r1(self): + self.do_test_to_openssl(BRAINPOOLP512r1) + + def do_test_to_openssl(self, curve, hash_name="SHA1"): + curvename = curve.openssl_name + assert curvename + # Python: create sk, vk, sign. 
+ # OpenSSL: read vk(4), checksig(6), read sk(2), sign, check + mdarg = self.get_openssl_messagedigest_arg(hash_name) + if os.path.isdir("t"): # pragma: no cover + shutil.rmtree("t") + os.mkdir("t") + sk = SigningKey.generate(curve=curve) + vk = sk.get_verifying_key() + data = b("data") + with open("t/pubkey.der", "wb") as e: + e.write(vk.to_der()) # 4 + with open("t/pubkey.pem", "wb") as e: + e.write(vk.to_pem()) # 4 + sig_der = sk.sign( + data, + hashfunc=partial(hashlib.new, hash_name), + sigencode=sigencode_der, + ) + + with open("t/data.sig", "wb") as e: + e.write(sig_der) # 6 + with open("t/data.txt", "wb") as e: + e.write(data) + with open("t/baddata.txt", "wb") as e: + e.write(data + b("corrupt")) + + self.assertRaises( + SubprocessError, + run_openssl, + "dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/baddata.txt" + % mdarg, + ) + run_openssl( + "dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/data.txt" + % mdarg + ) + + with open("t/privkey.pem", "wb") as e: + e.write(sk.to_pem()) # 2 + run_openssl( + "dgst %s -sign t/privkey.pem -out t/data.sig2 t/data.txt" % mdarg + ) + run_openssl( + "dgst %s -verify t/pubkey.pem -signature t/data.sig2 t/data.txt" + % mdarg + ) + + with open("t/privkey-explicit.pem", "wb") as e: + e.write(sk.to_pem(curve_parameters_encoding="explicit")) + run_openssl( + "dgst %s -sign t/privkey-explicit.pem -out t/data.sig2 t/data.txt" + % mdarg + ) + run_openssl( + "dgst %s -verify t/pubkey.pem -signature t/data.sig2 t/data.txt" + % mdarg + ) + + with open("t/privkey-p8.pem", "wb") as e: + e.write(sk.to_pem(format="pkcs8")) + run_openssl( + "dgst %s -sign t/privkey-p8.pem -out t/data.sig3 t/data.txt" + % mdarg + ) + run_openssl( + "dgst %s -verify t/pubkey.pem -signature t/data.sig3 t/data.txt" + % mdarg + ) + + with open("t/privkey-p8-explicit.pem", "wb") as e: + e.write( + sk.to_pem(format="pkcs8", curve_parameters_encoding="explicit") + ) + run_openssl( + "dgst %s -sign t/privkey-p8-explicit.pem -out t/data.sig3 t/data.txt" + % mdarg + ) + run_openssl( + "dgst %s -verify t/pubkey.pem -signature t/data.sig3 t/data.txt" + % mdarg + ) + + OPENSSL_SUPPORTED_TYPES = set() + try: + if "-rawin" in run_openssl("pkeyutl -help"): + OPENSSL_SUPPORTED_TYPES = set( + c.lower() + for c in ("ED25519", "ED448") + if c in run_openssl("list -public-key-methods") + ) + except SubprocessError: + pass + + def do_eddsa_test_to_openssl(self, curve): + curvename = curve.name.upper() + + if os.path.isdir("t"): + shutil.rmtree("t") + os.mkdir("t") + + sk = SigningKey.generate(curve=curve) + vk = sk.get_verifying_key() + + data = b"data" + with open("t/pubkey.der", "wb") as e: + e.write(vk.to_der()) + with open("t/pubkey.pem", "wb") as e: + e.write(vk.to_pem()) + + sig = sk.sign(data) + + with open("t/data.sig", "wb") as e: + e.write(sig) + with open("t/data.txt", "wb") as e: + e.write(data) + with open("t/baddata.txt", "wb") as e: + e.write(data + b"corrupt") + + with self.assertRaises(SubprocessError): + run_openssl( + "pkeyutl -verify -pubin -inkey t/pubkey.pem -rawin " + "-in t/baddata.txt -sigfile t/data.sig" + ) + run_openssl( + "pkeyutl -verify -pubin -inkey t/pubkey.pem -rawin " + "-in t/data.txt -sigfile t/data.sig" + ) + + shutil.rmtree("t") + + # in practice at least OpenSSL 3.0.0 is needed to make EdDSA signatures + # earlier versions support EdDSA only in X.509 certificates + @pytest.mark.skipif( + "ed25519" not in OPENSSL_SUPPORTED_TYPES, + reason="system openssl does not support signing with Ed25519", + ) + def 
test_to_openssl_ed25519(self): + return self.do_eddsa_test_to_openssl(Ed25519) + + @pytest.mark.skipif( + "ed448" not in OPENSSL_SUPPORTED_TYPES, + reason="system openssl does not support signing with Ed448", + ) + def test_to_openssl_ed448(self): + return self.do_eddsa_test_to_openssl(Ed448) + + def do_eddsa_test_from_openssl(self, curve): + curvename = curve.name + + if os.path.isdir("t"): + shutil.rmtree("t") + os.mkdir("t") + + data = b"data" + + run_openssl( + "genpkey -algorithm {0} -outform PEM -out t/privkey.pem".format( + curvename + ) + ) + run_openssl( + "pkey -outform PEM -pubout -in t/privkey.pem -out t/pubkey.pem" + ) + + with open("t/data.txt", "wb") as e: + e.write(data) + run_openssl( + "pkeyutl -sign -inkey t/privkey.pem " + "-rawin -in t/data.txt -out t/data.sig" + ) + + with open("t/data.sig", "rb") as e: + sig = e.read() + with open("t/pubkey.pem", "rb") as e: + vk = VerifyingKey.from_pem(e.read()) + + self.assertIs(vk.curve, curve) + + vk.verify(sig, data) + + shutil.rmtree("t") + + @pytest.mark.skipif( + "ed25519" not in OPENSSL_SUPPORTED_TYPES, + reason="system openssl does not support signing with Ed25519", + ) + def test_from_openssl_ed25519(self): + return self.do_eddsa_test_from_openssl(Ed25519) + + @pytest.mark.skipif( + "ed448" not in OPENSSL_SUPPORTED_TYPES, + reason="system openssl does not support signing with Ed448", + ) + def test_from_openssl_ed448(self): + return self.do_eddsa_test_from_openssl(Ed448) + + +class TooSmallCurve(unittest.TestCase): + OPENSSL_SUPPORTED_CURVES = set( + c.split(":")[0].strip() + for c in run_openssl("ecparam -list_curves").split("\n") + ) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_sign_too_small_curve_dont_allow_truncate_raises(self): + sk = SigningKey.generate(curve=NIST192p) + data = b("data") + with self.assertRaises(BadDigestError): + sk.sign( + data, + hashfunc=partial(hashlib.new, "SHA256"), + sigencode=sigencode_der, + allow_truncate=False, + ) + + @pytest.mark.skipif( + "prime192v1" not in OPENSSL_SUPPORTED_CURVES, + reason="system openssl does not support prime192v1", + ) + def test_verify_too_small_curve_dont_allow_truncate_raises(self): + sk = SigningKey.generate(curve=NIST192p) + vk = sk.get_verifying_key() + data = b("data") + sig_der = sk.sign( + data, + hashfunc=partial(hashlib.new, "SHA256"), + sigencode=sigencode_der, + allow_truncate=True, + ) + with self.assertRaises(BadDigestError): + vk.verify( + sig_der, + data, + hashfunc=partial(hashlib.new, "SHA256"), + sigdecode=sigdecode_der, + allow_truncate=False, + ) + + +class DER(unittest.TestCase): + def test_integer(self): + self.assertEqual(der.encode_integer(0), b("\x02\x01\x00")) + self.assertEqual(der.encode_integer(1), b("\x02\x01\x01")) + self.assertEqual(der.encode_integer(127), b("\x02\x01\x7f")) + self.assertEqual(der.encode_integer(128), b("\x02\x02\x00\x80")) + self.assertEqual(der.encode_integer(256), b("\x02\x02\x01\x00")) + # self.assertEqual(der.encode_integer(-1), b("\x02\x01\xff")) + + def s(n): + return der.remove_integer(der.encode_integer(n) + b("junk")) + + self.assertEqual(s(0), (0, b("junk"))) + self.assertEqual(s(1), (1, b("junk"))) + self.assertEqual(s(127), (127, b("junk"))) + self.assertEqual(s(128), (128, b("junk"))) + self.assertEqual(s(256), (256, b("junk"))) + self.assertEqual( + s(1234567890123456789012345678901234567890), + (1234567890123456789012345678901234567890, b("junk")), + ) + + def test_number(self): + 
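# note (added): encode_number emits the variable-length base-128 form, with the + # high bit set on every byte except the last (the form OID sub-identifiers use), + # e.g. 3*128 + 7 encodes as b"\x83\x07" as the assertions below show +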
self.assertEqual(der.encode_number(0), b("\x00")) + self.assertEqual(der.encode_number(127), b("\x7f")) + self.assertEqual(der.encode_number(128), b("\x81\x00")) + self.assertEqual(der.encode_number(3 * 128 + 7), b("\x83\x07")) + # self.assertEqual(der.read_number("\x81\x9b" + "more"), (155, 2)) + # self.assertEqual(der.encode_number(155), b("\x81\x9b")) + for n in (0, 1, 2, 127, 128, 3 * 128 + 7, 840, 10045): # , 155): + x = der.encode_number(n) + b("more") + n1, llen = der.read_number(x) + self.assertEqual(n1, n) + self.assertEqual(x[llen:], b("more")) + + def test_length(self): + self.assertEqual(der.encode_length(0), b("\x00")) + self.assertEqual(der.encode_length(127), b("\x7f")) + self.assertEqual(der.encode_length(128), b("\x81\x80")) + self.assertEqual(der.encode_length(255), b("\x81\xff")) + self.assertEqual(der.encode_length(256), b("\x82\x01\x00")) + self.assertEqual(der.encode_length(3 * 256 + 7), b("\x82\x03\x07")) + self.assertEqual(der.read_length(b("\x81\x9b") + b("more")), (155, 2)) + self.assertEqual(der.encode_length(155), b("\x81\x9b")) + for n in (0, 1, 2, 127, 128, 255, 256, 3 * 256 + 7, 155): + x = der.encode_length(n) + b("more") + n1, llen = der.read_length(x) + self.assertEqual(n1, n) + self.assertEqual(x[llen:], b("more")) + + def test_sequence(self): + x = der.encode_sequence(b("ABC"), b("DEF")) + b("GHI") + self.assertEqual(x, b("\x30\x06ABCDEFGHI")) + x1, rest = der.remove_sequence(x) + self.assertEqual(x1, b("ABCDEF")) + self.assertEqual(rest, b("GHI")) + + def test_constructed(self): + x = der.encode_constructed(0, NIST224p.encoded_oid) + self.assertEqual(hexlify(x), b("a007") + b("06052b81040021")) + x = der.encode_constructed(1, unhexlify(b("0102030a0b0c"))) + self.assertEqual(hexlify(x), b("a106") + b("0102030a0b0c")) + + +class Util(unittest.TestCase): + def test_trytryagain(self): + tta = util.randrange_from_seed__trytryagain + for i in range(1000): + seed = "seed-%d" % i + for order in ( + 2**8 - 2, + 2**8 - 1, + 2**8, + 2**8 + 1, + 2**8 + 2, + 2**16 - 1, + 2**16 + 1, + ): + n = tta(seed, order) + self.assertTrue(1 <= n < order, (1, n, order)) + # this trytryagain *does* provide long-term stability + self.assertEqual( + ("%x" % (tta("seed", NIST224p.order))).encode(), + b("6fa59d73bf0446ae8743cf748fc5ac11d5585a90356417e97155c3bc"), + ) + + def test_trytryagain_single(self): + tta = util.randrange_from_seed__trytryagain + order = 2**8 - 2 + seed = b"text" + n = tta(seed, order) + # known issue: https://github.com/warner/python-ecdsa/issues/221 + if sys.version_info < (3, 0): # pragma: no branch + self.assertEqual(n, 228) + else: + self.assertEqual(n, 18) + + @given(st.integers(min_value=0, max_value=10**200)) + def test_randrange(self, i): + # util.randrange does not provide long-term stability: we might + # change the algorithm in the future. 
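+ # a seeded PRNG only makes this hypothesis (property-based) case reproducible; + # the assertion below checks the 1 <= n < order invariant, not exact values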
+ entropy = util.PRNG("seed-%d" % i) + for order in ( + 2**8 - 2, + 2**8 - 1, + 2**8, + 2**16 - 1, + 2**16 + 1, + ): + # that oddball 2**16+1 takes half our runtime + n = util.randrange(order, entropy=entropy) + self.assertTrue(1 <= n < order, (1, n, order)) + + def OFF_test_prove_uniformity(self): # pragma: no cover + order = 2**8 - 2 + counts = dict([(i, 0) for i in range(1, order)]) + assert 0 not in counts + assert order not in counts + for i in range(1000000): + seed = "seed-%d" % i + n = util.randrange_from_seed__trytryagain(seed, order) + counts[n] += 1 + # this technique should use the full range + self.assertTrue(counts[order - 1]) + for i in range(1, order): + print_("%3d: %s" % (i, "*" * (counts[i] // 100))) + + +class RFC6979(unittest.TestCase): + # https://tools.ietf.org/html/rfc6979#appendix-A.1 + def _do(self, generator, secexp, hsh, hash_func, expected): + actual = rfc6979.generate_k(generator.order(), secexp, hash_func, hsh) + self.assertEqual(expected, actual) + + def test_SECP256k1(self): + """RFC doesn't contain test vectors for SECP256k1 used in bitcoin. + This vector has been computed by Golang reference implementation instead.""" + self._do( + generator=SECP256k1.generator, + secexp=int("9d0219792467d7d37b4d43298a7d0c05", 16), + hsh=sha256(b("sample")).digest(), + hash_func=sha256, + expected=int( + "8fa1f95d514760e498f28957b824ee6ec39ed64826ff4fecc2b5739ec45b91cd", + 16, + ), + ) + + def test_SECP256k1_2(self): + self._do( + generator=SECP256k1.generator, + secexp=int( + "cca9fbcc1b41e5a95d369eaa6ddcff73b61a4efaa279cfc6567e8daa39cbaf50", + 16, + ), + hsh=sha256(b("sample")).digest(), + hash_func=sha256, + expected=int( + "2df40ca70e639d89528a6b670d9d48d9165fdc0febc0974056bdce192b8e16a3", + 16, + ), + ) + + def test_SECP256k1_3(self): + self._do( + generator=SECP256k1.generator, + secexp=0x1, + hsh=sha256(b("Satoshi Nakamoto")).digest(), + hash_func=sha256, + expected=0x8F8A276C19F4149656B280621E358CCE24F5F52542772691EE69063B74F15D15, + ) + + def test_SECP256k1_4(self): + self._do( + generator=SECP256k1.generator, + secexp=0x1, + hsh=sha256( + b( + "All those moments will be lost in time, like tears in rain. Time to die..." 
+ ) + ).digest(), + hash_func=sha256, + expected=0x38AA22D72376B4DBC472E06C3BA403EE0A394DA63FC58D88686C611ABA98D6B3, + ) + + def test_SECP256k1_5(self): + self._do( + generator=SECP256k1.generator, + secexp=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140, + hsh=sha256(b("Satoshi Nakamoto")).digest(), + hash_func=sha256, + expected=0x33A19B60E25FB6F4435AF53A3D42D493644827367E6453928554F43E49AA6F90, + ) + + def test_SECP256k1_6(self): + self._do( + generator=SECP256k1.generator, + secexp=0xF8B8AF8CE3C7CCA5E300D33939540C10D45CE001B8F252BFBC57BA0342904181, + hsh=sha256(b("Alan Turing")).digest(), + hash_func=sha256, + expected=0x525A82B70E67874398067543FD84C83D30C175FDC45FDEEE082FE13B1D7CFDF1, + ) + + def test_1(self): + # Basic example of the RFC, it also tests 'try-try-again' from Step H of rfc6979 + self._do( + generator=Point( + None, + 0, + 0, + int("4000000000000000000020108A2E0CC0D99F8A5EF", 16), + ), + secexp=int("09A4D6792295A7F730FC3F2B49CBC0F62E862272F", 16), + hsh=unhexlify( + b( + "AF2BDBE1AA9B6EC1E2ADE1D694F41FC71A831D0268E9891562113D8A62ADD1BF" + ) + ), + hash_func=sha256, + expected=int("23AF4074C90A02B3FE61D286D5C87F425E6BDD81B", 16), + ) + + def test_2(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha1(b("sample")).digest(), + hash_func=sha1, + expected=int( + "37D7CA00D2C7B0E5E412AC03BD44BA837FDD5B28CD3B0021", 16 + ), + ) + + def test_3(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha256(b("sample")).digest(), + hash_func=sha256, + expected=int( + "32B1B6D7D42A05CB449065727A84804FB1A3E34D8F261496", 16 + ), + ) + + def test_4(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha512(b("sample")).digest(), + hash_func=sha512, + expected=int( + "A2AC7AB055E4F20692D49209544C203A7D1F2C0BFBC75DB1", 16 + ), + ) + + def test_5(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha1(b("test")).digest(), + hash_func=sha1, + expected=int( + "D9CF9C3D3297D3260773A1DA7418DB5537AB8DD93DE7FA25", 16 + ), + ) + + def test_6(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha256(b("test")).digest(), + hash_func=sha256, + expected=int( + "5C4CE89CF56D9E7C77C8585339B006B97B5F0680B4306C6C", 16 + ), + ) + + def test_7(self): + self._do( + generator=NIST192p.generator, + secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16), + hsh=sha512(b("test")).digest(), + hash_func=sha512, + expected=int( + "0758753A5254759C7CFBAD2E2D9B0792EEE44136C9480527", 16 + ), + ) + + def test_8(self): + self._do( + generator=NIST521p.generator, + secexp=int( + "0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", + 16, + ), + hsh=sha1(b("sample")).digest(), + hash_func=sha1, + expected=int( + "089C071B419E1C2820962321787258469511958E80582E95D8378E0C2CCDB3CB42BEDE42F50E3FA3C71F5A76724281D31D9C89F0F91FC1BE4918DB1C03A5838D0F9", + 16, + ), + ) + + def test_9(self): + self._do( + generator=NIST521p.generator, + secexp=int( + "0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", + 16, + ), + hsh=sha256(b("sample")).digest(), + hash_func=sha256, + 
expected=int( + "0EDF38AFCAAECAB4383358B34D67C9F2216C8382AAEA44A3DAD5FDC9C32575761793FEF24EB0FC276DFC4F6E3EC476752F043CF01415387470BCBD8678ED2C7E1A0", + 16, + ), + ) + + def test_10(self): + self._do( + generator=NIST521p.generator, + secexp=int( + "0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", + 16, + ), + hsh=sha512(b("test")).digest(), + hash_func=sha512, + expected=int( + "16200813020EC986863BEDFC1B121F605C1215645018AEA1A7B215A564DE9EB1B38A67AA1128B80CE391C4FB71187654AAA3431027BFC7F395766CA988C964DC56D", + 16, + ), + ) + + +class ECDH(unittest.TestCase): + def _do(self, curve, generator, dA, x_qA, y_qA, dB, x_qB, y_qB, x_Z, y_Z): + qA = dA * generator + qB = dB * generator + Z = dA * qB + self.assertEqual(Point(curve, x_qA, y_qA), qA) + self.assertEqual(Point(curve, x_qB, y_qB), qB) + self.assertTrue( + (dA * qB) + == (dA * dB * generator) + == (dB * dA * generator) + == (dB * qA) + ) + self.assertEqual(Point(curve, x_Z, y_Z), Z) + + +class RFC6932(ECDH): + # https://tools.ietf.org/html/rfc6932#appendix-A.1 + + def test_brainpoolP224r1(self): + self._do( + curve=curve_brainpoolp224r1, + generator=BRAINPOOLP224r1.generator, + dA=int( + "7C4B7A2C8A4BAD1FBB7D79CC0955DB7C6A4660CA64CC4778159B495E", 16 + ), + x_qA=int( + "B104A67A6F6E85E14EC1825E1539E8ECDBBF584922367DD88C6BDCF2", 16 + ), + y_qA=int( + "46D782E7FDB5F60CD8404301AC5949C58EDB26BC68BA07695B750A94", 16 + ), + dB=int( + "63976D4AAE6CD0F6DD18DEFEF55D96569D0507C03E74D6486FFA28FB", 16 + ), + x_qB=int( + "2A97089A9296147B71B21A4B574E1278245B536F14D8C2B9D07A874E", 16 + ), + y_qB=int( + "9B900D7C77A709A797276B8CA1BA61BB95B546FC29F862E44D59D25B", 16 + ), + x_Z=int( + "312DFD98783F9FB77B9704945A73BEB6DCCBE3B65D0F967DCAB574EB", 16 + ), + y_Z=int( + "6F800811D64114B1C48C621AB3357CF93F496E4238696A2A012B3C98", 16 + ), + ) + + def test_brainpoolP256r1(self): + self._do( + curve=curve_brainpoolp256r1, + generator=BRAINPOOLP256r1.generator, + dA=int( + "041EB8B1E2BC681BCE8E39963B2E9FC415B05283313DD1A8BCC055F11AE" + "49699", + 16, + ), + x_qA=int( + "78028496B5ECAAB3C8B6C12E45DB1E02C9E4D26B4113BC4F015F60C5C" + "CC0D206", + 16, + ), + y_qA=int( + "A2AE1762A3831C1D20F03F8D1E3C0C39AFE6F09B4D44BBE80CD100987" + "B05F92B", + 16, + ), + dB=int( + "06F5240EACDB9837BC96D48274C8AA834B6C87BA9CC3EEDD81F99A16B8D" + "804D3", + 16, + ), + x_qB=int( + "8E07E219BA588916C5B06AA30A2F464C2F2ACFC1610A3BE2FB240B635" + "341F0DB", + 16, + ), + y_qB=int( + "148EA1D7D1E7E54B9555B6C9AC90629C18B63BEE5D7AA6949EBBF47B2" + "4FDE40D", + 16, + ), + x_Z=int( + "05E940915549E9F6A4A75693716E37466ABA79B4BF2919877A16DD2CC2" + "E23708", + 16, + ), + y_Z=int( + "6BC23B6702BC5A019438CEEA107DAAD8B94232FFBBC350F3B137628FE6" + "FD134C", + 16, + ), + ) + + def test_brainpoolP384r1(self): + self._do( + curve=curve_brainpoolp384r1, + generator=BRAINPOOLP384r1.generator, + dA=int( + "014EC0755B78594BA47FB0A56F6173045B4331E74BA1A6F47322E70D79D" + "828D97E095884CA72B73FDABD5910DF0FA76A", + 16, + ), + x_qA=int( + "45CB26E4384DAF6FB776885307B9A38B7AD1B5C692E0C32F012533277" + "8F3B8D3F50CA358099B30DEB5EE69A95C058B4E", + 16, + ), + y_qA=int( + "8173A1C54AFFA7E781D0E1E1D12C0DC2B74F4DF58E4A4E3AF7026C5D3" + "2DC530A2CD89C859BB4B4B768497F49AB8CC859", + 16, + ), + dB=int( + "6B461CB79BD0EA519A87D6828815D8CE7CD9B3CAA0B5A8262CBCD550A01" + "5C90095B976F3529957506E1224A861711D54", + 16, + ), + x_qB=int( + "01BF92A92EE4BE8DED1A911125C209B03F99E3161CFCC986DC7711383" + "FC30AF9CE28CA3386D59E2C8D72CE1E7B4666E8", + 16, + ), + 
y_qB=int( + "3289C4A3A4FEE035E39BDB885D509D224A142FF9FBCC5CFE5CCBB3026" + "8EE47487ED8044858D31D848F7A95C635A347AC", + 16, + ), + x_Z=int( + "04CC4FF3DCCCB07AF24E0ACC529955B36D7C807772B92FCBE48F3AFE9A" + "2F370A1F98D3FA73FD0C0747C632E12F1423EC", + 16, + ), + y_Z=int( + "7F465F90BD69AFB8F828A214EB9716D66ABC59F17AF7C75EE7F1DE22AB" + "5D05085F5A01A9382D05BF72D96698FE3FF64E", + 16, + ), + ) + + def test_brainpoolP512r1(self): + self._do( + curve=curve_brainpoolp512r1, + generator=BRAINPOOLP512r1.generator, + dA=int( + "636B6BE0482A6C1C41AA7AE7B245E983392DB94CECEA2660A379CFE1595" + "59E357581825391175FC195D28BAC0CF03A7841A383B95C262B98378287" + "4CCE6FE333", + 16, + ), + x_qA=int( + "0562E68B9AF7CBFD5565C6B16883B777FF11C199161ECC427A39D17EC" + "2166499389571D6A994977C56AD8252658BA8A1B72AE42F4FB7532151" + "AFC3EF0971CCDA", + 16, + ), + y_qA=int( + "A7CA2D8191E21776A89860AFBC1F582FAA308D551C1DC6133AF9F9C3C" + "AD59998D70079548140B90B1F311AFB378AA81F51B275B2BE6B7DEE97" + "8EFC7343EA642E", + 16, + ), + dB=int( + "0AF4E7F6D52EDD52907BB8DBAB3992A0BB696EC10DF11892FF205B66D38" + "1ECE72314E6A6EA079CEA06961DBA5AE6422EF2E9EE803A1F236FB96A17" + "99B86E5C8B", + 16, + ), + x_qB=int( + "5A7954E32663DFF11AE24712D87419F26B708AC2B92877D6BFEE2BFC4" + "3714D89BBDB6D24D807BBD3AEB7F0C325F862E8BADE4F74636B97EAAC" + "E739E11720D323", + 16, + ), + y_qB=int( + "96D14621A9283A1BED84DE8DD64836B2C0758B11441179DC0C54C0D49" + "A47C03807D171DD544B72CAAEF7B7CE01C7753E2CAD1A861ECA55A719" + "54EE1BA35E04BE", + 16, + ), + x_Z=int( + "1EE8321A4BBF93B9CF8921AB209850EC9B7066D1984EF08C2BB7232362" + "08AC8F1A483E79461A00E0D5F6921CE9D360502F85C812BEDEE23AC5B2" + "10E5811B191E", + 16, + ), + y_Z=int( + "2632095B7B936174B41FD2FAF369B1D18DCADEED7E410A7E251F083109" + "7C50D02CFED02607B6A2D5ADB4C0006008562208631875B58B54ECDA5A" + "4F9FE9EAABA6", + 16, + ), + ) + + +class RFC7027(ECDH): + # https://tools.ietf.org/html/rfc7027#appendix-A + + def test_brainpoolP256r1(self): + self._do( + curve=curve_brainpoolp256r1, + generator=BRAINPOOLP256r1.generator, + dA=int( + "81DB1EE100150FF2EA338D708271BE38300CB54241D79950F77B0630398" + "04F1D", + 16, + ), + x_qA=int( + "44106E913F92BC02A1705D9953A8414DB95E1AAA49E81D9E85F929A8E" + "3100BE5", + 16, + ), + y_qA=int( + "8AB4846F11CACCB73CE49CBDD120F5A900A69FD32C272223F789EF10E" + "B089BDC", + 16, + ), + dB=int( + "55E40BC41E37E3E2AD25C3C6654511FFA8474A91A0032087593852D3E7D" + "76BD3", + 16, + ), + x_qB=int( + "8D2D688C6CF93E1160AD04CC4429117DC2C41825E1E9FCA0ADDD34E6F" + "1B39F7B", + 16, + ), + y_qB=int( + "990C57520812BE512641E47034832106BC7D3E8DD0E4C7F1136D70065" + "47CEC6A", + 16, + ), + x_Z=int( + "89AFC39D41D3B327814B80940B042590F96556EC91E6AE7939BCE31F3A" + "18BF2B", + 16, + ), + y_Z=int( + "49C27868F4ECA2179BFD7D59B1E3BF34C1DBDE61AE12931648F43E5963" + "2504DE", + 16, + ), + ) + + def test_brainpoolP384r1(self): + self._do( + curve=curve_brainpoolp384r1, + generator=BRAINPOOLP384r1.generator, + dA=int( + "1E20F5E048A5886F1F157C74E91BDE2B98C8B52D58E5003D57053FC4B0B" + "D65D6F15EB5D1EE1610DF870795143627D042", + 16, + ), + x_qA=int( + "68B665DD91C195800650CDD363C625F4E742E8134667B767B1B476793" + "588F885AB698C852D4A6E77A252D6380FCAF068", + 16, + ), + y_qA=int( + "55BC91A39C9EC01DEE36017B7D673A931236D2F1F5C83942D049E3FA2" + "0607493E0D038FF2FD30C2AB67D15C85F7FAA59", + 16, + ), + dB=int( + "032640BC6003C59260F7250C3DB58CE647F98E1260ACCE4ACDA3DD869F7" + "4E01F8BA5E0324309DB6A9831497ABAC96670", + 16, + ), + x_qB=int( + "4D44326F269A597A5B58BBA565DA5556ED7FD9A8A9EB76C25F46DB69D" + 
"19DC8CE6AD18E404B15738B2086DF37E71D1EB4", + 16, + ), + y_qB=int( + "62D692136DE56CBE93BF5FA3188EF58BC8A3A0EC6C1E151A21038A42E" + "9185329B5B275903D192F8D4E1F32FE9CC78C48", + 16, + ), + x_Z=int( + "0BD9D3A7EA0B3D519D09D8E48D0785FB744A6B355E6304BC51C229FBBC" + "E239BBADF6403715C35D4FB2A5444F575D4F42", + 16, + ), + y_Z=int( + "0DF213417EBE4D8E40A5F76F66C56470C489A3478D146DECF6DF0D94BA" + "E9E598157290F8756066975F1DB34B2324B7BD", + 16, + ), + ) + + def test_brainpoolP512r1(self): + self._do( + curve=curve_brainpoolp512r1, + generator=BRAINPOOLP512r1.generator, + dA=int( + "16302FF0DBBB5A8D733DAB7141C1B45ACBC8715939677F6A56850A38BD8" + "7BD59B09E80279609FF333EB9D4C061231FB26F92EEB04982A5F1D1764C" + "AD57665422", + 16, + ), + x_qA=int( + "0A420517E406AAC0ACDCE90FCD71487718D3B953EFD7FBEC5F7F27E28" + "C6149999397E91E029E06457DB2D3E640668B392C2A7E737A7F0BF044" + "36D11640FD09FD", + 16, + ), + y_qA=int( + "72E6882E8DB28AAD36237CD25D580DB23783961C8DC52DFA2EC138AD4" + "72A0FCEF3887CF62B623B2A87DE5C588301EA3E5FC269B373B60724F5" + "E82A6AD147FDE7", + 16, + ), + dB=int( + "230E18E1BCC88A362FA54E4EA3902009292F7F8033624FD471B5D8ACE49" + "D12CFABBC19963DAB8E2F1EBA00BFFB29E4D72D13F2224562F405CB8050" + "3666B25429", + 16, + ), + x_qB=int( + "9D45F66DE5D67E2E6DB6E93A59CE0BB48106097FF78A081DE781CDB31" + "FCE8CCBAAEA8DD4320C4119F1E9CD437A2EAB3731FA9668AB268D871D" + "EDA55A5473199F", + 16, + ), + y_qB=int( + "2FDC313095BCDD5FB3A91636F07A959C8E86B5636A1E930E8396049CB" + "481961D365CC11453A06C719835475B12CB52FC3C383BCE35E27EF194" + "512B71876285FA", + 16, + ), + x_Z=int( + "A7927098655F1F9976FA50A9D566865DC530331846381C87256BAF3226" + "244B76D36403C024D7BBF0AA0803EAFF405D3D24F11A9B5C0BEF679FE1" + "454B21C4CD1F", + 16, + ), + y_Z=int( + "7DB71C3DEF63212841C463E881BDCF055523BD368240E6C3143BD8DEF8" + "B3B3223B95E0F53082FF5E412F4222537A43DF1C6D25729DDB51620A83" + "2BE6A26680A2", + 16, + ), + ) + + +# https://tools.ietf.org/html/rfc4754#page-5 +@pytest.mark.parametrize( + "w, gwx, gwy, k, msg, md, r, s, curve", + [ + pytest.param( + "DC51D3866A15BACDE33D96F992FCA99DA7E6EF0934E7097559C27F1614C88A7F", + "2442A5CC0ECD015FA3CA31DC8E2BBC70BF42D60CBCA20085E0822CB04235E970", + "6FC98BD7E50211A4A27102FA3549DF79EBCB4BF246B80945CDDFE7D509BBFD7D", + "9E56F509196784D963D1C0A401510EE7ADA3DCC5DEE04B154BF61AF1D5A6DECE", + b"abc", + sha256, + "CB28E0999B9C7715FD0A80D8E47A77079716CBBF917DD72E97566EA1C066957C", + "86FA3BB4E26CAD5BF90B7F81899256CE7594BB1EA0C89212748BFF3B3D5B0315", + NIST256p, + id="ECDSA-256", + ), + pytest.param( + "0BEB646634BA87735D77AE4809A0EBEA865535DE4C1E1DCB692E84708E81A5AF" + "62E528C38B2A81B35309668D73524D9F", + "96281BF8DD5E0525CA049C048D345D3082968D10FEDF5C5ACA0C64E6465A97EA" + "5CE10C9DFEC21797415710721F437922", + "447688BA94708EB6E2E4D59F6AB6D7EDFF9301D249FE49C33096655F5D502FAD" + "3D383B91C5E7EDAA2B714CC99D5743CA", + "B4B74E44D71A13D568003D7489908D564C7761E229C58CBFA18950096EB7463B" + "854D7FA992F934D927376285E63414FA", + b"abc", + sha384, + "FB017B914E29149432D8BAC29A514640B46F53DDAB2C69948084E2930F1C8F7E" + "08E07C9C63F2D21A07DCB56A6AF56EB3", + "B263A1305E057F984D38726A1B46874109F417BCA112674C528262A40A629AF1" + "CBB9F516CE0FA7D2FF630863A00E8B9F", + NIST384p, + id="ECDSA-384", + ), + pytest.param( + "0065FDA3409451DCAB0A0EAD45495112A3D813C17BFD34BDF8C1209D7DF58491" + "20597779060A7FF9D704ADF78B570FFAD6F062E95C7E0C5D5481C5B153B48B37" + "5FA1", + "0151518F1AF0F563517EDD5485190DF95A4BF57B5CBA4CF2A9A3F6474725A35F" + "7AFE0A6DDEB8BEDBCD6A197E592D40188901CECD650699C9B5E456AEA5ADD190" + "52A8", + 
"006F3B142EA1BFFF7E2837AD44C9E4FF6D2D34C73184BBAD90026DD5E6E85317" + "D9DF45CAD7803C6C20035B2F3FF63AFF4E1BA64D1C077577DA3F4286C58F0AEA" + "E643", + "00C1C2B305419F5A41344D7E4359933D734096F556197A9B244342B8B62F46F9" + "373778F9DE6B6497B1EF825FF24F42F9B4A4BD7382CFC3378A540B1B7F0C1B95" + "6C2F", + b"abc", + sha512, + "0154FD3836AF92D0DCA57DD5341D3053988534FDE8318FC6AAAAB68E2E6F4339" + "B19F2F281A7E0B22C269D93CF8794A9278880ED7DBB8D9362CAEACEE54432055" + "2251", + "017705A7030290D1CEB605A9A1BB03FF9CDD521E87A696EC926C8C10C8362DF4" + "975367101F67D1CF9BCCBF2F3D239534FA509E70AAC851AE01AAC68D62F86647" + "2660", + NIST521p, + id="ECDSA-521", + ), + ], +) +def test_RFC4754_vectors(w, gwx, gwy, k, msg, md, r, s, curve): + sk = SigningKey.from_string(unhexlify(w), curve) + vk = VerifyingKey.from_string(unhexlify(gwx + gwy), curve) + assert sk.verifying_key == vk + sig = sk.sign(msg, hashfunc=md, sigencode=sigencode_strings, k=int(k, 16)) + + assert sig == (unhexlify(r), unhexlify(s)) + + assert vk.verify(sig, msg, md, sigdecode_strings) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_rw_lock.py b/myenv/lib/python3.9/site-packages/ecdsa/test_rw_lock.py new file mode 100644 index 0000000..0a84b9c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_rw_lock.py @@ -0,0 +1,180 @@ +# Copyright Mateusz Kobos, (c) 2011 +# https://code.activestate.com/recipes/577803-reader-writer-lock-with-priority-for-writers/ +# released under the MIT licence + +try: + import unittest2 as unittest +except ImportError: + import unittest +import threading +import time +import copy +from ._rwlock import RWLock + + +class Writer(threading.Thread): + def __init__( + self, buffer_, rw_lock, init_sleep_time, sleep_time, to_write + ): + """ + @param buffer_: common buffer_ shared by the readers and writers + @type buffer_: list + @type rw_lock: L{RWLock} + @param init_sleep_time: sleep time before doing any action + @type init_sleep_time: C{float} + @param sleep_time: sleep time while in critical section + @type sleep_time: C{float} + @param to_write: data that will be appended to the buffer + """ + threading.Thread.__init__(self) + self.__buffer = buffer_ + self.__rw_lock = rw_lock + self.__init_sleep_time = init_sleep_time + self.__sleep_time = sleep_time + self.__to_write = to_write + self.entry_time = None + """Time of entry to the critical section""" + self.exit_time = None + """Time of exit from the critical section""" + + def run(self): + time.sleep(self.__init_sleep_time) + self.__rw_lock.writer_acquire() + self.entry_time = time.time() + time.sleep(self.__sleep_time) + self.__buffer.append(self.__to_write) + self.exit_time = time.time() + self.__rw_lock.writer_release() + + +class Reader(threading.Thread): + def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time): + """ + @param buffer_: common buffer shared by the readers and writers + @type buffer_: list + @type rw_lock: L{RWLock} + @param init_sleep_time: sleep time before doing any action + @type init_sleep_time: C{float} + @param sleep_time: sleep time while in critical section + @type sleep_time: C{float} + """ + threading.Thread.__init__(self) + self.__buffer = buffer_ + self.__rw_lock = rw_lock + self.__init_sleep_time = init_sleep_time + self.__sleep_time = sleep_time + self.buffer_read = None + """a copy of a the buffer read while in critical section""" + self.entry_time = None + """Time of entry to the critical section""" + self.exit_time = None + """Time of exit from the critical section""" + + def run(self): + 
time.sleep(self.__init_sleep_time) + self.__rw_lock.reader_acquire() + self.entry_time = time.time() + time.sleep(self.__sleep_time) + self.buffer_read = copy.deepcopy(self.__buffer) + self.exit_time = time.time() + self.__rw_lock.reader_release() + + +class RWLockTestCase(unittest.TestCase): + def test_readers_nonexclusive_access(self): + (buffer_, rw_lock, threads) = self.__init_variables() + + threads.append(Reader(buffer_, rw_lock, 0, 0)) + threads.append(Writer(buffer_, rw_lock, 0.2, 0.4, 1)) + threads.append(Reader(buffer_, rw_lock, 0.3, 0.3)) + threads.append(Reader(buffer_, rw_lock, 0.5, 0)) + + self.__start_and_join_threads(threads) + + ## The third reader should enter after the second one but it should + ## exit before the second one exits + ## (i.e. the readers should be in the critical section + ## at the same time) + + self.assertEqual([], threads[0].buffer_read) + self.assertEqual([1], threads[2].buffer_read) + self.assertEqual([1], threads[3].buffer_read) + self.assertTrue(threads[1].exit_time <= threads[2].entry_time) + self.assertTrue(threads[2].entry_time <= threads[3].entry_time) + self.assertTrue(threads[3].exit_time < threads[2].exit_time) + + def test_writers_exclusive_access(self): + (buffer_, rw_lock, threads) = self.__init_variables() + + threads.append(Writer(buffer_, rw_lock, 0, 0.4, 1)) + threads.append(Writer(buffer_, rw_lock, 0.1, 0, 2)) + threads.append(Reader(buffer_, rw_lock, 0.2, 0)) + + self.__start_and_join_threads(threads) + + ## The second writer should wait for the first one to exit + + self.assertEqual([1, 2], threads[2].buffer_read) + self.assertTrue(threads[0].exit_time <= threads[1].entry_time) + self.assertTrue(threads[1].exit_time <= threads[2].exit_time) + + def test_writer_priority(self): + (buffer_, rw_lock, threads) = self.__init_variables() + + threads.append(Writer(buffer_, rw_lock, 0, 0, 1)) + threads.append(Reader(buffer_, rw_lock, 0.1, 0.4)) + threads.append(Writer(buffer_, rw_lock, 0.2, 0, 2)) + threads.append(Reader(buffer_, rw_lock, 0.3, 0)) + threads.append(Reader(buffer_, rw_lock, 0.3, 0)) + + self.__start_and_join_threads(threads) + + ## The second writer should go before the second and the third reader + + self.assertEqual([1], threads[1].buffer_read) + self.assertEqual([1, 2], threads[3].buffer_read) + self.assertEqual([1, 2], threads[4].buffer_read) + self.assertTrue(threads[0].exit_time < threads[1].entry_time) + self.assertTrue(threads[1].exit_time <= threads[2].entry_time) + self.assertTrue(threads[2].exit_time <= threads[3].entry_time) + self.assertTrue(threads[2].exit_time <= threads[4].entry_time) + + def test_many_writers_priority(self): + (buffer_, rw_lock, threads) = self.__init_variables() + + threads.append(Writer(buffer_, rw_lock, 0, 0, 1)) + threads.append(Reader(buffer_, rw_lock, 0.1, 0.6)) + threads.append(Writer(buffer_, rw_lock, 0.2, 0.1, 2)) + threads.append(Reader(buffer_, rw_lock, 0.3, 0)) + threads.append(Reader(buffer_, rw_lock, 0.4, 0)) + threads.append(Writer(buffer_, rw_lock, 0.5, 0.1, 3)) + + self.__start_and_join_threads(threads) + + ## The two last writers should go first -- after the first reader and + ## before the second and the third reader + + self.assertEqual([1], threads[1].buffer_read) + self.assertEqual([1, 2, 3], threads[3].buffer_read) + self.assertEqual([1, 2, 3], threads[4].buffer_read) + self.assertTrue(threads[0].exit_time < threads[1].entry_time) + self.assertTrue(threads[1].exit_time <= threads[2].entry_time) + self.assertTrue(threads[1].exit_time <= threads[5].entry_time) + 
self.assertTrue(threads[2].exit_time <= threads[3].entry_time) + self.assertTrue(threads[2].exit_time <= threads[4].entry_time) + self.assertTrue(threads[5].exit_time <= threads[3].entry_time) + self.assertTrue(threads[5].exit_time <= threads[4].entry_time) + + @staticmethod + def __init_variables(): + buffer_ = [] + rw_lock = RWLock() + threads = [] + return (buffer_, rw_lock, threads) + + @staticmethod + def __start_and_join_threads(threads): + for t in threads: + t.start() + for t in threads: + t.join() diff --git a/myenv/lib/python3.9/site-packages/ecdsa/test_sha3.py b/myenv/lib/python3.9/site-packages/ecdsa/test_sha3.py new file mode 100644 index 0000000..2c6bd15 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/test_sha3.py @@ -0,0 +1,111 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest +import pytest + +try: + from gmpy2 import mpz + + GMPY = True +except ImportError: + try: + from gmpy import mpz + + GMPY = True + except ImportError: + GMPY = False + +from ._sha3 import shake_256 +from ._compat import bytes_to_int, int_to_bytes + +B2I_VECTORS = [ + (b"\x00\x01", "big", 1), + (b"\x00\x01", "little", 0x0100), + (b"", "big", 0), + (b"\x00", "little", 0), +] + + +@pytest.mark.parametrize("bytes_in,endian,int_out", B2I_VECTORS) +def test_bytes_to_int(bytes_in, endian, int_out): + out = bytes_to_int(bytes_in, endian) + assert out == int_out + + +class TestBytesToInt(unittest.TestCase): + def test_bytes_to_int_wrong_endian(self): + with self.assertRaises(ValueError): + bytes_to_int(b"\x00", "middle") + + def test_int_to_bytes_wrong_endian(self): + with self.assertRaises(ValueError): + int_to_bytes(0, byteorder="middle") + + +@pytest.mark.skipif(GMPY == False, reason="requites gmpy or gmpy2") +def test_int_to_bytes_with_gmpy(): + assert int_to_bytes(mpz(1)) == b"\x01" + + +I2B_VECTORS = [ + (0, None, "big", b""), + (0, 1, "big", b"\x00"), + (1, None, "big", b"\x01"), + (0x0100, None, "little", b"\x00\x01"), + (0x0100, 4, "little", b"\x00\x01\x00\x00"), + (1, 4, "big", b"\x00\x00\x00\x01"), +] + + +@pytest.mark.parametrize("int_in,length,endian,bytes_out", I2B_VECTORS) +def test_int_to_bytes(int_in, length, endian, bytes_out): + out = int_to_bytes(int_in, length, endian) + assert out == bytes_out + + +SHAKE_256_VECTORS = [ + ( + b"Message.", + 32, + b"\x78\xa1\x37\xbb\x33\xae\xe2\x72\xb1\x02\x4f\x39\x43\xe5\xcf\x0c" + b"\x4e\x9c\x72\x76\x2e\x34\x4c\xf8\xf9\xc3\x25\x9d\x4f\x91\x2c\x3a", + ), + ( + b"", + 32, + b"\x46\xb9\xdd\x2b\x0b\xa8\x8d\x13\x23\x3b\x3f\xeb\x74\x3e\xeb\x24" + b"\x3f\xcd\x52\xea\x62\xb8\x1b\x82\xb5\x0c\x27\x64\x6e\xd5\x76\x2f", + ), + ( + b"message", + 32, + b"\x86\x16\xe1\xe4\xcf\xd8\xb5\xf7\xd9\x2d\x43\xd8\x6e\x1b\x14\x51" + b"\xa2\xa6\x5a\xf8\x64\xfc\xb1\x26\xc2\x66\x0a\xb3\x46\x51\xb1\x75", + ), + ( + b"message", + 16, + b"\x86\x16\xe1\xe4\xcf\xd8\xb5\xf7\xd9\x2d\x43\xd8\x6e\x1b\x14\x51", + ), + ( + b"message", + 64, + b"\x86\x16\xe1\xe4\xcf\xd8\xb5\xf7\xd9\x2d\x43\xd8\x6e\x1b\x14\x51" + b"\xa2\xa6\x5a\xf8\x64\xfc\xb1\x26\xc2\x66\x0a\xb3\x46\x51\xb1\x75" + b"\x30\xd6\xba\x2a\x46\x65\xf1\x9d\xf0\x62\x25\xb1\x26\xd1\x3e\xed" + b"\x91\xd5\x0d\xe7\xb9\xcb\x65\xf3\x3a\x46\xae\xd3\x6c\x7d\xc5\xe8", + ), + ( + b"A" * 1024, + 32, + b"\xa5\xef\x7e\x30\x8b\xe8\x33\x64\xe5\x9c\xf3\xb5\xf3\xba\x20\xa3" + b"\x5a\xe7\x30\xfd\xbc\x33\x11\xbf\x83\x89\x50\x82\xb4\x41\xe9\xb3", + ), +] + + +@pytest.mark.parametrize("msg,olen,ohash", SHAKE_256_VECTORS) +def test_shake_256(msg, olen, ohash): + out = shake_256(msg, olen) + assert out == 
bytearray(ohash) diff --git a/myenv/lib/python3.9/site-packages/ecdsa/util.py b/myenv/lib/python3.9/site-packages/ecdsa/util.py new file mode 100644 index 0000000..9a56110 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/ecdsa/util.py @@ -0,0 +1,433 @@ +from __future__ import division + +import os +import math +import binascii +import sys +from hashlib import sha256 +from six import PY2, int2byte, b, next +from . import der +from ._compat import normalise_bytes + +# RFC5480: +# The "unrestricted" algorithm identifier is: +# id-ecPublicKey OBJECT IDENTIFIER ::= { +# iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + +oid_ecPublicKey = (1, 2, 840, 10045, 2, 1) +encoded_oid_ecPublicKey = der.encode_oid(*oid_ecPublicKey) + +# RFC5480: +# The ECDH algorithm uses the following object identifier: +# id-ecDH OBJECT IDENTIFIER ::= { +# iso(1) identified-organization(3) certicom(132) schemes(1) +# ecdh(12) } + +oid_ecDH = (1, 3, 132, 1, 12) + +# RFC5480: +# The ECMQV algorithm uses the following object identifier: +# id-ecMQV OBJECT IDENTIFIER ::= { +# iso(1) identified-organization(3) certicom(132) schemes(1) +# ecmqv(13) } + +oid_ecMQV = (1, 3, 132, 1, 13) + +if sys.version_info >= (3,): # pragma: no branch + + def entropy_to_bits(ent_256): + """Convert a bytestring to string of 0's and 1's""" + return bin(int.from_bytes(ent_256, "big"))[2:].zfill(len(ent_256) * 8) + +else: + + def entropy_to_bits(ent_256): + """Convert a bytestring to string of 0's and 1's""" + return "".join(bin(ord(x))[2:].zfill(8) for x in ent_256) + + +if sys.version_info < (2, 7): # pragma: no branch + # Can't add a method to a built-in type so we are stuck with this + def bit_length(x): + return len(bin(x)) - 2 + +else: + + def bit_length(x): + return x.bit_length() or 1 + + +def orderlen(order): + return (1 + len("%x" % order)) // 2 # bytes + + +def randrange(order, entropy=None): + """Return a random integer k such that 1 <= k < order, uniformly + distributed across that range. Worst case should be a mean of 2 loops at + (2**k)+2. + + Note that this function is not declared to be forwards-compatible: we may + change the behavior in future releases. The entropy= argument (which + should get a callable that behaves like os.urandom) can be used to + achieve stability within a given release (for repeatable unit tests), but + should not be used as a long-term-compatible key generation algorithm. + """ + assert order > 1 + if entropy is None: + entropy = os.urandom + upper_2 = bit_length(order - 2) + upper_256 = upper_2 // 8 + 1 + while True: # I don't think this needs a counter with bit-wise randrange + ent_256 = entropy(upper_256) + ent_2 = entropy_to_bits(ent_256) + rand_num = int(ent_2[:upper_2], base=2) + 1 + if 0 < rand_num < order: + return rand_num + + +class PRNG: + # this returns a callable which, when invoked with an integer N, will + # return N pseudorandom bytes. Note: this is a short-term PRNG, meant + # primarily for the needs of randrange_from_seed__trytryagain(), which + # only needs to run it a few times per seed. It does not provide + # protection against state compromise (forward security). 
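+ # Example: PRNG("seed")(16) returns 16 bytes drawn from successive + # sha256("prng-<counter>-<seed>") digests, so the same seed always + # reproduces the same byte stream.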
+ def __init__(self, seed): + self.generator = self.block_generator(seed) + + def __call__(self, numbytes): + a = [next(self.generator) for i in range(numbytes)] + + if PY2: # pragma: no branch + return "".join(a) + else: + return bytes(a) + + def block_generator(self, seed): + counter = 0 + while True: + for byte in sha256( + ("prng-%d-%s" % (counter, seed)).encode() + ).digest(): + yield byte + counter += 1 + + +def randrange_from_seed__overshoot_modulo(seed, order): + # hash the data, then turn the digest into a number in [1,order). + # + # We use David-Sarah Hopwood's suggestion: turn it into a number that's + # sufficiently larger than the group order, then modulo it down to fit. + # This should give adequate (but not perfect) uniformity, and simple + # code. There are other choices: try-try-again is the main one. + base = PRNG(seed)(2 * orderlen(order)) + number = (int(binascii.hexlify(base), 16) % (order - 1)) + 1 + assert 1 <= number < order, (1, number, order) + return number + + +def lsb_of_ones(numbits): + return (1 << numbits) - 1 + + +def bits_and_bytes(order): + bits = int(math.log(order - 1, 2) + 1) + bytes = bits // 8 + extrabits = bits % 8 + return bits, bytes, extrabits + + +# the following randrange_from_seed__METHOD() functions take an +# arbitrarily-sized secret seed and turn it into a number that obeys the same +# range limits as randrange() above. They are meant for deriving consistent +# signing keys from a secret rather than generating them randomly, for +# example a protocol in which three signing keys are derived from a master +# secret. You should use a uniformly-distributed unguessable seed with about +# curve.baselen bytes of entropy. To use one, do this: +# seed = os.urandom(curve.baselen) # or other starting point +# secexp = ecdsa.util.randrange_from_seed__trytryagain(sed, curve.order) +# sk = SigningKey.from_secret_exponent(secexp, curve) + + +def randrange_from_seed__truncate_bytes(seed, order, hashmod=sha256): + # hash the seed, then turn the digest into a number in [1,order), but + # don't worry about trying to uniformly fill the range. This will lose, + # on average, four bits of entropy. + bits, _bytes, extrabits = bits_and_bytes(order) + if extrabits: + _bytes += 1 + base = hashmod(seed).digest()[:_bytes] + base = "\x00" * (_bytes - len(base)) + base + number = 1 + int(binascii.hexlify(base), 16) + assert 1 <= number < order + return number + + +def randrange_from_seed__truncate_bits(seed, order, hashmod=sha256): + # like string_to_randrange_truncate_bytes, but only lose an average of + # half a bit + bits = int(math.log(order - 1, 2) + 1) + maxbytes = (bits + 7) // 8 + base = hashmod(seed).digest()[:maxbytes] + base = "\x00" * (maxbytes - len(base)) + base + topbits = 8 * maxbytes - bits + if topbits: + base = int2byte(ord(base[0]) & lsb_of_ones(topbits)) + base[1:] + number = 1 + int(binascii.hexlify(base), 16) + assert 1 <= number < order + return number + + +def randrange_from_seed__trytryagain(seed, order): + # figure out exactly how many bits we need (rounded up to the nearest + # bit), so we can reduce the chance of looping to less than 0.5 . This is + # specified to feed from a byte-oriented PRNG, and discards the + # high-order bits of the first byte as necessary to get the right number + # of bits. The average number of loops will range from 1.0 (when + # order=2**k-1) to 2.0 (when order=2**k+1). 
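+ # e.g. for order = 2**8 - 2 a single byte is drawn, no extra bits need to be + # masked off, and the guess is simply redrawn whenever it falls outside [1, order)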
+ assert order > 1 + bits, bytes, extrabits = bits_and_bytes(order) + generate = PRNG(seed) + while True: + extrabyte = b("") + if extrabits: + extrabyte = int2byte(ord(generate(1)) & lsb_of_ones(extrabits)) + guess = string_to_number(extrabyte + generate(bytes)) + 1 + if 1 <= guess < order: + return guess + + +def number_to_string(num, order): + l = orderlen(order) + fmt_str = "%0" + str(2 * l) + "x" + string = binascii.unhexlify((fmt_str % num).encode()) + assert len(string) == l, (len(string), l) + return string + + +def number_to_string_crop(num, order): + l = orderlen(order) + fmt_str = "%0" + str(2 * l) + "x" + string = binascii.unhexlify((fmt_str % num).encode()) + return string[:l] + + +def string_to_number(string): + return int(binascii.hexlify(string), 16) + + +def string_to_number_fixedlen(string, order): + l = orderlen(order) + assert len(string) == l, (len(string), l) + return int(binascii.hexlify(string), 16) + + +# these methods are useful for the sigencode= argument to SK.sign() and the +# sigdecode= argument to VK.verify(), and control how the signature is packed +# or unpacked. + + +def sigencode_strings(r, s, order): + r_str = number_to_string(r, order) + s_str = number_to_string(s, order) + return (r_str, s_str) + + +def sigencode_string(r, s, order): + """ + Encode the signature to raw format (:term:`raw encoding`) + + It's expected that this function will be used as a `sigencode=` parameter + in :func:`ecdsa.keys.SigningKey.sign` method. + + :param int r: first parameter of the signature + :param int s: second parameter of the signature + :param int order: the order of the curve over which the signature was + computed + + :return: raw encoding of ECDSA signature + :rtype: bytes + """ + # for any given curve, the size of the signature numbers is + # fixed, so just use simple concatenation + r_str, s_str = sigencode_strings(r, s, order) + return r_str + s_str + + +def sigencode_der(r, s, order): + """ + Encode the signature into the ECDSA-Sig-Value structure using :term:`DER`. + + Encodes the signature to the following :term:`ASN.1` structure:: + + Ecdsa-Sig-Value ::= SEQUENCE { + r INTEGER, + s INTEGER + } + + It's expected that this function will be used as a `sigencode=` parameter + in :func:`ecdsa.keys.SigningKey.sign` method. + + :param int r: first parameter of the signature + :param int s: second parameter of the signature + :param int order: the order of the curve over which the signature was + computed + + :return: DER encoding of ECDSA signature + :rtype: bytes + """ + return der.encode_sequence(der.encode_integer(r), der.encode_integer(s)) + + +# canonical versions of sigencode methods +# these enforce low S values, by negating the value (modulo the order) if +# above order/2 see CECKey::Sign() +# https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214 +def sigencode_strings_canonize(r, s, order): + if s > order / 2: + s = order - s + return sigencode_strings(r, s, order) + + +def sigencode_string_canonize(r, s, order): + if s > order / 2: + s = order - s + return sigencode_string(r, s, order) + + +def sigencode_der_canonize(r, s, order): + if s > order / 2: + s = order - s + return sigencode_der(r, s, order) + + +class MalformedSignature(Exception): + """ + Raised by decoding functions when the signature is malformed. + + Malformed in this context means that the relevant strings or integers + do not match what a signature over provided curve would create. 
Either
+    because the byte strings have incorrect lengths or because the encoded
+    values are too large.
+    """
+
+    pass
+
+
+def sigdecode_string(signature, order):
+    """
+    Decoder for :term:`raw encoding` of ECDSA signatures.
+
+    raw encoding is a simple concatenation of the two integers that comprise
+    the signature, with each encoded using the same amount of bytes depending
+    on curve size/order.
+
+    It's expected that this function will be used as the `sigdecode=`
+    parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+
+    :param signature: encoded signature
+    :type signature: bytes like object
+    :param order: order of the curve over which the signature was computed
+    :type order: int
+
+    :raises MalformedSignature: when the encoding of the signature is invalid
+
+    :return: tuple with decoded 'r' and 's' values of signature
+    :rtype: tuple of ints
+    """
+    signature = normalise_bytes(signature)
+    l = orderlen(order)
+    if not len(signature) == 2 * l:
+        raise MalformedSignature(
+            "Invalid length of signature, expected {0} bytes long, "
+            "provided string is {1} bytes long".format(2 * l, len(signature))
+        )
+    r = string_to_number_fixedlen(signature[:l], order)
+    s = string_to_number_fixedlen(signature[l:], order)
+    return r, s
+
+
+def sigdecode_strings(rs_strings, order):
+    """
+    Decode the signature from two strings.
+
+    First string needs to be a big endian encoding of 'r', second needs to
+    be a big endian encoding of the 's' parameter of an ECDSA signature.
+
+    It's expected that this function will be used as the `sigdecode=`
+    parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+
+    :param list rs_strings: list of two bytes-like objects, each encoding one
+        parameter of signature
+    :param int order: order of the curve over which the signature was computed
+
+    :raises MalformedSignature: when the encoding of the signature is invalid
+
+    :return: tuple with decoded 'r' and 's' values of signature
+    :rtype: tuple of ints
+    """
+    if not len(rs_strings) == 2:
+        raise MalformedSignature(
+            "Invalid number of strings provided: {0}, expected 2".format(
+                len(rs_strings)
+            )
+        )
+    (r_str, s_str) = rs_strings
+    r_str = normalise_bytes(r_str)
+    s_str = normalise_bytes(s_str)
+    l = orderlen(order)
+    if not len(r_str) == l:
+        raise MalformedSignature(
+            "Invalid length of first string ('r' parameter), "
+            "expected {0} bytes long, provided string is {1} "
+            "bytes long".format(l, len(r_str))
+        )
+    if not len(s_str) == l:
+        raise MalformedSignature(
+            "Invalid length of second string ('s' parameter), "
+            "expected {0} bytes long, provided string is {1} "
+            "bytes long".format(l, len(s_str))
+        )
+    r = string_to_number_fixedlen(r_str, order)
+    s = string_to_number_fixedlen(s_str, order)
+    return r, s
+
+
+def sigdecode_der(sig_der, order):
+    """
+    Decoder for DER format of ECDSA signatures.
+
+    DER format of signature is one that uses the :term:`ASN.1` :term:`DER`
+    rules to encode it as a sequence of two integers::
+
+        Ecdsa-Sig-Value ::= SEQUENCE {
+            r INTEGER,
+            s INTEGER
+        }
+
+    It's expected that this function will be used as the `sigdecode=`
+    parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+ + :param sig_der: encoded signature + :type sig_der: bytes like object + :param order: order of the curve over which the signature was computed + :type order: int + + :raises UnexpectedDER: when the encoding of signature is invalid + + :return: tuple with decoded 'r' and 's' values of signature + :rtype: tuple of ints + """ + sig_der = normalise_bytes(sig_der) + # return der.encode_sequence(der.encode_integer(r), der.encode_integer(s)) + rs_strings, empty = der.remove_sequence(sig_der) + if empty != b"": + raise der.UnexpectedDER( + "trailing junk after DER sig: %s" % binascii.hexlify(empty) + ) + r, rest = der.remove_integer(rs_strings) + s, empty = der.remove_integer(rest) + if empty != b"": + raise der.UnexpectedDER( + "trailing junk after DER numbers: %s" % binascii.hexlify(empty) + ) + return r, s diff --git a/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/LICENSE new file mode 100644 index 0000000..3e92463 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Sebastián Ramírez + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
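A quick round-trip sketch for the `sigencode_der` / `sigdecode_der` helpers vendored above (illustrative only: the `r` and `s` values are made up, and `order` is the NIST P-256 group order):

```Python
from ecdsa.util import sigencode_der, sigdecode_der

order = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
r, s = 12345, 67890  # made-up signature components, not a real signature

der_sig = sigencode_der(r, s, order)            # DER SEQUENCE of two INTEGERs
assert sigdecode_der(der_sig, order) == (r, s)  # decoding inverts encoding
```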
diff --git a/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/METADATA new file mode 100644 index 0000000..f62f221 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/METADATA @@ -0,0 +1,545 @@ +Metadata-Version: 2.1 +Name: fastapi +Version: 0.70.1 +Summary: FastAPI framework, high performance, easy to learn, fast to code, ready for production +Home-page: https://github.com/tiangolo/fastapi +Author: Sebastián Ramírez +Author-email: tiangolo@gmail.com +Requires-Python: >=3.6.1 +Description-Content-Type: text/markdown +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries :: Application Frameworks +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development +Classifier: Typing :: Typed +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Framework :: AsyncIO +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Dist: starlette ==0.16.0 +Requires-Dist: pydantic >=1.6.2,!=1.7,!=1.7.1,!=1.7.2,!=1.7.3,!=1.8,!=1.8.1,<2.0.0 +Requires-Dist: requests >=2.24.0,<3.0.0 ; extra == "all" +Requires-Dist: jinja2 >=2.11.2,<4.0.0 ; extra == "all" +Requires-Dist: python-multipart >=0.0.5,<0.0.6 ; extra == "all" +Requires-Dist: itsdangerous >=1.1.0,<3.0.0 ; extra == "all" +Requires-Dist: pyyaml >=5.3.1,<6.0.0 ; extra == "all" +Requires-Dist: ujson >=4.0.1,<5.0.0 ; extra == "all" +Requires-Dist: orjson >=3.2.1,<4.0.0 ; extra == "all" +Requires-Dist: email_validator >=1.1.1,<2.0.0 ; extra == "all" +Requires-Dist: uvicorn[standard] >=0.12.0,<0.16.0 ; extra == "all" +Requires-Dist: python-jose[cryptography] >=3.3.0,<4.0.0 ; extra == "dev" +Requires-Dist: passlib[bcrypt] >=1.7.2,<2.0.0 ; extra == "dev" +Requires-Dist: autoflake >=1.4.0,<2.0.0 ; extra == "dev" +Requires-Dist: flake8 >=3.8.3,<4.0.0 ; extra == "dev" +Requires-Dist: uvicorn[standard] >=0.12.0,<0.16.0 ; extra == "dev" +Requires-Dist: mkdocs >=1.1.2,<2.0.0 ; extra == "doc" +Requires-Dist: mkdocs-material >=7.1.9,<8.0.0 ; extra == "doc" +Requires-Dist: mdx-include >=1.4.1,<2.0.0 ; extra == "doc" +Requires-Dist: mkdocs-markdownextradata-plugin >=0.1.7,<0.3.0 ; extra == "doc" +Requires-Dist: typer-cli >=0.0.12,<0.0.13 ; extra == "doc" +Requires-Dist: pyyaml >=5.3.1,<6.0.0 ; extra == "doc" +Requires-Dist: pytest >=6.2.4,<7.0.0 ; extra == "test" +Requires-Dist: pytest-cov >=2.12.0,<4.0.0 ; extra == "test" +Requires-Dist: mypy ==0.910 ; extra == "test" +Requires-Dist: flake8 >=3.8.3,<4.0.0 ; extra == "test" +Requires-Dist: black ==21.9b0 ; extra == "test" +Requires-Dist: isort >=5.0.6,<6.0.0 ; extra == "test" +Requires-Dist: requests >=2.24.0,<3.0.0 ; extra == "test" +Requires-Dist: httpx >=0.14.0,<0.19.0 ; 
extra == "test" +Requires-Dist: email_validator >=1.1.1,<2.0.0 ; extra == "test" +Requires-Dist: sqlalchemy >=1.3.18,<1.5.0 ; extra == "test" +Requires-Dist: peewee >=3.13.3,<4.0.0 ; extra == "test" +Requires-Dist: databases[sqlite] >=0.3.2,<0.6.0 ; extra == "test" +Requires-Dist: orjson >=3.2.1,<4.0.0 ; extra == "test" +Requires-Dist: ujson >=4.0.1,<5.0.0 ; extra == "test" +Requires-Dist: python-multipart >=0.0.5,<0.0.6 ; extra == "test" +Requires-Dist: flask >=1.1.2,<3.0.0 ; extra == "test" +Requires-Dist: anyio[trio] >=3.2.1,<4.0.0 ; extra == "test" +Requires-Dist: types-ujson ==0.1.1 ; extra == "test" +Requires-Dist: types-orjson ==3.6.0 ; extra == "test" +Requires-Dist: types-dataclasses ==0.1.7 ; extra == "test" and ( python_version<'3.7') +Project-URL: Documentation, https://fastapi.tiangolo.com/ +Provides-Extra: all +Provides-Extra: dev +Provides-Extra: doc +Provides-Extra: test + +

+# FastAPI
+
+FastAPI framework, high performance, easy to learn, fast to code, ready for production
+
+[badges: Test | Coverage | Package version | Supported Python versions]
+
+
+---
+
+**Documentation**: https://fastapi.tiangolo.com
+
+**Source Code**: https://github.com/tiangolo/fastapi
+
+---
+
+FastAPI is a modern, fast (high-performance) web framework for building APIs with Python 3.6+ based on standard Python type hints.
+
+The key features are:
+
+* **Fast**: Very high performance, on par with **NodeJS** and **Go** (thanks to Starlette and Pydantic). [One of the fastest Python frameworks available](#performance).
+
+* **Fast to code**: Increase the speed to develop features by about 200% to 300%. *
+* **Fewer bugs**: Reduce about 40% of human (developer) induced errors. *
+* **Intuitive**: Great editor support. Completion everywhere. Less time debugging.
+* **Easy**: Designed to be easy to use and learn. Less time reading docs.
+* **Short**: Minimize code duplication. Multiple features from each parameter declaration. Fewer bugs.
+* **Robust**: Get production-ready code. With automatic interactive documentation.
+* **Standards-based**: Based on (and fully compatible with) the open standards for APIs: OpenAPI (previously known as Swagger) and JSON Schema.
+
+* estimation based on tests on an internal development team, building production applications.
+
+## Sponsors
+
+[sponsor logo grid omitted] Other sponsors
+
+## Opinions
+
+"_[...] I'm using **FastAPI** a ton these days. [...] I'm actually planning to use it for all of my team's **ML services at Microsoft**. Some of them are getting integrated into the core **Windows** product and some **Office** products._"
+
Kabir Khan - Microsoft (ref)
+ +--- + +"_We adopted the **FastAPI** library to spawn a **REST** server that can be queried to obtain **predictions**. [for Ludwig]_" + +
Piero Molino, Yaroslav Dudin, and Sai Sumanth Miryala - Uber (ref)
+ +--- + +"_**Netflix** is pleased to announce the open-source release of our **crisis management** orchestration framework: **Dispatch**! [built with **FastAPI**]_" + +
Kevin Glisson, Marc Vilanova, Forest Monsen - Netflix (ref)
+ +--- + +"_I’m over the moon excited about **FastAPI**. It’s so fun!_" + +
Brian Okken - Python Bytes podcast host (ref)
+ +--- + +"_Honestly, what you've built looks super solid and polished. In many ways, it's what I wanted **Hug** to be - it's really inspiring to see someone build that._" + +
Timothy Crosley - Hug creator (ref)
+ +--- + +"_If you're looking to learn one **modern framework** for building REST APIs, check out **FastAPI** [...] It's fast, easy to use and easy to learn [...]_" + +"_We've switched over to **FastAPI** for our **APIs** [...] I think you'll like it [...]_" + +
Ines Montani - Matthew Honnibal - Explosion AI founders - spaCy creators (ref) - (ref)
+ +--- + +## **Typer**, the FastAPI of CLIs + + + +If you are building a CLI app to be used in the terminal instead of a web API, check out **Typer**. + +**Typer** is FastAPI's little sibling. And it's intended to be the **FastAPI of CLIs**. ⌨️ 🚀 + +## Requirements + +Python 3.6+ + +FastAPI stands on the shoulders of giants: + +* Starlette for the web parts. +* Pydantic for the data parts. + +## Installation + +
+ +```console +$ pip install fastapi + +---> 100% +``` + +
+
+You will also need an ASGI server for production, such as Uvicorn or Hypercorn.
+
+ +```console +$ pip install "uvicorn[standard]" + +---> 100% +``` + +
+ +## Example + +### Create it + +* Create a file `main.py` with: + +```Python +from typing import Optional + +from fastapi import FastAPI + +app = FastAPI() + + +@app.get("/") +def read_root(): + return {"Hello": "World"} + + +@app.get("/items/{item_id}") +def read_item(item_id: int, q: Optional[str] = None): + return {"item_id": item_id, "q": q} +``` + +
+Or use async def... + +If your code uses `async` / `await`, use `async def`: + +```Python hl_lines="9 14" +from typing import Optional + +from fastapi import FastAPI + +app = FastAPI() + + +@app.get("/") +async def read_root(): + return {"Hello": "World"} + + +@app.get("/items/{item_id}") +async def read_item(item_id: int, q: Optional[str] = None): + return {"item_id": item_id, "q": q} +``` + +**Note**: + +If you don't know, check the _"In a hurry?"_ section about `async` and `await` in the docs. + +
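+
+As an aside (a sketch, not part of the upstream quickstart): either variant can also be exercised without starting a server, using the test client that FastAPI re-exports from Starlette (it requires the `requests` package):
+
+```Python
+from fastapi.testclient import TestClient
+
+from main import app  # the app defined above
+
+client = TestClient(app)
+response = client.get("/items/5", params={"q": "somequery"})
+assert response.json() == {"item_id": 5, "q": "somequery"}
+```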
+ +### Run it + +Run the server with: + +
+ +```console +$ uvicorn main:app --reload + +INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit) +INFO: Started reloader process [28720] +INFO: Started server process [28722] +INFO: Waiting for application startup. +INFO: Application startup complete. +``` + +
+ +
+About the command uvicorn main:app --reload... + +The command `uvicorn main:app` refers to: + +* `main`: the file `main.py` (the Python "module"). +* `app`: the object created inside of `main.py` with the line `app = FastAPI()`. +* `--reload`: make the server restart after code changes. Only do this for development. + +
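+
+(For production you would typically drop `--reload` and bind an explicit interface and port; `--host` and `--port` are standard uvicorn flags. An illustrative sketch:)
+
+```console
+$ uvicorn main:app --host 0.0.0.0 --port 8080
+```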
+
+### Check it
+
+Open your browser at http://127.0.0.1:8000/items/5?q=somequery.
+
+You will see the JSON response as:
+
+```JSON
+{"item_id": 5, "q": "somequery"}
+```
+
+You already created an API that:
+
+* Receives HTTP requests in the _paths_ `/` and `/items/{item_id}`.
+* Both _paths_ take `GET` operations (also known as HTTP _methods_).
+* The _path_ `/items/{item_id}` has a _path parameter_ `item_id` that should be an `int`.
+* The _path_ `/items/{item_id}` has an optional `str` _query parameter_ `q`.
+
+### Interactive API docs
+
+Now go to http://127.0.0.1:8000/docs.
+
+You will see the automatic interactive API documentation (provided by Swagger UI):
+
+![Swagger UI](https://fastapi.tiangolo.com/img/index/index-01-swagger-ui-simple.png)
+
+### Alternative API docs
+
+And now, go to http://127.0.0.1:8000/redoc.
+
+You will see the alternative automatic documentation (provided by ReDoc):
+
+![ReDoc](https://fastapi.tiangolo.com/img/index/index-02-redoc-simple.png)
+
+## Example upgrade
+
+Now modify the file `main.py` to receive a body from a `PUT` request.
+
+Declare the body using standard Python types, thanks to Pydantic.
+
+```Python hl_lines="4 9-12 25-27"
+from typing import Optional
+
+from fastapi import FastAPI
+from pydantic import BaseModel
+
+app = FastAPI()
+
+
+class Item(BaseModel):
+    name: str
+    price: float
+    is_offer: Optional[bool] = None
+
+
+@app.get("/")
+def read_root():
+    return {"Hello": "World"}
+
+
+@app.get("/items/{item_id}")
+def read_item(item_id: int, q: Optional[str] = None):
+    return {"item_id": item_id, "q": q}
+
+
+@app.put("/items/{item_id}")
+def update_item(item_id: int, item: Item):
+    return {"item_name": item.name, "item_id": item_id}
+```
+
+The server should reload automatically (because you added `--reload` to the `uvicorn` command above).
+
+### Interactive API docs upgrade
+
+Now go to http://127.0.0.1:8000/docs.
+
+* The interactive API documentation will be automatically updated, including the new body:
+
+![Swagger UI](https://fastapi.tiangolo.com/img/index/index-03-swagger-02.png)
+
+* Click on the button "Try it out"; it allows you to fill the parameters and directly interact with the API:
+
+![Swagger UI interaction](https://fastapi.tiangolo.com/img/index/index-04-swagger-03.png)
+
+* Then click on the "Execute" button; the user interface will communicate with your API, send the parameters, get the results and show them on the screen:
+
+![Swagger UI interaction](https://fastapi.tiangolo.com/img/index/index-05-swagger-04.png)
+
+### Alternative API docs upgrade
+
+And now, go to http://127.0.0.1:8000/redoc.
+
+* The alternative documentation will also reflect the new query parameter and body:
+
+![ReDoc](https://fastapi.tiangolo.com/img/index/index-06-redoc-02.png)
+
+### Recap
+
+In summary, you declare **once** the types of parameters, body, etc. as function parameters.
+
+You do that with standard modern Python types.
+
+You don't have to learn a new syntax, the methods or classes of a specific library, etc.
+
+Just standard **Python 3.6+**.
+
+For example, for an `int`:
+
+```Python
+item_id: int
+```
+
+or for a more complex `Item` model:
+
+```Python
+item: Item
+```
+
+...and with that single declaration you get:
+
+* Editor support, including:
+    * Completion.
+    * Type checks.
+* Validation of data:
+    * Automatic and clear errors when the data is invalid.
+    * Validation even for deeply nested JSON objects.
+* Conversion of input data: coming from the network to Python data and types. Reading from:
+    * JSON.
+    * Path parameters.
+    * Query parameters.
+    * Cookies.
+    * Headers.
+    * Forms.
+    * Files.
+* Conversion of output data: converting from Python data and types to network data (as JSON):
+    * Convert Python types (`str`, `int`, `float`, `bool`, `list`, etc).
+    * `datetime` objects.
+    * `UUID` objects.
+    * Database models.
+    * ...and many more.
+* Automatic interactive API documentation, including 2 alternative user interfaces:
+    * Swagger UI.
+    * ReDoc.
+
+---
+
+Coming back to the previous code example, **FastAPI** will:
+
+* Validate that there is an `item_id` in the path for `GET` and `PUT` requests.
+* Validate that the `item_id` is of type `int` for `GET` and `PUT` requests.
+    * If it is not, the client will see a useful, clear error.
+* Check if there is an optional query parameter named `q` (as in `http://127.0.0.1:8000/items/foo?q=somequery`) for `GET` requests.
+    * As the `q` parameter is declared with `= None`, it is optional.
+    * Without the `None` it would be required (as is the body in the case with `PUT`).
+* For `PUT` requests to `/items/{item_id}`, read the body as JSON:
+    * Check that it has a required attribute `name` that should be a `str`.
+    * Check that it has a required attribute `price` that has to be a `float`.
+    * Check that it has an optional attribute `is_offer`, that should be a `bool`, if present.
+    * All this would also work for deeply nested JSON objects.
+* Convert from and to JSON automatically.
+* Document everything with OpenAPI, which can be used by:
+    * Interactive documentation systems.
+    * Automatic client code generation systems, for many languages.
+* Provide 2 interactive documentation web interfaces directly.
+
+---
+
+We just scratched the surface, but you already get the idea of how it all works.
+
+Try changing the line with:
+
+```Python
+    return {"item_name": item.name, "item_id": item_id}
+```
+
+...from:
+
+```Python
+        ... "item_name": item.name ...
+```
+
+...to:
+
+```Python
+        ... "item_price": item.price ...
+```
+
+...and see how your editor will auto-complete the attributes and know their types:
+
+![editor support](https://fastapi.tiangolo.com/img/vscode-completion.png)
+
+For a more complete example including more features, see the Tutorial - User Guide.
+
+**Spoiler alert**: the tutorial - user guide includes:
+
+* Declaration of **parameters** from other places, such as: **headers**, **cookies**, **form fields** and **files**.
+* How to set **validation constraints** such as `maximum_length` or `regex`.
+* A very powerful and easy to use **Dependency Injection** system.
+* Security and authentication, including support for **OAuth2** with **JWT tokens** and **HTTP Basic** auth.
+* More advanced (but equally easy) techniques for declaring **deeply nested JSON models** (thanks to Pydantic).
+* **GraphQL** integration with Strawberry and other libraries.
+* Many extra features (thanks to Starlette) such as:
+    * **WebSockets**
+    * extremely easy tests based on `requests` and `pytest`
+    * **CORS**
+    * **Cookie Sessions**
+    * ...and more.
+
+## Performance
+
+Independent TechEmpower benchmarks show **FastAPI** applications running under Uvicorn as one of the fastest Python frameworks available, only below Starlette and Uvicorn themselves (used internally by FastAPI). (*)
+
+To understand more about it, see the section Benchmarks.
+
+## Optional Dependencies
+
+Used by Pydantic:
+
+* ujson - for faster JSON "parsing".
+* email_validator - for email validation.
+
+Used by Starlette:
+
+* requests - Required if you want to use the `TestClient`.
+* jinja2 - Required if you want to use the default template configuration. +* python-multipart - Required if you want to support form "parsing", with `request.form()`. +* itsdangerous - Required for `SessionMiddleware` support. +* pyyaml - Required for Starlette's `SchemaGenerator` support (you probably don't need it with FastAPI). +* ujson - Required if you want to use `UJSONResponse`. + +Used by FastAPI / Starlette: + +* uvicorn - for the server that loads and serves your application. +* orjson - Required if you want to use `ORJSONResponse`. + +You can install all of these with `pip install "fastapi[all]"`. + +## License + +This project is licensed under the terms of the MIT license. + diff --git a/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/RECORD new file mode 100644 index 0000000..2c0634f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/RECORD @@ -0,0 +1,47 @@ +fastapi/__init__.py,sha256=DCuKSRU6kaM12G9kTSa1GBVYftxLI99U7UEYvvMI3zs,1015 +fastapi/applications.py,sha256=3_VBHSCd8FoA0sTWLltGbo0sYZgyiZeiYablU6ENzkg,33127 +fastapi/background.py,sha256=HtN5_pJJrOdalSbuGSMKJAPNWUU5h7rY_BXXubu7-IQ,76 +fastapi/concurrency.py,sha256=ne71p0K426vPxvI0_H7mcs3nr2_8lkXyvEIfbHZiCXg,1072 +fastapi/datastructures.py,sha256=5wDaZ4-54n1xL-cjDgMdw9aLqhihpuVQRTOT5XUi8x4,1743 +fastapi/encoders.py,sha256=VJDILyb8fVE69aCK-x3lMT3mrxTHSgmXqvO2okTqUoY,5380 +fastapi/exception_handlers.py,sha256=UVYCCe4qt5-5_NuQ3SxTXjDvOdKMHiTfcLp3RUKXhg8,912 +fastapi/exceptions.py,sha256=8B4f4gmHUVaX04L9IxxfEbUzX6OhJy4y6-utQbqNX0Q,1131 +fastapi/logger.py,sha256=I9NNi3ov8AcqbsbC9wl1X-hdItKgYt2XTrx1f99Zpl4,54 +fastapi/param_functions.py,sha256=XQZKy9q-1LFLupDdiBnqX2G_06q7QqYotyk9b1Ke6XM,7065 +fastapi/params.py,sha256=4JTDim09Sb1K2wS_Y9j0vsPqJzvjwbLWaavBTeV6bJM,10017 +fastapi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi/requests.py,sha256=zayepKFcienBllv3snmWI20Gk0oHNVLU4DDhqXBb4LU,142 +fastapi/responses.py,sha256=K9Gzj0E5GfwTgLiuCE-BuiXn9JdqGJHeXBMkvd8lFC8,1196 +fastapi/routing.py,sha256=GZtad4pK9SqCsUdLsukl3JwGrZAJEPohMsu-8cutNa4,49671 +fastapi/staticfiles.py,sha256=iirGIt3sdY2QZXd36ijs3Cj-T0FuGFda3cd90kM9Ikw,69 +fastapi/templating.py,sha256=4zsuTWgcjcEainMJFAlW6-gnslm6AgOS1SiiDWfmQxk,76 +fastapi/testclient.py,sha256=nBvaAmX66YldReJNZXPOk1sfuo2Q6hs8bOvIaCep6LQ,66 +fastapi/types.py,sha256=r6MngTHzkZOP9lzXgduje9yeZe5EInWAzCLuRJlhIuE,118 +fastapi/utils.py,sha256=g_H9Owy8vbUgY_L4tfYBJRdX9ofIqKPXkhh0LTRLRYE,5545 +fastapi/websockets.py,sha256=SroIkqE-lfChvtRP3mFaNKKtD6TmePDWBZtQfgM4noo,148 +fastapi/dependencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi/dependencies/models.py,sha256=zNbioxICuOeb-9ADDVQ45hUHOC0PBtPVEfVU3f1l_nc,2494 +fastapi/dependencies/utils.py,sha256=AExSzce8f0SL383fLuV1WN91NL33AT63E8iwK0_HJcs,28054 +fastapi/middleware/__init__.py,sha256=oQDxiFVcc1fYJUOIFvphnK7pTT5kktmfL32QXpBFvvo,58 +fastapi/middleware/cors.py,sha256=ynwjWQZoc_vbhzZ3_ZXceoaSrslHFHPdoM52rXr0WUU,79 +fastapi/middleware/gzip.py,sha256=xM5PcsH8QlAimZw4VDvcmTnqQamslThsfe3CVN2voa0,79 +fastapi/middleware/httpsredirect.py,sha256=rL8eXMnmLijwVkH7_400zHri1AekfeBd6D6qs8ix950,115 +fastapi/middleware/trustedhost.py,sha256=eE5XGRxGa7c5zPnMJDGp3BxaL25k5iVQlhnv-Pk0Pss,109 +fastapi/middleware/wsgi.py,sha256=Z3Ue-7wni4lUZMvH3G9ek__acgYdJstbnpZX_HQAboY,79 +fastapi/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+fastapi/openapi/constants.py,sha256=sJSpZzRp7Kky9R-jucU-K6_pJzLBRO75ddW7-MixZWc,166 +fastapi/openapi/docs.py,sha256=XyDQ4t2Ca95ZN_sSfwjCP3DcwM5Rv21FrwqTfk4x_H4,5538 +fastapi/openapi/models.py,sha256=iGtr1Xk5limukAi7JCW7-wRcasVJMc7M8y-2hRDH0xo,10936 +fastapi/openapi/utils.py,sha256=k26LXwhzG21w5VspJbi9FtZqs5_0Oqv2eQpWgG2c4Kk,17276 +fastapi/security/__init__.py,sha256=bO8pNmxqVRXUjfl2mOKiVZLn0FpBQ61VUYVjmppnbJw,881 +fastapi/security/api_key.py,sha256=NbVpS9TxDOaipoZa8-SREHyMtTcM3bmy5szMiQxEX9s,2793 +fastapi/security/base.py,sha256=dl4pvbC-RxjfbWgPtCWd8MVU-7CB2SZ22rJDXVCXO6c,141 +fastapi/security/http.py,sha256=ZSy3DFKFDLa3-I4vwsY1r8hQB_VrtAXw4-fMJauZIK0,5984 +fastapi/security/oauth2.py,sha256=xkbUW0b-G4aiEhSO7BZyE7iAdYK41cXB-SL1MnQvBh4,8183 +fastapi/security/open_id_connect_url.py,sha256=iikzuJCz_DG44Q77VrupqSoCbJYaiXkuo_W-kdmAzeo,1145 +fastapi/security/utils.py,sha256=izlh-HBaL1VnJeOeRTQnyNgI3hgTFs73eCyLy-snb4A,266 +fastapi-0.70.1.dist-info/LICENSE,sha256=Tsif_IFIW5f-xYSy1KlhAy7v_oNEU4lP2cEnSQbMdE4,1086 +fastapi-0.70.1.dist-info/WHEEL,sha256=LVOPL_YDMEiGvRLgDK1hLkfhFCnTcxcAYZJtpNFses0,81 +fastapi-0.70.1.dist-info/METADATA,sha256=9cgceXBWBzPlk7u9YLK_GC4jKNmJDGe18UPIhmJvK_A,24068 +fastapi-0.70.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +fastapi-0.70.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/WHEEL new file mode 100644 index 0000000..884ceb5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi-0.70.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.5.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/myenv/lib/python3.9/site-packages/fastapi/__init__.py b/myenv/lib/python3.9/site-packages/fastapi/__init__.py new file mode 100644 index 0000000..8bb6ce1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/__init__.py @@ -0,0 +1,24 @@ +"""FastAPI framework, high performance, easy to learn, fast to code, ready for production""" + +__version__ = "0.70.1" + +from starlette import status as status + +from .applications import FastAPI as FastAPI +from .background import BackgroundTasks as BackgroundTasks +from .datastructures import UploadFile as UploadFile +from .exceptions import HTTPException as HTTPException +from .param_functions import Body as Body +from .param_functions import Cookie as Cookie +from .param_functions import Depends as Depends +from .param_functions import File as File +from .param_functions import Form as Form +from .param_functions import Header as Header +from .param_functions import Path as Path +from .param_functions import Query as Query +from .param_functions import Security as Security +from .requests import Request as Request +from .responses import Response as Response +from .routing import APIRouter as APIRouter +from .websockets import WebSocket as WebSocket +from .websockets import WebSocketDisconnect as WebSocketDisconnect diff --git a/myenv/lib/python3.9/site-packages/fastapi/applications.py b/myenv/lib/python3.9/site-packages/fastapi/applications.py new file mode 100644 index 0000000..0c25026 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/applications.py @@ -0,0 +1,768 @@ +from typing import Any, Callable, Coroutine, Dict, List, Optional, Sequence, Type, Union + +from fastapi import routing +from fastapi.concurrency import AsyncExitStack +from fastapi.datastructures import Default, DefaultPlaceholder +from fastapi.encoders import DictIntStrAny, SetIntStr 
+from fastapi.exception_handlers import ( + http_exception_handler, + request_validation_exception_handler, +) +from fastapi.exceptions import RequestValidationError +from fastapi.logger import logger +from fastapi.openapi.docs import ( + get_redoc_html, + get_swagger_ui_html, + get_swagger_ui_oauth2_redirect_html, +) +from fastapi.openapi.utils import get_openapi +from fastapi.params import Depends +from fastapi.types import DecoratedCallable +from starlette.applications import Starlette +from starlette.datastructures import State +from starlette.exceptions import HTTPException +from starlette.middleware import Middleware +from starlette.requests import Request +from starlette.responses import HTMLResponse, JSONResponse, Response +from starlette.routing import BaseRoute +from starlette.types import ASGIApp, Receive, Scope, Send + + +class FastAPI(Starlette): + def __init__( + self, + *, + debug: bool = False, + routes: Optional[List[BaseRoute]] = None, + title: str = "FastAPI", + description: str = "", + version: str = "0.1.0", + openapi_url: Optional[str] = "/openapi.json", + openapi_tags: Optional[List[Dict[str, Any]]] = None, + servers: Optional[List[Dict[str, Union[str, Any]]]] = None, + dependencies: Optional[Sequence[Depends]] = None, + default_response_class: Type[Response] = Default(JSONResponse), + docs_url: Optional[str] = "/docs", + redoc_url: Optional[str] = "/redoc", + swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect", + swagger_ui_init_oauth: Optional[Dict[str, Any]] = None, + middleware: Optional[Sequence[Middleware]] = None, + exception_handlers: Optional[ + Dict[ + Union[int, Type[Exception]], + Callable[[Request, Any], Coroutine[Any, Any, Response]], + ] + ] = None, + on_startup: Optional[Sequence[Callable[[], Any]]] = None, + on_shutdown: Optional[Sequence[Callable[[], Any]]] = None, + terms_of_service: Optional[str] = None, + contact: Optional[Dict[str, Union[str, Any]]] = None, + license_info: Optional[Dict[str, Union[str, Any]]] = None, + openapi_prefix: str = "", + root_path: str = "", + root_path_in_servers: bool = True, + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + callbacks: Optional[List[BaseRoute]] = None, + deprecated: Optional[bool] = None, + include_in_schema: bool = True, + **extra: Any, + ) -> None: + self._debug: bool = debug + self.state: State = State() + self.router: routing.APIRouter = routing.APIRouter( + routes=routes, + dependency_overrides_provider=self, + on_startup=on_startup, + on_shutdown=on_shutdown, + default_response_class=default_response_class, + dependencies=dependencies, + callbacks=callbacks, + deprecated=deprecated, + include_in_schema=include_in_schema, + responses=responses, + ) + self.exception_handlers: Dict[ + Union[int, Type[Exception]], + Callable[[Request, Any], Coroutine[Any, Any, Response]], + ] = ( + {} if exception_handlers is None else dict(exception_handlers) + ) + self.exception_handlers.setdefault(HTTPException, http_exception_handler) + self.exception_handlers.setdefault( + RequestValidationError, request_validation_exception_handler + ) + + self.user_middleware: List[Middleware] = ( + [] if middleware is None else list(middleware) + ) + self.middleware_stack: ASGIApp = self.build_middleware_stack() + + self.title = title + self.description = description + self.version = version + self.terms_of_service = terms_of_service + self.contact = contact + self.license_info = license_info + self.servers = servers or [] + self.openapi_url = openapi_url + self.openapi_tags = 
openapi_tags + # TODO: remove when discarding the openapi_prefix parameter + if openapi_prefix: + logger.warning( + '"openapi_prefix" has been deprecated in favor of "root_path", which ' + "follows more closely the ASGI standard, is simpler, and more " + "automatic. Check the docs at " + "https://fastapi.tiangolo.com/advanced/sub-applications/" + ) + self.root_path = root_path or openapi_prefix + self.root_path_in_servers = root_path_in_servers + self.docs_url = docs_url + self.redoc_url = redoc_url + self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url + self.swagger_ui_init_oauth = swagger_ui_init_oauth + self.extra = extra + self.dependency_overrides: Dict[Callable[..., Any], Callable[..., Any]] = {} + + self.openapi_version = "3.0.2" + + if self.openapi_url: + assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'" + assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'" + self.openapi_schema: Optional[Dict[str, Any]] = None + self.setup() + + def openapi(self) -> Dict[str, Any]: + if not self.openapi_schema: + self.openapi_schema = get_openapi( + title=self.title, + version=self.version, + openapi_version=self.openapi_version, + description=self.description, + terms_of_service=self.terms_of_service, + contact=self.contact, + license_info=self.license_info, + routes=self.routes, + tags=self.openapi_tags, + servers=self.servers, + ) + return self.openapi_schema + + def setup(self) -> None: + if self.openapi_url: + urls = (server_data.get("url") for server_data in self.servers) + server_urls = {url for url in urls if url} + + async def openapi(req: Request) -> JSONResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + if root_path not in server_urls: + if root_path and self.root_path_in_servers: + self.servers.insert(0, {"url": root_path}) + server_urls.add(root_path) + return JSONResponse(self.openapi()) + + self.add_route(self.openapi_url, openapi, include_in_schema=False) + if self.openapi_url and self.docs_url: + + async def swagger_ui_html(req: Request) -> HTMLResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + openapi_url = root_path + self.openapi_url + oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url + if oauth2_redirect_url: + oauth2_redirect_url = root_path + oauth2_redirect_url + return get_swagger_ui_html( + openapi_url=openapi_url, + title=self.title + " - Swagger UI", + oauth2_redirect_url=oauth2_redirect_url, + init_oauth=self.swagger_ui_init_oauth, + ) + + self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False) + + if self.swagger_ui_oauth2_redirect_url: + + async def swagger_ui_redirect(req: Request) -> HTMLResponse: + return get_swagger_ui_oauth2_redirect_html() + + self.add_route( + self.swagger_ui_oauth2_redirect_url, + swagger_ui_redirect, + include_in_schema=False, + ) + if self.openapi_url and self.redoc_url: + + async def redoc_html(req: Request) -> HTMLResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + openapi_url = root_path + self.openapi_url + return get_redoc_html( + openapi_url=openapi_url, title=self.title + " - ReDoc" + ) + + self.add_route(self.redoc_url, redoc_html, include_in_schema=False) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if self.root_path: + scope["root_path"] = self.root_path + if AsyncExitStack: + async with AsyncExitStack() as stack: + scope["fastapi_astack"] = stack + await super().__call__(scope, receive, send) + else: + await super().__call__(scope, receive, send) # 
pragma: no cover + + def add_api_route( + self, + path: str, + endpoint: Callable[..., Coroutine[Any, Any, Response]], + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + name: Optional[str] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> None: + self.router.add_api_route( + path, + endpoint=endpoint, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + openapi_extra=openapi_extra, + ) + + def api_route( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.router.add_api_route( + path, + func, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + 
response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + openapi_extra=openapi_extra, + ) + return func + + return decorator + + def add_api_websocket_route( + self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None + ) -> None: + self.router.add_api_websocket_route(path, endpoint, name=name) + + def websocket( + self, path: str, name: Optional[str] = None + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_api_websocket_route(path, func, name=name) + return func + + return decorator + + def include_router( + self, + router: routing.APIRouter, + *, + prefix: str = "", + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + include_in_schema: bool = True, + default_response_class: Type[Response] = Default(JSONResponse), + callbacks: Optional[List[BaseRoute]] = None, + ) -> None: + self.router.include_router( + router, + prefix=prefix, + tags=tags, + dependencies=dependencies, + responses=responses, + deprecated=deprecated, + include_in_schema=include_in_schema, + default_response_class=default_response_class, + callbacks=callbacks, + ) + + def get( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.get( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def put( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: 
Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.put( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def post( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.post( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + 
callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def delete( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.delete( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + operation_id=operation_id, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def options( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.options( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + 
response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def head( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.head( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def patch( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.patch( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + 
response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def trace( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.router.trace( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) diff --git a/myenv/lib/python3.9/site-packages/fastapi/background.py b/myenv/lib/python3.9/site-packages/fastapi/background.py new file mode 100644 index 0000000..dd3bbe2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/background.py @@ -0,0 +1 @@ +from starlette.background import BackgroundTasks as BackgroundTasks # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/concurrency.py b/myenv/lib/python3.9/site-packages/fastapi/concurrency.py new file mode 100644 index 0000000..04382c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/concurrency.py @@ -0,0 +1,32 @@ +import sys +from typing import AsyncGenerator, ContextManager, TypeVar + +from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa +from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa +from starlette.concurrency import ( # noqa + run_until_first_complete as run_until_first_complete, +) + +if sys.version_info >= (3, 7): + from contextlib import AsyncExitStack as AsyncExitStack + from contextlib import asynccontextmanager as asynccontextmanager +else: + from contextlib2 import AsyncExitStack as AsyncExitStack # noqa + from contextlib2 
import asynccontextmanager as asynccontextmanager # noqa + + +_T = TypeVar("_T") + + +@asynccontextmanager +async def contextmanager_in_threadpool( + cm: ContextManager[_T], +) -> AsyncGenerator[_T, None]: + try: + yield await run_in_threadpool(cm.__enter__) + except Exception as e: + ok = await run_in_threadpool(cm.__exit__, type(e), e, None) + if not ok: + raise e + else: + await run_in_threadpool(cm.__exit__, None, None, None) diff --git a/myenv/lib/python3.9/site-packages/fastapi/datastructures.py b/myenv/lib/python3.9/site-packages/fastapi/datastructures.py new file mode 100644 index 0000000..b131712 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/datastructures.py @@ -0,0 +1,52 @@ +from typing import Any, Callable, Iterable, Type, TypeVar + +from starlette.datastructures import URL as URL # noqa: F401 +from starlette.datastructures import Address as Address # noqa: F401 +from starlette.datastructures import FormData as FormData # noqa: F401 +from starlette.datastructures import Headers as Headers # noqa: F401 +from starlette.datastructures import QueryParams as QueryParams # noqa: F401 +from starlette.datastructures import State as State # noqa: F401 +from starlette.datastructures import UploadFile as StarletteUploadFile + + +class UploadFile(StarletteUploadFile): + @classmethod + def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]: + yield cls.validate + + @classmethod + def validate(cls: Type["UploadFile"], v: Any) -> Any: + if not isinstance(v, StarletteUploadFile): + raise ValueError(f"Expected UploadFile, received: {type(v)}") + return v + + +class DefaultPlaceholder: + """ + You shouldn't use this class directly. + + It's used internally to recognize when a default value has been overwritten, even + if the overridden default value was truthy. + """ + + def __init__(self, value: Any): + self.value = value + + def __bool__(self) -> bool: + return bool(self.value) + + def __eq__(self, o: object) -> bool: + return isinstance(o, DefaultPlaceholder) and o.value == self.value + + +DefaultType = TypeVar("DefaultType") + + +def Default(value: DefaultType) -> DefaultType: + """ + You shouldn't use this function directly. + + It's used internally to recognize when a default value has been overwritten, even + if the overridden default value was truthy. 
+ """ + return DefaultPlaceholder(value) # type: ignore diff --git a/myenv/lib/python3.9/site-packages/fastapi/dependencies/__init__.py b/myenv/lib/python3.9/site-packages/fastapi/dependencies/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/fastapi/dependencies/models.py b/myenv/lib/python3.9/site-packages/fastapi/dependencies/models.py new file mode 100644 index 0000000..443590b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/dependencies/models.py @@ -0,0 +1,58 @@ +from typing import Any, Callable, List, Optional, Sequence + +from fastapi.security.base import SecurityBase +from pydantic.fields import ModelField + + +class SecurityRequirement: + def __init__( + self, security_scheme: SecurityBase, scopes: Optional[Sequence[str]] = None + ): + self.security_scheme = security_scheme + self.scopes = scopes + + +class Dependant: + def __init__( + self, + *, + path_params: Optional[List[ModelField]] = None, + query_params: Optional[List[ModelField]] = None, + header_params: Optional[List[ModelField]] = None, + cookie_params: Optional[List[ModelField]] = None, + body_params: Optional[List[ModelField]] = None, + dependencies: Optional[List["Dependant"]] = None, + security_schemes: Optional[List[SecurityRequirement]] = None, + name: Optional[str] = None, + call: Optional[Callable[..., Any]] = None, + request_param_name: Optional[str] = None, + websocket_param_name: Optional[str] = None, + http_connection_param_name: Optional[str] = None, + response_param_name: Optional[str] = None, + background_tasks_param_name: Optional[str] = None, + security_scopes_param_name: Optional[str] = None, + security_scopes: Optional[List[str]] = None, + use_cache: bool = True, + path: Optional[str] = None, + ) -> None: + self.path_params = path_params or [] + self.query_params = query_params or [] + self.header_params = header_params or [] + self.cookie_params = cookie_params or [] + self.body_params = body_params or [] + self.dependencies = dependencies or [] + self.security_requirements = security_schemes or [] + self.request_param_name = request_param_name + self.websocket_param_name = websocket_param_name + self.http_connection_param_name = http_connection_param_name + self.response_param_name = response_param_name + self.background_tasks_param_name = background_tasks_param_name + self.security_scopes = security_scopes + self.security_scopes_param_name = security_scopes_param_name + self.name = name + self.call = call + self.use_cache = use_cache + # Store the path to be able to re-generate a dependable from it in overrides + self.path = path + # Save the cache key at creation to optimize performance + self.cache_key = (self.call, tuple(sorted(set(self.security_scopes or [])))) diff --git a/myenv/lib/python3.9/site-packages/fastapi/dependencies/utils.py b/myenv/lib/python3.9/site-packages/fastapi/dependencies/utils.py new file mode 100644 index 0000000..35ba44a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/dependencies/utils.py @@ -0,0 +1,769 @@ +import dataclasses +import inspect +from contextlib import contextmanager +from copy import deepcopy +from typing import ( + Any, + Callable, + Coroutine, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +import anyio +from fastapi import params +from fastapi.concurrency import ( + AsyncExitStack, + asynccontextmanager, + contextmanager_in_threadpool, +) +from fastapi.dependencies.models import Dependant, SecurityRequirement +from fastapi.logger 
import logger +from fastapi.security.base import SecurityBase +from fastapi.security.oauth2 import OAuth2, SecurityScopes +from fastapi.security.open_id_connect_url import OpenIdConnect +from fastapi.utils import create_response_field, get_path_param_names +from pydantic import BaseModel, create_model +from pydantic.error_wrappers import ErrorWrapper +from pydantic.errors import MissingError +from pydantic.fields import ( + SHAPE_LIST, + SHAPE_SEQUENCE, + SHAPE_SET, + SHAPE_SINGLETON, + SHAPE_TUPLE, + SHAPE_TUPLE_ELLIPSIS, + FieldInfo, + ModelField, + Required, +) +from pydantic.schema import get_annotation_from_field_info +from pydantic.typing import ForwardRef, evaluate_forwardref +from pydantic.utils import lenient_issubclass +from starlette.background import BackgroundTasks +from starlette.concurrency import run_in_threadpool +from starlette.datastructures import FormData, Headers, QueryParams, UploadFile +from starlette.requests import HTTPConnection, Request +from starlette.responses import Response +from starlette.websockets import WebSocket + +sequence_shapes = { + SHAPE_LIST, + SHAPE_SET, + SHAPE_TUPLE, + SHAPE_SEQUENCE, + SHAPE_TUPLE_ELLIPSIS, +} +sequence_types = (list, set, tuple) +sequence_shape_to_type = { + SHAPE_LIST: list, + SHAPE_SET: set, + SHAPE_TUPLE: tuple, + SHAPE_SEQUENCE: list, + SHAPE_TUPLE_ELLIPSIS: list, +} + + +multipart_not_installed_error = ( + 'Form data requires "python-multipart" to be installed. \n' + 'You can install "python-multipart" with: \n\n' + "pip install python-multipart\n" +) +multipart_incorrect_install_error = ( + 'Form data requires "python-multipart" to be installed. ' + 'It seems you installed "multipart" instead. \n' + 'You can remove "multipart" with: \n\n' + "pip uninstall multipart\n\n" + 'And then install "python-multipart" with: \n\n' + "pip install python-multipart\n" +) + + +def check_file_field(field: ModelField) -> None: + field_info = field.field_info + if isinstance(field_info, params.Form): + try: + # __version__ is available in both multiparts, and can be mocked + from multipart import __version__ # type: ignore + + assert __version__ + try: + # parse_options_header is only available in the right multipart + from multipart.multipart import parse_options_header # type: ignore + + assert parse_options_header + except ImportError: + logger.error(multipart_incorrect_install_error) + raise RuntimeError(multipart_incorrect_install_error) + except ImportError: + logger.error(multipart_not_installed_error) + raise RuntimeError(multipart_not_installed_error) + + +def get_param_sub_dependant( + *, param: inspect.Parameter, path: str, security_scopes: Optional[List[str]] = None +) -> Dependant: + depends: params.Depends = param.default + if depends.dependency: + dependency = depends.dependency + else: + dependency = param.annotation + return get_sub_dependant( + depends=depends, + dependency=dependency, + path=path, + name=param.name, + security_scopes=security_scopes, + ) + + +def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant: + assert callable( + depends.dependency + ), "A parameter-less dependency must have a callable dependency" + return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path) + + +def get_sub_dependant( + *, + depends: params.Depends, + dependency: Callable[..., Any], + path: str, + name: Optional[str] = None, + security_scopes: Optional[List[str]] = None, +) -> Dependant: + security_requirement = None + security_scopes = security_scopes or [] + if 
isinstance(depends, params.Security): + dependency_scopes = depends.scopes + security_scopes.extend(dependency_scopes) + if isinstance(dependency, SecurityBase): + use_scopes: List[str] = [] + if isinstance(dependency, (OAuth2, OpenIdConnect)): + use_scopes = security_scopes + security_requirement = SecurityRequirement( + security_scheme=dependency, scopes=use_scopes + ) + sub_dependant = get_dependant( + path=path, + call=dependency, + name=name, + security_scopes=security_scopes, + use_cache=depends.use_cache, + ) + if security_requirement: + sub_dependant.security_requirements.append(security_requirement) + sub_dependant.security_scopes = security_scopes + return sub_dependant + + +CacheKey = Tuple[Optional[Callable[..., Any]], Tuple[str, ...]] + + +def get_flat_dependant( + dependant: Dependant, + *, + skip_repeats: bool = False, + visited: Optional[List[CacheKey]] = None, +) -> Dependant: + if visited is None: + visited = [] + visited.append(dependant.cache_key) + + flat_dependant = Dependant( + path_params=dependant.path_params.copy(), + query_params=dependant.query_params.copy(), + header_params=dependant.header_params.copy(), + cookie_params=dependant.cookie_params.copy(), + body_params=dependant.body_params.copy(), + security_schemes=dependant.security_requirements.copy(), + use_cache=dependant.use_cache, + path=dependant.path, + ) + for sub_dependant in dependant.dependencies: + if skip_repeats and sub_dependant.cache_key in visited: + continue + flat_sub = get_flat_dependant( + sub_dependant, skip_repeats=skip_repeats, visited=visited + ) + flat_dependant.path_params.extend(flat_sub.path_params) + flat_dependant.query_params.extend(flat_sub.query_params) + flat_dependant.header_params.extend(flat_sub.header_params) + flat_dependant.cookie_params.extend(flat_sub.cookie_params) + flat_dependant.body_params.extend(flat_sub.body_params) + flat_dependant.security_requirements.extend(flat_sub.security_requirements) + return flat_dependant + + +def get_flat_params(dependant: Dependant) -> List[ModelField]: + flat_dependant = get_flat_dependant(dependant, skip_repeats=True) + return ( + flat_dependant.path_params + + flat_dependant.query_params + + flat_dependant.header_params + + flat_dependant.cookie_params + ) + + +def is_scalar_field(field: ModelField) -> bool: + field_info = field.field_info + if not ( + field.shape == SHAPE_SINGLETON + and not lenient_issubclass(field.type_, BaseModel) + and not lenient_issubclass(field.type_, sequence_types + (dict,)) + and not dataclasses.is_dataclass(field.type_) + and not isinstance(field_info, params.Body) + ): + return False + if field.sub_fields: + if not all(is_scalar_field(f) for f in field.sub_fields): + return False + return True + + +def is_scalar_sequence_field(field: ModelField) -> bool: + if (field.shape in sequence_shapes) and not lenient_issubclass( + field.type_, BaseModel + ): + if field.sub_fields is not None: + for sub_field in field.sub_fields: + if not is_scalar_field(sub_field): + return False + return True + if lenient_issubclass(field.type_, sequence_types): + return True + return False + + +def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: + signature = inspect.signature(call) + globalns = getattr(call, "__globals__", {}) + typed_params = [ + inspect.Parameter( + name=param.name, + kind=param.kind, + default=param.default, + annotation=get_typed_annotation(param, globalns), + ) + for param in signature.parameters.values() + ] + typed_signature = inspect.Signature(typed_params) + return 
typed_signature + + +def get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any: + annotation = param.annotation + if isinstance(annotation, str): + annotation = ForwardRef(annotation) + annotation = evaluate_forwardref(annotation, globalns, globalns) + return annotation + + +def get_dependant( + *, + path: str, + call: Callable[..., Any], + name: Optional[str] = None, + security_scopes: Optional[List[str]] = None, + use_cache: bool = True, +) -> Dependant: + path_param_names = get_path_param_names(path) + endpoint_signature = get_typed_signature(call) + signature_params = endpoint_signature.parameters + dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache) + for param_name, param in signature_params.items(): + if isinstance(param.default, params.Depends): + sub_dependant = get_param_sub_dependant( + param=param, path=path, security_scopes=security_scopes + ) + dependant.dependencies.append(sub_dependant) + continue + if add_non_field_param_to_dependency(param=param, dependant=dependant): + continue + param_field = get_param_field( + param=param, default_field_info=params.Query, param_name=param_name + ) + if param_name in path_param_names: + assert is_scalar_field( + field=param_field + ), "Path params must be of one of the supported types" + if isinstance(param.default, params.Path): + ignore_default = False + else: + ignore_default = True + param_field = get_param_field( + param=param, + param_name=param_name, + default_field_info=params.Path, + force_type=params.ParamTypes.path, + ignore_default=ignore_default, + ) + add_param_to_fields(field=param_field, dependant=dependant) + elif is_scalar_field(field=param_field): + add_param_to_fields(field=param_field, dependant=dependant) + elif isinstance( + param.default, (params.Query, params.Header) + ) and is_scalar_sequence_field(param_field): + add_param_to_fields(field=param_field, dependant=dependant) + else: + field_info = param_field.field_info + assert isinstance( + field_info, params.Body + ), f"Param: {param_field.name} can only be a request body, using Body(...)" + dependant.body_params.append(param_field) + return dependant + + +def add_non_field_param_to_dependency( + *, param: inspect.Parameter, dependant: Dependant +) -> Optional[bool]: + if lenient_issubclass(param.annotation, Request): + dependant.request_param_name = param.name + return True + elif lenient_issubclass(param.annotation, WebSocket): + dependant.websocket_param_name = param.name + return True + elif lenient_issubclass(param.annotation, HTTPConnection): + dependant.http_connection_param_name = param.name + return True + elif lenient_issubclass(param.annotation, Response): + dependant.response_param_name = param.name + return True + elif lenient_issubclass(param.annotation, BackgroundTasks): + dependant.background_tasks_param_name = param.name + return True + elif lenient_issubclass(param.annotation, SecurityScopes): + dependant.security_scopes_param_name = param.name + return True + return None + + +def get_param_field( + *, + param: inspect.Parameter, + param_name: str, + default_field_info: Type[params.Param] = params.Param, + force_type: Optional[params.ParamTypes] = None, + ignore_default: bool = False, +) -> ModelField: + default_value = Required + had_schema = False + if not param.default == param.empty and ignore_default is False: + default_value = param.default + if isinstance(default_value, FieldInfo): + had_schema = True + field_info = default_value + default_value = field_info.default + if ( + 
isinstance(field_info, params.Param) + and getattr(field_info, "in_", None) is None + ): + field_info.in_ = default_field_info.in_ + if force_type: + field_info.in_ = force_type # type: ignore + else: + field_info = default_field_info(default_value) + required = default_value == Required + annotation: Any = Any + if not param.annotation == param.empty: + annotation = param.annotation + annotation = get_annotation_from_field_info(annotation, field_info, param_name) + if not field_info.alias and getattr(field_info, "convert_underscores", None): + alias = param.name.replace("_", "-") + else: + alias = field_info.alias or param.name + field = create_response_field( + name=param.name, + type_=annotation, + default=None if required else default_value, + alias=alias, + required=required, + field_info=field_info, + ) + field.required = required + if not had_schema and not is_scalar_field(field=field): + field.field_info = params.Body(field_info.default) + + return field + + +def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None: + field_info = cast(params.Param, field.field_info) + if field_info.in_ == params.ParamTypes.path: + dependant.path_params.append(field) + elif field_info.in_ == params.ParamTypes.query: + dependant.query_params.append(field) + elif field_info.in_ == params.ParamTypes.header: + dependant.header_params.append(field) + else: + assert ( + field_info.in_ == params.ParamTypes.cookie + ), f"non-body parameters must be in path, query, header or cookie: {field.name}" + dependant.cookie_params.append(field) + + +def is_coroutine_callable(call: Callable[..., Any]) -> bool: + if inspect.isroutine(call): + return inspect.iscoroutinefunction(call) + if inspect.isclass(call): + return False + call = getattr(call, "__call__", None) + return inspect.iscoroutinefunction(call) + + +def is_async_gen_callable(call: Callable[..., Any]) -> bool: + if inspect.isasyncgenfunction(call): + return True + call = getattr(call, "__call__", None) + return inspect.isasyncgenfunction(call) + + +def is_gen_callable(call: Callable[..., Any]) -> bool: + if inspect.isgeneratorfunction(call): + return True + call = getattr(call, "__call__", None) + return inspect.isgeneratorfunction(call) + + +async def solve_generator( + *, call: Callable[..., Any], stack: AsyncExitStack, sub_values: Dict[str, Any] +) -> Any: + if is_gen_callable(call): + cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values)) + elif is_async_gen_callable(call): + cm = asynccontextmanager(call)(**sub_values) + return await stack.enter_async_context(cm) + + +async def solve_dependencies( + *, + request: Union[Request, WebSocket], + dependant: Dependant, + body: Optional[Union[Dict[str, Any], FormData]] = None, + background_tasks: Optional[BackgroundTasks] = None, + response: Optional[Response] = None, + dependency_overrides_provider: Optional[Any] = None, + dependency_cache: Optional[Dict[Tuple[Callable[..., Any], Tuple[str]], Any]] = None, +) -> Tuple[ + Dict[str, Any], + List[ErrorWrapper], + Optional[BackgroundTasks], + Response, + Dict[Tuple[Callable[..., Any], Tuple[str]], Any], +]: + values: Dict[str, Any] = {} + errors: List[ErrorWrapper] = [] + response = response or Response( + content=None, + status_code=None, # type: ignore + headers=None, # type: ignore # in Starlette + media_type=None, # type: ignore # in Starlette + background=None, # type: ignore # in Starlette + ) + dependency_cache = dependency_cache or {} + sub_dependant: Dependant + for sub_dependant in dependant.dependencies: + 
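+        # Each sub-dependency is solved depth-first: a registered override (if
+        # any) replaces the callable, the recursive result is merged into
+        # values/errors, and the solved value is memoized in dependency_cache
+        # keyed by (call, security_scopes), so a dependency shared by several
+        # parameters only runs once per request.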
sub_dependant.call = cast(Callable[..., Any], sub_dependant.call) + sub_dependant.cache_key = cast( + Tuple[Callable[..., Any], Tuple[str]], sub_dependant.cache_key + ) + call = sub_dependant.call + use_sub_dependant = sub_dependant + if ( + dependency_overrides_provider + and dependency_overrides_provider.dependency_overrides + ): + original_call = sub_dependant.call + call = getattr( + dependency_overrides_provider, "dependency_overrides", {} + ).get(original_call, original_call) + use_path: str = sub_dependant.path # type: ignore + use_sub_dependant = get_dependant( + path=use_path, + call=call, + name=sub_dependant.name, + security_scopes=sub_dependant.security_scopes, + ) + use_sub_dependant.security_scopes = sub_dependant.security_scopes + + solved_result = await solve_dependencies( + request=request, + dependant=use_sub_dependant, + body=body, + background_tasks=background_tasks, + response=response, + dependency_overrides_provider=dependency_overrides_provider, + dependency_cache=dependency_cache, + ) + ( + sub_values, + sub_errors, + background_tasks, + _, # the subdependency returns the same response we have + sub_dependency_cache, + ) = solved_result + dependency_cache.update(sub_dependency_cache) + if sub_errors: + errors.extend(sub_errors) + continue + if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache: + solved = dependency_cache[sub_dependant.cache_key] + elif is_gen_callable(call) or is_async_gen_callable(call): + stack = request.scope.get("fastapi_astack") + assert isinstance(stack, AsyncExitStack) + solved = await solve_generator( + call=call, stack=stack, sub_values=sub_values + ) + elif is_coroutine_callable(call): + solved = await call(**sub_values) + else: + solved = await run_in_threadpool(call, **sub_values) + if sub_dependant.name is not None: + values[sub_dependant.name] = solved + if sub_dependant.cache_key not in dependency_cache: + dependency_cache[sub_dependant.cache_key] = solved + path_values, path_errors = request_params_to_args( + dependant.path_params, request.path_params + ) + query_values, query_errors = request_params_to_args( + dependant.query_params, request.query_params + ) + header_values, header_errors = request_params_to_args( + dependant.header_params, request.headers + ) + cookie_values, cookie_errors = request_params_to_args( + dependant.cookie_params, request.cookies + ) + values.update(path_values) + values.update(query_values) + values.update(header_values) + values.update(cookie_values) + errors += path_errors + query_errors + header_errors + cookie_errors + if dependant.body_params: + ( + body_values, + body_errors, + ) = await request_body_to_args( # body_params checked above + required_params=dependant.body_params, received_body=body + ) + values.update(body_values) + errors.extend(body_errors) + if dependant.http_connection_param_name: + values[dependant.http_connection_param_name] = request + if dependant.request_param_name and isinstance(request, Request): + values[dependant.request_param_name] = request + elif dependant.websocket_param_name and isinstance(request, WebSocket): + values[dependant.websocket_param_name] = request + if dependant.background_tasks_param_name: + if background_tasks is None: + background_tasks = BackgroundTasks() + values[dependant.background_tasks_param_name] = background_tasks + if dependant.response_param_name: + values[dependant.response_param_name] = response + if dependant.security_scopes_param_name: + values[dependant.security_scopes_param_name] = SecurityScopes( + 
scopes=dependant.security_scopes + ) + return values, errors, background_tasks, response, dependency_cache + + +def request_params_to_args( + required_params: Sequence[ModelField], + received_params: Union[Mapping[str, Any], QueryParams, Headers], +) -> Tuple[Dict[str, Any], List[ErrorWrapper]]: + values = {} + errors = [] + for field in required_params: + if is_scalar_sequence_field(field) and isinstance( + received_params, (QueryParams, Headers) + ): + value = received_params.getlist(field.alias) or field.default + else: + value = received_params.get(field.alias) + field_info = field.field_info + assert isinstance( + field_info, params.Param + ), "Params must be subclasses of Param" + if value is None: + if field.required: + errors.append( + ErrorWrapper( + MissingError(), loc=(field_info.in_.value, field.alias) + ) + ) + else: + values[field.name] = deepcopy(field.default) + continue + v_, errors_ = field.validate( + value, values, loc=(field_info.in_.value, field.alias) + ) + if isinstance(errors_, ErrorWrapper): + errors.append(errors_) + elif isinstance(errors_, list): + errors.extend(errors_) + else: + values[field.name] = v_ + return values, errors + + +async def request_body_to_args( + required_params: List[ModelField], + received_body: Optional[Union[Dict[str, Any], FormData]], +) -> Tuple[Dict[str, Any], List[ErrorWrapper]]: + values = {} + errors = [] + if required_params: + field = required_params[0] + field_info = field.field_info + embed = getattr(field_info, "embed", None) + field_alias_omitted = len(required_params) == 1 and not embed + if field_alias_omitted: + received_body = {field.alias: received_body} + + for field in required_params: + loc: Tuple[str, ...] + if field_alias_omitted: + loc = ("body",) + else: + loc = ("body", field.alias) + + value: Optional[Any] = None + if received_body is not None: + if ( + field.shape in sequence_shapes or field.type_ in sequence_types + ) and isinstance(received_body, FormData): + value = received_body.getlist(field.alias) + else: + try: + value = received_body.get(field.alias) + except AttributeError: + errors.append(get_missing_field_error(loc)) + continue + if ( + value is None + or (isinstance(field_info, params.Form) and value == "") + or ( + isinstance(field_info, params.Form) + and field.shape in sequence_shapes + and len(value) == 0 + ) + ): + if field.required: + errors.append(get_missing_field_error(loc)) + else: + values[field.name] = deepcopy(field.default) + continue + if ( + isinstance(field_info, params.File) + and lenient_issubclass(field.type_, bytes) + and isinstance(value, UploadFile) + ): + value = await value.read() + elif ( + field.shape in sequence_shapes + and isinstance(field_info, params.File) + and lenient_issubclass(field.type_, bytes) + and isinstance(value, sequence_types) + ): + results: List[Union[bytes, str]] = [] + + async def process_fn( + fn: Callable[[], Coroutine[Any, Any, Any]] + ) -> None: + result = await fn() + results.append(result) + + async with anyio.create_task_group() as tg: + for sub_value in value: + tg.start_soon(process_fn, sub_value.read) + value = sequence_shape_to_type[field.shape](results) + + v_, errors_ = field.validate(value, values, loc=loc) + + if isinstance(errors_, ErrorWrapper): + errors.append(errors_) + elif isinstance(errors_, list): + errors.extend(errors_) + else: + values[field.name] = v_ + return values, errors + + +def get_missing_field_error(loc: Tuple[str, ...]) -> ErrorWrapper: + missing_field_error = ErrorWrapper(MissingError(), loc=loc) + return 
missing_field_error + + +def get_schema_compatible_field(*, field: ModelField) -> ModelField: + out_field = field + if lenient_issubclass(field.type_, UploadFile): + use_type: type = bytes + if field.shape in sequence_shapes: + use_type = List[bytes] + out_field = create_response_field( + name=field.name, + type_=use_type, + class_validators=field.class_validators, + model_config=field.model_config, + default=field.default, + required=field.required, + alias=field.alias, + field_info=field.field_info, + ) + return out_field + + +def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]: + flat_dependant = get_flat_dependant(dependant) + if not flat_dependant.body_params: + return None + first_param = flat_dependant.body_params[0] + field_info = first_param.field_info + embed = getattr(field_info, "embed", None) + body_param_names_set = {param.name for param in flat_dependant.body_params} + if len(body_param_names_set) == 1 and not embed: + final_field = get_schema_compatible_field(field=first_param) + check_file_field(final_field) + return final_field + # If one field requires to embed, all have to be embedded + # in case a sub-dependency is evaluated with a single unique body field + # That is combined (embedded) with other body fields + for param in flat_dependant.body_params: + setattr(param.field_info, "embed", True) + model_name = "Body_" + name + BodyModel: Type[BaseModel] = create_model(model_name) + for f in flat_dependant.body_params: + BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f) + required = any(True for f in flat_dependant.body_params if f.required) + + BodyFieldInfo_kwargs: Dict[str, Any] = dict(default=None) + if any(isinstance(f.field_info, params.File) for f in flat_dependant.body_params): + BodyFieldInfo: Type[params.Body] = params.File + elif any(isinstance(f.field_info, params.Form) for f in flat_dependant.body_params): + BodyFieldInfo = params.Form + else: + BodyFieldInfo = params.Body + + body_param_media_types = [ + getattr(f.field_info, "media_type") + for f in flat_dependant.body_params + if isinstance(f.field_info, params.Body) + ] + if len(set(body_param_media_types)) == 1: + BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0] + final_field = create_response_field( + name="body", + type_=BodyModel, + required=required, + alias="body", + field_info=BodyFieldInfo(**BodyFieldInfo_kwargs), + ) + check_file_field(final_field) + return final_field diff --git a/myenv/lib/python3.9/site-packages/fastapi/encoders.py b/myenv/lib/python3.9/site-packages/fastapi/encoders.py new file mode 100644 index 0000000..3f599c9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/encoders.py @@ -0,0 +1,153 @@ +import dataclasses +from collections import defaultdict +from enum import Enum +from pathlib import PurePath +from types import GeneratorType +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +from pydantic import BaseModel +from pydantic.json import ENCODERS_BY_TYPE + +SetIntStr = Set[Union[int, str]] +DictIntStrAny = Dict[Union[int, str], Any] + + +def generate_encoders_by_class_tuples( + type_encoder_map: Dict[Any, Callable[[Any], Any]] +) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]: + encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict( + tuple + ) + for type_, encoder in type_encoder_map.items(): + encoders_by_class_tuples[encoder] += (type_,) + return encoders_by_class_tuples + + +encoders_by_class_tuples = 
generate_encoders_by_class_tuples(ENCODERS_BY_TYPE) + + +def jsonable_encoder( + obj: Any, + include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + by_alias: bool = True, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + custom_encoder: Dict[Any, Callable[[Any], Any]] = {}, + sqlalchemy_safe: bool = True, +) -> Any: + if include is not None and not isinstance(include, (set, dict)): + include = set(include) + if exclude is not None and not isinstance(exclude, (set, dict)): + exclude = set(exclude) + if isinstance(obj, BaseModel): + encoder = getattr(obj.__config__, "json_encoders", {}) + if custom_encoder: + encoder.update(custom_encoder) + obj_dict = obj.dict( + include=include, # type: ignore # in Pydantic + exclude=exclude, # type: ignore # in Pydantic + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + exclude_defaults=exclude_defaults, + ) + if "__root__" in obj_dict: + obj_dict = obj_dict["__root__"] + return jsonable_encoder( + obj_dict, + exclude_none=exclude_none, + exclude_defaults=exclude_defaults, + custom_encoder=encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + if dataclasses.is_dataclass(obj): + return dataclasses.asdict(obj) + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, PurePath): + return str(obj) + if isinstance(obj, (str, int, float, type(None))): + return obj + if isinstance(obj, dict): + encoded_dict = {} + for key, value in obj.items(): + if ( + ( + not sqlalchemy_safe + or (not isinstance(key, str)) + or (not key.startswith("_sa")) + ) + and (value is not None or not exclude_none) + and ((include and key in include) or not exclude or key not in exclude) + ): + encoded_key = jsonable_encoder( + key, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + encoded_value = jsonable_encoder( + value, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + encoded_dict[encoded_key] = encoded_value + return encoded_dict + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): + encoded_list = [] + for item in obj: + encoded_list.append( + jsonable_encoder( + item, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + ) + return encoded_list + + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder(obj) + + if type(obj) in ENCODERS_BY_TYPE: + return ENCODERS_BY_TYPE[type(obj)](obj) + for encoder, classes_tuple in encoders_by_class_tuples.items(): + if isinstance(obj, classes_tuple): + return encoder(obj) + + errors: List[Exception] = [] + try: + data = dict(obj) + except Exception as e: + errors.append(e) + try: + data = vars(obj) + except Exception as e: + errors.append(e) + raise ValueError(errors) + return jsonable_encoder( + data, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) diff --git 
a/myenv/lib/python3.9/site-packages/fastapi/exception_handlers.py b/myenv/lib/python3.9/site-packages/fastapi/exception_handlers.py new file mode 100644 index 0000000..2b286d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/exception_handlers.py @@ -0,0 +1,25 @@ +from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import RequestValidationError +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse +from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY + + +async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse: + headers = getattr(exc, "headers", None) + if headers: + return JSONResponse( + {"detail": exc.detail}, status_code=exc.status_code, headers=headers + ) + else: + return JSONResponse({"detail": exc.detail}, status_code=exc.status_code) + + +async def request_validation_exception_handler( + request: Request, exc: RequestValidationError +) -> JSONResponse: + return JSONResponse( + status_code=HTTP_422_UNPROCESSABLE_ENTITY, + content={"detail": jsonable_encoder(exc.errors())}, + ) diff --git a/myenv/lib/python3.9/site-packages/fastapi/exceptions.py b/myenv/lib/python3.9/site-packages/fastapi/exceptions.py new file mode 100644 index 0000000..f4a837b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/exceptions.py @@ -0,0 +1,37 @@ +from typing import Any, Dict, Optional, Sequence, Type + +from pydantic import BaseModel, ValidationError, create_model +from pydantic.error_wrappers import ErrorList +from starlette.exceptions import HTTPException as StarletteHTTPException + + +class HTTPException(StarletteHTTPException): + def __init__( + self, + status_code: int, + detail: Any = None, + headers: Optional[Dict[str, Any]] = None, + ) -> None: + super().__init__(status_code=status_code, detail=detail) + self.headers = headers + + +RequestErrorModel: Type[BaseModel] = create_model("Request") +WebSocketErrorModel: Type[BaseModel] = create_model("WebSocket") + + +class FastAPIError(RuntimeError): + """ + A generic, FastAPI-specific error. 
+ """ + + +class RequestValidationError(ValidationError): + def __init__(self, errors: Sequence[ErrorList], *, body: Any = None) -> None: + self.body = body + super().__init__(errors, RequestErrorModel) + + +class WebSocketRequestValidationError(ValidationError): + def __init__(self, errors: Sequence[ErrorList]) -> None: + super().__init__(errors, WebSocketErrorModel) diff --git a/myenv/lib/python3.9/site-packages/fastapi/logger.py b/myenv/lib/python3.9/site-packages/fastapi/logger.py new file mode 100644 index 0000000..5b2c4ad --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/logger.py @@ -0,0 +1,3 @@ +import logging + +logger = logging.getLogger("fastapi") diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/__init__.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/__init__.py new file mode 100644 index 0000000..620296d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/__init__.py @@ -0,0 +1 @@ +from starlette.middleware import Middleware as Middleware diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/cors.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/cors.py new file mode 100644 index 0000000..8dfaad0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/cors.py @@ -0,0 +1 @@ +from starlette.middleware.cors import CORSMiddleware as CORSMiddleware # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/gzip.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/gzip.py new file mode 100644 index 0000000..bbeb2cc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/gzip.py @@ -0,0 +1 @@ +from starlette.middleware.gzip import GZipMiddleware as GZipMiddleware # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py new file mode 100644 index 0000000..b7a3d8e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py @@ -0,0 +1,3 @@ +from starlette.middleware.httpsredirect import ( # noqa + HTTPSRedirectMiddleware as HTTPSRedirectMiddleware, +) diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/trustedhost.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/trustedhost.py new file mode 100644 index 0000000..08d7e03 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/trustedhost.py @@ -0,0 +1,3 @@ +from starlette.middleware.trustedhost import ( # noqa + TrustedHostMiddleware as TrustedHostMiddleware, +) diff --git a/myenv/lib/python3.9/site-packages/fastapi/middleware/wsgi.py b/myenv/lib/python3.9/site-packages/fastapi/middleware/wsgi.py new file mode 100644 index 0000000..c4c6a79 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/middleware/wsgi.py @@ -0,0 +1 @@ +from starlette.middleware.wsgi import WSGIMiddleware as WSGIMiddleware # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/openapi/__init__.py b/myenv/lib/python3.9/site-packages/fastapi/openapi/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py b/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py new file mode 100644 index 0000000..3e69e55 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py @@ -0,0 +1,3 @@ +METHODS_WITH_BODY = {"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"} +STATUS_CODES_WITH_NO_BODY = {100, 101, 102, 103, 204, 304} +REF_PREFIX = "#/components/schemas/" 
diff --git a/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py b/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py new file mode 100644 index 0000000..fd22e4e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py @@ -0,0 +1,177 @@ +import json +from typing import Any, Dict, Optional + +from fastapi.encoders import jsonable_encoder +from starlette.responses import HTMLResponse + + +def get_swagger_ui_html( + *, + openapi_url: str, + title: str, + swagger_js_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui-bundle.js", + swagger_css_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui.css", + swagger_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png", + oauth2_redirect_url: Optional[str] = None, + init_oauth: Optional[Dict[str, Any]] = None, +) -> HTMLResponse: + + html = f""" + <!DOCTYPE html> + <html> + <head> + <link type="text/css" rel="stylesheet" href="{swagger_css_url}"> + <link rel="shortcut icon" href="{swagger_favicon_url}"> + <title>{title}</title> + </head> + <body> + <div id="swagger-ui"> + </div> + <script src="{swagger_js_url}"></script> + <!-- `SwaggerUIBundle` is now available on the page --> + <script> + const ui = SwaggerUIBundle({{ + url: '{openapi_url}', + """ + + if oauth2_redirect_url: + html += f"oauth2RedirectUrl: window.location.origin + '{oauth2_redirect_url}'," + + html += """ + dom_id: '#swagger-ui', + presets: [ + SwaggerUIBundle.presets.apis, + SwaggerUIBundle.SwaggerUIStandalonePreset + ], + layout: "BaseLayout", + deepLinking: true, + showExtensions: true, + showCommonExtensions: true + })""" + + if init_oauth: + html += f""" + ui.initOAuth({json.dumps(jsonable_encoder(init_oauth))}) + """ + + html += """ + </script> + </body> + </html> + """ + return HTMLResponse(html)
+ + +def get_redoc_html( + *, + openapi_url: str, + title: str, + redoc_js_url: str = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js", + redoc_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png", + with_google_fonts: bool = True, +) -> HTMLResponse: + html = f""" + <!DOCTYPE html> + <html> + <head> + <title>{title}</title> + <!-- needed for adaptive design --> + <meta charset="utf-8"/> + <meta name="viewport" content="width=device-width, initial-scale=1"> + """ + if with_google_fonts: + html += """ + <link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet"> + """ + html += f""" + <link rel="shortcut icon" href="{redoc_favicon_url}"> + <!-- ReDoc doesn't change outer page styles --> + <style> + body {{ + margin: 0; + padding: 0; + }} + </style> + </head> + <body> + <noscript> + ReDoc requires Javascript to function. Please enable it to browse the documentation. + </noscript> + <redoc spec-url="{openapi_url}"></redoc> + <script src="{redoc_js_url}"> </script> + </body> + </html> + """ + return HTMLResponse(html)
+ + +def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse: + html = """ + <!DOCTYPE html> + <html lang="en-US"> + <body onload="run()"> + </body> + </html> + <script> + 'use strict'; + function run () { + var oauth2 = window.opener.swaggerUIRedirectOauth2; + var sentState = oauth2.state; + var redirectUrl = oauth2.redirectUrl; + var isValid, qp, arr; + + if (/code|token|error/.test(window.location.hash)) { + qp = window.location.hash.substring(1); + } else { + qp = location.search.substring(1); + } + + arr = qp.split("&") + arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';}) + qp = qp ? JSON.parse('{' + arr.join() + '}', + function (key, value) { + return key === "" ? value : decodeURIComponent(value) + } + ) : {} + + isValid = qp.state === sentState + + if (( + oauth2.auth.schema.get("flow") === "accessCode" || + oauth2.auth.schema.get("flow") === "authorizationCode" + ) && !oauth2.auth.code) { + if (!isValid) { + oauth2.errCb({ + authId: oauth2.auth.name, + source: "auth", + level: "warning", + message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server" + }); + } + + if (qp.code) { + delete oauth2.state; + oauth2.auth.code = qp.code; + oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl}); + } else { + let oauthErrorMsg + if (qp.error) { + oauthErrorMsg = "["+qp.error+"]: " + + (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") + + (qp.error_uri ? "More info: "+qp.error_uri : ""); + } + + oauth2.errCb({ + authId: oauth2.auth.name, + source: "auth", + level: "error", + message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server" + }); + } + } else { + oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl}); + } + window.close(); + } + </script> + """ + return HTMLResponse(content=html)
diff --git a/myenv/lib/python3.9/site-packages/fastapi/openapi/models.py b/myenv/lib/python3.9/site-packages/fastapi/openapi/models.py new file mode 100644 index 0000000..361c750 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/openapi/models.py @@ -0,0 +1,406 @@ +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +from fastapi.logger import logger +from pydantic import AnyUrl, BaseModel, Field + +try: + import email_validator # type: ignore + + assert email_validator # make autoflake ignore the unused import + from pydantic import EmailStr +except ImportError: # pragma: no cover + + class EmailStr(str): # type: ignore + @classmethod + def __get_validators__(cls) -> Iterable[Callable[..., Any]]: + yield cls.validate + + @classmethod + def validate(cls, v: Any) -> str: + logger.warning( + "email-validator not installed, email fields will be treated as str.\n" + "To install, run: pip install email-validator" + ) + return str(v)
+ + +class Contact(BaseModel): + name: Optional[str] = None + url: Optional[AnyUrl] = None + email: Optional[EmailStr] = None + + class Config: + extra = "allow" + + +class License(BaseModel): + name: str + url: Optional[AnyUrl] = None + + class Config: + extra = "allow" + + +class Info(BaseModel): + title: str + description: Optional[str] = None + termsOfService: Optional[str] = None + contact: Optional[Contact] = None + license: Optional[License] = None + version: str + + class Config: + extra = "allow" + + +class ServerVariable(BaseModel): + enum: Optional[List[str]] = None + default: str + description: Optional[str] = None + + class Config: + extra = "allow" + + +class Server(BaseModel): + url: Union[AnyUrl, str] + description: Optional[str] = None + variables: Optional[Dict[str, ServerVariable]] = None + + class Config: + extra = "allow" + + +class Reference(BaseModel): + ref: str = Field(..., alias="$ref") + + +class Discriminator(BaseModel): + propertyName: str + mapping: Optional[Dict[str, str]] = None + + +class XML(BaseModel): + name: Optional[str] = None + namespace: Optional[str] = None + prefix: Optional[str] = None + attribute: Optional[bool] = None + wrapped: Optional[bool] = None + + class Config: + extra = "allow" + + +class ExternalDocumentation(BaseModel): + description: Optional[str] = None + url: AnyUrl + + class Config: + extra = "allow"
+ + +class Schema(BaseModel): + ref: Optional[str] = Field(None, alias="$ref") + title: Optional[str] = None + multipleOf: Optional[float] = None + maximum: Optional[float] = None + exclusiveMaximum: Optional[float] = None + minimum: Optional[float] = None + exclusiveMinimum: Optional[float] = None + maxLength: Optional[int] = Field(None, gte=0) + minLength: Optional[int] = Field(None, gte=0) + pattern: Optional[str] = None + maxItems: Optional[int] = Field(None, gte=0) + minItems: Optional[int] = Field(None, gte=0) + uniqueItems: Optional[bool] =
None + maxProperties: Optional[int] = Field(None, gte=0) + minProperties: Optional[int] = Field(None, gte=0) + required: Optional[List[str]] = None + enum: Optional[List[Any]] = None + type: Optional[str] = None + allOf: Optional[List["Schema"]] = None + oneOf: Optional[List["Schema"]] = None + anyOf: Optional[List["Schema"]] = None + not_: Optional["Schema"] = Field(None, alias="not") + items: Optional["Schema"] = None + properties: Optional[Dict[str, "Schema"]] = None + additionalProperties: Optional[Union["Schema", Reference, bool]] = None + description: Optional[str] = None + format: Optional[str] = None + default: Optional[Any] = None + nullable: Optional[bool] = None + discriminator: Optional[Discriminator] = None + readOnly: Optional[bool] = None + writeOnly: Optional[bool] = None + xml: Optional[XML] = None + externalDocs: Optional[ExternalDocumentation] = None + example: Optional[Any] = None + deprecated: Optional[bool] = None + + class Config: + extra: str = "allow" + + +class Example(BaseModel): + summary: Optional[str] = None + description: Optional[str] = None + value: Optional[Any] = None + externalValue: Optional[AnyUrl] = None + + class Config: + extra = "allow" + + +class ParameterInType(Enum): + query = "query" + header = "header" + path = "path" + cookie = "cookie" + + +class Encoding(BaseModel): + contentType: Optional[str] = None + headers: Optional[Dict[str, Union["Header", Reference]]] = None + style: Optional[str] = None + explode: Optional[bool] = None + allowReserved: Optional[bool] = None + + class Config: + extra = "allow" + + +class MediaType(BaseModel): + schema_: Optional[Union[Schema, Reference]] = Field(None, alias="schema") + example: Optional[Any] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + encoding: Optional[Dict[str, Encoding]] = None + + class Config: + extra = "allow" + + +class ParameterBase(BaseModel): + description: Optional[str] = None + required: Optional[bool] = None + deprecated: Optional[bool] = None + # Serialization rules for simple scenarios + style: Optional[str] = None + explode: Optional[bool] = None + allowReserved: Optional[bool] = None + schema_: Optional[Union[Schema, Reference]] = Field(None, alias="schema") + example: Optional[Any] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + # Serialization rules for more complex scenarios + content: Optional[Dict[str, MediaType]] = None + + class Config: + extra = "allow" + + +class Parameter(ParameterBase): + name: str + in_: ParameterInType = Field(..., alias="in") + + +class Header(ParameterBase): + pass + + +class RequestBody(BaseModel): + description: Optional[str] = None + content: Dict[str, MediaType] + required: Optional[bool] = None + + class Config: + extra = "allow" + + +class Link(BaseModel): + operationRef: Optional[str] = None + operationId: Optional[str] = None + parameters: Optional[Dict[str, Union[Any, str]]] = None + requestBody: Optional[Union[Any, str]] = None + description: Optional[str] = None + server: Optional[Server] = None + + class Config: + extra = "allow" + + +class Response(BaseModel): + description: str + headers: Optional[Dict[str, Union[Header, Reference]]] = None + content: Optional[Dict[str, MediaType]] = None + links: Optional[Dict[str, Union[Link, Reference]]] = None + + class Config: + extra = "allow" + + +class Operation(BaseModel): + tags: Optional[List[str]] = None + summary: Optional[str] = None + description: Optional[str] = None + externalDocs: Optional[ExternalDocumentation] = None + 
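+    # operationId, when present, must be unique across all operations in the
+    # document (OpenAPI 3.0 Operation Object).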
operationId: Optional[str] = None + parameters: Optional[List[Union[Parameter, Reference]]] = None + requestBody: Optional[Union[RequestBody, Reference]] = None + # Using Any for Specification Extensions + responses: Dict[str, Union[Response, Any]] + callbacks: Optional[Dict[str, Union[Dict[str, "PathItem"], Reference]]] = None + deprecated: Optional[bool] = None + security: Optional[List[Dict[str, List[str]]]] = None + servers: Optional[List[Server]] = None + + class Config: + extra = "allow" + + +class PathItem(BaseModel): + ref: Optional[str] = Field(None, alias="$ref") + summary: Optional[str] = None + description: Optional[str] = None + get: Optional[Operation] = None + put: Optional[Operation] = None + post: Optional[Operation] = None + delete: Optional[Operation] = None + options: Optional[Operation] = None + head: Optional[Operation] = None + patch: Optional[Operation] = None + trace: Optional[Operation] = None + servers: Optional[List[Server]] = None + parameters: Optional[List[Union[Parameter, Reference]]] = None + + class Config: + extra = "allow" + + +class SecuritySchemeType(Enum): + apiKey = "apiKey" + http = "http" + oauth2 = "oauth2" + openIdConnect = "openIdConnect" + + +class SecurityBase(BaseModel): + type_: SecuritySchemeType = Field(..., alias="type") + description: Optional[str] = None + + class Config: + extra = "allow" + + +class APIKeyIn(Enum): + query = "query" + header = "header" + cookie = "cookie" + + +class APIKey(SecurityBase): + type_ = Field(SecuritySchemeType.apiKey, alias="type") + in_: APIKeyIn = Field(..., alias="in") + name: str + + +class HTTPBase(SecurityBase): + type_ = Field(SecuritySchemeType.http, alias="type") + scheme: str + + +class HTTPBearer(HTTPBase): + scheme = "bearer" + bearerFormat: Optional[str] = None + + +class OAuthFlow(BaseModel): + refreshUrl: Optional[str] = None + scopes: Dict[str, str] = {} + + class Config: + extra = "allow" + + +class OAuthFlowImplicit(OAuthFlow): + authorizationUrl: str + + +class OAuthFlowPassword(OAuthFlow): + tokenUrl: str + + +class OAuthFlowClientCredentials(OAuthFlow): + tokenUrl: str + + +class OAuthFlowAuthorizationCode(OAuthFlow): + authorizationUrl: str + tokenUrl: str + + +class OAuthFlows(BaseModel): + implicit: Optional[OAuthFlowImplicit] = None + password: Optional[OAuthFlowPassword] = None + clientCredentials: Optional[OAuthFlowClientCredentials] = None + authorizationCode: Optional[OAuthFlowAuthorizationCode] = None + + class Config: + extra = "allow" + + +class OAuth2(SecurityBase): + type_ = Field(SecuritySchemeType.oauth2, alias="type") + flows: OAuthFlows + + +class OpenIdConnect(SecurityBase): + type_ = Field(SecuritySchemeType.openIdConnect, alias="type") + openIdConnectUrl: str + + +SecurityScheme = Union[APIKey, HTTPBase, OAuth2, OpenIdConnect, HTTPBearer] + + +class Components(BaseModel): + schemas: Optional[Dict[str, Union[Schema, Reference]]] = None + responses: Optional[Dict[str, Union[Response, Reference]]] = None + parameters: Optional[Dict[str, Union[Parameter, Reference]]] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + requestBodies: Optional[Dict[str, Union[RequestBody, Reference]]] = None + headers: Optional[Dict[str, Union[Header, Reference]]] = None + securitySchemes: Optional[Dict[str, Union[SecurityScheme, Reference]]] = None + links: Optional[Dict[str, Union[Link, Reference]]] = None + # Using Any for Specification Extensions + callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference, Any]]] = None + + class Config: + extra = 
"allow" + + +class Tag(BaseModel): + name: str + description: Optional[str] = None + externalDocs: Optional[ExternalDocumentation] = None + + class Config: + extra = "allow" + + +class OpenAPI(BaseModel): + openapi: str + info: Info + servers: Optional[List[Server]] = None + # Using Any for Specification Extensions + paths: Dict[str, Union[PathItem, Any]] + components: Optional[Components] = None + security: Optional[List[Dict[str, List[str]]]] = None + tags: Optional[List[Tag]] = None + externalDocs: Optional[ExternalDocumentation] = None + + class Config: + extra = "allow" + + +Schema.update_forward_refs() +Operation.update_forward_refs() +Encoding.update_forward_refs() diff --git a/myenv/lib/python3.9/site-packages/fastapi/openapi/utils.py b/myenv/lib/python3.9/site-packages/fastapi/openapi/utils.py new file mode 100644 index 0000000..0e73e21 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/openapi/utils.py @@ -0,0 +1,410 @@ +import http.client +import inspect +from enum import Enum +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast + +from fastapi import routing +from fastapi.datastructures import DefaultPlaceholder +from fastapi.dependencies.models import Dependant +from fastapi.dependencies.utils import get_flat_dependant, get_flat_params +from fastapi.encoders import jsonable_encoder +from fastapi.openapi.constants import ( + METHODS_WITH_BODY, + REF_PREFIX, + STATUS_CODES_WITH_NO_BODY, +) +from fastapi.openapi.models import OpenAPI +from fastapi.params import Body, Param +from fastapi.responses import Response +from fastapi.utils import ( + deep_dict_update, + generate_operation_id_for_path, + get_model_definitions, +) +from pydantic import BaseModel +from pydantic.fields import ModelField, Undefined +from pydantic.schema import ( + field_schema, + get_flat_models_from_fields, + get_model_name_map, +) +from pydantic.utils import lenient_issubclass +from starlette.responses import JSONResponse +from starlette.routing import BaseRoute +from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY + +validation_error_definition = { + "title": "ValidationError", + "type": "object", + "properties": { + "loc": {"title": "Location", "type": "array", "items": {"type": "string"}}, + "msg": {"title": "Message", "type": "string"}, + "type": {"title": "Error Type", "type": "string"}, + }, + "required": ["loc", "msg", "type"], +} + +validation_error_response_definition = { + "title": "HTTPValidationError", + "type": "object", + "properties": { + "detail": { + "title": "Detail", + "type": "array", + "items": {"$ref": REF_PREFIX + "ValidationError"}, + } + }, +} + +status_code_ranges: Dict[str, str] = { + "1XX": "Information", + "2XX": "Success", + "3XX": "Redirection", + "4XX": "Client Error", + "5XX": "Server Error", + "DEFAULT": "Default Response", +} + + +def get_openapi_security_definitions( + flat_dependant: Dependant, +) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: + security_definitions = {} + operation_security = [] + for security_requirement in flat_dependant.security_requirements: + security_definition = jsonable_encoder( + security_requirement.security_scheme.model, + by_alias=True, + exclude_none=True, + ) + security_name = security_requirement.security_scheme.scheme_name + security_definitions[security_name] = security_definition + operation_security.append({security_name: security_requirement.scopes}) + return security_definitions, operation_security + + +def get_openapi_operation_parameters( + *, + all_route_params: 
Sequence[ModelField], + model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str], +) -> List[Dict[str, Any]]: + parameters = [] + for param in all_route_params: + field_info = param.field_info + field_info = cast(Param, field_info) + parameter = { + "name": param.alias, + "in": field_info.in_.value, + "required": param.required, + "schema": field_schema( + param, model_name_map=model_name_map, ref_prefix=REF_PREFIX + )[0], + } + if field_info.description: + parameter["description"] = field_info.description + if field_info.examples: + parameter["examples"] = jsonable_encoder(field_info.examples) + elif field_info.example != Undefined: + parameter["example"] = jsonable_encoder(field_info.example) + if field_info.deprecated: + parameter["deprecated"] = field_info.deprecated + parameters.append(parameter) + return parameters + + +def get_openapi_operation_request_body( + *, + body_field: Optional[ModelField], + model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str], +) -> Optional[Dict[str, Any]]: + if not body_field: + return None + assert isinstance(body_field, ModelField) + body_schema, _, _ = field_schema( + body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX + ) + field_info = cast(Body, body_field.field_info) + request_media_type = field_info.media_type + required = body_field.required + request_body_oai: Dict[str, Any] = {} + if required: + request_body_oai["required"] = required + request_media_content: Dict[str, Any] = {"schema": body_schema} + if field_info.examples: + request_media_content["examples"] = jsonable_encoder(field_info.examples) + elif field_info.example != Undefined: + request_media_content["example"] = jsonable_encoder(field_info.example) + request_body_oai["content"] = {request_media_type: request_media_content} + return request_body_oai + + +def generate_operation_id(*, route: routing.APIRoute, method: str) -> str: + if route.operation_id: + return route.operation_id + path: str = route.path_format + return generate_operation_id_for_path(name=route.name, path=path, method=method) + + +def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str: + if route.summary: + return route.summary + return route.name.replace("_", " ").title() + + +def get_openapi_operation_metadata( + *, route: routing.APIRoute, method: str +) -> Dict[str, Any]: + operation: Dict[str, Any] = {} + if route.tags: + operation["tags"] = route.tags + operation["summary"] = generate_operation_summary(route=route, method=method) + if route.description: + operation["description"] = route.description + operation["operationId"] = generate_operation_id(route=route, method=method) + if route.deprecated: + operation["deprecated"] = route.deprecated + return operation + + +def get_openapi_path( + *, route: routing.APIRoute, model_name_map: Dict[type, str] +) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]: + path = {} + security_schemes: Dict[str, Any] = {} + definitions: Dict[str, Any] = {} + assert route.methods is not None, "Methods must be a list" + if isinstance(route.response_class, DefaultPlaceholder): + current_response_class: Type[Response] = route.response_class.value + else: + current_response_class = route.response_class + assert current_response_class, "A response class is needed to generate OpenAPI" + route_response_media_type: Optional[str] = current_response_class.media_type + if route.include_in_schema: + for method in route.methods: + operation = get_openapi_operation_metadata(route=route, method=method) + parameters: List[Dict[str, Any]] 
= [] + flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True) + security_definitions, operation_security = get_openapi_security_definitions( + flat_dependant=flat_dependant + ) + if operation_security: + operation.setdefault("security", []).extend(operation_security) + if security_definitions: + security_schemes.update(security_definitions) + all_route_params = get_flat_params(route.dependant) + operation_parameters = get_openapi_operation_parameters( + all_route_params=all_route_params, model_name_map=model_name_map + ) + parameters.extend(operation_parameters) + if parameters: + operation["parameters"] = list( + {param["name"]: param for param in parameters}.values() + ) + if method in METHODS_WITH_BODY: + request_body_oai = get_openapi_operation_request_body( + body_field=route.body_field, model_name_map=model_name_map + ) + if request_body_oai: + operation["requestBody"] = request_body_oai + if route.callbacks: + callbacks = {} + for callback in route.callbacks: + if isinstance(callback, routing.APIRoute): + ( + cb_path, + cb_security_schemes, + cb_definitions, + ) = get_openapi_path( + route=callback, model_name_map=model_name_map + ) + callbacks[callback.name] = {callback.path: cb_path} + operation["callbacks"] = callbacks + if route.status_code is not None: + status_code = str(route.status_code) + else: + # It would probably make more sense for all response classes to have an + # explicit default status_code, and to extract it from them, instead of + # doing this inspection tricks, that would probably be in the future + # TODO: probably make status_code a default class attribute for all + # responses in Starlette + response_signature = inspect.signature(current_response_class.__init__) + status_code_param = response_signature.parameters.get("status_code") + if status_code_param is not None: + if isinstance(status_code_param.default, int): + status_code = str(status_code_param.default) + operation.setdefault("responses", {}).setdefault(status_code, {})[ + "description" + ] = route.response_description + if ( + route_response_media_type + and route.status_code not in STATUS_CODES_WITH_NO_BODY + ): + response_schema = {"type": "string"} + if lenient_issubclass(current_response_class, JSONResponse): + if route.response_field: + response_schema, _, _ = field_schema( + route.response_field, + model_name_map=model_name_map, + ref_prefix=REF_PREFIX, + ) + else: + response_schema = {} + operation.setdefault("responses", {}).setdefault( + status_code, {} + ).setdefault("content", {}).setdefault(route_response_media_type, {})[ + "schema" + ] = response_schema + if route.responses: + operation_responses = operation.setdefault("responses", {}) + for ( + additional_status_code, + additional_response, + ) in route.responses.items(): + process_response = additional_response.copy() + process_response.pop("model", None) + status_code_key = str(additional_status_code).upper() + if status_code_key == "DEFAULT": + status_code_key = "default" + openapi_response = operation_responses.setdefault( + status_code_key, {} + ) + assert isinstance( + process_response, dict + ), "An additional response must be a dict" + field = route.response_fields.get(additional_status_code) + additional_field_schema: Optional[Dict[str, Any]] = None + if field: + additional_field_schema, _, _ = field_schema( + field, model_name_map=model_name_map, ref_prefix=REF_PREFIX + ) + media_type = route_response_media_type or "application/json" + additional_schema = ( + process_response.setdefault("content", {}) + 
.setdefault(media_type, {}) + .setdefault("schema", {}) + ) + deep_dict_update(additional_schema, additional_field_schema) + status_text: Optional[str] = status_code_ranges.get( + str(additional_status_code).upper() + ) or http.client.responses.get(int(additional_status_code)) + description = ( + process_response.get("description") + or openapi_response.get("description") + or status_text + or "Additional Response" + ) + deep_dict_update(openapi_response, process_response) + openapi_response["description"] = description + http422 = str(HTTP_422_UNPROCESSABLE_ENTITY) + if (all_route_params or route.body_field) and not any( + [ + status in operation["responses"] + for status in [http422, "4XX", "default"] + ] + ): + operation["responses"][http422] = { + "description": "Validation Error", + "content": { + "application/json": { + "schema": {"$ref": REF_PREFIX + "HTTPValidationError"} + } + }, + } + if "ValidationError" not in definitions: + definitions.update( + { + "ValidationError": validation_error_definition, + "HTTPValidationError": validation_error_response_definition, + } + ) + if route.openapi_extra: + deep_dict_update(operation, route.openapi_extra) + path[method.lower()] = operation + return path, security_schemes, definitions + + +def get_flat_models_from_routes( + routes: Sequence[BaseRoute], +) -> Set[Union[Type[BaseModel], Type[Enum]]]: + body_fields_from_routes: List[ModelField] = [] + responses_from_routes: List[ModelField] = [] + request_fields_from_routes: List[ModelField] = [] + callback_flat_models: Set[Union[Type[BaseModel], Type[Enum]]] = set() + for route in routes: + if getattr(route, "include_in_schema", None) and isinstance( + route, routing.APIRoute + ): + if route.body_field: + assert isinstance( + route.body_field, ModelField + ), "A request body must be a Pydantic Field" + body_fields_from_routes.append(route.body_field) + if route.response_field: + responses_from_routes.append(route.response_field) + if route.response_fields: + responses_from_routes.extend(route.response_fields.values()) + if route.callbacks: + callback_flat_models |= get_flat_models_from_routes(route.callbacks) + params = get_flat_params(route.dependant) + request_fields_from_routes.extend(params) + + flat_models = callback_flat_models | get_flat_models_from_fields( + body_fields_from_routes + responses_from_routes + request_fields_from_routes, + known_models=set(), + ) + return flat_models + + +def get_openapi( + *, + title: str, + version: str, + openapi_version: str = "3.0.2", + description: Optional[str] = None, + routes: Sequence[BaseRoute], + tags: Optional[List[Dict[str, Any]]] = None, + servers: Optional[List[Dict[str, Union[str, Any]]]] = None, + terms_of_service: Optional[str] = None, + contact: Optional[Dict[str, Union[str, Any]]] = None, + license_info: Optional[Dict[str, Union[str, Any]]] = None, +) -> Dict[str, Any]: + info: Dict[str, Any] = {"title": title, "version": version} + if description: + info["description"] = description + if terms_of_service: + info["termsOfService"] = terms_of_service + if contact: + info["contact"] = contact + if license_info: + info["license"] = license_info + output: Dict[str, Any] = {"openapi": openapi_version, "info": info} + if servers: + output["servers"] = servers + components: Dict[str, Dict[str, Any]] = {} + paths: Dict[str, Dict[str, Any]] = {} + flat_models = get_flat_models_from_routes(routes) + model_name_map = get_model_name_map(flat_models) + definitions = get_model_definitions( + flat_models=flat_models, model_name_map=model_name_map + 
) + for route in routes: + if isinstance(route, routing.APIRoute): + result = get_openapi_path(route=route, model_name_map=model_name_map) + if result: + path, security_schemes, path_definitions = result + if path: + paths.setdefault(route.path_format, {}).update(path) + if security_schemes: + components.setdefault("securitySchemes", {}).update( + security_schemes + ) + if path_definitions: + definitions.update(path_definitions) + if definitions: + components["schemas"] = {k: definitions[k] for k in sorted(definitions)} + if components: + output["components"] = components + output["paths"] = paths + if tags: + output["tags"] = tags + return jsonable_encoder(OpenAPI(**output), by_alias=True, exclude_none=True) # type: ignore diff --git a/myenv/lib/python3.9/site-packages/fastapi/param_functions.py b/myenv/lib/python3.9/site-packages/fastapi/param_functions.py new file mode 100644 index 0000000..ff65d72 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/param_functions.py @@ -0,0 +1,282 @@ +from typing import Any, Callable, Dict, Optional, Sequence + +from fastapi import params +from pydantic.fields import Undefined + + +def Path( # noqa: N802 + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, +) -> Any: + return params.Path( + default=default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + deprecated=deprecated, + **extra, + ) + + +def Query( # noqa: N802 + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, +) -> Any: + return params.Query( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + deprecated=deprecated, + **extra, + ) + + +def Header( # noqa: N802 + default: Any, + *, + alias: Optional[str] = None, + convert_underscores: bool = True, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, +) -> Any: + return params.Header( + default, + alias=alias, + convert_underscores=convert_underscores, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + 
deprecated=deprecated, + **extra, + ) + + +def Cookie( # noqa: N802 + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, +) -> Any: + return params.Cookie( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + deprecated=deprecated, + **extra, + ) + + +def Body( # noqa: N802 + default: Any, + *, + embed: bool = False, + media_type: str = "application/json", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, +) -> Any: + return params.Body( + default, + embed=embed, + media_type=media_type, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + **extra, + ) + + +def Form( # noqa: N802 + default: Any, + *, + media_type: str = "application/x-www-form-urlencoded", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, +) -> Any: + return params.Form( + default, + media_type=media_type, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + **extra, + ) + + +def File( # noqa: N802 + default: Any, + *, + media_type: str = "multipart/form-data", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, +) -> Any: + return params.File( + default, + media_type=media_type, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + **extra, + ) + + +def Depends( # noqa: N802 + dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True +) -> Any: + return params.Depends(dependency=dependency, use_cache=use_cache) + + +def Security( # noqa: N802 + dependency: Optional[Callable[..., Any]] = None, + *, + scopes: Optional[Sequence[str]] = None, + use_cache: 
bool = True, +) -> Any: + return params.Security(dependency=dependency, scopes=scopes, use_cache=use_cache) diff --git a/myenv/lib/python3.9/site-packages/fastapi/params.py b/myenv/lib/python3.9/site-packages/fastapi/params.py new file mode 100644 index 0000000..3cab98b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/params.py @@ -0,0 +1,370 @@ +from enum import Enum +from typing import Any, Callable, Dict, Optional, Sequence + +from pydantic.fields import FieldInfo, Undefined + + +class ParamTypes(Enum): + query = "query" + header = "header" + path = "path" + cookie = "cookie" + + +class Param(FieldInfo): + in_: ParamTypes + + def __init__( + self, + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, + ): + self.deprecated = deprecated + self.example = example + self.examples = examples + super().__init__( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + **extra, + ) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.default})" + + +class Path(Param): + in_ = ParamTypes.path + + def __init__( + self, + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, + ): + self.in_ = self.in_ + super().__init__( + ..., + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + deprecated=deprecated, + example=example, + examples=examples, + **extra, + ) + + +class Query(Param): + in_ = ParamTypes.query + + def __init__( + self, + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, + ): + super().__init__( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + deprecated=deprecated, + example=example, + examples=examples, + **extra, + ) + + +class Header(Param): + in_ = ParamTypes.header + + def __init__( + self, + default: Any, + *, + alias: Optional[str] = None, + convert_underscores: bool = True, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] 
= None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, + ): + self.convert_underscores = convert_underscores + super().__init__( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + deprecated=deprecated, + example=example, + examples=examples, + **extra, + ) + + +class Cookie(Param): + in_ = ParamTypes.cookie + + def __init__( + self, + default: Any, + *, + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + deprecated: Optional[bool] = None, + **extra: Any, + ): + super().__init__( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + deprecated=deprecated, + example=example, + examples=examples, + **extra, + ) + + +class Body(FieldInfo): + def __init__( + self, + default: Any, + *, + embed: bool = False, + media_type: str = "application/json", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, + ): + self.embed = embed + self.media_type = media_type + self.example = example + self.examples = examples + super().__init__( + default, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + **extra, + ) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.default})" + + +class Form(Body): + def __init__( + self, + default: Any, + *, + media_type: str = "application/x-www-form-urlencoded", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, + ): + super().__init__( + default, + embed=True, + media_type=media_type, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + **extra, + ) + + +class File(Form): + def __init__( + self, + default: Any, + *, + media_type: str = "multipart/form-data", + alias: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + regex: Optional[str] = None, + 
example: Any = Undefined, + examples: Optional[Dict[str, Any]] = None, + **extra: Any, + ): + super().__init__( + default, + media_type=media_type, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + regex=regex, + example=example, + examples=examples, + **extra, + ) + + +class Depends: + def __init__( + self, dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True + ): + self.dependency = dependency + self.use_cache = use_cache + + def __repr__(self) -> str: + attr = getattr(self.dependency, "__name__", type(self.dependency).__name__) + cache = "" if self.use_cache else ", use_cache=False" + return f"{self.__class__.__name__}({attr}{cache})" + + +class Security(Depends): + def __init__( + self, + dependency: Optional[Callable[..., Any]] = None, + *, + scopes: Optional[Sequence[str]] = None, + use_cache: bool = True, + ): + super().__init__(dependency=dependency, use_cache=use_cache) + self.scopes = scopes or [] diff --git a/myenv/lib/python3.9/site-packages/fastapi/py.typed b/myenv/lib/python3.9/site-packages/fastapi/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/fastapi/requests.py b/myenv/lib/python3.9/site-packages/fastapi/requests.py new file mode 100644 index 0000000..d16552c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/requests.py @@ -0,0 +1,2 @@ +from starlette.requests import HTTPConnection as HTTPConnection # noqa: F401 +from starlette.requests import Request as Request # noqa: F401 diff --git a/myenv/lib/python3.9/site-packages/fastapi/responses.py b/myenv/lib/python3.9/site-packages/fastapi/responses.py new file mode 100644 index 0000000..6cd7931 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/responses.py @@ -0,0 +1,34 @@ +from typing import Any + +from starlette.responses import FileResponse as FileResponse # noqa +from starlette.responses import HTMLResponse as HTMLResponse # noqa +from starlette.responses import JSONResponse as JSONResponse # noqa +from starlette.responses import PlainTextResponse as PlainTextResponse # noqa +from starlette.responses import RedirectResponse as RedirectResponse # noqa +from starlette.responses import Response as Response # noqa +from starlette.responses import StreamingResponse as StreamingResponse # noqa + +try: + import ujson +except ImportError: # pragma: nocover + ujson = None # type: ignore + + +try: + import orjson +except ImportError: # pragma: nocover + orjson = None # type: ignore + + +class UJSONResponse(JSONResponse): + def render(self, content: Any) -> bytes: + assert ujson is not None, "ujson must be installed to use UJSONResponse" + return ujson.dumps(content, ensure_ascii=False).encode("utf-8") + + +class ORJSONResponse(JSONResponse): + media_type = "application/json" + + def render(self, content: Any) -> bytes: + assert orjson is not None, "orjson must be installed to use ORJSONResponse" + return orjson.dumps(content) diff --git a/myenv/lib/python3.9/site-packages/fastapi/routing.py b/myenv/lib/python3.9/site-packages/fastapi/routing.py new file mode 100644 index 0000000..63ad729 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/routing.py @@ -0,0 +1,1150 @@ +import asyncio +import dataclasses +import email.message +import enum +import inspect +import json +from typing import ( + Any, + Callable, + Coroutine, + Dict, + List, + Optional, + Sequence, + Set, + Type, + Union, +) + +from fastapi import params +from 
fastapi.datastructures import Default, DefaultPlaceholder +from fastapi.dependencies.models import Dependant +from fastapi.dependencies.utils import ( + get_body_field, + get_dependant, + get_parameterless_sub_dependant, + solve_dependencies, +) +from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder +from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError +from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY +from fastapi.types import DecoratedCallable +from fastapi.utils import ( + create_cloned_field, + create_response_field, + generate_operation_id_for_path, + get_value_or_default, +) +from pydantic import BaseModel +from pydantic.error_wrappers import ErrorWrapper, ValidationError +from pydantic.fields import ModelField, Undefined +from starlette import routing +from starlette.concurrency import run_in_threadpool +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse, Response +from starlette.routing import BaseRoute +from starlette.routing import Mount as Mount # noqa +from starlette.routing import ( + compile_path, + get_name, + request_response, + websocket_session, +) +from starlette.status import WS_1008_POLICY_VIOLATION +from starlette.types import ASGIApp +from starlette.websockets import WebSocket + + +def _prepare_response_content( + res: Any, + *, + exclude_unset: bool, + exclude_defaults: bool = False, + exclude_none: bool = False, +) -> Any: + if isinstance(res, BaseModel): + read_with_orm_mode = getattr(res.__config__, "read_with_orm_mode", None) + if read_with_orm_mode: + # Let from_orm extract the data from this model instead of converting + # it now to a dict. + # Otherwise there's no way to extract lazy data that requires attribute + # access instead of dict iteration, e.g. lazy relationships. 
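+ # Note: in that case the instance is returned unchanged here, and the later response-model validation extracts the data via orm_mode attribute access instead.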
+ return res + return res.dict( + by_alias=True, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + elif isinstance(res, list): + return [ + _prepare_response_content( + item, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + for item in res + ] + elif isinstance(res, dict): + return { + k: _prepare_response_content( + v, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + for k, v in res.items() + } + elif dataclasses.is_dataclass(res): + return dataclasses.asdict(res) + return res + + +async def serialize_response( + *, + field: Optional[ModelField] = None, + response_content: Any, + include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + by_alias: bool = True, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + is_coroutine: bool = True, +) -> Any: + if field: + errors = [] + response_content = _prepare_response_content( + response_content, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + if is_coroutine: + value, errors_ = field.validate(response_content, {}, loc=("response",)) + else: + value, errors_ = await run_in_threadpool( + field.validate, response_content, {}, loc=("response",) + ) + if isinstance(errors_, ErrorWrapper): + errors.append(errors_) + elif isinstance(errors_, list): + errors.extend(errors_) + if errors: + raise ValidationError(errors, field.type_) + return jsonable_encoder( + value, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + else: + return jsonable_encoder(response_content) + + +async def run_endpoint_function( + *, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool +) -> Any: + # Only called by get_request_handler. Has been split into its own function to + # facilitate profiling endpoints, since inner functions are harder to profile. 
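+ # is_coroutine is computed once in get_request_handler when the route is set up, so this per-request path does not re-inspect the endpoint with asyncio.iscoroutinefunction on every call.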
+ assert dependant.call is not None, "dependant.call must be a function" + + if is_coroutine: + return await dependant.call(**values) + else: + return await run_in_threadpool(dependant.call, **values) + + +def get_request_handler( + dependant: Dependant, + body_field: Optional[ModelField] = None, + status_code: Optional[int] = None, + response_class: Union[Type[Response], DefaultPlaceholder] = Default(JSONResponse), + response_field: Optional[ModelField] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + dependency_overrides_provider: Optional[Any] = None, +) -> Callable[[Request], Coroutine[Any, Any, Response]]: + assert dependant.call is not None, "dependant.call must be a function" + is_coroutine = asyncio.iscoroutinefunction(dependant.call) + is_body_form = body_field and isinstance(body_field.field_info, params.Form) + if isinstance(response_class, DefaultPlaceholder): + actual_response_class: Type[Response] = response_class.value + else: + actual_response_class = response_class + + async def app(request: Request) -> Response: + try: + body: Any = None + if body_field: + if is_body_form: + body = await request.form() + else: + body_bytes = await request.body() + if body_bytes: + json_body: Any = Undefined + content_type_value = request.headers.get("content-type") + if not content_type_value: + json_body = await request.json() + else: + message = email.message.Message() + message["content-type"] = content_type_value + if message.get_content_maintype() == "application": + subtype = message.get_content_subtype() + if subtype == "json" or subtype.endswith("+json"): + json_body = await request.json() + if json_body != Undefined: + body = json_body + else: + body = body_bytes + except json.JSONDecodeError as e: + raise RequestValidationError([ErrorWrapper(e, ("body", e.pos))], body=e.doc) + except Exception as e: + raise HTTPException( + status_code=400, detail="There was an error parsing the body" + ) from e + solved_result = await solve_dependencies( + request=request, + dependant=dependant, + body=body, + dependency_overrides_provider=dependency_overrides_provider, + ) + values, errors, background_tasks, sub_response, _ = solved_result + if errors: + raise RequestValidationError(errors, body=body) + else: + raw_response = await run_endpoint_function( + dependant=dependant, values=values, is_coroutine=is_coroutine + ) + + if isinstance(raw_response, Response): + if raw_response.background is None: + raw_response.background = background_tasks + return raw_response + response_data = await serialize_response( + field=response_field, + response_content=raw_response, + include=response_model_include, + exclude=response_model_exclude, + by_alias=response_model_by_alias, + exclude_unset=response_model_exclude_unset, + exclude_defaults=response_model_exclude_defaults, + exclude_none=response_model_exclude_none, + is_coroutine=is_coroutine, + ) + response_args: Dict[str, Any] = {"background": background_tasks} + # If status_code was set, use it, otherwise use the default from the + # response class, in the case of redirect it's 307 + if status_code is not None: + response_args["status_code"] = status_code + response = actual_response_class(response_data, **response_args) + 
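# Copy headers (including any cookies) that dependencies or the endpoint set on the injected sub-response onto the outgoing response. + 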
response.headers.raw.extend(sub_response.headers.raw) + if sub_response.status_code: + response.status_code = sub_response.status_code + return response + + return app + + +def get_websocket_app( + dependant: Dependant, dependency_overrides_provider: Optional[Any] = None +) -> Callable[[WebSocket], Coroutine[Any, Any, Any]]: + async def app(websocket: WebSocket) -> None: + solved_result = await solve_dependencies( + request=websocket, + dependant=dependant, + dependency_overrides_provider=dependency_overrides_provider, + ) + values, errors, _, _2, _3 = solved_result + if errors: + await websocket.close(code=WS_1008_POLICY_VIOLATION) + raise WebSocketRequestValidationError(errors) + assert dependant.call is not None, "dependant.call must be a function" + await dependant.call(**values) + + return app + + +class APIWebSocketRoute(routing.WebSocketRoute): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + name: Optional[str] = None, + dependency_overrides_provider: Optional[Any] = None, + ) -> None: + self.path = path + self.endpoint = endpoint + self.name = get_name(endpoint) if name is None else name + self.dependant = get_dependant(path=path, call=self.endpoint) + self.app = websocket_session( + get_websocket_app( + dependant=self.dependant, + dependency_overrides_provider=dependency_overrides_provider, + ) + ) + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + + +class APIRoute(routing.Route): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + name: Optional[str] = None, + methods: Optional[Union[Set[str], List[str]]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + dependency_overrides_provider: Optional[Any] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> None: + # normalise enums e.g. 
http.HTTPStatus + if isinstance(status_code, enum.IntEnum): + status_code = int(status_code) + self.path = path + self.endpoint = endpoint + self.name = get_name(endpoint) if name is None else name + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + if methods is None: + methods = ["GET"] + self.methods: Set[str] = set([method.upper() for method in methods]) + self.unique_id = generate_operation_id_for_path( + name=self.name, path=self.path_format, method=list(methods)[0] + ) + self.response_model = response_model + if self.response_model: + assert ( + status_code not in STATUS_CODES_WITH_NO_BODY + ), f"Status code {status_code} must not have a response body" + response_name = "Response_" + self.unique_id + self.response_field = create_response_field( + name=response_name, type_=self.response_model + ) + # Create a clone of the field, so that a Pydantic submodel is not returned + # as is just because it's an instance of a subclass of a more limited class + # e.g. UserInDB (containing hashed_password) could be a subclass of User + # that doesn't have the hashed_password. But because it's a subclass, it + # would pass the validation and be returned as is. + # By being a new field, no inheritance will be passed as is. A new model + # will be always created. + self.secure_cloned_response_field: Optional[ + ModelField + ] = create_cloned_field(self.response_field) + else: + self.response_field = None # type: ignore + self.secure_cloned_response_field = None + self.status_code = status_code + self.tags = tags or [] + if dependencies: + self.dependencies = list(dependencies) + else: + self.dependencies = [] + self.summary = summary + self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "") + # if a "form feed" character (page break) is found in the description text, + # truncate description text to the content preceding the first "form feed" + self.description = self.description.split("\f")[0] + self.response_description = response_description + self.responses = responses or {} + response_fields = {} + for additional_status_code, response in self.responses.items(): + assert isinstance(response, dict), "An additional response must be a dict" + model = response.get("model") + if model: + assert ( + additional_status_code not in STATUS_CODES_WITH_NO_BODY + ), f"Status code {additional_status_code} must not have a response body" + response_name = f"Response_{additional_status_code}_{self.unique_id}" + response_field = create_response_field(name=response_name, type_=model) + response_fields[additional_status_code] = response_field + if response_fields: + self.response_fields: Dict[Union[int, str], ModelField] = response_fields + else: + self.response_fields = {} + self.deprecated = deprecated + self.operation_id = operation_id + self.response_model_include = response_model_include + self.response_model_exclude = response_model_exclude + self.response_model_by_alias = response_model_by_alias + self.response_model_exclude_unset = response_model_exclude_unset + self.response_model_exclude_defaults = response_model_exclude_defaults + self.response_model_exclude_none = response_model_exclude_none + self.include_in_schema = include_in_schema + self.response_class = response_class + + assert callable(endpoint), "An endpoint must be a callable" + self.dependant = get_dependant(path=self.path_format, call=self.endpoint) + for depends in self.dependencies[::-1]: + self.dependant.dependencies.insert( + 0, + get_parameterless_sub_dependant(depends=depends, 
path=self.path_format), + ) + self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id) + self.dependency_overrides_provider = dependency_overrides_provider + self.callbacks = callbacks + self.app = request_response(self.get_route_handler()) + self.openapi_extra = openapi_extra + + def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]: + return get_request_handler( + dependant=self.dependant, + body_field=self.body_field, + status_code=self.status_code, + response_class=self.response_class, + response_field=self.secure_cloned_response_field, + response_model_include=self.response_model_include, + response_model_exclude=self.response_model_exclude, + response_model_by_alias=self.response_model_by_alias, + response_model_exclude_unset=self.response_model_exclude_unset, + response_model_exclude_defaults=self.response_model_exclude_defaults, + response_model_exclude_none=self.response_model_exclude_none, + dependency_overrides_provider=self.dependency_overrides_provider, + ) + + +class APIRouter(routing.Router): + def __init__( + self, + *, + prefix: str = "", + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + default_response_class: Type[Response] = Default(JSONResponse), + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + callbacks: Optional[List[BaseRoute]] = None, + routes: Optional[List[routing.BaseRoute]] = None, + redirect_slashes: bool = True, + default: Optional[ASGIApp] = None, + dependency_overrides_provider: Optional[Any] = None, + route_class: Type[APIRoute] = APIRoute, + on_startup: Optional[Sequence[Callable[[], Any]]] = None, + on_shutdown: Optional[Sequence[Callable[[], Any]]] = None, + deprecated: Optional[bool] = None, + include_in_schema: bool = True, + ) -> None: + super().__init__( + routes=routes, # type: ignore # in Starlette + redirect_slashes=redirect_slashes, + default=default, # type: ignore # in Starlette + on_startup=on_startup, # type: ignore # in Starlette + on_shutdown=on_shutdown, # type: ignore # in Starlette + ) + if prefix: + assert prefix.startswith("/"), "A path prefix must start with '/'" + assert not prefix.endswith( + "/" + ), "A path prefix must not end with '/', as the routes will start with '/'" + self.prefix = prefix + self.tags: List[str] = tags or [] + self.dependencies = list(dependencies or []) or [] + self.deprecated = deprecated + self.include_in_schema = include_in_schema + self.responses = responses or {} + self.callbacks = callbacks or [] + self.dependency_overrides_provider = dependency_overrides_provider + self.route_class = route_class + self.default_response_class = default_response_class + + def add_api_route( + self, + path: str, + endpoint: Callable[..., Any], + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[Union[Set[str], List[str]]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + 
response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + name: Optional[str] = None, + route_class_override: Optional[Type[APIRoute]] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> None: + route_class = route_class_override or self.route_class + responses = responses or {} + combined_responses = {**self.responses, **responses} + current_response_class = get_value_or_default( + response_class, self.default_response_class + ) + current_tags = self.tags.copy() + if tags: + current_tags.extend(tags) + current_dependencies = self.dependencies.copy() + if dependencies: + current_dependencies.extend(dependencies) + current_callbacks = self.callbacks.copy() + if callbacks: + current_callbacks.extend(callbacks) + route = route_class( + self.prefix + path, + endpoint=endpoint, + response_model=response_model, + status_code=status_code, + tags=current_tags, + dependencies=current_dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=combined_responses, + deprecated=deprecated or self.deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema and self.include_in_schema, + response_class=current_response_class, + name=name, + dependency_overrides_provider=self.dependency_overrides_provider, + callbacks=current_callbacks, + openapi_extra=openapi_extra, + ) + self.routes.append(route) + + def api_route( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_api_route( + path, + func, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + 
response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + return func + + return decorator + + def add_api_websocket_route( + self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None + ) -> None: + route = APIWebSocketRoute( + path, + endpoint=endpoint, + name=name, + dependency_overrides_provider=self.dependency_overrides_provider, + ) + self.routes.append(route) + + def websocket( + self, path: str, name: Optional[str] = None + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_api_websocket_route(path, func, name=name) + return func + + return decorator + + def include_router( + self, + router: "APIRouter", + *, + prefix: str = "", + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + default_response_class: Type[Response] = Default(JSONResponse), + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + callbacks: Optional[List[BaseRoute]] = None, + deprecated: Optional[bool] = None, + include_in_schema: bool = True, + ) -> None: + if prefix: + assert prefix.startswith("/"), "A path prefix must start with '/'" + assert not prefix.endswith( + "/" + ), "A path prefix must not end with '/', as the routes will start with '/'" + else: + for r in router.routes: + path = getattr(r, "path") + name = getattr(r, "name", "unknown") + if path is not None and not path: + raise Exception( + f"Prefix and path cannot be both empty (path operation: {name})" + ) + if responses is None: + responses = {} + for route in router.routes: + if isinstance(route, APIRoute): + combined_responses = {**responses, **route.responses} + use_response_class = get_value_or_default( + route.response_class, + router.default_response_class, + default_response_class, + self.default_response_class, + ) + current_tags = [] + if tags: + current_tags.extend(tags) + if route.tags: + current_tags.extend(route.tags) + current_dependencies: List[params.Depends] = [] + if dependencies: + current_dependencies.extend(dependencies) + if route.dependencies: + current_dependencies.extend(route.dependencies) + current_callbacks = [] + if callbacks: + current_callbacks.extend(callbacks) + if route.callbacks: + current_callbacks.extend(route.callbacks) + self.add_api_route( + prefix + route.path, + route.endpoint, + response_model=route.response_model, + status_code=route.status_code, + tags=current_tags, + dependencies=current_dependencies, + summary=route.summary, + description=route.description, + response_description=route.response_description, + responses=combined_responses, + deprecated=route.deprecated or deprecated or self.deprecated, + methods=route.methods, + operation_id=route.operation_id, + response_model_include=route.response_model_include, + response_model_exclude=route.response_model_exclude, + response_model_by_alias=route.response_model_by_alias, + response_model_exclude_unset=route.response_model_exclude_unset, + response_model_exclude_defaults=route.response_model_exclude_defaults, + response_model_exclude_none=route.response_model_exclude_none, + include_in_schema=route.include_in_schema + and 
self.include_in_schema + and include_in_schema, + response_class=use_response_class, + name=route.name, + route_class_override=type(route), + callbacks=current_callbacks, + openapi_extra=route.openapi_extra, + ) + elif isinstance(route, routing.Route): + methods = list(route.methods or []) # type: ignore # in Starlette + self.add_route( + prefix + route.path, + route.endpoint, + methods=methods, + include_in_schema=route.include_in_schema, + name=route.name, + ) + elif isinstance(route, APIWebSocketRoute): + self.add_api_websocket_route( + prefix + route.path, route.endpoint, name=route.name + ) + elif isinstance(route, routing.WebSocketRoute): + self.add_websocket_route( + prefix + route.path, route.endpoint, name=route.name + ) + for handler in router.on_startup: + self.add_event_handler("startup", handler) + for handler in router.on_shutdown: + self.add_event_handler("shutdown", handler) + + def get( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["GET"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def put( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + 
response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["PUT"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def post( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["POST"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def delete( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = 
None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["DELETE"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def options( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["OPTIONS"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def head( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + 
summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["HEAD"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) + + def patch( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["PATCH"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + 
openapi_extra=openapi_extra, + ) + + def trace( + self, + path: str, + *, + response_model: Optional[Type[Any]] = None, + status_code: Optional[int] = None, + tags: Optional[List[str]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["TRACE"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + ) diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/__init__.py b/myenv/lib/python3.9/site-packages/fastapi/security/__init__.py new file mode 100644 index 0000000..3aa6bf2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/__init__.py @@ -0,0 +1,15 @@ +from .api_key import APIKeyCookie as APIKeyCookie +from .api_key import APIKeyHeader as APIKeyHeader +from .api_key import APIKeyQuery as APIKeyQuery +from .http import HTTPAuthorizationCredentials as HTTPAuthorizationCredentials +from .http import HTTPBasic as HTTPBasic +from .http import HTTPBasicCredentials as HTTPBasicCredentials +from .http import HTTPBearer as HTTPBearer +from .http import HTTPDigest as HTTPDigest +from .oauth2 import OAuth2 as OAuth2 +from .oauth2 import OAuth2AuthorizationCodeBearer as OAuth2AuthorizationCodeBearer +from .oauth2 import OAuth2PasswordBearer as OAuth2PasswordBearer +from .oauth2 import OAuth2PasswordRequestForm as OAuth2PasswordRequestForm +from .oauth2 import OAuth2PasswordRequestFormStrict as OAuth2PasswordRequestFormStrict +from .oauth2 import SecurityScopes as SecurityScopes +from .open_id_connect_url import OpenIdConnect as OpenIdConnect diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/api_key.py b/myenv/lib/python3.9/site-packages/fastapi/security/api_key.py new file mode 100644 index 0000000..36ab60e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/api_key.py @@ -0,0 +1,92 @@ +from typing import Optional + +from fastapi.openapi.models import APIKey, APIKeyIn +from fastapi.security.base import SecurityBase +from 
starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.status import HTTP_403_FORBIDDEN + + +class APIKeyBase(SecurityBase): + pass + + +class APIKeyQuery(APIKeyBase): + def __init__( + self, + *, + name: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.query}, name=name, description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key: str = request.query_params.get(self.model.name) + if not api_key: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return api_key + + +class APIKeyHeader(APIKeyBase): + def __init__( + self, + *, + name: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.header}, name=name, description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key: str = request.headers.get(self.model.name) + if not api_key: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return api_key + + +class APIKeyCookie(APIKeyBase): + def __init__( + self, + *, + name: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.cookie}, name=name, description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key = request.cookies.get(self.model.name) + if not api_key: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return api_key diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/base.py b/myenv/lib/python3.9/site-packages/fastapi/security/base.py new file mode 100644 index 0000000..c43555d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/base.py @@ -0,0 +1,6 @@ +from fastapi.openapi.models import SecurityBase as SecurityBaseModel + + +class SecurityBase: + model: SecurityBaseModel + scheme_name: str diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/http.py b/myenv/lib/python3.9/site-packages/fastapi/security/http.py new file mode 100644 index 0000000..1b473c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/http.py @@ -0,0 +1,165 @@ +import binascii +from base64 import b64decode +from typing import Optional + +from fastapi.exceptions import HTTPException +from fastapi.openapi.models import HTTPBase as HTTPBaseModel +from fastapi.openapi.models import HTTPBearer as HTTPBearerModel +from fastapi.security.base import SecurityBase +from fastapi.security.utils import get_authorization_scheme_param +from pydantic import BaseModel +from starlette.requests import Request +from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN + + +class HTTPBasicCredentials(BaseModel): + username: str + password: str + + +class HTTPAuthorizationCredentials(BaseModel): + scheme: str + credentials: str + + +class HTTPBase(SecurityBase): + def 
__init__( + self, + *, + scheme: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + self.model = HTTPBaseModel(scheme=scheme, description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization: str = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + + +class HTTPBasic(HTTPBase): + def __init__( + self, + *, + scheme_name: Optional[str] = None, + realm: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + self.model = HTTPBaseModel(scheme="basic", description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.realm = realm + self.auto_error = auto_error + + async def __call__( # type: ignore + self, request: Request + ) -> Optional[HTTPBasicCredentials]: + authorization: str = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if self.realm: + unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'} + else: + unauthorized_headers = {"WWW-Authenticate": "Basic"} + invalid_user_credentials_exc = HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Invalid authentication credentials", + headers=unauthorized_headers, + ) + if not authorization or scheme.lower() != "basic": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers=unauthorized_headers, + ) + else: + return None + try: + data = b64decode(param).decode("ascii") + except (ValueError, UnicodeDecodeError, binascii.Error): + raise invalid_user_credentials_exc + username, separator, password = data.partition(":") + if not separator: + raise invalid_user_credentials_exc + return HTTPBasicCredentials(username=username, password=password) + + +class HTTPBearer(HTTPBase): + def __init__( + self, + *, + bearerFormat: Optional[str] = None, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization: str = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + if scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="Invalid authentication credentials", + ) + else: + return None + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + + +class HTTPDigest(HTTPBase): + def __init__( + self, + *, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + self.model = HTTPBaseModel(scheme="digest", 
description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization: str = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + if scheme.lower() != "digest": + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="Invalid authentication credentials", + ) + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/oauth2.py b/myenv/lib/python3.9/site-packages/fastapi/security/oauth2.py new file mode 100644 index 0000000..bdc6e2e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/oauth2.py @@ -0,0 +1,220 @@ +from typing import Any, Dict, List, Optional, Union + +from fastapi.exceptions import HTTPException +from fastapi.openapi.models import OAuth2 as OAuth2Model +from fastapi.openapi.models import OAuthFlows as OAuthFlowsModel +from fastapi.param_functions import Form +from fastapi.security.base import SecurityBase +from fastapi.security.utils import get_authorization_scheme_param +from starlette.requests import Request +from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN + + +class OAuth2PasswordRequestForm: + """ + This is a dependency class, use it like: + + @app.post("/login") + def login(form_data: OAuth2PasswordRequestForm = Depends()): + data = form_data.parse() + print(data.username) + print(data.password) + for scope in data.scopes: + print(scope) + if data.client_id: + print(data.client_id) + if data.client_secret: + print(data.client_secret) + return data + + + It creates the following Form request parameters in your endpoint: + + grant_type: the OAuth2 spec says it is required and MUST be the fixed string "password". + Nevertheless, this dependency class is permissive and allows not passing it. If you want to enforce it, + use instead the OAuth2PasswordRequestFormStrict dependency. + username: username string. The OAuth2 spec requires the exact field name "username". + password: password string. The OAuth2 spec requires the exact field name "password". + scope: Optional string. Several scopes (each one a string) separated by spaces. E.g. + "items:read items:write users:read profile openid" + client_id: optional string. OAuth2 recommends sending the client_id and client_secret (if any) + using HTTP Basic auth, as: client_id:client_secret + client_secret: optional string. 
OAuth2 recommends sending the client_id and client_secret (if any) + using HTTP Basic auth, as: client_id:client_secret + """ + + def __init__( + self, + grant_type: str = Form(None, regex="password"), + username: str = Form(...), + password: str = Form(...), + scope: str = Form(""), + client_id: Optional[str] = Form(None), + client_secret: Optional[str] = Form(None), + ): + self.grant_type = grant_type + self.username = username + self.password = password + self.scopes = scope.split() + self.client_id = client_id + self.client_secret = client_secret + + +class OAuth2PasswordRequestFormStrict(OAuth2PasswordRequestForm): + """ + This is a dependency class, use it like: + + @app.post("/login") + def login(form_data: OAuth2PasswordRequestFormStrict = Depends()): + data = form_data.parse() + print(data.username) + print(data.password) + for scope in data.scopes: + print(scope) + if data.client_id: + print(data.client_id) + if data.client_secret: + print(data.client_secret) + return data + + + It creates the following Form request parameters in your endpoint: + + grant_type: the OAuth2 spec says it is required and MUST be the fixed string "password". + This dependency is strict about it. If you want to be permissive, use instead the + OAuth2PasswordRequestForm dependency class. + username: username string. The OAuth2 spec requires the exact field name "username". + password: password string. The OAuth2 spec requires the exact field name "password". + scope: Optional string. Several scopes (each one a string) separated by spaces. E.g. + "items:read items:write users:read profile openid" + client_id: optional string. OAuth2 recommends sending the client_id and client_secret (if any) + using HTTP Basic auth, as: client_id:client_secret + client_secret: optional string. 
OAuth2 recommends sending the client_id and client_secret (if any) + using HTTP Basic auth, as: client_id:client_secret + """ + + def __init__( + self, + grant_type: str = Form(..., regex="password"), + username: str = Form(...), + password: str = Form(...), + scope: str = Form(""), + client_id: Optional[str] = Form(None), + client_secret: Optional[str] = Form(None), + ): + super().__init__( + grant_type=grant_type, + username=username, + password=password, + scope=scope, + client_id=client_id, + client_secret=client_secret, + ) + + +class OAuth2(SecurityBase): + def __init__( + self, + *, + flows: Union[OAuthFlowsModel, Dict[str, Dict[str, Any]]] = OAuthFlowsModel(), + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: Optional[bool] = True + ): + self.model = OAuth2Model(flows=flows, description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + authorization: str = request.headers.get("Authorization") + if not authorization: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return authorization + + +class OAuth2PasswordBearer(OAuth2): + def __init__( + self, + tokenUrl: str, + scheme_name: Optional[str] = None, + scopes: Optional[Dict[str, str]] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + if not scopes: + scopes = {} + flows = OAuthFlowsModel(password={"tokenUrl": tokenUrl, "scopes": scopes}) + super().__init__( + flows=flows, + scheme_name=scheme_name, + description=description, + auto_error=auto_error, + ) + + async def __call__(self, request: Request) -> Optional[str]: + authorization: str = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if not authorization or scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers={"WWW-Authenticate": "Bearer"}, + ) + else: + return None + return param + + +class OAuth2AuthorizationCodeBearer(OAuth2): + def __init__( + self, + authorizationUrl: str, + tokenUrl: str, + refreshUrl: Optional[str] = None, + scheme_name: Optional[str] = None, + scopes: Optional[Dict[str, str]] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + if not scopes: + scopes = {} + flows = OAuthFlowsModel( + authorizationCode={ + "authorizationUrl": authorizationUrl, + "tokenUrl": tokenUrl, + "refreshUrl": refreshUrl, + "scopes": scopes, + } + ) + super().__init__( + flows=flows, + scheme_name=scheme_name, + description=description, + auto_error=auto_error, + ) + + async def __call__(self, request: Request) -> Optional[str]: + authorization: str = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if not authorization or scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers={"WWW-Authenticate": "Bearer"}, + ) + else: + return None # pragma: nocover + return param + + +class SecurityScopes: + def __init__(self, scopes: Optional[List[str]] = None): + self.scopes = scopes or [] + self.scope_str = " ".join(self.scopes) diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py b/myenv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py new file mode 
100644 index 0000000..dfe9f7b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py @@ -0,0 +1,34 @@ +from typing import Optional + +from fastapi.openapi.models import OpenIdConnect as OpenIdConnectModel +from fastapi.security.base import SecurityBase +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.status import HTTP_403_FORBIDDEN + + +class OpenIdConnect(SecurityBase): + def __init__( + self, + *, + openIdConnectUrl: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True + ): + self.model = OpenIdConnectModel( + openIdConnectUrl=openIdConnectUrl, description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + authorization: str = request.headers.get("Authorization") + if not authorization: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return authorization diff --git a/myenv/lib/python3.9/site-packages/fastapi/security/utils.py b/myenv/lib/python3.9/site-packages/fastapi/security/utils.py new file mode 100644 index 0000000..2da0dd2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/security/utils.py @@ -0,0 +1,8 @@ +from typing import Tuple + + +def get_authorization_scheme_param(authorization_header_value: str) -> Tuple[str, str]: + if not authorization_header_value: + return "", "" + scheme, _, param = authorization_header_value.partition(" ") + return scheme, param diff --git a/myenv/lib/python3.9/site-packages/fastapi/staticfiles.py b/myenv/lib/python3.9/site-packages/fastapi/staticfiles.py new file mode 100644 index 0000000..299015d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/staticfiles.py @@ -0,0 +1 @@ +from starlette.staticfiles import StaticFiles as StaticFiles # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/templating.py b/myenv/lib/python3.9/site-packages/fastapi/templating.py new file mode 100644 index 0000000..0cb8684 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/templating.py @@ -0,0 +1 @@ +from starlette.templating import Jinja2Templates as Jinja2Templates # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/testclient.py b/myenv/lib/python3.9/site-packages/fastapi/testclient.py new file mode 100644 index 0000000..4012406 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/testclient.py @@ -0,0 +1 @@ +from starlette.testclient import TestClient as TestClient # noqa diff --git a/myenv/lib/python3.9/site-packages/fastapi/types.py b/myenv/lib/python3.9/site-packages/fastapi/types.py new file mode 100644 index 0000000..e0bca46 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/types.py @@ -0,0 +1,3 @@ +from typing import Any, Callable, TypeVar + +DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any]) diff --git a/myenv/lib/python3.9/site-packages/fastapi/utils.py b/myenv/lib/python3.9/site-packages/fastapi/utils.py new file mode 100644 index 0000000..8913d85 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/utils.py @@ -0,0 +1,156 @@ +import functools +import re +from dataclasses import is_dataclass +from enum import Enum +from typing import Any, Dict, Optional, Set, Type, Union, cast + +import fastapi +from fastapi.datastructures import DefaultPlaceholder, DefaultType +from fastapi.openapi.constants import 
REF_PREFIX +from pydantic import BaseConfig, BaseModel, create_model +from pydantic.class_validators import Validator +from pydantic.fields import FieldInfo, ModelField, UndefinedType +from pydantic.schema import model_process_schema +from pydantic.utils import lenient_issubclass + + +def get_model_definitions( + *, + flat_models: Set[Union[Type[BaseModel], Type[Enum]]], + model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str], +) -> Dict[str, Any]: + definitions: Dict[str, Dict[str, Any]] = {} + for model in flat_models: + m_schema, m_definitions, m_nested_models = model_process_schema( + model, model_name_map=model_name_map, ref_prefix=REF_PREFIX + ) + definitions.update(m_definitions) + model_name = model_name_map[model] + definitions[model_name] = m_schema + return definitions + + +def get_path_param_names(path: str) -> Set[str]: + return set(re.findall("{(.*?)}", path)) + + +def create_response_field( + name: str, + type_: Type[Any], + class_validators: Optional[Dict[str, Validator]] = None, + default: Optional[Any] = None, + required: Union[bool, UndefinedType] = False, + model_config: Type[BaseConfig] = BaseConfig, + field_info: Optional[FieldInfo] = None, + alias: Optional[str] = None, +) -> ModelField: + """ + Create a new response field. Raises if type_ is invalid. + """ + class_validators = class_validators or {} + field_info = field_info or FieldInfo(None) + + response_field = functools.partial( + ModelField, + name=name, + type_=type_, + class_validators=class_validators, + default=default, + required=required, + model_config=model_config, + alias=alias, + ) + + try: + return response_field(field_info=field_info) + except RuntimeError: + raise fastapi.exceptions.FastAPIError( + f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type" + ) + + +def create_cloned_field( + field: ModelField, + *, + cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None, +) -> ModelField: + # _cloned_types has already cloned types, to support recursive models + if cloned_types is None: + cloned_types = dict() + original_type = field.type_ + if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"): + original_type = original_type.__pydantic_model__ + use_type = original_type + if lenient_issubclass(original_type, BaseModel): + original_type = cast(Type[BaseModel], original_type) + use_type = cloned_types.get(original_type) + if use_type is None: + use_type = create_model(original_type.__name__, __base__=original_type) + cloned_types[original_type] = use_type + for f in original_type.__fields__.values(): + use_type.__fields__[f.name] = create_cloned_field( + f, cloned_types=cloned_types + ) + new_field = create_response_field(name=field.name, type_=use_type) + new_field.has_alias = field.has_alias + new_field.alias = field.alias + new_field.class_validators = field.class_validators + new_field.default = field.default + new_field.required = field.required + new_field.model_config = field.model_config + new_field.field_info = field.field_info + new_field.allow_none = field.allow_none + new_field.validate_always = field.validate_always + if field.sub_fields: + new_field.sub_fields = [ + create_cloned_field(sub_field, cloned_types=cloned_types) + for sub_field in field.sub_fields + ] + if field.key_field: + new_field.key_field = create_cloned_field( + field.key_field, cloned_types=cloned_types + ) + new_field.validators = field.validators + new_field.pre_validators = field.pre_validators + new_field.post_validators = 
field.post_validators + new_field.parse_json = field.parse_json + new_field.shape = field.shape + new_field.populate_validators() + return new_field + + +def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str: + operation_id = name + path + operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id) + operation_id = operation_id + "_" + method.lower() + return operation_id + + +def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None: + for key in update_dict: + if ( + key in main_dict + and isinstance(main_dict[key], dict) + and isinstance(update_dict[key], dict) + ): + deep_dict_update(main_dict[key], update_dict[key]) + else: + main_dict[key] = update_dict[key] + + +def get_value_or_default( + first_item: Union[DefaultPlaceholder, DefaultType], + *extra_items: Union[DefaultPlaceholder, DefaultType], +) -> Union[DefaultPlaceholder, DefaultType]: + """ + Pass items or `DefaultPlaceholder`s by descending priority. + + The first one to _not_ be a `DefaultPlaceholder` will be returned. + + Otherwise, the first item (a `DefaultPlaceholder`) will be returned. + """ + items = (first_item,) + extra_items + for item in items: + if not isinstance(item, DefaultPlaceholder): + return item + return first_item diff --git a/myenv/lib/python3.9/site-packages/fastapi/websockets.py b/myenv/lib/python3.9/site-packages/fastapi/websockets.py new file mode 100644 index 0000000..bed672a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/fastapi/websockets.py @@ -0,0 +1,2 @@ +from starlette.websockets import WebSocket as WebSocket # noqa +from starlette.websockets import WebSocketDisconnect as WebSocketDisconnect # noqa diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/LICENSE new file mode 100644 index 0000000..522fbe2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Anthony Sottile + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
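
The `get_value_or_default` helper vendored in `fastapi/utils.py` above documents a simple priority rule: the first argument that is *not* a `DefaultPlaceholder` wins, otherwise the first placeholder itself is returned. A minimal sketch of that rule, assuming only the vendored `fastapi` package from this environment and that `Default` in `fastapi.datastructures` is the usual placeholder constructor (both names are taken from the files in this diff, not verified beyond them):

```python
# Sketch only: the priority rule described in the get_value_or_default
# docstring above. Placeholder values are skipped in favour of any
# explicitly configured value; with only placeholders, the first one wins.
from fastapi.datastructures import Default, DefaultPlaceholder
from fastapi.utils import get_value_or_default

app_default = Default("JSONResponse")   # placeholder carrying the app-wide value
route_override = "ORJSONResponse"       # a concrete, explicitly configured value

# The explicit value outranks the placeholder ...
assert get_value_or_default(app_default, route_override) == "ORJSONResponse"

# ... but when every argument is a placeholder, the first placeholder comes back.
resolved = get_value_or_default(app_default, Default("HTMLResponse"))
assert isinstance(resolved, DefaultPlaceholder) and resolved.value == "JSONResponse"
```
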
diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/METADATA new file mode 100644 index 0000000..49ff084 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/METADATA @@ -0,0 +1,110 @@ +Metadata-Version: 2.1 +Name: future-fstrings +Version: 1.2.0 +Summary: A backport of fstrings to python<3.6 +Home-page: https://github.com/asottile/future-fstrings +Author: Anthony Sottile +Author-email: asottile@umich.edu +License: MIT +Platform: UNKNOWN +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* +Description-Content-Type: text/markdown +Requires-Dist: tokenize-rt (>=3) ; python_version < "3.6" +Provides-Extra: rewrite +Requires-Dist: tokenize-rt (>=3) ; extra == 'rewrite' + +[![Build Status](https://asottile.visualstudio.com/asottile/_apis/build/status/asottile.future-fstrings?branchName=master)](https://asottile.visualstudio.com/asottile/_build/latest?definitionId=15&branchName=master) +[![Azure DevOps coverage](https://img.shields.io/azure-devops/coverage/asottile/asottile/15/master.svg)](https://dev.azure.com/asottile/asottile/_build/latest?definitionId=15&branchName=master) + +future-fstrings +=============== + +A backport of fstrings to python<3.6. + + +## Installation + +`pip install future-fstrings` + + +## Usage + +Include the following encoding cookie at the top of your file (this replaces +the utf-8 cookie if you already have it): + +```python +# -*- coding: future_fstrings -*- +``` + +And then write python3.6 fstring code as usual! + +```python +# -*- coding: future_fstrings -*- +thing = 'world' +print(f'hello {thing}') +``` + +```console +$ python2.7 main.py +hello world +``` + +## Showing transformed source + +`future-fstrings` also includes a cli to show transformed source. + +```console +$ future-fstrings-show main.py +# -*- coding: future_fstrings -*- +thing = 'world' +print('hello {}'.format((thing))) +``` + +## Transform source for micropython + +The `future-fstrings-show` command can be used to transform source before +distributing. This can allow you to write f-string code but target platforms +which do not support f-strings, such as [micropython]. + +To use this on modern versions of python, install using: + +```bash +pip install future-fstrings[rewrite] +``` + +and then use `future-fstrings-show` as above. + +For instance: + +```bash +future-fstrings-show code.py > code_rewritten.py +``` + +[micropython]: https://github.com/micropython/micropython + +## How does this work? + +`future-fstrings` has two parts: + +1. A utf-8 compatible `codec` which performs source manipulation + - The `codec` first decodes the source bytes using the UTF-8 codec + - The `codec` then leverages + [tokenize-rt](https://github.com/asottile/tokenize-rt) to rewrite + f-strings. +2. A `.pth` file which registers a codec on interpreter startup. 
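
The rewriting that the codec performs can also be exercised directly. A small sketch, assuming `tokenize-rt` is installed (the `rewrite` extra mentioned above) and using the module-level `fstring_decode` alias that the vendored `future_fstrings.py` further down in this diff keeps pointing at the rewriter even on interpreters with native f-string support:

```python
# Sketch: feed raw source bytes through the same rewriter the codec uses.
import future_fstrings

source = b"thing = 'world'\nprint(f'hello {thing}')\n"
rewritten, consumed = future_fstrings.fstring_decode(source)

print(rewritten)
# thing = 'world'
# print('hello {}'.format((thing)))
```

This matches the output of the `future-fstrings-show` CLI shown earlier; the CLI is just this decode step applied to a whole file.
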
+ +## you may also like + +- [future-breakpoint](https://github.com/asottile/future-breakpoint) + + diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/RECORD new file mode 100644 index 0000000..3106ee6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/RECORD @@ -0,0 +1,10 @@ +../../../bin/future-fstrings-show,sha256=8dSO0wZooi7AfRP9kZ2KvYvOTb-wPFQp8aOOFIlPwEU,255 +aaaaa_future_fstrings.pth,sha256=guBUlzeG4Sx1hSyL74lnGDGibEolIDKiWX3nKRZjDPw,126 +future_fstrings.py,sha256=rISrOdh1700HnkCueIPWTS3ssUiw_FHNrBusXCnJd1c,8262 +future_fstrings-1.2.0.dist-info/LICENSE,sha256=7B5zOcT_kkhS3y9ddBcBIOOnIqqyjItu8gZgw9vporc,1059 +future_fstrings-1.2.0.dist-info/METADATA,sha256=0ji_RA4E-e5D7xUvt77zHTYqt-JqaLImL4woAddyMTg,3160 +future_fstrings-1.2.0.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110 +future_fstrings-1.2.0.dist-info/entry_points.txt,sha256=O36UVAN_0W2gH0yhOquLcMNhstouT3kKhrZ4ErL-QnU,63 +future_fstrings-1.2.0.dist-info/top_level.txt,sha256=LEIf3UDAlbXiS6plxfUkTXe8hgmDEBPWVwso3KMHxDc,16 +future_fstrings-1.2.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +future_fstrings-1.2.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/WHEEL new file mode 100644 index 0000000..78e6f69 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.4) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/entry_points.txt new file mode 100644 index 0000000..0c8d51b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +future-fstrings-show = future_fstrings:main + diff --git a/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/top_level.txt new file mode 100644 index 0000000..5c59166 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings-1.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +future_fstrings diff --git a/myenv/lib/python3.9/site-packages/future_fstrings.py b/myenv/lib/python3.9/site-packages/future_fstrings.py new file mode 100644 index 0000000..97e4711 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/future_fstrings.py @@ -0,0 +1,297 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import argparse +import codecs +import encodings +import io +import sys + + +utf_8 = encodings.search_function('utf8') + + +class TokenSyntaxError(SyntaxError): + def __init__(self, e, token): + super(TokenSyntaxError, self).__init__(e) + self.e = e + self.token = token + + +def _find_literal(s, start, level, parts, exprs): + """Roughly Python/ast.c:fstring_find_literal""" + i = start + parse_expr = True + + while i < len(s): + ch = s[i] + + if ch in ('{', '}'): + if level == 0: + if i + 1 < len(s) and s[i + 1] == ch: + i += 2 + parse_expr = False + break + elif ch == '}': + raise SyntaxError("f-string: single '}' is not allowed") + break + + i += 1 + + parts.append(s[start:i]) + return i, parse_expr and i < len(s) + + 
+def _find_expr(s, start, level, parts, exprs): + """Roughly Python/ast.c:fstring_find_expr""" + i = start + nested_depth = 0 + quote_char = None + triple_quoted = None + + def _check_end(): + if i == len(s): + raise SyntaxError("f-string: expecting '}'") + + if level >= 2: + raise SyntaxError("f-string: expressions nested too deeply") + + parts.append(s[i]) + i += 1 + + while i < len(s): + ch = s[i] + + if ch == '\\': + raise SyntaxError( + 'f-string expression part cannot include a backslash', + ) + if quote_char is not None: + if ch == quote_char: + if triple_quoted: + if i + 2 < len(s) and s[i + 1] == ch and s[i + 2] == ch: + i += 2 + quote_char = None + triple_quoted = None + else: + quote_char = None + triple_quoted = None + elif ch in ('"', "'"): + quote_char = ch + if i + 2 < len(s) and s[i + 1] == ch and s[i + 2] == ch: + triple_quoted = True + i += 2 + else: + triple_quoted = False + elif ch in ('[', '{', '('): + nested_depth += 1 + elif nested_depth and ch in (']', '}', ')'): + nested_depth -= 1 + elif ch == '#': + raise SyntaxError("f-string expression cannot include '#'") + elif nested_depth == 0 and ch in ('!', ':', '}'): + if ch == '!' and i + 1 < len(s) and s[i + 1] == '=': + # Allow != at top level as `=` isn't a valid conversion + pass + else: + break + i += 1 + + if quote_char is not None: + raise SyntaxError('f-string: unterminated string') + elif nested_depth: + raise SyntaxError("f-string: mismatched '(', '{', or '['") + _check_end() + + exprs.append(s[start + 1:i]) + + if s[i] == '!': + parts.append(s[i]) + i += 1 + _check_end() + parts.append(s[i]) + i += 1 + + _check_end() + + if s[i] == ':': + parts.append(s[i]) + i += 1 + _check_end() + i = _fstring_parse(s, i, level + 1, parts, exprs) + + _check_end() + if s[i] != '}': + raise SyntaxError("f-string: expecting '}'") + + parts.append(s[i]) + i += 1 + return i + + +def _fstring_parse(s, i, level, parts, exprs): + """Roughly Python/ast.c:fstring_find_literal_and_expr""" + while True: + i, parse_expr = _find_literal(s, i, level, parts, exprs) + if i == len(s) or s[i] == '}': + return i + if parse_expr: + i = _find_expr(s, i, level, parts, exprs) + + +def _fstring_parse_outer(s, i, level, parts, exprs): + for q in ('"' * 3, "'" * 3, '"', "'"): + if s.startswith(q): + s = s[len(q):len(s) - len(q)] + break + else: + raise AssertionError('unreachable') + parts.append(q) + ret = _fstring_parse(s, i, level, parts, exprs) + parts.append(q) + return ret + + +def _is_f(token): + import tokenize_rt + + prefix, _ = tokenize_rt.parse_string_literal(token.src) + return 'f' in prefix.lower() + + +def _make_fstring(tokens): + import tokenize_rt + + new_tokens = [] + exprs = [] + + for i, token in enumerate(tokens): + if token.name == 'STRING' and _is_f(token): + prefix, s = tokenize_rt.parse_string_literal(token.src) + parts = [] + try: + _fstring_parse_outer(s, 0, 0, parts, exprs) + except SyntaxError as e: + raise TokenSyntaxError(e, tokens[i - 1]) + if 'r' in prefix.lower(): + parts = [s.replace('\\', '\\\\') for s in parts] + token = token._replace(src=''.join(parts)) + elif token.name == 'STRING': + new_src = token.src.replace('{', '{{').replace('}', '}}') + token = token._replace(src=new_src) + new_tokens.append(token) + + exprs = ('({})'.format(expr) for expr in exprs) + format_src = '.format({})'.format(', '.join(exprs)) + new_tokens.append(tokenize_rt.Token('FORMAT', src=format_src)) + + return new_tokens + + +def decode(b, errors='strict'): + import tokenize_rt # pip install future-fstrings[rewrite] + + u, length = 
utf_8.decode(b, errors) + tokens = tokenize_rt.src_to_tokens(u) + + to_replace = [] + start = end = seen_f = None + + for i, token in enumerate(tokens): + if start is None: + if token.name == 'STRING': + start, end = i, i + 1 + seen_f = _is_f(token) + elif token.name == 'STRING': + end = i + 1 + seen_f |= _is_f(token) + elif token.name not in tokenize_rt.NON_CODING_TOKENS: + if seen_f: + to_replace.append((start, end)) + start = end = seen_f = None + + for start, end in reversed(to_replace): + try: + tokens[start:end] = _make_fstring(tokens[start:end]) + except TokenSyntaxError as e: + msg = str(e.e) + line = u.splitlines()[e.token.line - 1] + bts = line.encode('UTF-8')[:e.token.utf8_byte_offset] + indent = len(bts.decode('UTF-8')) + raise SyntaxError(msg + '\n\n' + line + '\n' + ' ' * indent + '^') + return tokenize_rt.tokens_to_src(tokens), length + + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, input, errors, final): # pragma: no cover + if final: + return decode(input, errors) + else: + return '', 0 + + +class StreamReader(utf_8.streamreader, object): + """decode is deferred to support better error messages""" + _stream = None + _decoded = False + + @property + def stream(self): + if not self._decoded: + text, _ = decode(self._stream.read()) + self._stream = io.BytesIO(text.encode('UTF-8')) + self._decoded = True + return self._stream + + @stream.setter + def stream(self, stream): + self._stream = stream + self._decoded = False + + +def _natively_supports_fstrings(): + try: + return eval('f"hi"') == 'hi' + except SyntaxError: + return False + + +fstring_decode = decode +SUPPORTS_FSTRINGS = _natively_supports_fstrings() +if SUPPORTS_FSTRINGS: # pragma: no cover + decode = utf_8.decode # noqa + IncrementalDecoder = utf_8.incrementaldecoder # noqa + StreamReader = utf_8.streamreader # noqa + +# codec api + +codec_map = { + name: codecs.CodecInfo( + name=name, + encode=utf_8.encode, + decode=decode, + incrementalencoder=utf_8.incrementalencoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=utf_8.streamwriter, + ) + for name in ('future-fstrings', 'future_fstrings') +} + + +def register(): # pragma: no cover + codecs.register(codec_map.get) + + +def main(argv=None): + parser = argparse.ArgumentParser(description='Prints transformed source.') + parser.add_argument('filename') + args = parser.parse_args(argv) + + with open(args.filename, 'rb') as f: + text, _ = fstring_decode(f.read()) + getattr(sys.stdout, 'buffer', sys.stdout).write(text.encode('UTF-8')) + + +if __name__ == '__main__': + exit(main()) diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/LICENSE.txt b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..8f080ea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Nathaniel J. 
Smith and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/METADATA new file mode 100644 index 0000000..5478c3c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/METADATA @@ -0,0 +1,194 @@ +Metadata-Version: 2.1 +Name: h11 +Version: 0.12.0 +Summary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1 +Home-page: https://github.com/python-hyper/h11 +Author: Nathaniel J. Smith +Author-email: njs@pobox.com +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: System :: Networking +Requires-Python: >=3.6 + +h11 +=== + +.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master + :target: https://travis-ci.org/python-hyper/h11 + :alt: Automated test status + +.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg + :target: https://codecov.io/gh/python-hyper/h11 + :alt: Test coverage + +.. image:: https://readthedocs.org/projects/h11/badge/?version=latest + :target: http://h11.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +This is a little HTTP/1.1 library written from scratch in Python, +heavily inspired by `hyper-h2 `_. + +It's a "bring-your-own-I/O" library; h11 contains no IO code +whatsoever. This means you can hook h11 up to your favorite network +API, and that could be anything you want: synchronous, threaded, +asynchronous, or your own implementation of `RFC 6214 +`_ -- h11 won't judge you. +(Compare this to the current state of the art, where every time a `new +network API `_ comes along then someone +gets to start over reimplementing the entire HTTP protocol from +scratch.) Cory Benfield made an `excellent blog post describing the +benefits of this approach +`_, or if you like video +then here's his `PyCon 2016 talk on the same theme +`_. 
+ +This also means that h11 is not immediately useful out of the box: +it's a toolkit for building programs that speak HTTP, not something +that could directly replace ``requests`` or ``twisted.web`` or +whatever. But h11 makes it much easier to implement something like +``requests`` or ``twisted.web``. + +At a high level, working with h11 goes like this: + +1) First, create an ``h11.Connection`` object to track the state of a + single HTTP/1.1 connection. + +2) When you read data off the network, pass it to + ``conn.receive_data(...)``; you'll get back a list of objects + representing high-level HTTP "events". + +3) When you want to send a high-level HTTP event, create the + corresponding "event" object and pass it to ``conn.send(...)``; + this will give you back some bytes that you can then push out + through the network. + +For example, a client might instantiate and then send a +``h11.Request`` object, then zero or more ``h11.Data`` objects for the +request body (e.g., if this is a POST), and then a +``h11.EndOfMessage`` to indicate the end of the message. Then the +server would then send back a ``h11.Response``, some ``h11.Data``, and +its own ``h11.EndOfMessage``. If either side violates the protocol, +you'll get a ``h11.ProtocolError`` exception. + +h11 is suitable for implementing both servers and clients, and has a +pleasantly symmetric API: the events you send as a client are exactly +the ones that you receive as a server and vice-versa. + +`Here's an example of a tiny HTTP client +`_ + +It also has `a fine manual `_. + +FAQ +--- + +*Whyyyyy?* + +I wanted to play with HTTP in `Curio +`__ and `Trio +`__, which at the time didn't have any +HTTP libraries. So I thought, no big deal, Python has, like, a dozen +different implementations of HTTP, surely I can find one that's +reusable. I didn't find one, but I did find Cory's call-to-arms +blog-post. So I figured, well, fine, if I have to implement HTTP from +scratch, at least I can make sure no-one *else* has to ever again. + +*Should I use it?* + +Maybe. You should be aware that it's a very young project. But, it's +feature complete and has an exhaustive test-suite and complete docs, +so the next step is for people to try using it and see how it goes +:-). If you do then please let us know -- if nothing else we'll want +to talk to you before making any incompatible changes! + +*What are the features/limitations?* + +Roughly speaking, it's trying to be a robust, complete, and non-hacky +implementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230: +HTTP/1.1 Message Syntax and Routing +`_. That is, it mostly focuses on +implementing HTTP at the level of taking bytes on and off the wire, +and the headers related to that, and tries to be anal about spec +conformance. It doesn't know about higher-level concerns like URL +routing, conditional GETs, cross-origin cookie policies, or content +negotiation. But it does know how to take care of framing, +cross-version differences in keep-alive handling, and the "obsolete +line folding" rule, so you can focus your energies on the hard / +interesting parts for your application, and it tries to support the +full specification in the sense that any useful HTTP/1.1 conformant +application should be able to use h11. + +It's pure Python, and has no dependencies outside of the standard +library. + +It has a test suite with 100.0% coverage for both statements and +branches. + +Currently it supports Python 3 (testing on 3.6-3.9) and PyPy 3. +The last Python 2-compatible version was h11 0.11.x. 
+(Originally it had a Cython wrapper for `http-parser +`_ and a beautiful nested state +machine implemented with ``yield from`` to postprocess the output. But +I had to take these out -- the new *parser* needs fewer lines-of-code +than the old *parser wrapper*, is written in pure Python, uses no +exotic language syntax, and has more features. It's sad, really; that +old state machine was really slick. I just need a few sentences here +to mourn that.) + +I don't know how fast it is. I haven't benchmarked or profiled it yet, +so it's probably got a few pointless hot spots, and I've been trying +to err on the side of simplicity and robustness instead of +micro-optimization. But at the architectural level I tried hard to +avoid fundamentally bad decisions, e.g., I believe that all the +parsing algorithms remain linear-time even in the face of pathological +input like slowloris, and there are no byte-by-byte loops. (I also +believe that it maintains bounded memory usage in the face of +arbitrary/pathological input.) + +The whole library is ~800 lines-of-code. You can read and understand +the whole thing in less than an hour. Most of the energy invested in +this so far has been spent on trying to keep things simple by +minimizing special-cases and ad hoc state manipulation; even though it +is now quite small and simple, I'm still annoyed that I haven't +figured out how to make it even smaller and simpler. (Unfortunately, +HTTP does not lend itself to simplicity.) + +The API is ~feature complete and I don't expect the general outlines +to change much, but you can't judge an API's ergonomics until you +actually document and use it, so I'd expect some changes in the +details. + +*How do I try it?* + +.. code-block:: sh + + $ pip install h11 + $ git clone git@github.com:python-hyper/h11 + $ cd h11/examples + $ python basic-client.py + +and go from there. + +*License?* + +MIT + +*Code of conduct?* + +Contributors are requested to follow our `code of conduct +`_ in +all project spaces. 
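*What does that high-level flow look like in code?*

Roughly like this -- a minimal, unofficial sketch (not from the upstream docs) of the client flow from the overview above, against a plain blocking socket. Note that in the 0.12 API vendored here you feed bytes in with ``conn.receive_data(...)`` and then pull completed events out one at a time with ``conn.next_event()``:

.. code-block:: python

    import socket
    import h11

    conn = h11.Connection(our_role=h11.CLIENT)
    sock = socket.create_connection(("example.com", 80))

    # Turn high-level events into bytes, and push them out ourselves.
    request = h11.Request(
        method="GET",
        target="/",
        headers=[("Host", "example.com"), ("Connection", "close")],
    )
    sock.sendall(conn.send(request))
    sock.sendall(conn.send(h11.EndOfMessage()))  # a GET has no body

    # Feed raw bytes in, get high-level events back out.
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            conn.receive_data(sock.recv(4096))  # b"" here signals EOF
        elif isinstance(event, (h11.Response, h11.Data)):
            print(event)
        elif isinstance(event, (h11.EndOfMessage, h11.ConnectionClosed)):
            break

    sock.close()

And because the API is symmetric, the server side of the same exchange is nearly a mirror image (``socket.create_server`` requires Python 3.8+):

.. code-block:: python

    import socket
    import h11

    listener = socket.create_server(("127.0.0.1", 8080))  # Python 3.8+
    sock, _ = listener.accept()
    conn = h11.Connection(our_role=h11.SERVER)

    # Read events until the client's request (head + body) is complete.
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            conn.receive_data(sock.recv(4096))
        elif isinstance(event, h11.EndOfMessage):
            break

    # Reply with a fixed-length body; h11 does the framing and the
    # protocol state checking, we do the actual socket I/O.
    body = b"hello!\n"
    response = h11.Response(
        status_code=200,
        headers=[
            ("Content-Type", "text/plain"),
            ("Content-Length", str(len(body))),
        ],
    )
    sock.sendall(conn.send(response))
    sock.sendall(conn.send(h11.Data(data=body)))
    sock.sendall(conn.send(h11.EndOfMessage()))
    sock.close()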
+ + diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/RECORD new file mode 100644 index 0000000..a5f2182 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/RECORD @@ -0,0 +1,29 @@ +h11/__init__.py,sha256=3gYpvQiX8_6-dyXaAxQt_sIYREVTz1T-zB5Lf4hjKt0,909 +h11/_abnf.py,sha256=tMKqgOEkTHHp8sPd_gmU9Qowe_yXXrihct63RX2zJsg,4637 +h11/_connection.py,sha256=XFZ-LPb3C2vgF4v5ifmcJqX-a2tHkItucJ7uIGvPYZA,24964 +h11/_events.py,sha256=IJtM7i2TxKv0S-givq2b-oehPVsmsbsIelTW6NHcIvg,9834 +h11/_headers.py,sha256=P2h8Q39SIFiRS9CpYjAwo_99XKJUvLHjn0U3tnm4qHE,9130 +h11/_readers.py,sha256=DmJKQwH9Iu7U3WNljKB09d6iJIO6P2_WtylJEY3HvPY,7280 +h11/_receivebuffer.py,sha256=pMOLWjS53haaCm73O6tSWKFD_6BQQWzVLqLCm2ouvcE,5029 +h11/_state.py,sha256=Upg0_uiO_C_QNXHxLB4YUprEeoeso0i_ma12SOrrA54,12167 +h11/_util.py,sha256=Lw_CoIUMR8wjnvgKwo94FCdmFcIbRQsokmxpBV7LcTI,4387 +h11/_version.py,sha256=14wRZqPo0n2t5kFKCQLsldnyZAfOZoKPJbbwJnbGPcc,686 +h11/_writers.py,sha256=dj8HQ4Pnzq5SjkUZrgh3RKQ6-8Ecy9RKC1MjSo27y4s,4173 +h11/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +h11/tests/helpers.py,sha256=nKheRzldPf278C81d_9_Mb9yWsYJ5udwKg_oq-fAz-U,2528 +h11/tests/test_against_stdlib_http.py,sha256=aA4oDd3_jXkapvW0ER9dbGxIiNt6Ytsfs3U2Rd5XtUc,3700 +h11/tests/test_connection.py,sha256=1WybI9IQROZ0QPtR2wQjetPIR_Jwsvw5i5j2fO7XtcI,36375 +h11/tests/test_events.py,sha256=RTPFBIg81Muc7ZoDhsLwaZxthD76R1UCzHF5nzsbM-Q,5182 +h11/tests/test_headers.py,sha256=pa-WMjCk8ZXJFABkojr2db7ZKrgNKiwl-D-hjjt6-Eg,5390 +h11/tests/test_helpers.py,sha256=mPOAiv4HtyG0_T23K_ihh1JUs0y71ykD47c9r3iVtz0,573 +h11/tests/test_io.py,sha256=oaIEAy3ktA_e1xuyP09fX_GiSlS7GKMlFhQIdkg-EhI,15494 +h11/tests/test_receivebuffer.py,sha256=nZ9_LXj3wfyOn4dkgvjnDjZeNTEtxO8-lNphAB0FVF0,3399 +h11/tests/test_state.py,sha256=JMKqA2d2wtskf7FbsAr1s9qsIul4WtwdXVAOCUJgalk,8551 +h11/tests/test_util.py,sha256=j28tMloUSuhlpUxmgvS1PRurRFSbyzWb7yCTp6qy9_Q,2710 +h11/tests/data/test-file,sha256=ZJ03Rqs98oJw29OHzJg7LlMzyGQaRAY0r3AqBeM2wVU,65 +h11-0.12.0.dist-info/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124 +h11-0.12.0.dist-info/METADATA,sha256=_X-4TWqWCxSJ_mDyAbZPzdxHqP290_yVu09nelJOk04,8109 +h11-0.12.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +h11-0.12.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4 +h11-0.12.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +h11-0.12.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/WHEEL new file mode 100644 index 0000000..385faab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/top_level.txt new file mode 100644 index 0000000..0d24def --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11-0.12.0.dist-info/top_level.txt @@ -0,0 +1 @@ +h11 diff --git a/myenv/lib/python3.9/site-packages/h11/__init__.py b/myenv/lib/python3.9/site-packages/h11/__init__.py new file mode 100644 index 0000000..ae39e01 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/__init__.py @@ -0,0 +1,21 @@ +# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), +# 
containing no networking code at all, loosely modelled on hyper-h2's generic +# implementation of HTTP/2 (and in particular the h2.connection.H2Connection +# class). There's still a bunch of subtle details you need to get right if you +# want to make this actually useful, because it doesn't implement all the +# semantics to check that what you're asking to write to the wire is sensible, +# but at least it gets you out of dealing with the wire itself. + +from ._connection import * +from ._events import * +from ._state import * +from ._util import LocalProtocolError, ProtocolError, RemoteProtocolError +from ._version import __version__ + +PRODUCT_ID = "python-h11/" + __version__ + + +__all__ = ["ProtocolError", "LocalProtocolError", "RemoteProtocolError"] +__all__ += _events.__all__ +__all__ += _connection.__all__ +__all__ += _state.__all__ diff --git a/myenv/lib/python3.9/site-packages/h11/_abnf.py b/myenv/lib/python3.9/site-packages/h11/_abnf.py new file mode 100644 index 0000000..e6d49e1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_abnf.py @@ -0,0 +1,129 @@ +# We use native strings for all the re patterns, to take advantage of string +# formatting, and then convert to bytestrings when compiling the final re +# objects. + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace +# OWS = *( SP / HTAB ) +# ; optional whitespace +OWS = r"[ \t]*" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields +# field-name = token +field_name = token + +# The standard says: +# +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 +# +# https://tools.ietf.org/html/rfc5234#appendix-B.1 +# +# VCHAR = %x21-7E +# ; visible (printing) characters +# +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string +# obs-text = %x80-FF +# +# However, the standard definition of field-content is WRONG! It disallows +# fields containing a single visible character surrounded by whitespace, +# e.g. "foo a bar". +# +# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# +# So our definition of field_content attempts to fix it up... +# +# Also, we allow lots of control characters, because apparently people assume +# that they're legal in practice (e.g., google analytics makes cookies with +# \x01 in them!): +# https://github.com/python-hyper/h11/issues/57 +# We still don't allow NUL or whitespace, because those are often treated as +# meta-characters and letting them through can lead to nasty issues like SSRF. +vchar = r"[\x21-\x7e]" +vchar_or_obs_text = r"[^\x00\s]" +field_vchar = vchar_or_obs_text +field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals()) + +# We handle obs-fold at a different level, and our fixed-up field_content +# already grows to swallow the whole value, so ? 
instead of * +field_value = r"({field_content})?".format(**globals()) + +# header-field = field-name ":" OWS field-value OWS +header_field = ( + r"(?P<field_name>{field_name})" + r":" + r"{OWS}" + r"(?P<field_value>{field_value})" + r"{OWS}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line +# +# request-line = method SP request-target SP HTTP-version CRLF +# method = token +# HTTP-version = HTTP-name "/" DIGIT "." DIGIT +# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive +# +# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full +# URL, host+port (for connect), or even "*", but in any case we are guaranteed +# that it consists of the visible printing characters. +method = token +request_target = r"{vchar}+".format(**globals()) +http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])" +request_line = ( + r"(?P<method>{method})" + r" " + r"(?P<target>{request_target})" + r" " + r"{http_version}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line +# +# status-line = HTTP-version SP status-code SP reason-phrase CRLF +# status-code = 3DIGIT +# reason-phrase = *( HTAB / SP / VCHAR / obs-text ) +status_code = r"[0-9]{3}" +reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals()) +status_line = ( + r"{http_version}" + r" " + r"(?P<status_code>{status_code})" + # However, there are apparently a few too many servers out there that just + # leave out the reason phrase: + # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036 + # https://github.com/seanmonstar/httparse/issues/29 + # so make it optional. ?: is a non-capturing group. + r"(?: (?P<reason>{reason_phrase}))?".format(**globals()) +) + +HEXDIG = r"[0-9A-Fa-f]" +# Actually +# +# chunk-size = 1*HEXDIG +# +# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20 +chunk_size = r"({HEXDIG}){{1,20}}".format(**globals()) +# Actually +# +# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) +# +# but we aren't parsing the things so we don't really care. +chunk_ext = r";.*" +chunk_header = ( + r"(?P<chunk_size>{chunk_size})" + r"(?P<chunk_ext>{chunk_ext})?" + r"\r\n".format(**globals()) +) diff --git a/myenv/lib/python3.9/site-packages/h11/_connection.py b/myenv/lib/python3.9/site-packages/h11/_connection.py new file mode 100644 index 0000000..6f796ef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_connection.py @@ -0,0 +1,585 @@ +# This contains the main Connection class. Everything in h11 revolves around +# this. + +from ._events import * # Import all event types +from ._headers import get_comma_header, has_expect_100_continue, set_comma_header +from ._readers import READERS +from ._receivebuffer import ReceiveBuffer +from ._state import * # Import all state sentinels +from ._state import _SWITCH_CONNECT, _SWITCH_UPGRADE, ConnectionState +from ._util import ( # Import the internal things we need + LocalProtocolError, + make_sentinel, + RemoteProtocolError, +) +from ._writers import WRITERS + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = ["Connection", "NEED_DATA", "PAUSED"] + +NEED_DATA = make_sentinel("NEED_DATA") +PAUSED = make_sentinel("PAUSED") + +# If we ever have this much buffered without it making a complete parseable +# event, we error out. The only time we really buffer is when reading the +# request/response line + headers together, so this is effectively the limit on +# the size of that.
+# +# Some precedents for defaults: +# - node.js: 80 * 1024 +# - tomcat: 8 * 1024 +# - IIS: 16 * 1024 +# - Apache: <8 KiB per line> +DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 + +# RFC 7230's rules for connection lifecycles: +# - If either side says they want to close the connection, then the connection +# must close. +# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close +# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive +# (and even this is a mess -- e.g. if you're implementing a proxy then +# sending Connection: keep-alive is forbidden). +# +# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So +# our rule is: +# - If someone says Connection: close, we will close +# - If someone uses HTTP/1.0, we will close. +def _keep_alive(event): + connection = get_comma_header(event.headers, b"connection") + if b"close" in connection: + return False + if getattr(event, "http_version", b"1.1") < b"1.1": + return False + return True + + +def _body_framing(request_method, event): + # Called when we enter SEND_BODY to figure out framing information for + # this body. + # + # These are the only two events that can trigger a SEND_BODY state: + assert type(event) in (Request, Response) + # Returns one of: + # + # ("content-length", count) + # ("chunked", ()) + # ("http/1.0", ()) + # + # which are (lookup key, *args) for constructing body reader/writer + # objects. + # + # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 + # + # Step 1: some responses always have an empty body, regardless of what the + # headers say. + if type(event) is Response: + if ( + event.status_code in (204, 304) + or request_method == b"HEAD" + or (request_method == b"CONNECT" and 200 <= event.status_code < 300) + ): + return ("content-length", (0,)) + # Section 3.3.3 also lists another case -- responses with status_code + # < 200. For us these are InformationalResponses, not Responses, so + # they can't get into this function in the first place. + assert event.status_code >= 200 + + # Step 2: check for Transfer-Encoding (T-E beats C-L): + transfer_encodings = get_comma_header(event.headers, b"transfer-encoding") + if transfer_encodings: + assert transfer_encodings == [b"chunked"] + return ("chunked", ()) + + # Step 3: check for Content-Length + content_lengths = get_comma_header(event.headers, b"content-length") + if content_lengths: + return ("content-length", (int(content_lengths[0]),)) + + # Step 4: no applicable headers; fallback/default depends on type + if type(event) is Request: + return ("content-length", (0,)) + else: + return ("http/1.0", ()) + + +################################################################ +# +# The main Connection class +# +################################################################ + + +class Connection: + """An object encapsulating the state of an HTTP connection. + + Args: + our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If + you're implementing a server, pass :data:`h11.SERVER`. + + max_incomplete_event_size (int): + The maximum number of bytes we're willing to buffer of an + incomplete event. In practice this mostly sets a limit on the + maximum size of the request/response line + headers. If this is + exceeded, then :meth:`next_event` will raise + :exc:`RemoteProtocolError`. 
+ + """ + + def __init__( + self, our_role, max_incomplete_event_size=DEFAULT_MAX_INCOMPLETE_EVENT_SIZE + ): + self._max_incomplete_event_size = max_incomplete_event_size + # State and role tracking + if our_role not in (CLIENT, SERVER): + raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role)) + self.our_role = our_role + if our_role is CLIENT: + self.their_role = SERVER + else: + self.their_role = CLIENT + self._cstate = ConnectionState() + + # Callables for converting data->events or vice-versa given the + # current state + self._writer = self._get_io_object(self.our_role, None, WRITERS) + self._reader = self._get_io_object(self.their_role, None, READERS) + + # Holds any unprocessed received data + self._receive_buffer = ReceiveBuffer() + # If this is true, then it indicates that the incoming connection was + # closed *after* the end of whatever's in self._receive_buffer: + self._receive_buffer_closed = False + + # Extra bits of state that don't fit into the state machine. + # + # These two are only used to interpret framing headers for figuring + # out how to read/write response bodies. their_http_version is also + # made available as a convenient public API. + self.their_http_version = None + self._request_method = None + # This is pure flow-control and doesn't at all affect the set of legal + # transitions, so no need to bother ConnectionState with it: + self.client_is_waiting_for_100_continue = False + + @property + def states(self): + """A dictionary like:: + + {CLIENT: , SERVER: } + + See :ref:`state-machine` for details. + + """ + return dict(self._cstate.states) + + @property + def our_state(self): + """The current state of whichever role we are playing. See + :ref:`state-machine` for details. + """ + return self._cstate.states[self.our_role] + + @property + def their_state(self): + """The current state of whichever role we are NOT playing. See + :ref:`state-machine` for details. + """ + return self._cstate.states[self.their_role] + + @property + def they_are_waiting_for_100_continue(self): + return self.their_role is CLIENT and self.client_is_waiting_for_100_continue + + def start_next_cycle(self): + """Attempt to reset our connection state for a new request/response + cycle. + + If both client and server are in :data:`DONE` state, then resets them + both to :data:`IDLE` state in preparation for a new request/response + cycle on this same connection. Otherwise, raises a + :exc:`LocalProtocolError`. + + See :ref:`keepalive-and-pipelining`. + + """ + old_states = dict(self._cstate.states) + self._cstate.start_next_cycle() + self._request_method = None + # self.their_http_version gets left alone, since it presumably lasts + # beyond a single request/response cycle + assert not self.client_is_waiting_for_100_continue + self._respond_to_state_changes(old_states) + + def _process_error(self, role): + old_states = dict(self._cstate.states) + self._cstate.process_error(role) + self._respond_to_state_changes(old_states) + + def _server_switch_event(self, event): + if type(event) is InformationalResponse and event.status_code == 101: + return _SWITCH_UPGRADE + if type(event) is Response: + if ( + _SWITCH_CONNECT in self._cstate.pending_switch_proposals + and 200 <= event.status_code < 300 + ): + return _SWITCH_CONNECT + return None + + # All events go through here + def _process_event(self, role, event): + # First, pass the event through the state machine to make sure it + # succeeds. 
+ old_states = dict(self._cstate.states) + if role is CLIENT and type(event) is Request: + if event.method == b"CONNECT": + self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) + if get_comma_header(event.headers, b"upgrade"): + self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) + server_switch_event = None + if role is SERVER: + server_switch_event = self._server_switch_event(event) + self._cstate.process_event(role, type(event), server_switch_event) + + # Then perform the updates triggered by it. + + # self._request_method + if type(event) is Request: + self._request_method = event.method + + # self.their_http_version + if role is self.their_role and type(event) in ( + Request, + Response, + InformationalResponse, + ): + self.their_http_version = event.http_version + + # Keep alive handling + # + # RFC 7230 doesn't really say what one should do if Connection: close + # shows up on a 1xx InformationalResponse. I think the idea is that + # this is not supposed to happen. In any case, if it does happen, we + # ignore it. + if type(event) in (Request, Response) and not _keep_alive(event): + self._cstate.process_keep_alive_disabled() + + # 100-continue + if type(event) is Request and has_expect_100_continue(event): + self.client_is_waiting_for_100_continue = True + if type(event) in (InformationalResponse, Response): + self.client_is_waiting_for_100_continue = False + if role is CLIENT and type(event) in (Data, EndOfMessage): + self.client_is_waiting_for_100_continue = False + + self._respond_to_state_changes(old_states, event) + + def _get_io_object(self, role, event, io_dict): + # event may be None; it's only used when entering SEND_BODY + state = self._cstate.states[role] + if state is SEND_BODY: + # Special case: the io_dict has a dict of reader/writer factories + # that depend on the request/response framing. + framing_type, args = _body_framing(self._request_method, event) + return io_dict[SEND_BODY][framing_type](*args) + else: + # General case: the io_dict just has the appropriate reader/writer + # for this state + return io_dict.get((role, state)) + + # This must be called after any action that might have caused + # self._cstate.states to change. + def _respond_to_state_changes(self, old_states, event=None): + # Update reader/writer + if self.our_state != old_states[self.our_role]: + self._writer = self._get_io_object(self.our_role, event, WRITERS) + if self.their_state != old_states[self.their_role]: + self._reader = self._get_io_object(self.their_role, event, READERS) + + @property + def trailing_data(self): + """Data that has been received, but not yet processed, represented as + a tuple with two elements, where the first is a byte-string containing + the unprocessed data itself, and the second is a bool that is True if + the receive connection was closed. + + See :ref:`switching-protocols` for discussion of why you'd want this. + """ + return (bytes(self._receive_buffer), self._receive_buffer_closed) + + def receive_data(self, data): + """Add data to our internal receive buffer. + + This does not actually do any processing on the data, just stores + it. To trigger processing, you have to call :meth:`next_event`. + + Args: + data (:term:`bytes-like object`): + The new data that was just received. + + Special case: If *data* is an empty byte-string like ``b""``, + then this indicates that the remote side has closed the + connection (end of file). 
Normally this is convenient, because + standard Python APIs like :meth:`file.read` or + :meth:`socket.recv` use ``b""`` to indicate end-of-file, while + other failures to read are indicated using other mechanisms + like raising :exc:`TimeoutError`. When using such an API you + can just blindly pass through whatever you get from ``read`` + to :meth:`receive_data`, and everything will work. + + But, if you have an API where reading an empty string is a + valid non-EOF condition, then you need to be aware of this and + make sure to check for such strings and avoid passing them to + :meth:`receive_data`. + + Returns: + Nothing, but after calling this you should call :meth:`next_event` + to parse the newly received data. + + Raises: + RuntimeError: + Raised if you pass an empty *data*, indicating EOF, and then + pass a non-empty *data*, indicating more data that somehow + arrived after the EOF. + + (Calling ``receive_data(b"")`` multiple times is fine, + and equivalent to calling it once.) + + """ + if data: + if self._receive_buffer_closed: + raise RuntimeError("received close, then received more data?") + self._receive_buffer += data + else: + self._receive_buffer_closed = True + + def _extract_next_receive_event(self): + state = self.their_state + # We don't pause immediately when they enter DONE, because even in + # DONE state we can still process a ConnectionClosed() event. But + # if we have data in our buffer, then we definitely aren't getting + # a ConnectionClosed() immediately and we need to pause. + if state is DONE and self._receive_buffer: + return PAUSED + if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: + return PAUSED + assert self._reader is not None + event = self._reader(self._receive_buffer) + if event is None: + if not self._receive_buffer and self._receive_buffer_closed: + # In some unusual cases (basically just HTTP/1.0 bodies), EOF + # triggers an actual protocol event; in that case, we want to + # return that event, and then the state will change and we'll + # get called again to generate the actual ConnectionClosed(). + if hasattr(self._reader, "read_eof"): + event = self._reader.read_eof() + else: + event = ConnectionClosed() + if event is None: + event = NEED_DATA + return event + + def next_event(self): + """Parse the next event out of our receive buffer, update our internal + state, and return it. + + This is a mutating operation -- think of it like calling :func:`next` + on an iterator. + + Returns: + : One of three things: + + 1) An event object -- see :ref:`events`. + + 2) The special constant :data:`NEED_DATA`, which indicates that + you need to read more data from your socket and pass it to + :meth:`receive_data` before this method will be able to return + any more events. + + 3) The special constant :data:`PAUSED`, which indicates that we + are not in a state where we can process incoming data (usually + because the peer has finished their part of the current + request/response cycle, and you have not yet called + :meth:`start_next_cycle`). See :ref:`flow-control` for details. + + Raises: + RemoteProtocolError: + The peer has misbehaved. You should close the connection + (possibly after sending some kind of 4xx response). + + Once this method returns :class:`ConnectionClosed` once, then all + subsequent calls will also return :class:`ConnectionClosed`. + + If this method raises any exception besides :exc:`RemoteProtocolError` + then that's a bug -- if it happens please file a bug report! 
+ + If this method raises any exception then it also sets + :attr:`Connection.their_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. + + """ + + if self.their_state is ERROR: + raise RemoteProtocolError("Can't receive data when peer state is ERROR") + try: + event = self._extract_next_receive_event() + if event not in [NEED_DATA, PAUSED]: + self._process_event(self.their_role, event) + if event is NEED_DATA: + if len(self._receive_buffer) > self._max_incomplete_event_size: + # 431 is "Request header fields too large" which is pretty + # much the only situation where we can get here + raise RemoteProtocolError( + "Receive buffer too long", error_status_hint=431 + ) + if self._receive_buffer_closed: + # We're still trying to complete some event, but that's + # never going to happen because no more data is coming + raise RemoteProtocolError("peer unexpectedly closed connection") + return event + except BaseException as exc: + self._process_error(self.their_role) + if isinstance(exc, LocalProtocolError): + exc._reraise_as_remote_protocol_error() + else: + raise + + def send(self, event): + """Convert a high-level event into bytes that can be sent to the peer, + while updating our internal state machine. + + Args: + event: The :ref:`event ` to send. + + Returns: + If ``type(event) is ConnectionClosed``, then returns + ``None``. Otherwise, returns a :term:`bytes-like object`. + + Raises: + LocalProtocolError: + Sending this event at this time would violate our + understanding of the HTTP/1.1 protocol. + + If this method raises any exception then it also sets + :attr:`Connection.our_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. + + """ + data_list = self.send_with_data_passthrough(event) + if data_list is None: + return None + else: + return b"".join(data_list) + + def send_with_data_passthrough(self, event): + """Identical to :meth:`send`, except that in situations where + :meth:`send` returns a single :term:`bytes-like object`, this instead + returns a list of them -- and when sending a :class:`Data` event, this + list is guaranteed to contain the exact object you passed in as + :attr:`Data.data`. See :ref:`sendfile` for discussion. + + """ + if self.our_state is ERROR: + raise LocalProtocolError("Can't send data when our state is ERROR") + try: + if type(event) is Response: + self._clean_up_response_headers_for_sending(event) + # We want to call _process_event before calling the writer, + # because if someone tries to do something invalid then this will + # give a sensible error message, while our writers all just assume + # they will only receive valid events. But, _process_event might + # change self._writer. So we have to do a little dance: + writer = self._writer + self._process_event(self.our_role, event) + if type(event) is ConnectionClosed: + return None + else: + # In any situation where writer is None, process_event should + # have raised ProtocolError + assert writer is not None + data_list = [] + writer(event, data_list.append) + return data_list + except: + self._process_error(self.our_role) + raise + + def send_failed(self): + """Notify the state machine that we failed to send the data it gave + us. + + This causes :attr:`Connection.our_state` to immediately become + :data:`ERROR` -- see :ref:`error-handling` for discussion. + + """ + self._process_error(self.our_role) + + # When sending a Response, we take responsibility for a few things: + # + # - Sometimes you MUST set Connection: close. We take care of those + # times. 
(You can also set it yourself if you want, and if you do then + # we'll respect that and close the connection at the right time. But you + # don't have to worry about that unless you want to.) + # + # - The user has to set Content-Length if they want it. Otherwise, for + # responses that have bodies (e.g. not HEAD), we will automatically + # select the right mechanism for streaming a body of unknown length, + # which depends on the peer's HTTP version. + # + # This function's *only* responsibility is making sure headers are set up + # right -- everything downstream just looks at the headers. There are no + # side channels. It mutates the response event in-place (but not the + # response.headers list object). + def _clean_up_response_headers_for_sending(self, response): + assert type(response) is Response + + headers = response.headers + need_close = False + + # HEAD requests need some special handling: they always act like they + # have Content-Length: 0, and that's how _body_framing treats + # them. But their headers are supposed to match what we would send if + # the request was a GET. (Technically there is one deviation allowed: + # we're allowed to leave out the framing headers -- see + # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as + # easy to get them right.) + method_for_choosing_headers = self._request_method + if method_for_choosing_headers == b"HEAD": + method_for_choosing_headers = b"GET" + framing_type, _ = _body_framing(method_for_choosing_headers, response) + if framing_type in ("chunked", "http/1.0"): + # This response has a body of unknown length. + # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked + # If our peer is HTTP/1.0, we use no framing headers, and close the + # connection afterwards. + # + # Make sure to clear Content-Length (in principle user could have + # set both and then we ignored Content-Length b/c + # Transfer-Encoding overwrote it -- this would be naughty of them, + # but the HTTP spec says that if our peer does this then we have + # to fix it instead of erroring out, so we'll accord the user the + # same respect). + headers = set_comma_header(headers, b"content-length", []) + if self.their_http_version is None or self.their_http_version < b"1.1": + # Either we never got a valid request and are sending back an + # error (their_http_version is None), so we assume the worst; + # or else we did get a valid HTTP/1.0 request, so we know that + # they don't understand chunked encoding. + headers = set_comma_header(headers, b"transfer-encoding", []) + # This is actually redundant ATM, since currently we + # unconditionally disable keep-alive when talking to HTTP/1.0 + # peers. But let's be defensive just in case we add + # Connection: keep-alive support later: + if self._request_method != b"HEAD": + need_close = True + else: + headers = set_comma_header(headers, b"transfer-encoding", ["chunked"]) + + if not self._cstate.keep_alive or need_close: + # Make sure Connection: close is set + connection = set(get_comma_header(headers, b"connection")) + connection.discard(b"keep-alive") + connection.add(b"close") + headers = set_comma_header(headers, b"connection", sorted(connection)) + + response.headers = headers diff --git a/myenv/lib/python3.9/site-packages/h11/_events.py b/myenv/lib/python3.9/site-packages/h11/_events.py new file mode 100644 index 0000000..1827930 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_events.py @@ -0,0 +1,302 @@ +# High level events that make up HTTP/1.1 conversations.
Loosely inspired by +# the corresponding events in hyper-h2: +# +# http://python-hyper.org/h2/en/stable/api.html#events +# +# Don't subclass these. Stuff will break. + +import re + +from . import _headers +from ._abnf import request_target +from ._util import bytesify, LocalProtocolError, validate + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = [ + "Request", + "InformationalResponse", + "Response", + "Data", + "EndOfMessage", + "ConnectionClosed", +] + +request_target_re = re.compile(request_target.encode("ascii")) + + +class _EventBundle: + _fields = [] + _defaults = {} + + def __init__(self, **kwargs): + _parsed = kwargs.pop("_parsed", False) + allowed = set(self._fields) + for kwarg in kwargs: + if kwarg not in allowed: + raise TypeError( + "unrecognized kwarg {} for {}".format( + kwarg, self.__class__.__name__ + ) + ) + required = allowed.difference(self._defaults) + for field in required: + if field not in kwargs: + raise TypeError( + "missing required kwarg {} for {}".format( + field, self.__class__.__name__ + ) + ) + self.__dict__.update(self._defaults) + self.__dict__.update(kwargs) + + # Special handling for some fields + + if "headers" in self.__dict__: + self.headers = _headers.normalize_and_validate( + self.headers, _parsed=_parsed + ) + + if not _parsed: + for field in ["method", "target", "http_version", "reason"]: + if field in self.__dict__: + self.__dict__[field] = bytesify(self.__dict__[field]) + + if "status_code" in self.__dict__: + if not isinstance(self.status_code, int): + raise LocalProtocolError("status code must be integer") + # Because IntEnum objects are instances of int, but aren't + # duck-compatible (sigh), see gh-72. + self.status_code = int(self.status_code) + + self._validate() + + def _validate(self): + pass + + def __repr__(self): + name = self.__class__.__name__ + kwarg_strs = [ + "{}={}".format(field, self.__dict__[field]) for field in self._fields + ] + kwarg_str = ", ".join(kwarg_strs) + return "{}({})".format(name, kwarg_str) + + # Useful for tests + def __eq__(self, other): + return self.__class__ == other.__class__ and self.__dict__ == other.__dict__ + + # This is an unhashable type. + __hash__ = None + + +class Request(_EventBundle): + """The beginning of an HTTP request. + + Fields: + + .. attribute:: method + + An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte + string. :term:`Bytes-like objects <bytes-like object>` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: target + + The target of an HTTP request, e.g. ``b"/index.html"``, or one of the + more exotic formats described in `RFC 7230, section 5.3 + `_. Always a byte + string. :term:`Bytes-like objects <bytes-like object>` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details.
+ + """ + + _fields = ["method", "target", "headers", "http_version"] + _defaults = {"http_version": b"1.1"} + + def _validate(self): + # "A server MUST respond with a 400 (Bad Request) status code to any + # HTTP/1.1 request message that lacks a Host header field and to any + # request message that contains more than one Host header field or a + # Host header field with an invalid field-value." + # -- https://tools.ietf.org/html/rfc7230#section-5.4 + host_count = 0 + for name, value in self.headers: + if name == b"host": + host_count += 1 + if self.http_version == b"1.1" and host_count == 0: + raise LocalProtocolError("Missing mandatory Host: header") + if host_count > 1: + raise LocalProtocolError("Found multiple Host: headers") + + validate(request_target_re, self.target, "Illegal target characters") + + +class _ResponseBase(_EventBundle): + _fields = ["status_code", "headers", "http_version", "reason"] + _defaults = {"http_version": b"1.1", "reason": b""} + + +class InformationalResponse(_ResponseBase): + """An HTTP informational response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`InformationalResponse`, this is always in the range [100, + 200). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for + details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details. + + .. attribute:: reason + + The reason phrase of this response, as a byte string. For example: + ``b"OK"``, or ``b"Not Found"``. + + """ + + def _validate(self): + if not (100 <= self.status_code < 200): + raise LocalProtocolError( + "InformationalResponse status_code should be in range " + "[100, 200), not {}".format(self.status_code) + ) + + +class Response(_ResponseBase): + """The beginning of an HTTP response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`Response`, this is always in the range [200, + 600). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details. + + .. attribute:: reason + + The reason phrase of this response, as a byte string. For example: + ``b"OK"``, or ``b"Not Found"``. + + """ + + def _validate(self): + if not (200 <= self.status_code < 600): + raise LocalProtocolError( + "Response status_code should be in range [200, 600), not {}".format( + self.status_code + ) + ) + + +class Data(_EventBundle): + """Part of an HTTP message body. + + Fields: + + .. attribute:: data + + A :term:`bytes-like object` containing part of a message body. Or, if + using the ``combine=False`` argument to :meth:`Connection.send`, then + any object that your socket writing code knows what to do with, and for + which calling :func:`len` returns the number of bytes that will be + written -- see :ref:`sendfile` for details. + + .. attribute:: chunk_start + + A marker that indicates whether this data object is from the start of a + chunked transfer encoding chunk. This field is ignored when when a Data + event is provided to :meth:`Connection.send`: it is only valid on + events emitted from :meth:`Connection.next_event`. 
You probably + shouldn't use this attribute at all; see + :ref:`chunk-delimiters-are-bad` for details. + + .. attribute:: chunk_end + + A marker that indicates whether this data object is the last for a + given chunked transfer encoding chunk. This field is ignored when + a Data event is provided to :meth:`Connection.send`: it is only valid + on events emitted from :meth:`Connection.next_event`. You probably + shouldn't use this attribute at all; see + :ref:`chunk-delimiters-are-bad` for details. + + """ + + _fields = ["data", "chunk_start", "chunk_end"] + _defaults = {"chunk_start": False, "chunk_end": False} + + +# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that +# are forbidden to be sent in a trailer, since processing them as if they were +# present in the header section might bypass external security filters." +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part +# Unfortunately, the list of forbidden fields is long and vague :-/ +class EndOfMessage(_EventBundle): + """The end of an HTTP message. + + Fields: + + .. attribute:: headers + + Default value: ``[]`` + + Any trailing headers attached to this message, represented as a list of + (name, value) pairs. See :ref:`the header normalization rules + ` for details. + + Must be empty unless ``Transfer-Encoding: chunked`` is in use. + + """ + + _fields = ["headers"] + _defaults = {"headers": []} + + +class ConnectionClosed(_EventBundle): + """This event indicates that the sender has closed their outgoing + connection. + + Note that this does not necessarily mean that they can't *receive* further + data, because TCP connections are composed of two one-way channels which + can be closed independently. See :ref:`closing` for details. + + No fields. + """ + + pass diff --git a/myenv/lib/python3.9/site-packages/h11/_headers.py b/myenv/lib/python3.9/site-packages/h11/_headers.py new file mode 100644 index 0000000..7ed39bc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_headers.py @@ -0,0 +1,242 @@ +import re + +from ._abnf import field_name, field_value +from ._util import bytesify, LocalProtocolError, validate + +# Facts +# ----- +# +# Headers are: +# keys: case-insensitive ascii +# values: mixture of ascii and raw bytes +# +# "Historically, HTTP has allowed field content with text in the ISO-8859-1 +# charset [ISO-8859-1], supporting other charsets only through use of +# [RFC2047] encoding. In practice, most HTTP header field values use only a +# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD +# limit their field values to US-ASCII octets. A recipient SHOULD treat other +# octets in field content (obs-text) as opaque data."
+# And it deprecates all non-ascii values +# +# Leading/trailing whitespace in header names is forbidden +# +# Values get leading/trailing whitespace stripped +# +# Content-Disposition actually needs to contain unicode semantically; to +# accomplish this it has a terrifically weird way of encoding the filename +# itself as ascii (and even this still has lots of cross-browser +# incompatibilities) +# +# Order is important: +# "a proxy MUST NOT change the order of these field values when forwarding a +# message" +# (and there are several headers where the order indicates a preference) +# +# Multiple occurrences of the same header: +# "A sender MUST NOT generate multiple header fields with the same field name +# in a message unless either the entire field value for that header field is +# defined as a comma-separated list [or the header is Set-Cookie which gets a +# special exception]" - RFC 7230. (cookies are in RFC 6265) +# +# So every header aside from Set-Cookie can be merged by b", ".join if it +# occurs repeatedly. But, of course, they can't necessarily be split by +# .split(b","), because quoting. +# +# Given all this mess (case insensitive, duplicates allowed, order is +# important, ...), there doesn't appear to be any standard way to handle +# headers in Python -- they're almost like dicts, but... actually just +# aren't. For now we punt and just use a super simple representation: headers +# are a list of pairs +# +# [(name1, value1), (name2, value2), ...] +# +# where all entries are bytestrings, names are lowercase and have no +# leading/trailing whitespace, and values are bytestrings with no +# leading/trailing whitespace. Searching and updating are done via naive O(n) +# methods. +# +# Maybe a dict-of-lists would be better? + +_content_length_re = re.compile(br"[0-9]+") +_field_name_re = re.compile(field_name.encode("ascii")) +_field_value_re = re.compile(field_value.encode("ascii")) + + +class Headers: + """ + A list-like interface that allows iterating over headers as byte-pairs + of (lowercased-name, value). + + Internally we actually store the representation as three-tuples, + including both the raw original casing, in order to preserve casing + over-the-wire, and the lowercased name, for case-insensitive comparisons.
+ + r = Request( + method="GET", + target="/", + headers=[("Host", "example.org"), ("Connection", "keep-alive")], + http_version="1.1", + ) + assert r.headers == [ + (b"host", b"example.org"), + (b"connection", b"keep-alive") + ] + assert r.headers.raw_items() == [ + (b"Host", b"example.org"), + (b"Connection", b"keep-alive") + ] + """ + + __slots__ = "_full_items" + + def __init__(self, full_items): + self._full_items = full_items + + def __iter__(self): + for _, name, value in self._full_items: + yield name, value + + def __bool__(self): + return bool(self._full_items) + + def __eq__(self, other): + return list(self) == list(other) + + def __len__(self): + return len(self._full_items) + + def __repr__(self): + return "<Headers(%s)>" % repr(list(self)) + + def __getitem__(self, idx): + _, name, value = self._full_items[idx] + return (name, value) + + def raw_items(self): + return [(raw_name, value) for raw_name, _, value in self._full_items] + + +def normalize_and_validate(headers, _parsed=False): + new_headers = [] + seen_content_length = None + saw_transfer_encoding = False + for name, value in headers: + # For headers coming out of the parser, we can safely skip some steps, + # because it always returns bytes and has already run these regexes + # over the data: + if not _parsed: + name = bytesify(name) + value = bytesify(value) + validate(_field_name_re, name, "Illegal header name {!r}", name) + validate(_field_value_re, value, "Illegal header value {!r}", value) + raw_name = name + name = name.lower() + if name == b"content-length": + lengths = {length.strip() for length in value.split(b",")} + if len(lengths) != 1: + raise LocalProtocolError("conflicting Content-Length headers") + value = lengths.pop() + validate(_content_length_re, value, "bad Content-Length") + if seen_content_length is None: + seen_content_length = value + new_headers.append((raw_name, name, value)) + elif seen_content_length != value: + raise LocalProtocolError("conflicting Content-Length headers") + elif name == b"transfer-encoding": + # "A server that receives a request message with a transfer coding + # it does not understand SHOULD respond with 501 (Not + # Implemented)." + # https://tools.ietf.org/html/rfc7230#section-3.3.1 + if saw_transfer_encoding: + raise LocalProtocolError( + "multiple Transfer-Encoding headers", error_status_hint=501 + ) + # "All transfer-coding names are case-insensitive" + # -- https://tools.ietf.org/html/rfc7230#section-4 + value = value.lower() + if value != b"chunked": + raise LocalProtocolError( + "Only Transfer-Encoding: chunked is supported", + error_status_hint=501, + ) + saw_transfer_encoding = True + new_headers.append((raw_name, name, value)) + else: + new_headers.append((raw_name, name, value)) + return Headers(new_headers) + + +def get_comma_header(headers, name): + # Should only be used for headers whose value is a list of + # comma-separated, case-insensitive values. + # + # The header name `name` is expected to be lower-case bytes. + # + # Connection: meets these criteria (including case insensitivity). + # + # Content-Length: technically is just a single value (1*DIGIT), but the + # standard makes reference to implementations that do multiple values, and + # using this doesn't hurt. Ditto, case insensitivity doesn't hurt things + # either way. + # + # Transfer-Encoding: is more complex (allows for quoted strings), so + # splitting on , is actually wrong.
For example, this is legal: + # + # Transfer-Encoding: foo; options="1,2", chunked + # + # and should be parsed as + # + # foo; options="1,2" + # chunked + # + # but this naive function will parse it as + # + # foo; options="1 + # 2" + # chunked + # + # However, this is okay because the only thing we are going to do with + # any Transfer-Encoding is reject ones that aren't just "chunked", so + # both of these will be treated the same anyway. + # + # Expect: the only legal value is the literal string + # "100-continue". Splitting on commas is harmless. Case insensitive. + # + out = [] + for _, found_name, found_raw_value in headers._full_items: + if found_name == name: + found_raw_value = found_raw_value.lower() + for found_split_value in found_raw_value.split(b","): + found_split_value = found_split_value.strip() + if found_split_value: + out.append(found_split_value) + return out + + +def set_comma_header(headers, name, new_values): + # The header name `name` is expected to be lower-case bytes. + # + # Note that when we store the header we use title casing for the header + # names, in order to match the conventional HTTP header style. + # + # Simply calling `.title()` is a blunt approach, but it's correct + # here given the cases where we're using `set_comma_header`... + # + # Connection, Content-Length, Transfer-Encoding. + new_headers = [] + for found_raw_name, found_name, found_raw_value in headers._full_items: + if found_name != name: + new_headers.append((found_raw_name, found_raw_value)) + for new_value in new_values: + new_headers.append((name.title(), new_value)) + return normalize_and_validate(new_headers) + + +def has_expect_100_continue(request): + # https://tools.ietf.org/html/rfc7231#section-5.1.1 + # "A server that receives a 100-continue expectation in an HTTP/1.0 request + # MUST ignore that expectation." + if request.http_version < b"1.1": + return False + expect = get_comma_header(request.headers, b"expect") + return b"100-continue" in expect diff --git a/myenv/lib/python3.9/site-packages/h11/_readers.py b/myenv/lib/python3.9/site-packages/h11/_readers.py new file mode 100644 index 0000000..0ead0be --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_readers.py @@ -0,0 +1,222 @@ +# Code to read HTTP data +# +# Strategy: each reader is a callable which takes a ReceiveBuffer object, and +# either: +# 1) consumes some of it and returns an Event +# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate() +# and it might raise a LocalProtocolError, so simpler just to always use +# this) +# 3) returns None, meaning "I need more data" +# +# If they have a .read_eof attribute, then this will be called if an EOF is +# received -- but this is optional. Either way, the actual ConnectionClosed +# event will be generated afterwards. +# +# READERS is a dict describing how to pick a reader. It maps states to either: +# - a reader +# - or, for body readers, a dict of per-framing reader factories + +import re + +from ._abnf import chunk_header, header_field, request_line, status_line +from ._events import * +from ._state import * +from ._util import LocalProtocolError, RemoteProtocolError, validate + +__all__ = ["READERS"] + +header_field_re = re.compile(header_field.encode("ascii")) + +# Remember that this has to run in O(n) time -- so e.g. the bytearray cast is +# critical. 
+obs_fold_re = re.compile(br"[ \t]+") + + +def _obsolete_line_fold(lines): + it = iter(lines) + last = None + for line in it: + match = obs_fold_re.match(line) + if match: + if last is None: + raise LocalProtocolError("continuation line at start of headers") + if not isinstance(last, bytearray): + last = bytearray(last) + last += b" " + last += line[match.end() :] + else: + if last is not None: + yield last + last = line + if last is not None: + yield last + + +def _decode_header_lines(lines): + for line in _obsolete_line_fold(lines): + matches = validate(header_field_re, line, "illegal header line: {!r}", line) + yield (matches["field_name"], matches["field_value"]) + + +request_line_re = re.compile(request_line.encode("ascii")) + + +def maybe_read_from_IDLE_client(buf): + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no request line received") + matches = validate( + request_line_re, lines[0], "illegal request line: {!r}", lines[0] + ) + return Request( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +status_line_re = re.compile(status_line.encode("ascii")) + + +def maybe_read_from_SEND_RESPONSE_server(buf): + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no response line received") + matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0]) + # Tolerate missing reason phrases + if matches["reason"] is None: + matches["reason"] = b"" + status_code = matches["status_code"] = int(matches["status_code"]) + class_ = InformationalResponse if status_code < 200 else Response + return class_( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +class ContentLengthReader: + def __init__(self, length): + self._length = length + self._remaining = length + + def __call__(self, buf): + if self._remaining == 0: + return EndOfMessage() + data = buf.maybe_extract_at_most(self._remaining) + if data is None: + return None + self._remaining -= len(data) + return Data(data=data) + + def read_eof(self): + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(received {} bytes, expected {})".format( + self._length - self._remaining, self._length + ) + ) + + +chunk_header_re = re.compile(chunk_header.encode("ascii")) + + +class ChunkedReader: + def __init__(self): + self._bytes_in_chunk = 0 + # After reading a chunk, we have to throw away the trailing \r\n; if + # this is >0 then we discard that many bytes before resuming regular + # de-chunkification. 
+ self._bytes_to_discard = 0 + self._reading_trailer = False + + def __call__(self, buf): + if self._reading_trailer: + lines = buf.maybe_extract_lines() + if lines is None: + return None + return EndOfMessage(headers=list(_decode_header_lines(lines))) + if self._bytes_to_discard > 0: + data = buf.maybe_extract_at_most(self._bytes_to_discard) + if data is None: + return None + self._bytes_to_discard -= len(data) + if self._bytes_to_discard > 0: + return None + # else, fall through and read some more + assert self._bytes_to_discard == 0 + if self._bytes_in_chunk == 0: + # We need to refill our chunk count + chunk_header = buf.maybe_extract_next_line() + if chunk_header is None: + return None + matches = validate( + chunk_header_re, + chunk_header, + "illegal chunk header: {!r}", + chunk_header, + ) + # XX FIXME: we discard chunk extensions. Does anyone care? + self._bytes_in_chunk = int(matches["chunk_size"], base=16) + if self._bytes_in_chunk == 0: + self._reading_trailer = True + return self(buf) + chunk_start = True + else: + chunk_start = False + assert self._bytes_in_chunk > 0 + data = buf.maybe_extract_at_most(self._bytes_in_chunk) + if data is None: + return None + self._bytes_in_chunk -= len(data) + if self._bytes_in_chunk == 0: + self._bytes_to_discard = 2 + chunk_end = True + else: + chunk_end = False + return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end) + + def read_eof(self): + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(incomplete chunked read)" + ) + + +class Http10Reader: + def __call__(self, buf): + data = buf.maybe_extract_at_most(999999999) + if data is None: + return None + return Data(data=data) + + def read_eof(self): + return EndOfMessage() + + +def expect_nothing(buf): + if buf: + raise LocalProtocolError("Got data when expecting EOF") + return None + + +READERS = { + (CLIENT, IDLE): maybe_read_from_IDLE_client, + (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server, + (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server, + (CLIENT, DONE): expect_nothing, + (CLIENT, MUST_CLOSE): expect_nothing, + (CLIENT, CLOSED): expect_nothing, + (SERVER, DONE): expect_nothing, + (SERVER, MUST_CLOSE): expect_nothing, + (SERVER, CLOSED): expect_nothing, + SEND_BODY: { + "chunked": ChunkedReader, + "content-length": ContentLengthReader, + "http/1.0": Http10Reader, + }, +} diff --git a/myenv/lib/python3.9/site-packages/h11/_receivebuffer.py b/myenv/lib/python3.9/site-packages/h11/_receivebuffer.py new file mode 100644 index 0000000..a3737f3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_receivebuffer.py @@ -0,0 +1,152 @@ +import re +import sys + +__all__ = ["ReceiveBuffer"] + + +# Operations we want to support: +# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), +# or wait until there is one +# - read at-most-N bytes +# Goals: +# - on average, do this fast +# - worst case, do this in O(n) where n is the number of bytes processed +# Plan: +# - store bytearray, offset, how far we've searched for a separator token +# - use the how-far-we've-searched data to avoid rescanning +# - while doing a stream of uninterrupted processing, advance offset instead +# of constantly copying +# WARNING: +# - I haven't benchmarked or profiled any of this yet. 
+# +# Note that starting in Python 3.4, deleting the initial n bytes from a +# bytearray is amortized O(n), thanks to some excellent work by Antoine +# Martin: +# +# https://bugs.python.org/issue19087 +# +# This means that if we only supported 3.4+, we could get rid of the code here +# involving self._start and self.compress, because it's doing exactly the same +# thing that bytearray now does internally. +# +# BUT unfortunately, we still support 2.7, and reading short segments out of a +# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually +# delete this code. Yet: +# +# https://pythonclock.org/ +# +# (Two things to double-check first though: make sure PyPy also has the +# optimization, and benchmark to make sure it's a win, since we do have a +# slightly clever thing where we delay calling compress() until we've +# processed a whole event, which could in theory be slightly more efficient +# than the internal bytearray support.) +blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) + + +class ReceiveBuffer: + def __init__(self): + self._data = bytearray() + self._next_line_search = 0 + self._multiple_lines_search = 0 + + def __iadd__(self, byteslike): + self._data += byteslike + return self + + def __bool__(self): + return bool(len(self)) + + def __len__(self): + return len(self._data) + + # for @property unprocessed_data + def __bytes__(self): + return bytes(self._data) + + def _extract(self, count): + # extracting an initial slice of the data buffer and return it + out = self._data[:count] + del self._data[:count] + + self._next_line_search = 0 + self._multiple_lines_search = 0 + + return out + + def maybe_extract_at_most(self, count): + """ + Extract a fixed number of bytes from the buffer. + """ + out = self._data[:count] + if not out: + return None + + return self._extract(count) + + def maybe_extract_next_line(self): + """ + Extract the first line, if it is completed in the buffer. + """ + # Only search in buffer space that we've not already looked at. + search_start_index = max(0, self._next_line_search - 1) + partial_idx = self._data.find(b"\r\n", search_start_index) + + if partial_idx == -1: + self._next_line_search = len(self._data) + return None + + # + 2 is to compensate len(b"\r\n") + idx = partial_idx + 2 + + return self._extract(idx) + + def maybe_extract_lines(self): + """ + Extract everything up to the first blank line, and return a list of lines. + """ + # Handle the case where we have an immediate empty line. + if self._data[:1] == b"\n": + self._extract(1) + return [] + + if self._data[:2] == b"\r\n": + self._extract(2) + return [] + + # Only search in buffer space that we've not already looked at. + match = blank_line_regex.search(self._data, self._multiple_lines_search) + if match is None: + self._multiple_lines_search = max(0, len(self._data) - 2) + return None + + # Truncate the buffer and return it. + idx = match.span(0)[-1] + out = self._extract(idx) + lines = out.split(b"\n") + + for line in lines: + if line.endswith(b"\r"): + del line[-1] + + assert lines[-2] == lines[-1] == b"" + + del lines[-2:] + + return lines + + # In theory we should wait until `\r\n` before starting to validate + # incoming data. However it's interesting to detect (very) invalid data + # early given they might not even contain `\r\n` at all (hence only + # timeout will get rid of them). + # This is not a 100% effective detection but more of a cheap sanity check + # allowing for early abort in some useful cases. 
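(Tying the class above together, a usage sketch -- assuming the vendored module path shown in this diff:)

    from h11._receivebuffer import ReceiveBuffer

    buf = ReceiveBuffer()
    buf += b"GET / HTTP/1.1\r\nHost: a\r\n"
    assert buf.maybe_extract_lines() is None   # no blank line in the buffer yet
    buf += b"\r\n"
    assert buf.maybe_extract_lines() == [b"GET / HTTP/1.1", b"Host: a"]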
+ # This is especially interesting when peer is messing up with HTTPS and + # sent us a TLS stream where we were expecting plain HTTP given all + # versions of TLS so far start handshake with a 0x16 message type code. + def is_next_line_obviously_invalid_request_line(self): + try: + # HTTP header line must not contain non-printable characters + # and should not start with a space + return self._data[0] < 0x21 + except IndexError: + return False diff --git a/myenv/lib/python3.9/site-packages/h11/_state.py b/myenv/lib/python3.9/site-packages/h11/_state.py new file mode 100644 index 0000000..0f08a09 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/_state.py @@ -0,0 +1,307 @@ +################################################################ +# The core state machine +################################################################ +# +# Rule 1: everything that affects the state machine and state transitions must +# live here in this file. As much as possible goes into the table-based +# representation, but for the bits that don't quite fit, the actual code and +# state must nonetheless live here. +# +# Rule 2: this file does not know about what role we're playing; it only knows +# about HTTP request/response cycles in the abstract. This ensures that we +# don't cheat and apply different rules to local and remote parties. +# +# +# Theory of operation +# =================== +# +# Possibly the simplest way to think about this is that we actually have 5 +# different state machines here. Yes, 5. These are: +# +# 1) The client state, with its complicated automaton (see the docs) +# 2) The server state, with its complicated automaton (see the docs) +# 3) The keep-alive state, with possible states {True, False} +# 4) The SWITCH_CONNECT state, with possible states {False, True} +# 5) The SWITCH_UPGRADE state, with possible states {False, True} +# +# For (3)-(5), the first state listed is the initial state. +# +# (1)-(3) are stored explicitly in member variables. The last +# two are stored implicitly in the pending_switch_proposals set as: +# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals) +# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals) +# +# And each of these machines has two different kinds of transitions: +# +# a) Event-triggered +# b) State-triggered +# +# Event triggered is the obvious thing that you'd think it is: some event +# happens, and if it's the right event at the right time then a transition +# happens. But there are somewhat complicated rules for which machines can +# "see" which events. (As a rule of thumb, if a machine "sees" an event, this +# means two things: the event can affect the machine, and if the machine is +# not in a state where it expects that event then it's an error.) These rules +# are: +# +# 1) The client machine sees all h11.events objects emitted by the client. +# +# 2) The server machine sees all h11.events objects emitted by the server. +# +# It also sees the client's Request event. +# +# And sometimes, server events are annotated with a _SWITCH_* event. For +# example, we can have a (Response, _SWITCH_CONNECT) event, which is +# different from a regular Response event. +# +# 3) The keep-alive machine sees the process_keep_alive_disabled() event +# (which is derived from Request/Response events), and this event +# transitions it from True -> False, or from False -> False. There's no way +# to transition back. 
+# +# 4&5) The _SWITCH_* machines transition from False->True when we get a +# Request that proposes the relevant type of switch (via +# process_client_switch_proposals), and they go from True->False when we +# get a Response that has no _SWITCH_* annotation. +# +# So that's event-triggered transitions. +# +# State-triggered transitions are less standard. What they do here is couple +# the machines together. The way this works is, when certain *joint* +# configurations of states are achieved, then we automatically transition to a +# new *joint* state. So, for example, if we're ever in a joint state with +# +# client: DONE +# keep-alive: False +# +# then the client state immediately transitions to: +# +# client: MUST_CLOSE +# +# This is fundamentally different from an event-based transition, because it +# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state +# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive +# transitioned True -> False. Either way, once this precondition is satisfied, +# this transition is immediately triggered. +# +# What if two conflicting state-based transitions get enabled at the same +# time? In practice there's only one case where this arises (client DONE -> +# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by +# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition. +# +# Implementation +# -------------- +# +# The event-triggered transitions for the server and client machines are all +# stored explicitly in a table. Ditto for the state-triggered transitions that +# involve just the server and client state. +# +# The transitions for the other machines, and the state-triggered transitions +# that involve the other machines, are written out as explicit Python code. +# +# It'd be nice if there were some cleaner way to do all this. This isn't +# *too* terrible, but I feel like it could probably be better. +# +# WARNING +# ------- +# +# The script that generates the state machine diagrams for the docs knows how +# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS +# tables. But it can't automatically read the transitions that are written +# directly in Python code. So if you touch those, you need to also update the +# script to keep it in sync! + +from ._events import * +from ._util import LocalProtocolError, make_sentinel + +# Everything in __all__ gets re-exported as part of the h11 public API. 
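(A concrete walk-through of the two transition kinds, using the tables and the ConnectionState class defined below; this is internal API, shown for illustration only:)

    from h11._events import EndOfMessage, Request, Response
    from h11._state import (
        CLIENT, DONE, MUST_CLOSE, SEND_BODY, SEND_RESPONSE, SERVER, ConnectionState,
    )

    cs = ConnectionState()
    cs.process_event(CLIENT, Request)        # event-triggered: client IDLE -> SEND_BODY;
                                             # server sees (Request, CLIENT) -> SEND_RESPONSE
    assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
    cs.process_event(CLIENT, EndOfMessage)   # client -> DONE
    cs.process_event(SERVER, Response)       # server -> SEND_BODY
    cs.process_event(SERVER, EndOfMessage)   # server -> DONE
    cs.start_next_cycle()                    # DONE/DONE resets to IDLE/IDLE

    cs.process_event(CLIENT, Request)
    cs.process_event(CLIENT, EndOfMessage)
    cs.process_keep_alive_disabled()         # state-triggered: client DONE plus
    assert cs.states[CLIENT] is MUST_CLOSE   # keep_alive=False fires immediately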
+__all__ = [ + "CLIENT", + "SERVER", + "IDLE", + "SEND_RESPONSE", + "SEND_BODY", + "DONE", + "MUST_CLOSE", + "CLOSED", + "MIGHT_SWITCH_PROTOCOL", + "SWITCHED_PROTOCOL", + "ERROR", +] + +CLIENT = make_sentinel("CLIENT") +SERVER = make_sentinel("SERVER") + +# States +IDLE = make_sentinel("IDLE") +SEND_RESPONSE = make_sentinel("SEND_RESPONSE") +SEND_BODY = make_sentinel("SEND_BODY") +DONE = make_sentinel("DONE") +MUST_CLOSE = make_sentinel("MUST_CLOSE") +CLOSED = make_sentinel("CLOSED") +ERROR = make_sentinel("ERROR") + +# Switch types +MIGHT_SWITCH_PROTOCOL = make_sentinel("MIGHT_SWITCH_PROTOCOL") +SWITCHED_PROTOCOL = make_sentinel("SWITCHED_PROTOCOL") + +_SWITCH_UPGRADE = make_sentinel("_SWITCH_UPGRADE") +_SWITCH_CONNECT = make_sentinel("_SWITCH_CONNECT") + +EVENT_TRIGGERED_TRANSITIONS = { + CLIENT: { + IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED}, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + MIGHT_SWITCH_PROTOCOL: {}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, + SERVER: { + IDLE: { + ConnectionClosed: CLOSED, + Response: SEND_BODY, + # Special case: server sees client Request events, in this form + (Request, CLIENT): SEND_RESPONSE, + }, + SEND_RESPONSE: { + InformationalResponse: SEND_RESPONSE, + Response: SEND_BODY, + (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL, + (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL, + }, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, +} + +# NB: there are also some special-case state-triggered transitions hard-coded +# into _fire_state_triggered_transitions below. +STATE_TRIGGERED_TRANSITIONS = { + # (Client state, Server state) -> new states + # Protocol negotiation + (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL}, + # Socket shutdown + (CLOSED, DONE): {SERVER: MUST_CLOSE}, + (CLOSED, IDLE): {SERVER: MUST_CLOSE}, + (ERROR, DONE): {SERVER: MUST_CLOSE}, + (DONE, CLOSED): {CLIENT: MUST_CLOSE}, + (IDLE, CLOSED): {CLIENT: MUST_CLOSE}, + (DONE, ERROR): {CLIENT: MUST_CLOSE}, +} + + +class ConnectionState: + def __init__(self): + # Extra bits of state that don't quite fit into the state model. + + # If this is False then it enables the automatic DONE -> MUST_CLOSE + # transition. Don't set this directly; call .keep_alive_disabled() + self.keep_alive = True + + # This is a subset of {UPGRADE, CONNECT}, containing the proposals + # made by the client for switching protocols. 
+ self.pending_switch_proposals = set() + + self.states = {CLIENT: IDLE, SERVER: IDLE} + + def process_error(self, role): + self.states[role] = ERROR + self._fire_state_triggered_transitions() + + def process_keep_alive_disabled(self): + self.keep_alive = False + self._fire_state_triggered_transitions() + + def process_client_switch_proposal(self, switch_event): + self.pending_switch_proposals.add(switch_event) + self._fire_state_triggered_transitions() + + def process_event(self, role, event_type, server_switch_event=None): + if server_switch_event is not None: + assert role is SERVER + if server_switch_event not in self.pending_switch_proposals: + raise LocalProtocolError( + "Received server {} event without a pending proposal".format( + server_switch_event + ) + ) + event_type = (event_type, server_switch_event) + if server_switch_event is None and event_type is Response: + self.pending_switch_proposals = set() + self._fire_event_triggered_transitions(role, event_type) + # Special case: the server state does get to see Request + # events. + if event_type is Request: + assert role is CLIENT + self._fire_event_triggered_transitions(SERVER, (Request, CLIENT)) + self._fire_state_triggered_transitions() + + def _fire_event_triggered_transitions(self, role, event_type): + state = self.states[role] + try: + new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type] + except KeyError: + raise LocalProtocolError( + "can't handle event type {} when role={} and state={}".format( + event_type.__name__, role, self.states[role] + ) + ) + self.states[role] = new_state + + def _fire_state_triggered_transitions(self): + # We apply these rules repeatedly until converging on a fixed point + while True: + start_states = dict(self.states) + + # It could happen that both these special-case transitions are + # enabled at the same time: + # + # DONE -> MIGHT_SWITCH_PROTOCOL + # DONE -> MUST_CLOSE + # + # For example, this will always be true of a HTTP/1.0 client + # requesting CONNECT. If this happens, the protocol switch takes + # priority. From there the client will either go to + # SWITCHED_PROTOCOL, in which case it's none of our business when + # they close the connection, or else the server will deny the + # request, in which case the client will go back to DONE and then + # from there to MUST_CLOSE. + if self.pending_switch_proposals: + if self.states[CLIENT] is DONE: + self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL + + if not self.pending_switch_proposals: + if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL: + self.states[CLIENT] = DONE + + if not self.keep_alive: + for role in (CLIENT, SERVER): + if self.states[role] is DONE: + self.states[role] = MUST_CLOSE + + # Tabular state-triggered transitions + joint_state = (self.states[CLIENT], self.states[SERVER]) + changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {}) + self.states.update(changes) + + if self.states == start_states: + # Fixed point reached + return + + def start_next_cycle(self): + if self.states != {CLIENT: DONE, SERVER: DONE}: + raise LocalProtocolError( + "not in a reusable state. self.states={}".format(self.states) + ) + # Can't reach DONE/DONE with any of these active, but still, let's be + # sure. 
+        assert self.keep_alive
+        assert not self.pending_switch_proposals
+        self.states = {CLIENT: IDLE, SERVER: IDLE}
diff --git a/myenv/lib/python3.9/site-packages/h11/_util.py b/myenv/lib/python3.9/site-packages/h11/_util.py
new file mode 100644
index 0000000..eb1a5cd
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/h11/_util.py
@@ -0,0 +1,122 @@
+__all__ = [
+    "ProtocolError",
+    "LocalProtocolError",
+    "RemoteProtocolError",
+    "validate",
+    "make_sentinel",
+    "bytesify",
+]
+
+
+class ProtocolError(Exception):
+    """Exception indicating a violation of the HTTP/1.1 protocol.
+
+    This is an abstract base class, with two concrete subclasses:
+    :exc:`LocalProtocolError`, which indicates that you tried to do something
+    that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
+    indicates that the remote peer tried to do something that HTTP/1.1 says is
+    illegal. See :ref:`error-handling` for details.
+
+    In addition to the normal :exc:`Exception` features, it has one attribute:
+
+    .. attribute:: error_status_hint
+
+       This gives a suggestion as to what status code a server might use if
+       this error occurred as part of a request.
+
+       For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
+       how you might want to respond to a misbehaving peer, if you're
+       implementing a server.
+
+       For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
+       how your peer might have responded to *you* if h11 had allowed you to
+       continue.
+
+       The default is 400 Bad Request, a generic catch-all for protocol
+       violations.
+
+    """
+
+    def __init__(self, msg, error_status_hint=400):
+        if type(self) is ProtocolError:
+            raise TypeError("tried to directly instantiate ProtocolError")
+        Exception.__init__(self, msg)
+        self.error_status_hint = error_status_hint
+
+
+# Strategy: there are a number of public APIs where a LocalProtocolError can
+# be raised (send(), all the different event constructors, ...), and only one
+# public API where RemoteProtocolError can be raised
+# (receive_data()). Therefore we always raise LocalProtocolError internally,
+# and then receive_data will translate this into a RemoteProtocolError.
+#
+# Internally:
+#   LocalProtocolError is the generic "ProtocolError".
+# Externally:
+#   LocalProtocolError is for local errors and RemoteProtocolError is for
+#   remote errors.
+class LocalProtocolError(ProtocolError):
+    def _reraise_as_remote_protocol_error(self):
+        # After catching a LocalProtocolError, use this method to re-raise it
+        # as a RemoteProtocolError. This method must be called from inside an
+        # except: block.
+        #
+        # An easy way to get an equivalent RemoteProtocolError is just to
+        # modify 'self' in place.
+        self.__class__ = RemoteProtocolError
+        # But the re-raising is somewhat non-trivial -- you might think that
+        # now that we've modified the in-flight exception object, that just
+        # doing 'raise' to re-raise it would be enough. But it turns out that
+        # this doesn't work, because Python tracks the exception type
+        # (exc_info[0]) separately from the exception object (exc_info[1]),
+        # and we only modified the latter. So we really do need to re-raise
+        # the new type explicitly.
+        # On py3, the traceback is part of the exception object, so our
+        # in-place modification preserved it and we can just re-raise:
+        raise self
+
+
+class RemoteProtocolError(ProtocolError):
+    pass
+
+
+def validate(regex, data, msg="malformed data", *format_args):
+    match = regex.fullmatch(data)
+    if not match:
+        if format_args:
+            msg = msg.format(*format_args)
+        raise LocalProtocolError(msg)
+    return match.groupdict()
+
+
+# Sentinel values
+#
+# - Inherit identity-based comparison and hashing from object
+# - Have a nice repr
+# - Have a *bonus property*: type(sentinel) is sentinel
+#
+# The bonus property is useful if you want to take the return value from
+# next_event() and do some sort of dispatch based on type(event).
+class _SentinelBase(type):
+    def __repr__(self):
+        return self.__name__
+
+
+def make_sentinel(name):
+    cls = _SentinelBase(name, (_SentinelBase,), {})
+    cls.__class__ = cls
+    return cls
+
+
+# Used for methods, request targets, HTTP versions, header names, and header
+# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
+# returns bytes.
+def bytesify(s):
+    # Fast-path:
+    if type(s) is bytes:
+        return s
+    if isinstance(s, str):
+        s = s.encode("ascii")
+    if isinstance(s, int):
+        raise TypeError("expected bytes-like object, not int")
+    return bytes(s)
diff --git a/myenv/lib/python3.9/site-packages/h11/_version.py b/myenv/lib/python3.9/site-packages/h11/_version.py
new file mode 100644
index 0000000..cb5c2c3
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/h11/_version.py
@@ -0,0 +1,16 @@
+# This file must be kept very simple, because it is consumed from several
+# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
+
+# We use a simple scheme:
+#   1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
+# where the +dev versions are never released into the wild, they're just what
+# we stick into the VCS in between releases.
+#
+# This is compatible with PEP 440:
+#   http://legacy.python.org/dev/peps/pep-0440/
+# via the use of the "local suffix" "+dev", which is disallowed on index
+# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
+# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
+# 1.0.0.)
+
+__version__ = "0.12.0"
diff --git a/myenv/lib/python3.9/site-packages/h11/_writers.py b/myenv/lib/python3.9/site-packages/h11/_writers.py
new file mode 100644
index 0000000..cb5e8a8
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/h11/_writers.py
@@ -0,0 +1,123 @@
+# Code to write HTTP data
+#
+# Strategy: each writer takes an event + a write-some-bytes function, which it
+# calls.
+#
+# WRITERS is a dict describing how to pick a writer. It maps states to either:
+# - a writer
+# - or, for body writers, a dict of framing-dependent writer factories
+
+from ._events import Data, EndOfMessage
+from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
+from ._util import LocalProtocolError
+
+__all__ = ["WRITERS"]
+
+
+def write_headers(headers, write):
+    # "Since the Host field-value is critical information for handling a
+    # request, a user agent SHOULD generate Host as the first header field
+    # following the request-line."
- RFC 7230 + raw_items = headers._full_items + for raw_name, name, value in raw_items: + if name == b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + for raw_name, name, value in raw_items: + if name != b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + write(b"\r\n") + + +def write_request(request, write): + if request.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target)) + write_headers(request.headers, write) + + +# Shared between InformationalResponse and Response +def write_any_response(response, write): + if response.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + status_bytes = str(response.status_code).encode("ascii") + # We don't bother sending ascii status messages like "OK"; they're + # optional and ignored by the protocol. (But the space after the numeric + # status code is mandatory.) + # + # XX FIXME: could at least make an effort to pull out the status message + # from stdlib's http.HTTPStatus table. Or maybe just steal their enums + # (either by import or copy/paste). We already accept them as status codes + # since they're of type IntEnum < int. + write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason)) + write_headers(response.headers, write) + + +class BodyWriter: + def __call__(self, event, write): + if type(event) is Data: + self.send_data(event.data, write) + elif type(event) is EndOfMessage: + self.send_eom(event.headers, write) + else: # pragma: no cover + assert False + + +# +# These are all careful not to do anything to 'data' except call len(data) and +# write(data). This allows us to transparently pass-through funny objects, +# like placeholder objects referring to files on disk that will be sent via +# sendfile(2). +# +class ContentLengthWriter(BodyWriter): + def __init__(self, length): + self._length = length + + def send_data(self, data, write): + self._length -= len(data) + if self._length < 0: + raise LocalProtocolError("Too much data for declared Content-Length") + write(data) + + def send_eom(self, headers, write): + if self._length != 0: + raise LocalProtocolError("Too little data for declared Content-Length") + if headers: + raise LocalProtocolError("Content-Length and trailers don't mix") + + +class ChunkedWriter(BodyWriter): + def send_data(self, data, write): + # if we encoded 0-length data in the naive way, it would look like an + # end-of-message. 
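(A quick standalone check of why the guard just below exists: the naive encoding of empty data is byte-for-byte identical to the end-of-message marker:)

    assert (b"%x\r\n" % 0) + b"" + b"\r\n" == b"0\r\n\r\n"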
+ if not data: + return + write(b"%x\r\n" % len(data)) + write(data) + write(b"\r\n") + + def send_eom(self, headers, write): + write(b"0\r\n") + write_headers(headers, write) + + +class Http10Writer(BodyWriter): + def send_data(self, data, write): + write(data) + + def send_eom(self, headers, write): + if headers: + raise LocalProtocolError("can't send trailers to HTTP/1.0 client") + # no need to close the socket ourselves, that will be taken care of by + # Connection: close machinery + + +WRITERS = { + (CLIENT, IDLE): write_request, + (SERVER, IDLE): write_any_response, + (SERVER, SEND_RESPONSE): write_any_response, + SEND_BODY: { + "chunked": ChunkedWriter, + "content-length": ContentLengthWriter, + "http/1.0": Http10Writer, + }, +} diff --git a/myenv/lib/python3.9/site-packages/h11/tests/__init__.py b/myenv/lib/python3.9/site-packages/h11/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/h11/tests/data/test-file b/myenv/lib/python3.9/site-packages/h11/tests/data/test-file new file mode 100644 index 0000000..d0be0a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/data/test-file @@ -0,0 +1 @@ +92b12bc045050b55b848d37167a1a63947c364579889ce1d39788e45e9fac9e5 diff --git a/myenv/lib/python3.9/site-packages/h11/tests/helpers.py b/myenv/lib/python3.9/site-packages/h11/tests/helpers.py new file mode 100644 index 0000000..9d2cf38 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/helpers.py @@ -0,0 +1,77 @@ +from .._connection import * +from .._events import * +from .._state import * + + +def get_all_events(conn): + got_events = [] + while True: + event = conn.next_event() + if event in (NEED_DATA, PAUSED): + break + got_events.append(event) + if type(event) is ConnectionClosed: + break + return got_events + + +def receive_and_get(conn, data): + conn.receive_data(data) + return get_all_events(conn) + + +# Merges adjacent Data events, converts payloads to bytestrings, and removes +# chunk boundaries. +def normalize_data_events(in_events): + out_events = [] + for event in in_events: + if type(event) is Data: + event.data = bytes(event.data) + event.chunk_start = False + event.chunk_end = False + if out_events and type(out_events[-1]) is type(event) is Data: + out_events[-1].data += event.data + else: + out_events.append(event) + return out_events + + +# Given that we want to write tests that push some events through a Connection +# and check that its state updates appropriately... we might as make a habit +# of pushing them through two Connections with a fake network link in +# between. +class ConnectionPair: + def __init__(self): + self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)} + self.other = {CLIENT: SERVER, SERVER: CLIENT} + + @property + def conns(self): + return self.conn.values() + + # expect="match" if expect=send_events; expect=[...] 
to say what expected + def send(self, role, send_events, expect="match"): + if not isinstance(send_events, list): + send_events = [send_events] + data = b"" + closed = False + for send_event in send_events: + new_data = self.conn[role].send(send_event) + if new_data is None: + closed = True + else: + data += new_data + # send uses b"" to mean b"", and None to mean closed + # receive uses b"" to mean closed, and None to mean "try again" + # so we have to translate between the two conventions + if data: + self.conn[self.other[role]].receive_data(data) + if closed: + self.conn[self.other[role]].receive_data(b"") + got_events = get_all_events(self.conn[self.other[role]]) + if expect == "match": + expect = send_events + if not isinstance(expect, list): + expect = [expect] + assert got_events == expect + return data diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_against_stdlib_http.py b/myenv/lib/python3.9/site-packages/h11/tests/test_against_stdlib_http.py new file mode 100644 index 0000000..e6c5db4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_against_stdlib_http.py @@ -0,0 +1,111 @@ +import json +import os.path +import socket +import socketserver +import threading +from contextlib import closing, contextmanager +from http.server import SimpleHTTPRequestHandler +from urllib.request import urlopen + +import h11 + + +@contextmanager +def socket_server(handler): + httpd = socketserver.TCPServer(("127.0.0.1", 0), handler) + thread = threading.Thread( + target=httpd.serve_forever, kwargs={"poll_interval": 0.01} + ) + thread.daemon = True + try: + thread.start() + yield httpd + finally: + httpd.shutdown() + + +test_file_path = os.path.join(os.path.dirname(__file__), "data/test-file") +with open(test_file_path, "rb") as f: + test_file_data = f.read() + + +class SingleMindedRequestHandler(SimpleHTTPRequestHandler): + def translate_path(self, path): + return test_file_path + + +def test_h11_as_client(): + with socket_server(SingleMindedRequestHandler) as httpd: + with closing(socket.create_connection(httpd.server_address)) as s: + c = h11.Connection(h11.CLIENT) + + s.sendall( + c.send( + h11.Request( + method="GET", target="/foo", headers=[("Host", "localhost")] + ) + ) + ) + s.sendall(c.send(h11.EndOfMessage())) + + data = bytearray() + while True: + event = c.next_event() + print(event) + if event is h11.NEED_DATA: + # Use a small read buffer to make things more challenging + # and exercise more paths :-) + c.receive_data(s.recv(10)) + continue + if type(event) is h11.Response: + assert event.status_code == 200 + if type(event) is h11.Data: + data += event.data + if type(event) is h11.EndOfMessage: + break + assert bytes(data) == test_file_data + + +class H11RequestHandler(socketserver.BaseRequestHandler): + def handle(self): + with closing(self.request) as s: + c = h11.Connection(h11.SERVER) + request = None + while True: + event = c.next_event() + if event is h11.NEED_DATA: + # Use a small read buffer to make things more challenging + # and exercise more paths :-) + c.receive_data(s.recv(10)) + continue + if type(event) is h11.Request: + request = event + if type(event) is h11.EndOfMessage: + break + info = json.dumps( + { + "method": request.method.decode("ascii"), + "target": request.target.decode("ascii"), + "headers": { + name.decode("ascii"): value.decode("ascii") + for (name, value) in request.headers + }, + } + ) + s.sendall(c.send(h11.Response(status_code=200, headers=[]))) + s.sendall(c.send(h11.Data(data=info.encode("ascii")))) + 
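(Both tests in this file hand-roll the same receive pump; distilled into a hypothetical helper for clarity:)

    import h11

    def next_event_blocking(conn, sock):
        # Feed bytes from the socket until h11 can produce a complete event.
        while True:
            event = conn.next_event()
            if event is h11.NEED_DATA:
                conn.receive_data(sock.recv(4096))
                continue
            return event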
s.sendall(c.send(h11.EndOfMessage())) + + +def test_h11_as_server(): + with socket_server(H11RequestHandler) as httpd: + host, port = httpd.server_address + url = "http://{}:{}/some-path".format(host, port) + with closing(urlopen(url)) as f: + assert f.getcode() == 200 + data = f.read() + info = json.loads(data.decode("ascii")) + print(info) + assert info["method"] == "GET" + assert info["target"] == "/some-path" + assert "urllib" in info["headers"]["user-agent"] diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_connection.py b/myenv/lib/python3.9/site-packages/h11/tests/test_connection.py new file mode 100644 index 0000000..baadec8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_connection.py @@ -0,0 +1,1078 @@ +import pytest + +from .._connection import _body_framing, _keep_alive, Connection, NEED_DATA, PAUSED +from .._events import * +from .._state import * +from .._util import LocalProtocolError, RemoteProtocolError +from .helpers import ConnectionPair, get_all_events, receive_and_get + + +def test__keep_alive(): + assert _keep_alive( + Request(method="GET", target="/", headers=[("Host", "Example.com")]) + ) + assert not _keep_alive( + Request( + method="GET", + target="/", + headers=[("Host", "Example.com"), ("Connection", "close")], + ) + ) + assert not _keep_alive( + Request( + method="GET", + target="/", + headers=[("Host", "Example.com"), ("Connection", "a, b, cLOse, foo")], + ) + ) + assert not _keep_alive( + Request(method="GET", target="/", headers=[], http_version="1.0") + ) + + assert _keep_alive(Response(status_code=200, headers=[])) + assert not _keep_alive(Response(status_code=200, headers=[("Connection", "close")])) + assert not _keep_alive( + Response(status_code=200, headers=[("Connection", "a, b, cLOse, foo")]) + ) + assert not _keep_alive(Response(status_code=200, headers=[], http_version="1.0")) + + +def test__body_framing(): + def headers(cl, te): + headers = [] + if cl is not None: + headers.append(("Content-Length", str(cl))) + if te: + headers.append(("Transfer-Encoding", "chunked")) + return headers + + def resp(status_code=200, cl=None, te=False): + return Response(status_code=status_code, headers=headers(cl, te)) + + def req(cl=None, te=False): + h = headers(cl, te) + h += [("Host", "example.com")] + return Request(method="GET", target="/", headers=h) + + # Special cases where the headers are ignored: + for kwargs in [{}, {"cl": 100}, {"te": True}, {"cl": 100, "te": True}]: + for meth, r in [ + (b"HEAD", resp(**kwargs)), + (b"GET", resp(status_code=204, **kwargs)), + (b"GET", resp(status_code=304, **kwargs)), + ]: + assert _body_framing(meth, r) == ("content-length", (0,)) + + # Transfer-encoding + for kwargs in [{"te": True}, {"cl": 100, "te": True}]: + for meth, r in [(None, req(**kwargs)), (b"GET", resp(**kwargs))]: + assert _body_framing(meth, r) == ("chunked", ()) + + # Content-Length + for meth, r in [(None, req(cl=100)), (b"GET", resp(cl=100))]: + assert _body_framing(meth, r) == ("content-length", (100,)) + + # No headers + assert _body_framing(None, req()) == ("content-length", (0,)) + assert _body_framing(b"GET", resp()) == ("http/1.0", ()) + + +def test_Connection_basics_and_content_length(): + with pytest.raises(ValueError): + Connection("CLIENT") + + p = ConnectionPair() + assert p.conn[CLIENT].our_role is CLIENT + assert p.conn[CLIENT].their_role is SERVER + assert p.conn[SERVER].our_role is SERVER + assert p.conn[SERVER].their_role is CLIENT + + data = p.send( + CLIENT, + Request( + method="GET", + target="/", + 
headers=[("Host", "example.com"), ("Content-Length", "10")], + ), + ) + assert data == ( + b"GET / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 10\r\n\r\n" + ) + + for conn in p.conns: + assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + assert p.conn[CLIENT].our_state is SEND_BODY + assert p.conn[CLIENT].their_state is SEND_RESPONSE + assert p.conn[SERVER].our_state is SEND_RESPONSE + assert p.conn[SERVER].their_state is SEND_BODY + + assert p.conn[CLIENT].their_http_version is None + assert p.conn[SERVER].their_http_version == b"1.1" + + data = p.send(SERVER, InformationalResponse(status_code=100, headers=[])) + assert data == b"HTTP/1.1 100 \r\n\r\n" + + data = p.send(SERVER, Response(status_code=200, headers=[("Content-Length", "11")])) + assert data == b"HTTP/1.1 200 \r\nContent-Length: 11\r\n\r\n" + + for conn in p.conns: + assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY} + + assert p.conn[CLIENT].their_http_version == b"1.1" + assert p.conn[SERVER].their_http_version == b"1.1" + + data = p.send(CLIENT, Data(data=b"12345")) + assert data == b"12345" + data = p.send( + CLIENT, Data(data=b"67890"), expect=[Data(data=b"67890"), EndOfMessage()] + ) + assert data == b"67890" + data = p.send(CLIENT, EndOfMessage(), expect=[]) + assert data == b"" + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY} + + data = p.send(SERVER, Data(data=b"1234567890")) + assert data == b"1234567890" + data = p.send(SERVER, Data(data=b"1"), expect=[Data(data=b"1"), EndOfMessage()]) + assert data == b"1" + data = p.send(SERVER, EndOfMessage(), expect=[]) + assert data == b"" + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + + +def test_chunked(): + p = ConnectionPair() + + p.send( + CLIENT, + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")], + ), + ) + data = p.send(CLIENT, Data(data=b"1234567890", chunk_start=True, chunk_end=True)) + assert data == b"a\r\n1234567890\r\n" + data = p.send(CLIENT, Data(data=b"abcde", chunk_start=True, chunk_end=True)) + assert data == b"5\r\nabcde\r\n" + data = p.send(CLIENT, Data(data=b""), expect=[]) + assert data == b"" + data = p.send(CLIENT, EndOfMessage(headers=[("hello", "there")])) + assert data == b"0\r\nhello: there\r\n\r\n" + + p.send( + SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")]) + ) + p.send(SERVER, Data(data=b"54321", chunk_start=True, chunk_end=True)) + p.send(SERVER, Data(data=b"12345", chunk_start=True, chunk_end=True)) + p.send(SERVER, EndOfMessage()) + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + + +def test_chunk_boundaries(): + conn = Connection(our_role=SERVER) + + request = ( + b"POST / HTTP/1.1\r\n" + b"Host: example.com\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + ) + conn.receive_data(request) + assert conn.next_event() == Request( + method="POST", + target="/", + headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")], + ) + assert conn.next_event() is NEED_DATA + + conn.receive_data(b"5\r\nhello\r\n") + assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True) + + conn.receive_data(b"5\r\nhel") + assert conn.next_event() == Data(data=b"hel", chunk_start=True, chunk_end=False) + + conn.receive_data(b"l") + assert conn.next_event() == Data(data=b"l", chunk_start=False, chunk_end=False) + + conn.receive_data(b"o\r\n") + assert conn.next_event() == Data(data=b"o", chunk_start=False, 
chunk_end=True) + + conn.receive_data(b"5\r\nhello") + assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True) + + conn.receive_data(b"\r\n") + assert conn.next_event() == NEED_DATA + + conn.receive_data(b"0\r\n\r\n") + assert conn.next_event() == EndOfMessage() + + +def test_client_talking_to_http10_server(): + c = Connection(CLIENT) + c.send(Request(method="GET", target="/", headers=[("Host", "example.com")])) + c.send(EndOfMessage()) + assert c.our_state is DONE + # No content-length, so Http10 framing for body + assert receive_and_get(c, b"HTTP/1.0 200 OK\r\n\r\n") == [ + Response(status_code=200, headers=[], http_version="1.0", reason=b"OK") + ] + assert c.our_state is MUST_CLOSE + assert receive_and_get(c, b"12345") == [Data(data=b"12345")] + assert receive_and_get(c, b"67890") == [Data(data=b"67890")] + assert receive_and_get(c, b"") == [EndOfMessage(), ConnectionClosed()] + assert c.their_state is CLOSED + + +def test_server_talking_to_http10_client(): + c = Connection(SERVER) + # No content-length, so no body + # NB: no host header + assert receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") == [ + Request(method="GET", target="/", headers=[], http_version="1.0"), + EndOfMessage(), + ] + assert c.their_state is MUST_CLOSE + + # We automatically Connection: close back at them + assert ( + c.send(Response(status_code=200, headers=[])) + == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n" + ) + + assert c.send(Data(data=b"12345")) == b"12345" + assert c.send(EndOfMessage()) == b"" + assert c.our_state is MUST_CLOSE + + # Check that it works if they do send Content-Length + c = Connection(SERVER) + # NB: no host header + assert receive_and_get(c, b"POST / HTTP/1.0\r\nContent-Length: 10\r\n\r\n1") == [ + Request( + method="POST", + target="/", + headers=[("Content-Length", "10")], + http_version="1.0", + ), + Data(data=b"1"), + ] + assert receive_and_get(c, b"234567890") == [Data(data=b"234567890"), EndOfMessage()] + assert c.their_state is MUST_CLOSE + assert receive_and_get(c, b"") == [ConnectionClosed()] + + +def test_automatic_transfer_encoding_in_response(): + # Check that in responses, the user can specify either Transfer-Encoding: + # chunked or no framing at all, and in both cases we automatically select + # the right option depending on whether the peer speaks HTTP/1.0 or + # HTTP/1.1 + for user_headers in [ + [("Transfer-Encoding", "chunked")], + [], + # In fact, this even works if Content-Length is set, + # because if both are set then Transfer-Encoding wins + [("Transfer-Encoding", "chunked"), ("Content-Length", "100")], + ]: + p = ConnectionPair() + p.send( + CLIENT, + [ + Request(method="GET", target="/", headers=[("Host", "example.com")]), + EndOfMessage(), + ], + ) + # When speaking to HTTP/1.1 client, all of the above cases get + # normalized to Transfer-Encoding: chunked + p.send( + SERVER, + Response(status_code=200, headers=user_headers), + expect=Response( + status_code=200, headers=[("Transfer-Encoding", "chunked")] + ), + ) + + # When speaking to HTTP/1.0 client, all of the above cases get + # normalized to no-framing-headers + c = Connection(SERVER) + receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") + assert ( + c.send(Response(status_code=200, headers=user_headers)) + == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n" + ) + assert c.send(Data(data=b"12345")) == b"12345" + + +def test_automagic_connection_close_handling(): + p = ConnectionPair() + # If the user explicitly sets Connection: close, then we notice and + # respect it + p.send( + CLIENT, + 
[ + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Connection", "close")], + ), + EndOfMessage(), + ], + ) + for conn in p.conns: + assert conn.states[CLIENT] is MUST_CLOSE + # And if the client sets it, the server automatically echoes it back + p.send( + SERVER, + # no header here... + [Response(status_code=204, headers=[]), EndOfMessage()], + # ...but oh look, it arrived anyway + expect=[ + Response(status_code=204, headers=[("connection", "close")]), + EndOfMessage(), + ], + ) + for conn in p.conns: + assert conn.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE} + + +def test_100_continue(): + def setup(): + p = ConnectionPair() + p.send( + CLIENT, + Request( + method="GET", + target="/", + headers=[ + ("Host", "example.com"), + ("Content-Length", "100"), + ("Expect", "100-continue"), + ], + ), + ) + for conn in p.conns: + assert conn.client_is_waiting_for_100_continue + assert not p.conn[CLIENT].they_are_waiting_for_100_continue + assert p.conn[SERVER].they_are_waiting_for_100_continue + return p + + # Disabled by 100 Continue + p = setup() + p.send(SERVER, InformationalResponse(status_code=100, headers=[])) + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + # Disabled by a real response + p = setup() + p.send( + SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")]) + ) + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + # Disabled by the client going ahead and sending stuff anyway + p = setup() + p.send(CLIENT, Data(data=b"12345")) + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + +def test_max_incomplete_event_size_countermeasure(): + # Infinitely long headers are definitely not okay + c = Connection(SERVER) + c.receive_data(b"GET / HTTP/1.0\r\nEndless: ") + assert c.next_event() is NEED_DATA + with pytest.raises(RemoteProtocolError): + while True: + c.receive_data(b"a" * 1024) + c.next_event() + + # Checking that the same header is accepted / rejected depending on the + # max_incomplete_event_size setting: + c = Connection(SERVER, max_incomplete_event_size=5000) + c.receive_data(b"GET / HTTP/1.0\r\nBig: ") + c.receive_data(b"a" * 4000) + c.receive_data(b"\r\n\r\n") + assert get_all_events(c) == [ + Request( + method="GET", target="/", http_version="1.0", headers=[("big", "a" * 4000)] + ), + EndOfMessage(), + ] + + c = Connection(SERVER, max_incomplete_event_size=4000) + c.receive_data(b"GET / HTTP/1.0\r\nBig: ") + c.receive_data(b"a" * 4000) + with pytest.raises(RemoteProtocolError): + c.next_event() + + # Temporarily exceeding the size limit is fine, as long as its done with + # complete events: + c = Connection(SERVER, max_incomplete_event_size=5000) + c.receive_data(b"GET / HTTP/1.0\r\nContent-Length: 10000") + c.receive_data(b"\r\n\r\n" + b"a" * 10000) + assert get_all_events(c) == [ + Request( + method="GET", + target="/", + http_version="1.0", + headers=[("Content-Length", "10000")], + ), + Data(data=b"a" * 10000), + EndOfMessage(), + ] + + c = Connection(SERVER, max_incomplete_event_size=100) + # Two pipelined requests to create a way-too-big receive buffer... 
but + # it's fine because we're not checking + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a\r\n\r\n" + b"GET /2 HTTP/1.1\r\nHost: b\r\n\r\n" + b"X" * 1000 + ) + assert get_all_events(c) == [ + Request(method="GET", target="/1", headers=[("host", "a")]), + EndOfMessage(), + ] + # Even more data comes in, still no problem + c.receive_data(b"X" * 1000) + # We can respond and reuse to get the second pipelined request + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + c.start_next_cycle() + assert get_all_events(c) == [ + Request(method="GET", target="/2", headers=[("host", "b")]), + EndOfMessage(), + ] + # But once we unpause and try to read the next message, and find that it's + # incomplete and the buffer is *still* way too large, then *that's* a + # problem: + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + c.start_next_cycle() + with pytest.raises(RemoteProtocolError): + c.next_event() + + +def test_reuse_simple(): + p = ConnectionPair() + p.send( + CLIENT, + [Request(method="GET", target="/", headers=[("Host", "a")]), EndOfMessage()], + ) + p.send(SERVER, [Response(status_code=200, headers=[]), EndOfMessage()]) + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + conn.start_next_cycle() + + p.send( + CLIENT, + [ + Request(method="DELETE", target="/foo", headers=[("Host", "a")]), + EndOfMessage(), + ], + ) + p.send(SERVER, [Response(status_code=404, headers=[]), EndOfMessage()]) + + +def test_pipelining(): + # Client doesn't support pipelining, so we have to do this by hand + c = Connection(SERVER) + assert c.next_event() is NEED_DATA + # 3 requests all bunched up + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"12345" + b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"67890" + b"GET /3 HTTP/1.1\r\nHost: a.com\r\n\r\n" + ) + assert get_all_events(c) == [ + Request( + method="GET", + target="/1", + headers=[("Host", "a.com"), ("Content-Length", "5")], + ), + Data(data=b"12345"), + EndOfMessage(), + ] + assert c.their_state is DONE + assert c.our_state is SEND_RESPONSE + + assert c.next_event() is PAUSED + + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + assert c.their_state is DONE + assert c.our_state is DONE + + c.start_next_cycle() + + assert get_all_events(c) == [ + Request( + method="GET", + target="/2", + headers=[("Host", "a.com"), ("Content-Length", "5")], + ), + Data(data=b"67890"), + EndOfMessage(), + ] + assert c.next_event() is PAUSED + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + c.start_next_cycle() + + assert get_all_events(c) == [ + Request(method="GET", target="/3", headers=[("Host", "a.com")]), + EndOfMessage(), + ] + # Doesn't pause this time, no trailing data + assert c.next_event() is NEED_DATA + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + + # Arrival of more data triggers pause + assert c.next_event() is NEED_DATA + c.receive_data(b"SADF") + assert c.next_event() is PAUSED + assert c.trailing_data == (b"SADF", False) + # If EOF arrives while paused, we don't see that either: + c.receive_data(b"") + assert c.trailing_data == (b"SADF", True) + assert c.next_event() is PAUSED + c.receive_data(b"") + assert c.next_event() is PAUSED + # Can't call receive_data with non-empty buf after closing it + with pytest.raises(RuntimeError): + c.receive_data(b"FDSA") + + +def test_protocol_switch(): + for (req, deny, accept) in [ + ( + Request( + method="CONNECT", + 
target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1")], + ), + Response(status_code=404, headers=[]), + Response(status_code=200, headers=[]), + ), + ( + Request( + method="GET", + target="/", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=200, headers=[]), + InformationalResponse(status_code=101, headers=[("Upgrade", "a")]), + ), + ( + Request( + method="CONNECT", + target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=404, headers=[]), + # Accept CONNECT, not upgrade + Response(status_code=200, headers=[]), + ), + ( + Request( + method="CONNECT", + target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=404, headers=[]), + # Accept Upgrade, not CONNECT + InformationalResponse(status_code=101, headers=[("Upgrade", "b")]), + ), + ]: + + def setup(): + p = ConnectionPair() + p.send(CLIENT, req) + # No switch-related state change stuff yet; the client has to + # finish the request before that kicks in + for conn in p.conns: + assert conn.states[CLIENT] is SEND_BODY + p.send(CLIENT, [Data(data=b"1"), EndOfMessage()]) + for conn in p.conns: + assert conn.states[CLIENT] is MIGHT_SWITCH_PROTOCOL + assert p.conn[SERVER].next_event() is PAUSED + return p + + # Test deny case + p = setup() + p.send(SERVER, deny) + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY} + p.send(SERVER, EndOfMessage()) + # Check that re-use is still allowed after a denial + for conn in p.conns: + conn.start_next_cycle() + + # Test accept case + p = setup() + p.send(SERVER, accept) + for conn in p.conns: + assert conn.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} + conn.receive_data(b"123") + assert conn.next_event() is PAUSED + conn.receive_data(b"456") + assert conn.next_event() is PAUSED + assert conn.trailing_data == (b"123456", False) + + # Pausing in might-switch, then recovery + # (weird artificial case where the trailing data actually is valid + # HTTP for some reason, because this makes it easier to test the state + # logic) + p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"GET / HTTP/1.0\r\n\r\n") + assert sc.next_event() is PAUSED + assert sc.trailing_data == (b"GET / HTTP/1.0\r\n\r\n", False) + sc.send(deny) + assert sc.next_event() is PAUSED + sc.send(EndOfMessage()) + sc.start_next_cycle() + assert get_all_events(sc) == [ + Request(method="GET", target="/", headers=[], http_version="1.0"), + EndOfMessage(), + ] + + # When we're DONE, have no trailing data, and the connection gets + # closed, we report ConnectionClosed(). When we're in might-switch or + # switched, we don't. 
+ p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"") + assert sc.next_event() is PAUSED + assert sc.trailing_data == (b"", True) + p.send(SERVER, accept) + assert sc.next_event() is PAUSED + + p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"") == [] + assert sc.next_event() is PAUSED + sc.send(deny) + assert sc.next_event() == ConnectionClosed() + + # You can't send after switching protocols, or while waiting for a + # protocol switch + p = setup() + with pytest.raises(LocalProtocolError): + p.conn[CLIENT].send( + Request(method="GET", target="/", headers=[("Host", "a")]) + ) + p = setup() + p.send(SERVER, accept) + with pytest.raises(LocalProtocolError): + p.conn[SERVER].send(Data(data=b"123")) + + +def test_close_simple(): + # Just immediately closing a new connection without anything having + # happened yet. + for (who_shot_first, who_shot_second) in [(CLIENT, SERVER), (SERVER, CLIENT)]: + + def setup(): + p = ConnectionPair() + p.send(who_shot_first, ConnectionClosed()) + for conn in p.conns: + assert conn.states == { + who_shot_first: CLOSED, + who_shot_second: MUST_CLOSE, + } + return p + + # You can keep putting b"" into a closed connection, and you keep + # getting ConnectionClosed() out: + p = setup() + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + p.conn[who_shot_second].receive_data(b"") + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + # Second party can close... + p = setup() + p.send(who_shot_second, ConnectionClosed()) + for conn in p.conns: + assert conn.our_state is CLOSED + assert conn.their_state is CLOSED + # But trying to receive new data on a closed connection is a + # RuntimeError (not ProtocolError, because the problem here isn't + # violation of HTTP, it's violation of physics) + p = setup() + with pytest.raises(RuntimeError): + p.conn[who_shot_second].receive_data(b"123") + # And receiving new data on a MUST_CLOSE connection is a ProtocolError + p = setup() + p.conn[who_shot_first].receive_data(b"GET") + with pytest.raises(RemoteProtocolError): + p.conn[who_shot_first].next_event() + + +def test_close_different_states(): + req = [ + Request(method="GET", target="/foo", headers=[("Host", "a")]), + EndOfMessage(), + ] + resp = [Response(status_code=200, headers=[]), EndOfMessage()] + + # Client before request + p = ConnectionPair() + p.send(CLIENT, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE} + + # Client after request + p = ConnectionPair() + p.send(CLIENT, req) + p.send(CLIENT, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE} + + # Server after request -> not allowed + p = ConnectionPair() + p.send(CLIENT, req) + with pytest.raises(LocalProtocolError): + p.conn[SERVER].send(ConnectionClosed()) + p.conn[CLIENT].receive_data(b"") + with pytest.raises(RemoteProtocolError): + p.conn[CLIENT].next_event() + + # Server after response + p = ConnectionPair() + p.send(CLIENT, req) + p.send(SERVER, resp) + p.send(SERVER, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED} + + # Both after closing (ConnectionClosed() is idempotent) + p = ConnectionPair() + p.send(CLIENT, req) + p.send(SERVER, resp) + p.send(CLIENT, ConnectionClosed()) + p.send(SERVER, ConnectionClosed()) + p.send(CLIENT, ConnectionClosed()) + p.send(SERVER, ConnectionClosed()) + + # In the middle of sending -> not allowed + 
p = ConnectionPair() + p.send( + CLIENT, + Request( + method="GET", target="/", headers=[("Host", "a"), ("Content-Length", "10")] + ), + ) + with pytest.raises(LocalProtocolError): + p.conn[CLIENT].send(ConnectionClosed()) + p.conn[SERVER].receive_data(b"") + with pytest.raises(RemoteProtocolError): + p.conn[SERVER].next_event() + + +# Receive several requests and then client shuts down their side of the +# connection; we can respond to each +def test_pipelined_close(): + c = Connection(SERVER) + # 2 requests then a close + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"12345" + b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"67890" + ) + c.receive_data(b"") + assert get_all_events(c) == [ + Request( + method="GET", + target="/1", + headers=[("host", "a.com"), ("content-length", "5")], + ), + Data(data=b"12345"), + EndOfMessage(), + ] + assert c.states[CLIENT] is DONE + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + assert c.states[SERVER] is DONE + c.start_next_cycle() + assert get_all_events(c) == [ + Request( + method="GET", + target="/2", + headers=[("host", "a.com"), ("content-length", "5")], + ), + Data(data=b"67890"), + EndOfMessage(), + ConnectionClosed(), + ] + assert c.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE} + c.send(Response(status_code=200, headers=[])) + c.send(EndOfMessage()) + assert c.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE} + c.send(ConnectionClosed()) + assert c.states == {CLIENT: CLOSED, SERVER: CLOSED} + + +def test_sendfile(): + class SendfilePlaceholder: + def __len__(self): + return 10 + + placeholder = SendfilePlaceholder() + + def setup(header, http_version): + c = Connection(SERVER) + receive_and_get( + c, "GET / HTTP/{}\r\nHost: a\r\n\r\n".format(http_version).encode("ascii") + ) + headers = [] + if header: + headers.append(header) + c.send(Response(status_code=200, headers=headers)) + return c, c.send_with_data_passthrough(Data(data=placeholder)) + + c, data = setup(("Content-Length", "10"), "1.1") + assert data == [placeholder] + # Raises an error if the connection object doesn't think we've sent + # exactly 10 bytes + c.send(EndOfMessage()) + + _, data = setup(("Transfer-Encoding", "chunked"), "1.1") + assert placeholder in data + data[data.index(placeholder)] = b"x" * 10 + assert b"".join(data) == b"a\r\nxxxxxxxxxx\r\n" + + c, data = setup(None, "1.0") + assert data == [placeholder] + assert c.our_state is SEND_BODY + + +def test_errors(): + # After a receive error, you can't receive + for role in [CLIENT, SERVER]: + c = Connection(our_role=role) + c.receive_data(b"gibberish\r\n\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + # Now any attempt to receive continues to raise + assert c.their_state is ERROR + assert c.our_state is not ERROR + print(c._cstate.states) + with pytest.raises(RemoteProtocolError): + c.next_event() + # But we can still yell at the client for sending us gibberish + if role is SERVER: + assert ( + c.send(Response(status_code=400, headers=[])) + == b"HTTP/1.1 400 \r\nConnection: close\r\n\r\n" + ) + + # After an error sending, you can no longer send + # (This is especially important for things like content-length errors, + # where there's complex internal state being modified) + def conn(role): + c = Connection(our_role=role) + if role is SERVER: + # Put it into the state where it *could* send a response... 
+ receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") + assert c.our_state is SEND_RESPONSE + return c + + for role in [CLIENT, SERVER]: + if role is CLIENT: + # This HTTP/1.0 request won't be detected as bad until after we go + # through the state machine and hit the writing code + good = Request(method="GET", target="/", headers=[("Host", "example.com")]) + bad = Request( + method="GET", + target="/", + headers=[("Host", "example.com")], + http_version="1.0", + ) + elif role is SERVER: + good = Response(status_code=200, headers=[]) + bad = Response(status_code=200, headers=[], http_version="1.0") + # Make sure 'good' actually is good + c = conn(role) + c.send(good) + assert c.our_state is not ERROR + # Do that again, but this time sending 'bad' first + c = conn(role) + with pytest.raises(LocalProtocolError): + c.send(bad) + assert c.our_state is ERROR + assert c.their_state is not ERROR + # Now 'good' is not so good + with pytest.raises(LocalProtocolError): + c.send(good) + + # And check send_failed() too + c = conn(role) + c.send_failed() + assert c.our_state is ERROR + assert c.their_state is not ERROR + # This is idempotent + c.send_failed() + assert c.our_state is ERROR + assert c.their_state is not ERROR + + +def test_idle_receive_nothing(): + # At one point this incorrectly raised an error + for role in [CLIENT, SERVER]: + c = Connection(role) + assert c.next_event() is NEED_DATA + + +def test_connection_drop(): + c = Connection(SERVER) + c.receive_data(b"GET /") + assert c.next_event() is NEED_DATA + c.receive_data(b"") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +def test_408_request_timeout(): + # Should be able to send this spontaneously as a server without seeing + # anything from client + p = ConnectionPair() + p.send(SERVER, Response(status_code=408, headers=[])) + + +# This used to raise IndexError +def test_empty_request(): + c = Connection(SERVER) + c.receive_data(b"\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +# This used to raise IndexError +def test_empty_response(): + c = Connection(CLIENT) + c.send(Request(method="GET", target="/", headers=[("Host", "a")])) + c.receive_data(b"\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +@pytest.mark.parametrize( + "data", + [ + b"\x00", + b"\x20", + b"\x16\x03\x01\x00\xa5", # Typical start of a TLS Client Hello + ], +) +def test_early_detection_of_invalid_request(data): + c = Connection(SERVER) + # Early detection should occur before even receiving a `\r\n` + c.receive_data(data) + with pytest.raises(RemoteProtocolError): + c.next_event() + + +@pytest.mark.parametrize( + "data", + [ + b"\x00", + b"\x20", + b"\x16\x03\x03\x00\x31", # Typical start of a TLS Server Hello + ], +) +def test_early_detection_of_invalid_response(data): + c = Connection(CLIENT) + # Early detection should occur before even receiving a `\r\n` + c.receive_data(data) + with pytest.raises(RemoteProtocolError): + c.next_event() + + +# This used to give different headers for HEAD and GET. +# The correct way to handle HEAD is to put whatever headers we *would* have +# put if it were a GET -- even though we know that for HEAD, those headers +# will be ignored. 
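+# (Per RFC 7231 §4.3.2, a server SHOULD send the same header fields in
+# response to HEAD as it would have sent for the equivalent GET, minus the
+# message body.)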
+def test_HEAD_framing_headers(): + def setup(method, http_version): + c = Connection(SERVER) + c.receive_data( + method + b" / HTTP/" + http_version + b"\r\n" + b"Host: example.com\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert type(c.next_event()) is EndOfMessage + return c + + for method in [b"GET", b"HEAD"]: + # No Content-Length, HTTP/1.1 peer, should use chunked + c = setup(method, b"1.1") + assert ( + c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" + b"Transfer-Encoding: chunked\r\n\r\n" + ) + + # No Content-Length, HTTP/1.0 peer, frame with connection: close + c = setup(method, b"1.0") + assert ( + c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" + b"Connection: close\r\n\r\n" + ) + + # Content-Length + Transfer-Encoding, TE wins + c = setup(method, b"1.1") + assert ( + c.send( + Response( + status_code=200, + headers=[ + ("Content-Length", "100"), + ("Transfer-Encoding", "chunked"), + ], + ) + ) + == b"HTTP/1.1 200 \r\n" + b"Transfer-Encoding: chunked\r\n\r\n" + ) + + +def test_special_exceptions_for_lost_connection_in_message_body(): + c = Connection(SERVER) + c.receive_data( + b"POST / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 100\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert c.next_event() is NEED_DATA + c.receive_data(b"12345") + assert c.next_event() == Data(data=b"12345") + c.receive_data(b"") + with pytest.raises(RemoteProtocolError) as excinfo: + c.next_event() + assert "received 5 bytes" in str(excinfo.value) + assert "expected 100" in str(excinfo.value) + + c = Connection(SERVER) + c.receive_data( + b"POST / HTTP/1.1\r\n" + b"Host: example.com\r\n" + b"Transfer-Encoding: chunked\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert c.next_event() is NEED_DATA + c.receive_data(b"8\r\n012345") + assert c.next_event().data == b"012345" + c.receive_data(b"") + with pytest.raises(RemoteProtocolError) as excinfo: + c.next_event() + assert "incomplete chunked read" in str(excinfo.value) diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_events.py b/myenv/lib/python3.9/site-packages/h11/tests/test_events.py new file mode 100644 index 0000000..e20f741 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_events.py @@ -0,0 +1,179 @@ +from http import HTTPStatus + +import pytest + +from .. 
import _events +from .._events import * +from .._util import LocalProtocolError + + +def test_event_bundle(): + class T(_events._EventBundle): + _fields = ["a", "b"] + _defaults = {"b": 1} + + def _validate(self): + if self.a == 0: + raise ValueError + + # basic construction and methods + t = T(a=1, b=0) + assert repr(t) == "T(a=1, b=0)" + assert t == T(a=1, b=0) + assert not (t == T(a=2, b=0)) + assert not (t != T(a=1, b=0)) + assert t != T(a=2, b=0) + with pytest.raises(TypeError): + hash(t) + + # check defaults + t = T(a=10) + assert t.a == 10 + assert t.b == 1 + + # no positional args + with pytest.raises(TypeError): + T(1) + + with pytest.raises(TypeError): + T(1, a=1, b=0) + + # unknown field + with pytest.raises(TypeError): + T(a=1, b=0, c=10) + + # missing required field + with pytest.raises(TypeError) as exc: + T(b=0) + # make sure we error on the right missing kwarg + assert "kwarg a" in str(exc.value) + + # _validate is called + with pytest.raises(ValueError): + T(a=0, b=0) + + +def test_events(): + with pytest.raises(LocalProtocolError): + # Missing Host: + req = Request( + method="GET", target="/", headers=[("a", "b")], http_version="1.1" + ) + # But this is okay (HTTP/1.0) + req = Request(method="GET", target="/", headers=[("a", "b")], http_version="1.0") + # fields are normalized + assert req.method == b"GET" + assert req.target == b"/" + assert req.headers == [(b"a", b"b")] + assert req.http_version == b"1.0" + + # This is also okay -- has a Host (with weird capitalization, which is ok) + req = Request( + method="GET", + target="/", + headers=[("a", "b"), ("hOSt", "example.com")], + http_version="1.1", + ) + # we normalize header capitalization + assert req.headers == [(b"a", b"b"), (b"host", b"example.com")] + + # Multiple host is bad too + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Host", "a")], + http_version="1.1", + ) + # Even for HTTP/1.0 + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Host", "a")], + http_version="1.0", + ) + + # Header values are validated + for bad_char in "\x00\r\n\f\v": + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Foo", "asd" + bad_char)], + http_version="1.0", + ) + + # But for compatibility we allow non-whitespace control characters, even + # though they're forbidden by the spec. 
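+    # (RFC 7230 only allows visible ASCII, SP, HTAB, and obs-text in field
+    # values; bytes like \x01, \x02, and \x7f fall outside that grammar.)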
+ Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Foo", "asd\x01\x02\x7f")], + http_version="1.0", + ) + + # Request target is validated + for bad_char in b"\x00\x20\x7f\xee": + target = bytearray(b"/") + target.append(bad_char) + with pytest.raises(LocalProtocolError): + Request( + method="GET", target=target, headers=[("Host", "a")], http_version="1.1" + ) + + ir = InformationalResponse(status_code=100, headers=[("Host", "a")]) + assert ir.status_code == 100 + assert ir.headers == [(b"host", b"a")] + assert ir.http_version == b"1.1" + + with pytest.raises(LocalProtocolError): + InformationalResponse(status_code=200, headers=[("Host", "a")]) + + resp = Response(status_code=204, headers=[], http_version="1.0") + assert resp.status_code == 204 + assert resp.headers == [] + assert resp.http_version == b"1.0" + + with pytest.raises(LocalProtocolError): + resp = Response(status_code=100, headers=[], http_version="1.0") + + with pytest.raises(LocalProtocolError): + Response(status_code="100", headers=[], http_version="1.0") + + with pytest.raises(LocalProtocolError): + InformationalResponse(status_code=b"100", headers=[], http_version="1.0") + + d = Data(data=b"asdf") + assert d.data == b"asdf" + + eom = EndOfMessage() + assert eom.headers == [] + + cc = ConnectionClosed() + assert repr(cc) == "ConnectionClosed()" + + +def test_intenum_status_code(): + # https://github.com/python-hyper/h11/issues/72 + + r = Response(status_code=HTTPStatus.OK, headers=[], http_version="1.0") + assert r.status_code == HTTPStatus.OK + assert type(r.status_code) is not type(HTTPStatus.OK) + assert type(r.status_code) is int + + +def test_header_casing(): + r = Request( + method="GET", + target="/", + headers=[("Host", "example.org"), ("Connection", "keep-alive")], + http_version="1.1", + ) + assert len(r.headers) == 2 + assert r.headers[0] == (b"host", b"example.org") + assert r.headers == [(b"host", b"example.org"), (b"connection", b"keep-alive")] + assert r.headers.raw_items() == [ + (b"Host", b"example.org"), + (b"Connection", b"keep-alive"), + ] diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_headers.py b/myenv/lib/python3.9/site-packages/h11/tests/test_headers.py new file mode 100644 index 0000000..ff3dc8d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_headers.py @@ -0,0 +1,151 @@ +import pytest + +from .._headers import * + + +def test_normalize_and_validate(): + assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")] + assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")] + + # no leading/trailing whitespace in names + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo ", "bar")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b" foo", "bar")]) + + # no weird characters in names + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([(b"foo bar", b"baz")]) + assert "foo bar" in str(excinfo.value) + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo\x00bar", b"baz")]) + # Not even 8-bit characters: + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo\xffbar", b"baz")]) + # And not even the control characters we allow in values: + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo\x01bar", b"baz")]) + + # no return or NUL characters in values + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([("foo", "bar\rbaz")]) + assert "bar\\rbaz" in str(excinfo.value) + with 
pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "bar\nbaz")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "bar\x00baz")]) + # no leading/trailing whitespace + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "barbaz ")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", " barbaz")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "barbaz\t")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "\tbarbaz")]) + + # content-length + assert normalize_and_validate([("Content-Length", "1")]) == [ + (b"content-length", b"1") + ] + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "asdf")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1x")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")]) + assert normalize_and_validate( + [("Content-Length", "0"), ("Content-Length", "0")] + ) == [(b"content-length", b"0")] + assert normalize_and_validate([("Content-Length", "0 , 0")]) == [ + (b"content-length", b"0") + ] + with pytest.raises(LocalProtocolError): + normalize_and_validate( + [("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")] + ) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1 , 1,2")]) + + # transfer-encoding + assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [ + (b"transfer-encoding", b"chunked") + ] + assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [ + (b"transfer-encoding", b"chunked") + ] + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([("Transfer-Encoding", "gzip")]) + assert excinfo.value.error_status_hint == 501 # Not Implemented + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate( + [("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")] + ) + assert excinfo.value.error_status_hint == 501 # Not Implemented + + +def test_get_set_comma_header(): + headers = normalize_and_validate( + [ + ("Connection", "close"), + ("whatever", "something"), + ("connectiON", "fOo,, , BAR"), + ] + ) + + assert get_comma_header(headers, b"connection") == [b"close", b"foo", b"bar"] + + headers = set_comma_header(headers, b"newthing", ["a", "b"]) + + with pytest.raises(LocalProtocolError): + set_comma_header(headers, b"newthing", [" a", "b"]) + + assert headers == [ + (b"connection", b"close"), + (b"whatever", b"something"), + (b"connection", b"fOo,, , BAR"), + (b"newthing", b"a"), + (b"newthing", b"b"), + ] + + headers = set_comma_header(headers, b"whatever", ["different thing"]) + + assert headers == [ + (b"connection", b"close"), + (b"connection", b"fOo,, , BAR"), + (b"newthing", b"a"), + (b"newthing", b"b"), + (b"whatever", b"different thing"), + ] + + +def test_has_100_continue(): + from .._events import Request + + assert has_expect_100_continue( + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Expect", "100-continue")], + ) + ) + assert not has_expect_100_continue( + Request(method="GET", target="/", headers=[("Host", "example.com")]) + ) + # Case insensitive + assert has_expect_100_continue( + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Expect", "100-Continue")], + ) + ) + # Doesn't work in HTTP/1.0 + assert not has_expect_100_continue( + Request( + method="GET", + 
target="/", + headers=[("Host", "example.com"), ("Expect", "100-continue")], + http_version="1.0", + ) + ) diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_helpers.py b/myenv/lib/python3.9/site-packages/h11/tests/test_helpers.py new file mode 100644 index 0000000..1477947 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_helpers.py @@ -0,0 +1,23 @@ +from .helpers import * + + +def test_normalize_data_events(): + assert normalize_data_events( + [ + Data(data=bytearray(b"1")), + Data(data=b"2"), + Response(status_code=200, headers=[]), + Data(data=b"3"), + Data(data=b"4"), + EndOfMessage(), + Data(data=b"5"), + Data(data=b"6"), + Data(data=b"7"), + ] + ) == [ + Data(data=b"12"), + Response(status_code=200, headers=[]), + Data(data=b"34"), + EndOfMessage(), + Data(data=b"567"), + ] diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_io.py b/myenv/lib/python3.9/site-packages/h11/tests/test_io.py new file mode 100644 index 0000000..459a627 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_io.py @@ -0,0 +1,544 @@ +import pytest + +from .._events import * +from .._headers import Headers, normalize_and_validate +from .._readers import ( + _obsolete_line_fold, + ChunkedReader, + ContentLengthReader, + Http10Reader, + READERS, +) +from .._receivebuffer import ReceiveBuffer +from .._state import * +from .._util import LocalProtocolError +from .._writers import ( + ChunkedWriter, + ContentLengthWriter, + Http10Writer, + write_any_response, + write_headers, + write_request, + WRITERS, +) +from .helpers import normalize_data_events + +SIMPLE_CASES = [ + ( + (CLIENT, IDLE), + Request( + method="GET", + target="/a", + headers=[("Host", "foo"), ("Connection", "close")], + ), + b"GET /a HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + Response(status_code=200, headers=[("Connection", "close")], reason=b"OK"), + b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + Response(status_code=200, headers=[], reason=b"OK"), + b"HTTP/1.1 200 OK\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + InformationalResponse( + status_code=101, headers=[("Upgrade", "websocket")], reason=b"Upgrade" + ), + b"HTTP/1.1 101 Upgrade\r\nUpgrade: websocket\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + InformationalResponse(status_code=101, headers=[], reason=b"Upgrade"), + b"HTTP/1.1 101 Upgrade\r\n\r\n", + ), +] + + +def dowrite(writer, obj): + got_list = [] + writer(obj, got_list.append) + return b"".join(got_list) + + +def tw(writer, obj, expected): + got = dowrite(writer, obj) + assert got == expected + + +def makebuf(data): + buf = ReceiveBuffer() + buf += data + return buf + + +def tr(reader, data, expected): + def check(got): + assert got == expected + # Headers should always be returned as bytes, not e.g. 
bytearray + # https://github.com/python-hyper/wsproto/pull/54#issuecomment-377709478 + for name, value in getattr(got, "headers", []): + print(name, value) + assert type(name) is bytes + assert type(value) is bytes + + # Simple: consume whole thing + buf = makebuf(data) + check(reader(buf)) + assert not buf + + # Incrementally growing buffer + buf = ReceiveBuffer() + for i in range(len(data)): + assert reader(buf) is None + buf += data[i : i + 1] + check(reader(buf)) + + # Trailing data + buf = makebuf(data) + buf += b"trailing" + check(reader(buf)) + assert bytes(buf) == b"trailing" + + +def test_writers_simple(): + for ((role, state), event, binary) in SIMPLE_CASES: + tw(WRITERS[role, state], event, binary) + + +def test_readers_simple(): + for ((role, state), event, binary) in SIMPLE_CASES: + tr(READERS[role, state], binary, event) + + +def test_writers_unusual(): + # Simple test of the write_headers utility routine + tw( + write_headers, + normalize_and_validate([("foo", "bar"), ("baz", "quux")]), + b"foo: bar\r\nbaz: quux\r\n\r\n", + ) + tw(write_headers, Headers([]), b"\r\n") + + # We understand HTTP/1.0, but we don't speak it + with pytest.raises(LocalProtocolError): + tw( + write_request, + Request( + method="GET", + target="/", + headers=[("Host", "foo"), ("Connection", "close")], + http_version="1.0", + ), + None, + ) + with pytest.raises(LocalProtocolError): + tw( + write_any_response, + Response( + status_code=200, headers=[("Connection", "close")], http_version="1.0" + ), + None, + ) + + +def test_readers_unusual(): + # Reading HTTP/1.0 + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.0\r\nSome: header\r\n\r\n", + Request( + method="HEAD", + target="/foo", + headers=[("Some", "header")], + http_version="1.0", + ), + ) + + # check no-headers, since it's only legal with HTTP/1.0 + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.0\r\n\r\n", + Request(method="HEAD", target="/foo", headers=[], http_version="1.0"), + ) + + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200 OK\r\nSome: header\r\n\r\n", + Response( + status_code=200, + headers=[("Some", "header")], + http_version="1.0", + reason=b"OK", + ), + ) + + # single-character header values (actually disallowed by the ABNF in RFC + # 7230 -- this is a bug in the standard that we originally copied...) 
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.0 200 OK\r\n" b"Foo: a a a a a \r\n\r\n",
+        Response(
+            status_code=200,
+            headers=[("Foo", "a a a a a")],
+            http_version="1.0",
+            reason=b"OK",
+        ),
+    )
+
+    # Empty headers -- also legal
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.0 200 OK\r\n" b"Foo:\r\n\r\n",
+        Response(
+            status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+        ),
+    )
+
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.0 200 OK\r\n" b"Foo: \t \t \r\n\r\n",
+        Response(
+            status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+        ),
+    )
+
+    # Tolerate broken servers that leave off the response code
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.0 200\r\n" b"Foo: bar\r\n\r\n",
+        Response(
+            status_code=200, headers=[("Foo", "bar")], http_version="1.0", reason=b""
+        ),
+    )
+
+    # Tolerate headers line endings (\r\n and \n)
+    # \n\r\n between headers and body
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.1 200 OK\r\nSomeHeader: val\n\r\n",
+        Response(
+            status_code=200,
+            headers=[("SomeHeader", "val")],
+            http_version="1.1",
+            reason="OK",
+        ),
+    )
+
+    # delimited only with \n
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.1 200 OK\nSomeHeader1: val1\nSomeHeader2: val2\n\n",
+        Response(
+            status_code=200,
+            headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+            http_version="1.1",
+            reason="OK",
+        ),
+    )
+
+    # mixed \r\n and \n
+    tr(
+        READERS[SERVER, SEND_RESPONSE],
+        b"HTTP/1.1 200 OK\r\nSomeHeader1: val1\nSomeHeader2: val2\n\r\n",
+        Response(
+            status_code=200,
+            headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+            http_version="1.1",
+            reason="OK",
+        ),
+    )
+
+    # obsolete line folding
+    tr(
+        READERS[CLIENT, IDLE],
+        b"HEAD /foo HTTP/1.1\r\n"
+        b"Host: example.com\r\n"
+        b"Some: multi-line\r\n"
+        b" header\r\n"
+        b"\tnonsense\r\n"
+        b" \t \t\tI guess\r\n"
+        b"Connection: close\r\n"
+        b"More-nonsense: in the\r\n"
+        b" last header \r\n\r\n",
+        Request(
+            method="HEAD",
+            target="/foo",
+            headers=[
+                ("Host", "example.com"),
+                ("Some", "multi-line header nonsense I guess"),
+                ("Connection", "close"),
+                ("More-nonsense", "in the last header"),
+            ],
+        ),
+    )
+
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1\r\n" b" folded: line\r\n\r\n",
+            None,
+        )
+
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1\r\n" b"foo : line\r\n\r\n",
+            None,
+        )
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+            None,
+        )
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+            None,
+        )
+    with pytest.raises(LocalProtocolError):
+        tr(READERS[CLIENT, IDLE], b"HEAD /foo HTTP/1.1\r\n" b": line\r\n\r\n", None)
+
+
+def test__obsolete_line_fold_bytes():
+    # _obsolete_line_fold has a defensive cast to bytearray, which is
+    # necessary to protect against O(n^2) behavior in case anyone ever passes
+    # in regular bytestrings... but right now we never pass in regular
+    # bytestrings. so this test just exists to get some coverage on that
+    # defensive cast.
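+    # (Concatenating onto a bytes object copies the whole buffer on every
+    # append, so folding N continuation lines would cost O(N^2); bytearray
+    # appends are amortized O(1).)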
+ assert list(_obsolete_line_fold([b"aaa", b"bbb", b" ccc", b"ddd"])) == [ + b"aaa", + bytearray(b"bbb ccc"), + b"ddd", + ] + + +def _run_reader_iter(reader, buf, do_eof): + while True: + event = reader(buf) + if event is None: + break + yield event + # body readers have undefined behavior after returning EndOfMessage, + # because this changes the state so they don't get called again + if type(event) is EndOfMessage: + break + if do_eof: + assert not buf + yield reader.read_eof() + + +def _run_reader(*args): + events = list(_run_reader_iter(*args)) + return normalize_data_events(events) + + +def t_body_reader(thunk, data, expected, do_eof=False): + # Simple: consume whole thing + print("Test 1") + buf = makebuf(data) + assert _run_reader(thunk(), buf, do_eof) == expected + + # Incrementally growing buffer + print("Test 2") + reader = thunk() + buf = ReceiveBuffer() + events = [] + for i in range(len(data)): + events += _run_reader(reader, buf, False) + buf += data[i : i + 1] + events += _run_reader(reader, buf, do_eof) + assert normalize_data_events(events) == expected + + is_complete = any(type(event) is EndOfMessage for event in expected) + if is_complete and not do_eof: + buf = makebuf(data + b"trailing") + assert _run_reader(thunk(), buf, False) == expected + + +def test_ContentLengthReader(): + t_body_reader(lambda: ContentLengthReader(0), b"", [EndOfMessage()]) + + t_body_reader( + lambda: ContentLengthReader(10), + b"0123456789", + [Data(data=b"0123456789"), EndOfMessage()], + ) + + +def test_Http10Reader(): + t_body_reader(Http10Reader, b"", [EndOfMessage()], do_eof=True) + t_body_reader(Http10Reader, b"asdf", [Data(data=b"asdf")], do_eof=False) + t_body_reader( + Http10Reader, b"asdf", [Data(data=b"asdf"), EndOfMessage()], do_eof=True + ) + + +def test_ChunkedReader(): + t_body_reader(ChunkedReader, b"0\r\n\r\n", [EndOfMessage()]) + + t_body_reader( + ChunkedReader, + b"0\r\nSome: header\r\n\r\n", + [EndOfMessage(headers=[("Some", "header")])], + ) + + t_body_reader( + ChunkedReader, + b"5\r\n01234\r\n" + + b"10\r\n0123456789abcdef\r\n" + + b"0\r\n" + + b"Some: header\r\n\r\n", + [ + Data(data=b"012340123456789abcdef"), + EndOfMessage(headers=[("Some", "header")]), + ], + ) + + t_body_reader( + ChunkedReader, + b"5\r\n01234\r\n" + b"10\r\n0123456789abcdef\r\n" + b"0\r\n\r\n", + [Data(data=b"012340123456789abcdef"), EndOfMessage()], + ) + + # handles upper and lowercase hex + t_body_reader( + ChunkedReader, + b"aA\r\n" + b"x" * 0xAA + b"\r\n" + b"0\r\n\r\n", + [Data(data=b"x" * 0xAA), EndOfMessage()], + ) + + # refuses arbitrarily long chunk integers + with pytest.raises(LocalProtocolError): + # Technically this is legal HTTP/1.1, but we refuse to process chunk + # sizes that don't fit into 20 characters of hex + t_body_reader(ChunkedReader, b"9" * 100 + b"\r\nxxx", [Data(data=b"xxx")]) + + # refuses garbage in the chunk count + with pytest.raises(LocalProtocolError): + t_body_reader(ChunkedReader, b"10\x00\r\nxxx", None) + + # handles (and discards) "chunk extensions" omg wtf + t_body_reader( + ChunkedReader, + b"5; hello=there\r\n" + + b"xxxxx" + + b"\r\n" + + b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n', + [Data(data=b"xxxxx"), EndOfMessage()], + ) + + +def test_ContentLengthWriter(): + w = ContentLengthWriter(5) + assert dowrite(w, Data(data=b"123")) == b"123" + assert dowrite(w, Data(data=b"45")) == b"45" + assert dowrite(w, EndOfMessage()) == b"" + + w = ContentLengthWriter(5) + with pytest.raises(LocalProtocolError): + dowrite(w, Data(data=b"123456")) + + w = 
ContentLengthWriter(5)
+    dowrite(w, Data(data=b"123"))
+    with pytest.raises(LocalProtocolError):
+        dowrite(w, Data(data=b"456"))
+
+    w = ContentLengthWriter(5)
+    dowrite(w, Data(data=b"123"))
+    with pytest.raises(LocalProtocolError):
+        dowrite(w, EndOfMessage())
+
+    w = ContentLengthWriter(5)
+    assert dowrite(w, Data(data=b"123")) == b"123"
+    assert dowrite(w, Data(data=b"45")) == b"45"
+    with pytest.raises(LocalProtocolError):
+        dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_ChunkedWriter():
+    w = ChunkedWriter()
+    assert dowrite(w, Data(data=b"aaa")) == b"3\r\naaa\r\n"
+    assert dowrite(w, Data(data=b"a" * 20)) == b"14\r\n" + b"a" * 20 + b"\r\n"
+
+    assert dowrite(w, Data(data=b"")) == b""
+
+    assert dowrite(w, EndOfMessage()) == b"0\r\n\r\n"
+
+    assert (
+        dowrite(w, EndOfMessage(headers=[("Etag", "asdf"), ("a", "b")]))
+        == b"0\r\nEtag: asdf\r\na: b\r\n\r\n"
+    )
+
+
+def test_Http10Writer():
+    w = Http10Writer()
+    assert dowrite(w, Data(data=b"1234")) == b"1234"
+    assert dowrite(w, EndOfMessage()) == b""
+
+    with pytest.raises(LocalProtocolError):
+        dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_reject_garbage_after_response_line():
+    with pytest.raises(LocalProtocolError):
+        tr(READERS[SERVER, SEND_RESPONSE], b"HTTP/1.0 200 OK\x00xxxx\r\n\r\n", None)
+
+
+def test_reject_garbage_after_request_line():
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1 xxxxxx\r\n" b"Host: a\r\n\r\n",
+            None,
+        )
+
+
+def test_reject_garbage_in_header_line():
+    with pytest.raises(LocalProtocolError):
+        tr(
+            READERS[CLIENT, IDLE],
+            b"HEAD /foo HTTP/1.1\r\n" b"Host: foo\x00bar\r\n\r\n",
+            None,
+        )
+
+
+def test_reject_non_vchar_in_path():
+    for bad_char in b"\x00\x20\x7f\xee":
+        message = bytearray(b"HEAD /")
+        message.append(bad_char)
+        message.extend(b" HTTP/1.1\r\nHost: foobar\r\n\r\n")
+        with pytest.raises(LocalProtocolError):
+            tr(READERS[CLIENT, IDLE], message, None)
+
+
+# https://github.com/python-hyper/h11/issues/57
+def test_allow_some_garbage_in_cookies():
+    tr(
+        READERS[CLIENT, IDLE],
+        b"HEAD /foo HTTP/1.1\r\n"
+        b"Host: foo\r\n"
+        b"Set-Cookie: ___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900\r\n"
+        b"\r\n",
+        Request(
+            method="HEAD",
+            target="/foo",
+            headers=[
+                ("Host", "foo"),
+                ("Set-Cookie", "___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900"),
+            ],
+        ),
+    )
+
+
+def test_host_comes_first():
+    tw(
+        write_headers,
+        normalize_and_validate([("foo", "bar"), ("Host", "example.com")]),
+        b"Host: example.com\r\nfoo: bar\r\n\r\n",
+    )
diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_receivebuffer.py b/myenv/lib/python3.9/site-packages/h11/tests/test_receivebuffer.py
new file mode 100644
index 0000000..3a61f9d
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/h11/tests/test_receivebuffer.py
@@ -0,0 +1,134 @@
+import re
+
+import pytest
+
+from .._receivebuffer import ReceiveBuffer
+
+
+def test_receivebuffer():
+    b = ReceiveBuffer()
+    assert not b
+    assert len(b) == 0
+    assert bytes(b) == b""
+
+    b += b"123"
+    assert b
+    assert len(b) == 3
+    assert bytes(b) == b"123"
+
+    assert bytes(b) == b"123"
+
+    assert b.maybe_extract_at_most(2) == b"12"
+    assert b
+    assert len(b) == 1
+    assert bytes(b) == b"3"
+
+    assert bytes(b) == b"3"
+
+    assert b.maybe_extract_at_most(10) == b"3"
+    assert bytes(b) == b""
+
+    assert b.maybe_extract_at_most(10) is None
+    assert not b
+
+    ################################################################
+    # maybe_extract_next_line
+    
################################################################ + + b += b"123\n456\r\n789\r\n" + + assert b.maybe_extract_next_line() == b"123\n456\r\n" + assert bytes(b) == b"789\r\n" + + assert b.maybe_extract_next_line() == b"789\r\n" + assert bytes(b) == b"" + + b += b"12\r" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"12\r" + + b += b"345\n\r" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"12\r345\n\r" + + # here we stopped at the middle of b"\r\n" delimiter + + b += b"\n6789aaa123\r\n" + assert b.maybe_extract_next_line() == b"12\r345\n\r\n" + assert b.maybe_extract_next_line() == b"6789aaa123\r\n" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"" + + ################################################################ + # maybe_extract_lines + ################################################################ + + b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing" + lines = b.maybe_extract_lines() + assert lines == [b"123", b"a: b", b"foo:bar"] + assert bytes(b) == b"trailing" + + assert b.maybe_extract_lines() is None + + b += b"\r\n\r" + assert b.maybe_extract_lines() is None + + assert b.maybe_extract_at_most(100) == b"trailing\r\n\r" + assert not b + + # Empty body case (as happens at the end of chunked encoding if there are + # no trailing headers, e.g.) + b += b"\r\ntrailing" + assert b.maybe_extract_lines() == [] + assert bytes(b) == b"trailing" + + +@pytest.mark.parametrize( + "data", + [ + pytest.param( + ( + b"HTTP/1.1 200 OK\r\n", + b"Content-type: text/plain\r\n", + b"Connection: close\r\n", + b"\r\n", + b"Some body", + ), + id="with_crlf_delimiter", + ), + pytest.param( + ( + b"HTTP/1.1 200 OK\n", + b"Content-type: text/plain\n", + b"Connection: close\n", + b"\n", + b"Some body", + ), + id="with_lf_only_delimiter", + ), + pytest.param( + ( + b"HTTP/1.1 200 OK\n", + b"Content-type: text/plain\r\n", + b"Connection: close\n", + b"\n", + b"Some body", + ), + id="with_mixed_crlf_and_lf", + ), + ], +) +def test_receivebuffer_for_invalid_delimiter(data): + b = ReceiveBuffer() + + for line in data: + b += line + + lines = b.maybe_extract_lines() + + assert lines == [ + b"HTTP/1.1 200 OK", + b"Content-type: text/plain", + b"Connection: close", + ] + assert bytes(b) == b"Some body" diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_state.py b/myenv/lib/python3.9/site-packages/h11/tests/test_state.py new file mode 100644 index 0000000..efe83f0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/h11/tests/test_state.py @@ -0,0 +1,250 @@ +import pytest + +from .._events import * +from .._state import * +from .._state import _SWITCH_CONNECT, _SWITCH_UPGRADE, ConnectionState +from .._util import LocalProtocolError + + +def test_ConnectionState(): + cs = ConnectionState() + + # Basic event-triggered transitions + + assert cs.states == {CLIENT: IDLE, SERVER: IDLE} + + cs.process_event(CLIENT, Request) + # The SERVER-Request special case: + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + # Illegal transitions raise an error and nothing happens + with pytest.raises(LocalProtocolError): + cs.process_event(CLIENT, Request) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, InformationalResponse) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, Response) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY} + + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(SERVER, EndOfMessage) + assert cs.states == 
{CLIENT: DONE, SERVER: DONE} + + # State-triggered transition + + cs.process_event(SERVER, ConnectionClosed) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED} + + +def test_ConnectionState_keep_alive(): + # keep_alive = False + cs = ConnectionState() + cs.process_event(CLIENT, Request) + cs.process_keep_alive_disabled() + cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE} + + +def test_ConnectionState_keep_alive_in_DONE(): + # Check that if keep_alive is disabled when the CLIENT is already in DONE, + # then this is sufficient to immediately trigger the DONE -> MUST_CLOSE + # transition + cs = ConnectionState() + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + assert cs.states[CLIENT] is DONE + cs.process_keep_alive_disabled() + assert cs.states[CLIENT] is MUST_CLOSE + + +def test_ConnectionState_switch_denied(): + for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE): + for deny_early in (True, False): + cs = ConnectionState() + cs.process_client_switch_proposal(switch_type) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, Data) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + assert switch_type in cs.pending_switch_proposals + + if deny_early: + # before client reaches DONE + cs.process_event(SERVER, Response) + assert not cs.pending_switch_proposals + + cs.process_event(CLIENT, EndOfMessage) + + if deny_early: + assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} + else: + assert cs.states == { + CLIENT: MIGHT_SWITCH_PROTOCOL, + SERVER: SEND_RESPONSE, + } + + cs.process_event(SERVER, InformationalResponse) + assert cs.states == { + CLIENT: MIGHT_SWITCH_PROTOCOL, + SERVER: SEND_RESPONSE, + } + + cs.process_event(SERVER, Response) + assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} + assert not cs.pending_switch_proposals + + +_response_type_for_switch = { + _SWITCH_UPGRADE: InformationalResponse, + _SWITCH_CONNECT: Response, + None: Response, +} + + +def test_ConnectionState_protocol_switch_accepted(): + for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]: + cs = ConnectionState() + cs.process_client_switch_proposal(switch_event) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, Data) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, InformationalResponse) + assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event) + assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} + + +def test_ConnectionState_double_protocol_switch(): + # CONNECT + Upgrade is legal! Very silly, but legal. So we support + # it. Because sometimes doing the silly thing is easier than not. 
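+    # (The client proposes both switches up front; which one wins is only
+    # known from the type of the server's eventual response, so the state
+    # machine has to track the whole set of pending proposals.)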
+    for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]:
+        cs = ConnectionState()
+        cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+        cs.process_client_switch_proposal(_SWITCH_CONNECT)
+        cs.process_event(CLIENT, Request)
+        cs.process_event(CLIENT, EndOfMessage)
+        assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+        cs.process_event(
+            SERVER, _response_type_for_switch[server_switch], server_switch
+        )
+        if server_switch is None:
+            assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+        else:
+            assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+
+
+def test_ConnectionState_inconsistent_protocol_switch():
+    for client_switches, server_switch in [
+        ([], _SWITCH_CONNECT),
+        ([], _SWITCH_UPGRADE),
+        ([_SWITCH_UPGRADE], _SWITCH_CONNECT),
+        ([_SWITCH_CONNECT], _SWITCH_UPGRADE),
+    ]:
+        cs = ConnectionState()
+        for client_switch in client_switches:
+            cs.process_client_switch_proposal(client_switch)
+        cs.process_event(CLIENT, Request)
+        with pytest.raises(LocalProtocolError):
+            cs.process_event(SERVER, Response, server_switch)
+
+
+def test_ConnectionState_keepalive_protocol_switch_interaction():
+    # keep_alive=False + pending_switch_proposals
+    cs = ConnectionState()
+    cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+    cs.process_event(CLIENT, Request)
+    cs.process_keep_alive_disabled()
+    cs.process_event(CLIENT, Data)
+    assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+    # the protocol switch "wins"
+    cs.process_event(CLIENT, EndOfMessage)
+    assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+    # but when the server denies the request, keep_alive comes back into play
+    cs.process_event(SERVER, Response)
+    assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY}
+
+
+def test_ConnectionState_reuse():
+    cs = ConnectionState()
+
+    with pytest.raises(LocalProtocolError):
+        cs.start_next_cycle()
+
+    cs.process_event(CLIENT, Request)
+    cs.process_event(CLIENT, EndOfMessage)
+
+    with pytest.raises(LocalProtocolError):
+        cs.start_next_cycle()
+
+    cs.process_event(SERVER, Response)
+    cs.process_event(SERVER, EndOfMessage)
+
+    cs.start_next_cycle()
+    assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+    # No keepalive
+
+    cs.process_event(CLIENT, Request)
+    cs.process_keep_alive_disabled()
+    cs.process_event(CLIENT, EndOfMessage)
+    cs.process_event(SERVER, Response)
+    cs.process_event(SERVER, EndOfMessage)
+
+    with pytest.raises(LocalProtocolError):
+        cs.start_next_cycle()
+
+    # One side closed
+
+    cs = ConnectionState()
+    cs.process_event(CLIENT, Request)
+    cs.process_event(CLIENT, EndOfMessage)
+    cs.process_event(CLIENT, ConnectionClosed)
+    cs.process_event(SERVER, Response)
+    cs.process_event(SERVER, EndOfMessage)
+
+    with pytest.raises(LocalProtocolError):
+        cs.start_next_cycle()
+
+    # Successful protocol switch
+
+    cs = ConnectionState()
+    cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+    cs.process_event(CLIENT, Request)
+    cs.process_event(CLIENT, EndOfMessage)
+    cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE)
+
+    with pytest.raises(LocalProtocolError):
+        cs.start_next_cycle()
+
+    # Failed protocol switch
+
+    cs = ConnectionState()
+    cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+    cs.process_event(CLIENT, Request)
+    cs.process_event(CLIENT, EndOfMessage)
+    cs.process_event(SERVER, Response)
+    cs.process_event(SERVER, EndOfMessage)
+
+    cs.start_next_cycle()
+    assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+
+def test_server_request_is_illegal():
+    # There used to be a bug in how we handled the Request special case that
+    # made this allowed...
+    cs = ConnectionState()
+    with pytest.raises(LocalProtocolError):
+        cs.process_event(SERVER, Request)
diff --git a/myenv/lib/python3.9/site-packages/h11/tests/test_util.py b/myenv/lib/python3.9/site-packages/h11/tests/test_util.py
new file mode 100644
index 0000000..d851bdc
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/h11/tests/test_util.py
@@ -0,0 +1,99 @@
+import re
+import sys
+import traceback
+
+import pytest
+
+from .._util import *
+
+
+def test_ProtocolError():
+    with pytest.raises(TypeError):
+        ProtocolError("abstract base class")
+
+
+def test_LocalProtocolError():
+    try:
+        raise LocalProtocolError("foo")
+    except LocalProtocolError as e:
+        assert str(e) == "foo"
+        assert e.error_status_hint == 400
+
+    try:
+        raise LocalProtocolError("foo", error_status_hint=418)
+    except LocalProtocolError as e:
+        assert str(e) == "foo"
+        assert e.error_status_hint == 418
+
+    def thunk():
+        raise LocalProtocolError("a", error_status_hint=420)
+
+    try:
+        try:
+            thunk()
+        except LocalProtocolError as exc1:
+            orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+            exc1._reraise_as_remote_protocol_error()
+    except RemoteProtocolError as exc2:
+        assert type(exc2) is RemoteProtocolError
+        assert exc2.args == ("a",)
+        assert exc2.error_status_hint == 420
+        new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+        assert new_traceback.endswith(orig_traceback)
+
+
+def test_validate():
+    my_re = re.compile(br"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
+    with pytest.raises(LocalProtocolError):
+        validate(my_re, b"0.")
+
+    groups = validate(my_re, b"0.1")
+    assert groups == {"group1": b"0", "group2": b"1"}
+
+    # successful partial matches are an error - must match whole string
+    with pytest.raises(LocalProtocolError):
+        validate(my_re, b"0.1xx")
+    with pytest.raises(LocalProtocolError):
+        validate(my_re, b"0.1\n")
+
+
+def test_validate_formatting():
+    my_re = re.compile(br"foo")
+
+    with pytest.raises(LocalProtocolError) as excinfo:
+        validate(my_re, b"", "oops")
+    assert "oops" in str(excinfo.value)
+
+    with pytest.raises(LocalProtocolError) as excinfo:
+        validate(my_re, b"", "oops {}")
+    assert "oops {}" in str(excinfo.value)
+
+    with pytest.raises(LocalProtocolError) as excinfo:
+        validate(my_re, b"", "oops {} xx", 10)
+    assert "oops 10 xx" in str(excinfo.value)
+
+
+def test_make_sentinel():
+    S = make_sentinel("S")
+    assert repr(S) == "S"
+    assert S == S
+    assert type(S).__name__ == "S"
+    assert S in {S}
+    assert type(S) is S
+    S2 = make_sentinel("S2")
+    assert repr(S2) == "S2"
+    assert S != S2
+    assert S not in {S2}
+    assert type(S) is not type(S2)
+
+
+def test_bytesify():
+    assert bytesify(b"123") == b"123"
+    assert bytesify(bytearray(b"123")) == b"123"
+    assert bytesify("123") == b"123"
+
+    with pytest.raises(UnicodeEncodeError):
+        bytesify("\u1234")
+
+    with pytest.raises(TypeError):
+        bytesify(10)
diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/INSTALLER
new file mode 100644
index 0000000..2f9ab90
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+Poetry 1.6.1
\ No newline at end of file
diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/LICENSE.md b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/LICENSE.md
new file mode 100644
index 0000000..311b2b5
--- /dev/null
+++ 
b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/LICENSE.md @@ -0,0 +1,27 @@ +Copyright © 2020, [Encode OSS Ltd](https://www.encode.io/). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/METADATA new file mode 100644 index 0000000..2361011 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/METADATA @@ -0,0 +1,489 @@ +Metadata-Version: 2.1 +Name: httpcore +Version: 0.15.0 +Summary: A minimal low-level HTTP client. 
+Home-page: https://github.com/encode/httpcore
+Author: Tom Christie
+Author-email: tom@tomchristie.com
+License: BSD
+Project-URL: Documentation, https://www.encode.io/httpcore
+Project-URL: Source, https://github.com/encode/httpcore
+Classifier: Development Status :: 3 - Alpha
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Framework :: AsyncIO
+Classifier: Framework :: Trio
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE.md
+Requires-Dist: h11 (<0.13,>=0.11)
+Requires-Dist: sniffio (==1.*)
+Requires-Dist: anyio (==3.*)
+Requires-Dist: certifi
+Provides-Extra: http2
+Requires-Dist: h2 (<5,>=3) ; extra == 'http2'
+Provides-Extra: socks
+Requires-Dist: socksio (==1.*) ; extra == 'socks'
+
+# HTTP Core
+
+[![Test Suite](https://github.com/encode/httpcore/workflows/Test%20Suite/badge.svg)](https://github.com/encode/httpcore/actions)
+[![Package version](https://badge.fury.io/py/httpcore.svg)](https://pypi.org/project/httpcore/)
+
+> *Do one thing, and do it well.*
+
+The HTTP Core package provides a minimal low-level HTTP client, which does
+one thing only: sending HTTP requests.
+
+It does not provide any high-level model abstractions over the API,
+does not handle redirects, multipart uploads, building authentication headers,
+transparent HTTP caching, URL parsing, session cookie handling,
+content or charset decoding, handling JSON, environment-based configuration
+defaults, or any of that jazz.
+
+Some things HTTP Core does do:
+
+* Sending HTTP requests.
+* Thread-safe / task-safe connection pooling.
+* HTTP(S) proxy & SOCKS proxy support.
+* Supports HTTP/1.1 and HTTP/2.
+* Provides both sync and async interfaces.
+* Async backend support for `asyncio` and `trio`.
+
+## Requirements
+
+Python 3.7+
+
+## Installation
+
+For HTTP/1.1 only support, install with:
+
+```shell
+$ pip install httpcore
+```
+
+For HTTP/1.1 and HTTP/2 support, install with:
+
+```shell
+$ pip install httpcore[http2]
+```
+
+For SOCKS proxy support, install with:
+
+```shell
+$ pip install httpcore[socks]
+```
+
+# Sending requests
+
+Send an HTTP request:
+
+```python
+import httpcore
+
+response = httpcore.request("GET", "https://www.example.com/")
+
+print(response)
+# <Response [200]>
+print(response.status)
+# 200
+print(response.headers)
+# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]
+print(response.content)
+# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title> ...'
+```
+
+The top-level `httpcore.request()` function is provided for convenience. In practice, whenever you're working with `httpcore` you'll want to use the connection pooling functionality that it provides.
+
+```python
+import httpcore
+
+http = httpcore.ConnectionPool()
+response = http.request("GET", "https://www.example.com/")
+```
+
+Once you're ready to get going, [head over to the documentation](https://www.encode.io/httpcore/).
+
+## Motivation
+
+You *probably* don't want to be using HTTP Core directly. It might make sense if
+you're writing something like a proxy service in Python, and you just want
+something at the lowest possible level, but more typically you'll want to use
+a higher-level client library, such as `httpx`.
+
+The motivation for `httpcore` is:
+
+* To provide a reusable low-level client library, that other packages can then build on top of.
+* To provide a *really clear interface split* between the networking code and client logic,
+  so that each is easier to understand and reason about in isolation.
+
+
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+## 0.15.0 (May 17th, 2022)
+
+- Drop Python 3.6 support (#535)
+- Ensure HTTP proxy CONNECT requests include `timeout` configuration. (#506)
+- Switch to explicit `typing.Optional` for type hints (#513)
+- For `trio` map OSError exceptions to `ConnectError` (#543)
+
+## 0.14.7 (February 4th, 2022)
+
+- Requests which raise a PoolTimeout need to be removed from the pool queue. (#502)
+- Fix AttributeError that happened when Socks5Connection was terminated. (#501)
+
+## 0.14.6 (February 1st, 2022)
+
+- Fix SOCKS support for `http://` URLs. (#492)
+- Resolve race condition around exceptions during streaming a response. (#491)
+
+## 0.14.5 (January 18th, 2022)
+
+- SOCKS proxy support. (#478)
+- Add proxy_auth argument to HTTPProxy (#481)
+- Improve error message on 'RemoteProtocolError' exception when server disconnects without sending a response (#479)
+
+## 0.14.4 (January 5th, 2022)
+
+- Support HTTP/2 on HTTPS tunnelling proxies. (#468)
+- Fix proxy headers missing on HTTP forwarding. (#456)
+- Only instantiate SSL context if required. (#457)
+- More robust HTTP/2 handling. (#253, #439, #440, #441)
+
+## 0.14.3 (November 17th, 2021)
+
+- Fix race condition when removing closed connections from the pool (#437)
+
+## 0.14.2 (November 16th, 2021)
+
+- Failed connections no longer remain in the pool. (Pull #433)
+
+## 0.14.1 (November 12th, 2021)
+
+- `max_connections` becomes optional. (Pull #429)
+- `certifi` is now included in the install dependencies. (Pull #428)
+- `h2` is now strictly optional. (Pull #428)
+
+## 0.14.0 (November 11th, 2021)
+
+The 0.14 release is a complete reworking of `httpcore`, comprehensively addressing some underlying issues in the connection pooling, as well as substantially redesigning the API to be more user friendly.
+
+Some of the lower-level API design also makes the components more easily testable in isolation, and the package now has 100% test coverage.
+
+See [discussion #419](https://github.com/encode/httpcore/discussions/419) for a little more background.
+
+There are some other neat bits in there too, such as the "trace" extension, which gives a hook into inspecting the internal events that occur during the request/response cycle (a rough sketch follows below). This extension is needed for the HTTPX CLI, in order to...
+
+* Log the point at which the connection is established, and the IP/port on which it is made.
+* Determine if the outgoing request should log as HTTP/1.1 or HTTP/2, rather than having to assume it's HTTP/2 if the --http2 flag was passed. (Which may not actually be true.)
+* Log SSL version info / certificate info.
+
+Note that `curio` support is not currently available in 0.14.0. If you're using `httpcore` with `curio` please get in touch, so we can assess if we ought to prioritize it as a feature or not.
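+
+As a rough sketch of what that looks like in practice: a trace hook is just a
+callable passed through the request `extensions` dictionary, invoked with an
+`(event_name, info)` pair for each internal event. The event names shown in
+the comment below are illustrative, not a stable contract:
+
+```python
+import httpcore
+
+def log(event_name, info):
+    # Called once per internal event, e.g. (assumed names):
+    #   "connection.connect_tcp.started" {"host": "www.example.com", ...}
+    print(event_name, info)
+
+response = httpcore.request(
+    "GET", "https://www.example.com/", extensions={"trace": log}
+)
+```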
+
+## 0.13.7 (September 13th, 2021)
+
+- Fix broken error messaging when URL scheme is missing, or a non-HTTP(S) scheme is used. (Pull #403)
+
+## 0.13.6 (June 15th, 2021)
+
+### Fixed
+
+- Close sockets when read or write timeouts occur. (Pull #365)
+
+## 0.13.5 (June 14th, 2021)
+
+### Fixed
+
+- Resolved niggles with AnyIO EOF behaviours. (Pull #358, #362)
+
+## 0.13.4 (June 9th, 2021)
+
+### Added
+
+- Improved error messaging when URL scheme is missing, or a non-HTTP(S) scheme is used. (Pull #354)
+
+### Fixed
+
+- Switched to `anyio` as the default backend implementation when running with `asyncio`. Resolves some awkward [TLS timeout issues](https://github.com/encode/httpx/discussions/1511).
+
+## 0.13.3 (May 6th, 2021)
+
+### Added
+
+- Support HTTP/2 prior knowledge, using `httpcore.SyncConnectionPool(http1=False)`. (Pull #333)
+
+### Fixed
+
+- Handle cases where environment does not provide `select.poll` support. (Pull #331)
+
+## 0.13.2 (April 29th, 2021)
+
+### Added
+
+- Improve error message for specific case of `RemoteProtocolError` where server disconnects without sending a response. (Pull #313)
+
+## 0.13.1 (April 28th, 2021)
+
+### Fixed
+
+- More resilient testing for closed connections. (Pull #311)
+- Don't raise exceptions on ungraceful connection closes. (Pull #310)
+
+## 0.13.0 (April 21st, 2021)
+
+The 0.13 release updates the core API in order to match the HTTPX Transport API,
+introduced in HTTPX 0.18 onwards.
+
+An example of making requests with the new interface is:
+
+```python
+with httpcore.SyncConnectionPool() as http:
+    status_code, headers, stream, extensions = http.handle_request(
+        method=b'GET',
+        url=(b'https', b'example.org', 443, b'/'),
+        headers=[(b'host', b'example.org'), (b'user-agent', b'httpcore')],
+        stream=httpcore.ByteStream(b''),
+        extensions={}
+    )
+    body = stream.read()
+    print(status_code, body)
+```
+
+### Changed
+
+- The `.request()` method is now `handle_request()`. (Pull #296)
+- The `.arequest()` method is now `.handle_async_request()`. (Pull #296)
+- The `headers` argument is no longer optional. (Pull #296)
+- The `stream` argument is no longer optional. (Pull #296)
+- The `ext` argument is now named `extensions`, and is no longer optional. (Pull #296)
+- The `"reason"` extension keyword is now named `"reason_phrase"`. (Pull #296)
+- The `"reason_phrase"` and `"http_version"` extensions now use byte strings for their values. (Pull #296)
+- The `httpcore.PlainByteStream()` class becomes `httpcore.ByteStream()`. (Pull #296)
+
+### Added
+
+- Streams now support a `.read()` interface. (Pull #296)
+
+### Fixed
+
+- Task cancellation no longer leaks connections from the connection pool. (Pull #305)
+
+## 0.12.3 (December 7th, 2020)
+
+### Fixed
+
+- Abort SSL connections on close rather than waiting for remote EOF when using `asyncio`. (Pull #167)
+- Fix exception raised in case of connect timeouts when using the `anyio` backend. (Pull #236)
+- Fix `Host` header precedence for `:authority` in HTTP/2. (Pull #241, #243)
+- Handle extra edge case when detecting for socket readability when using `asyncio`. (Pull #242, #244)
+- Fix `asyncio` SSL warning when using proxy tunneling. (Pull #249)
+
+## 0.12.2 (November 20th, 2020)
+
+### Fixed
+
+- Properly wrap connect errors on the asyncio backend. (Pull #235)
+- Fix `ImportError` occurring on Python 3.9 when using the HTTP/1.1 sync client in a multithreaded context. (Pull #237)
+
+## 0.12.1 (November 7th, 2020)
+
+### Added
+
+- Add connect retries. (Pull #221)
+
+### Fixed
+
+- Tweak detection of dropped connections, resolving an issue with open files limits on Linux. (Pull #185)
+- Avoid leaking connections when establishing an HTTP tunnel to a proxy has failed. (Pull #223)
+- Properly wrap OS errors when using `trio`. (Pull #225)
+
+## 0.12.0 (October 6th, 2020)
+
+### Changed
+
+- HTTP header casing is now preserved, rather than always sent in lowercase. (#216 and python-hyper/h11#104)
+
+### Added
+
+- Add Python 3.9 to officially supported versions.
+
+### Fixed
+
+- Gracefully handle a stdlib asyncio bug when a connection is closed while it is in a paused-for-reading state. (#201)
+
+## 0.11.1 (September 28th, 2020)
+
+### Fixed
+
+- Add await to async semaphore release() coroutine (#197)
+- Drop incorrect curio classifier (#192)
+
+## 0.11.0 (September 22nd, 2020)
+
+The Transport API with 0.11.0 has a couple of significant changes.
+
+Firstly, we've changed the request interface in order to allow extensions, which will later enable us to support features
+such as trailing headers, HTTP/2 server push, and CONNECT/Upgrade connections.
+
+The interface changes from:
+
+```python
+def request(method, url, headers, stream, timeout):
+    return (http_version, status_code, reason, headers, stream)
+```
+
+To instead include an optional dictionary of extensions on the request and response:
+
+```python
+def request(method, url, headers, stream, ext):
+    return (status_code, headers, stream, ext)
+```
+
+Having an open-ended extensions point will allow us to add later support for various optional features that wouldn't otherwise be supported without these API changes.
+
+In particular:
+
+* Trailing headers support.
+* HTTP/2 Server Push
+* sendfile.
+* Exposing raw connection on CONNECT, Upgrade, HTTP/2 bi-di streaming.
+* Exposing debug information out of the API, including template name, template context.
+
+Currently extensions are limited to:
+
+* request: `timeout` - Optional. Timeout dictionary.
+* response: `http_version` - Optional. Include the HTTP version used on the response.
+* response: `reason` - Optional. Include the reason phrase used on the response. Only valid with HTTP/1.*.
+
+See https://github.com/encode/httpx/issues/1274#issuecomment-694884553 for the history behind this.
+
+Secondly, the async version of `request` is now namespaced as `arequest`.
+
+This allows concrete transports to support both sync and async implementations on the same class.
+
+### Added
+
+- Add curio support. (Pull #168)
+- Add anyio support, with `backend="anyio"`. (Pull #169)
+
+### Changed
+
+- Update the Transport API to use 'ext' for optional extensions. (Pull #190)
+- Update the Transport API to use `.request` and `.arequest` so implementations can support both sync and async. (Pull #189)
+
+## 0.10.2 (August 20th, 2020)
+
+### Added
+
+- Added Unix Domain Socket support. (Pull #139)
+
+### Fixed
+
+- Always include the port on proxy CONNECT requests. (Pull #154)
+- Fix `max_keepalive_connections` configuration. (Pull #153)
+- Fixes behaviour in HTTP/1.1 where server disconnects can be used to signal the end of the response body. (Pull #164)
+
+## 0.10.1 (August 7th, 2020)
+
+- Include `max_keepalive_connections` on `AsyncHTTPProxy`/`SyncHTTPProxy` classes.
+
+## 0.10.0 (August 7th, 2020)
+
+The most notable change in the 0.10.0 release is that HTTP/2 support is now fully optional.
+
+Use either `pip install httpcore` for HTTP/1.1 support only, or `pip install httpcore[http2]` for HTTP/1.1 and HTTP/2 support.
## 0.10.2 (August 20th, 2020)

### Added

- Added Unix Domain Socket support. (Pull #139)

### Fixed

- Always include the port on proxy CONNECT requests. (Pull #154)
- Fix `max_keepalive_connections` configuration. (Pull #153)
- Fix behaviour in HTTP/1.1 where server disconnects can be used to signal the end of the response body. (Pull #164)

## 0.10.1 (August 7th, 2020)

- Include `max_keepalive_connections` on `AsyncHTTPProxy`/`SyncHTTPProxy` classes.

## 0.10.0 (August 7th, 2020)

The most notable change in the 0.10.0 release is that HTTP/2 support is now fully optional.

Use either `pip install httpcore` for HTTP/1.1 support only, or `pip install httpcore[http2]` for HTTP/1.1 and HTTP/2 support.

### Added

- HTTP/2 support becomes optional. (Pull #121, #130)
- Add `local_address=...` support. (Pull #100, #134)
- Add `PlainByteStream`, `IteratorByteStream`, `AsyncIteratorByteStream`. The `AsyncByteStream` and `SyncByteStream` classes are now pure interface classes. (#133)
- Add `LocalProtocolError`, `RemoteProtocolError` exceptions. (Pull #129)
- Add `UnsupportedProtocol` exception. (Pull #128)
- Add `.get_connection_info()` method. (Pull #102, #137)
- Add better TRACE logs. (Pull #101)

### Changed

- `max_keepalive` is deprecated in favour of `max_keepalive_connections`. (Pull #140)

### Fixed

- Improve handling of server disconnects. (Pull #112)

## 0.9.1 (May 27th, 2020)

### Fixed

- Proper host resolution for the sync case, including IPv6 support. (Pull #97)
- Close outstanding connections when the connection pool is closed. (Pull #98)

## 0.9.0 (May 21st, 2020)

### Changed

- The URL port becomes an `Optional[int]` instead of `int`. (Pull #92)

### Fixed

- Honor HTTP/2 max concurrent streams settings. (Pull #89, #90)
- Remove an incorrect debug log. (Pull #83)

## 0.8.4 (May 11th, 2020)

### Added

- Add logging via the HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables,
including TRACE level logging. (Pull #79)

### Fixed

- Fix reuse of connections on HTTP/2 in close concurrency situations. (Pull #81)

## 0.8.3 (May 6th, 2020)

### Fixed

- Include `Host` and `Accept` headers on proxy "CONNECT" requests.
- De-duplicate any headers also contained in proxy_headers.
- Fix the HTTP/2 flag not being passed down to proxy connections.

## 0.8.2 (May 3rd, 2020)

### Fixed

- Fix connections using proxy forwarding requests not being added to the
connection pool properly. (Pull #70)

## 0.8.1 (April 30th, 2020)

### Changed

- Allow inheritance of both `httpcore.AsyncByteStream` and `httpcore.SyncByteStream` without type conflicts.

## 0.8.0 (April 30th, 2020)

### Fixed

- Fixed tunnel proxy support.

### Added

- New `TimeoutException` base class.

## 0.7.0 (March 5th, 2020)

- First integration with HTTPX.
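The version history above stops at 0.7.0, but the package actually vendored in this diff is httpcore 0.15.0, whose top-level convenience functions appear in the `_api.py` module further below. As a minimal sketch of that current interface (the target URL, headers, and timeout values here are illustrative assumptions, not taken from the source):

```python
# Sketch of the httpcore 0.15 top-level API vendored below. The target
# URL, headers, and timeout values are illustrative assumptions.
import httpcore

# One-shot request: request() opens a temporary ConnectionPool internally
# and returns an httpcore.Response with the body already read.
response = httpcore.request(
    "GET",
    "https://www.example.com/",
    headers={"accept": "text/html"},
    extensions={"timeout": {"connect": 5.0, "read": 5.0}},
)
print(response.status, response.headers)

# Streaming variant: the body is not read automatically, so read it
# explicitly before the context manager closes the pool.
with httpcore.stream("GET", "https://www.example.com/") as response:
    body = response.read()
```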
diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/RECORD new file mode 100644 index 0000000..72f9d95 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/RECORD @@ -0,0 +1,38 @@ +httpcore/__init__.py,sha256=XmFtXye6ccUEQzSoUUKpEpmd0DV8SROuIzg0fx1vVKw,1869 +httpcore/_api.py,sha256=kasGkW6W8-DDd_74UaC-KQDBV5N4GTdsuP32yuW0la8,3154 +httpcore/_exceptions.py,sha256=L8hJfhAVa3HUdef9OrlXOuU-VxuXEwND-SKanz0siJ4,1111 +httpcore/_models.py,sha256=ukPIXFSfbapVKQQ9pX5Nl9qA6zBj_m7zDghp6Z6Gja8,16205 +httpcore/_ssl.py,sha256=W41aVdVq5UkCp_y3wqGBxp4ozDe2GAMpiJkhYOddlfs,203 +httpcore/_synchronization.py,sha256=8Ay8T57hRRCZxYsvZ6xh3EWSm8Zs0aXzt-nSFH4yx9s,2226 +httpcore/_trace.py,sha256=TOg0iQINCX3DNQCaNBfMrrvp8cIuOTOnAh-STvyGeN4,1797 +httpcore/_utils.py,sha256=9QPh5ib4JilWX4dBCC_XO6wdBY4b0kbUGgfV3QfBANc,1525 +httpcore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +httpcore/_async/__init__.py,sha256=EWdl2v4thnAHzJpqjU4h2a8DUiGAvNiWrkii9pfhTf0,1221 +httpcore/_async/connection.py,sha256=_q5CUhTi3v01kpfAPway7f5377LgpeYut7NqDbJDY54,7827 +httpcore/_async/connection_pool.py,sha256=h6Z7LpnpXEbYOptoO5XnHVDkBw5L9CNUdg_G-VwKWdw,14020 +httpcore/_async/http11.py,sha256=KlWD3I0tQ9elXFehjxgQSTt1UjJXdjj7rzc1oIy9Z50,11263 +httpcore/_async/http2.py,sha256=JX8M2fiCiswzTKfrTi8O8IUZn8mO8PNmS0GTaRkUJXo,19588 +httpcore/_async/http_proxy.py,sha256=DWo9qoKuM52giv5fbNpYOy9_Pl-Y9ok-M3Ef_CiAYrI,13471 +httpcore/_async/interfaces.py,sha256=o36n2kNiYwA5jlLFNXTqYFyLmoc1v29igN7M-zxqvUw,4465 +httpcore/_async/socks_proxy.py,sha256=jCawaM9EAGdxy-bc_jnlOQmLG-uAy-IWB1R2EzuH784,13754 +httpcore/_sync/__init__.py,sha256=JBDIgXt5la1LCJ1sLQeKhjKFpLnpNr8Svs6z2ni3fgg,1141 +httpcore/_sync/connection.py,sha256=VFHf3i1CCpUT7bbWwGObDfCJ8u3WM7HVgP70jA2Bw3c,7628 +httpcore/_sync/connection_pool.py,sha256=zD9OEX96vCdKRm0fA_Kesu85CgQXSPLPLNMm3Tve0Eg,13658 +httpcore/_sync/http11.py,sha256=Te2fILGjsJcvFJYSHwQv2IqxJtwCPwWCG5fyfONTDWc,10939 +httpcore/_sync/http2.py,sha256=aNAwq1BHadSUO6EtLbt97BBzPdmliIW7WbnWTPbxtJ4,19146 +httpcore/_sync/http_proxy.py,sha256=K1t1qfrMmy3POIXed-DEfL7ra1j_QStnSwJjHLkauOM,13233 +httpcore/_sync/interfaces.py,sha256=a3Jh2VCuz6GOgZuOF1VK9aJ89f52CMbVUrtrsac0n7Q,4344 +httpcore/_sync/socks_proxy.py,sha256=tcUpNjFNCye_qwCwfmWo9N45e1WR4Vy-x5s2Z6iOJ_I,13539 +httpcore/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +httpcore/backends/asyncio.py,sha256=KMGdbntlx-KdWER_VyHjV6X7ThdZ8JIa6nYhzNZKgrY,4381 +httpcore/backends/auto.py,sha256=WDfYnVtuXh1SZKZ85yM1bIGe63RssDFBNKbecqYXVD0,1321 +httpcore/backends/base.py,sha256=1R0753TvDRGwiPNeIKfuu-PRcssY_xXqtxtXq6EeiVk,2699 +httpcore/backends/mock.py,sha256=KhAofRzcxMqfk_8gUJ-1znBy1U8DbHES4pb_MoFeokk,3366 +httpcore/backends/sync.py,sha256=7aap1R3t6wiEsFNkWS9gUaIYUB8eZaF_vQuQXiD5aCw,3327 +httpcore/backends/trio.py,sha256=nbvet36SpcfQxhayDvCP1kPp62PJ1Mr6_4_S_FV4OJg,5045 +httpcore-0.15.0.dist-info/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518 +httpcore-0.15.0.dist-info/METADATA,sha256=JD5fs4RqwsoHRRusp7J__aClnRNb4XKbQWUxNQbl3Dw,15849 +httpcore-0.15.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +httpcore-0.15.0.dist-info/top_level.txt,sha256=RaSz_iTqWmkXuXYRXC61wQGRTub4xiYn0d71TCHCrAg,58 +httpcore-0.15.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +httpcore-0.15.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/WHEEL 
b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/top_level.txt new file mode 100644 index 0000000..855079b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore-0.15.0.dist-info/top_level.txt @@ -0,0 +1,4 @@ +httpcore +httpcore/_async +httpcore/_sync +httpcore/backends diff --git a/myenv/lib/python3.9/site-packages/httpcore/__init__.py b/myenv/lib/python3.9/site-packages/httpcore/__init__.py new file mode 100644 index 0000000..e435076 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/__init__.py @@ -0,0 +1,91 @@ +from ._api import request, stream +from ._async import ( + AsyncConnectionInterface, + AsyncConnectionPool, + AsyncHTTP2Connection, + AsyncHTTP11Connection, + AsyncHTTPConnection, + AsyncHTTPProxy, + AsyncSOCKSProxy, +) +from ._exceptions import ( + ConnectError, + ConnectionNotAvailable, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import URL, Origin, Request, Response +from ._ssl import default_ssl_context +from ._sync import ( + ConnectionInterface, + ConnectionPool, + HTTP2Connection, + HTTP11Connection, + HTTPConnection, + HTTPProxy, + SOCKSProxy, +) + +__all__ = [ + # top-level requests + "request", + "stream", + # models + "Origin", + "URL", + "Request", + "Response", + # async + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", + # sync + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", + # util + "default_ssl_context", + # exceptions + "ConnectionNotAvailable", + "ProxyError", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "UnsupportedProtocol", + "TimeoutException", + "PoolTimeout", + "ConnectTimeout", + "ReadTimeout", + "WriteTimeout", + "NetworkError", + "ConnectError", + "ReadError", + "WriteError", +] + +__version__ = "0.15.0" + + +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + setattr(__locals[__name], "__module__", "httpcore") # noqa diff --git a/myenv/lib/python3.9/site-packages/httpcore/_api.py b/myenv/lib/python3.9/site-packages/httpcore/_api.py new file mode 100644 index 0000000..859a189 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_api.py @@ -0,0 +1,92 @@ +from contextlib import contextmanager +from typing import Iterator, Optional, Union + +from ._models import URL, Response +from ._sync.connection_pool import ConnectionPool + + +def request( + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[dict] = None, +) -> Response: + """ + Sends an HTTP request, returning the response. + + ``` + response = httpcore.request("GET", "https://www.example.com/") + ``` + + Arguments: + method: The HTTP method for the request. 
Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. + """ + with ConnectionPool() as pool: + return pool.request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + + +@contextmanager +def stream( + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[dict] = None, +) -> Iterator[Response]: + """ + Sends an HTTP request, returning the response within a content manager. + + ``` + with httpcore.stream("GET", "https://www.example.com/") as response: + ... + ``` + + When using the `stream()` function, the body of the response will not be + automatically read. If you want to access the response body you should + either use `content = response.read()`, or `for chunk in response.iter_content()`. + + Arguments: + method: The HTTP method for the request. Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. + """ + with ConnectionPool() as pool: + with pool.stream( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) as response: + yield response diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py b/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py new file mode 100644 index 0000000..88dc7f0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py @@ -0,0 +1,39 @@ +from .connection import AsyncHTTPConnection +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .http_proxy import AsyncHTTPProxy +from .interfaces import AsyncConnectionInterface + +try: + from .http2 import AsyncHTTP2Connection +except ImportError: # pragma: nocover + + class AsyncHTTP2Connection: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use http2 support, but the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." + ) + + +try: + from .socks_proxy import AsyncSOCKSProxy +except ImportError: # pragma: nocover + + class AsyncSOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." 
+ ) + + +__all__ = [ + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", +] diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/connection.py b/myenv/lib/python3.9/site-packages/httpcore/_async/connection.py new file mode 100644 index 0000000..b919678 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/connection.py @@ -0,0 +1,208 @@ +import itertools +import ssl +from types import TracebackType +from typing import Iterator, Optional, Type + +from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from ..backends.auto import AutoBackend +from ..backends.base import AsyncNetworkBackend, AsyncNetworkStream +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. + + +def exponential_backoff(factor: float) -> Iterator[float]: + yield 0 + for n in itertools.count(2): + yield factor * (2 ** (n - 2)) + + +class AsyncHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connection: Optional[AsyncConnectionInterface] = None + self._connect_failed: bool = False + self._request_lock = AsyncLock() + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + async with self._request_lock: + if self._connection is None: + try: + stream = await self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): + raise ConnectionNotAvailable() + + return await self._connection.handle_async_request(request) + + async def _connect(self, request: Request) -> AsyncNetworkStream: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": 
self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + } + async with Trace( + "connection.connect_tcp", request, kwargs + ) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + } + async with Trace( + "connection.connect_unix_socket", request, kwargs + ) as trace: + stream = await self._network_backend.connect_unix_socket( + **kwargs + ) + trace.return_value = stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + # TRACE 'retry' + await self._network_backend.sleep(delay) + else: + break + + if self._origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("connection.start_tls", request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + return stream + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + async def aclose(self) -> None: + if self._connection is not None: + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
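+    # Editor's note (sketch, not part of the vendored source): used
+    # directly, outside the pool, a connection is an async context manager:
+    #
+    #     origin = httpcore.Origin(b"https", b"example.org", 443)
+    #     async with AsyncHTTPConnection(origin=origin) as conn:
+    #         response = await conn.handle_async_request(request)
+    #
+    # where `request` is an illustrative httpcore.Request for that origin.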
+ + async def __aenter__(self) -> "AsyncHTTPConnection": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/connection_pool.py b/myenv/lib/python3.9/site-packages/httpcore/_async/connection_pool.py new file mode 100644 index 0000000..06ead3e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/connection_pool.py @@ -0,0 +1,354 @@ +import ssl +import sys +from types import TracebackType +from typing import AsyncIterable, AsyncIterator, List, Optional, Type + +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Request, Response +from .._synchronization import AsyncEvent, AsyncLock +from ..backends.auto import AutoBackend +from ..backends.base import AsyncNetworkBackend +from .connection import AsyncHTTPConnection +from .interfaces import AsyncConnectionInterface, AsyncRequestInterface + + +class RequestStatus: + def __init__(self, request: Request): + self.request = request + self.connection: Optional[AsyncConnectionInterface] = None + self._connection_acquired = AsyncEvent() + + def set_connection(self, connection: AsyncConnectionInterface) -> None: + assert self.connection is None + self.connection = connection + self._connection_acquired.set() + + def unset_connection(self) -> None: + assert self.connection is not None + self.connection = None + self._connection_acquired = AsyncEvent() + + async def wait_for_connection( + self, timeout: Optional[float] = None + ) -> AsyncConnectionInterface: + await self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + +class AsyncConnectionPool(AsyncRequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish a + connection. + local_address: Local address to connect from. Can also be used to connect + using a particular address family. 
Using `local_address="0.0.0.0"` + will connect using an `AF_INET` address (IPv4), while using + `local_address="::"` will connect using an `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + """ + self._ssl_context = ssl_context + + self._max_connections = ( + sys.maxsize if max_connections is None else max_connections + ) + self._max_keepalive_connections = ( + sys.maxsize + if max_keepalive_connections is None + else max_keepalive_connections + ) + self._max_keepalive_connections = min( + self._max_connections, self._max_keepalive_connections + ) + + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._pool: List[AsyncConnectionInterface] = [] + self._requests: List[RequestStatus] = [] + self._pool_lock = AsyncLock() + self._network_backend = ( + AutoBackend() if network_backend is None else network_backend + ) + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + return AsyncHTTPConnection( + origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + retries=self._retries, + local_address=self._local_address, + uds=self._uds, + network_backend=self._network_backend, + ) + + @property + def connections(self) -> List[AsyncConnectionInterface]: + """ + Return a list of the connections currently in the pool. + + For example: + + ```python + >>> pool.connections + [ + , + , + , + ] + ``` + """ + return list(self._pool) + + async def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool: + """ + Attempt to provide a connection that can handle the given origin. + """ + origin = status.request.url.origin + + # If there are queued requests in front of us, then don't acquire a + # connection. We handle requests strictly in order. + waiting = [s for s in self._requests if s.connection is None] + if waiting and waiting[0] is not status: + return False + + # Reuse an existing connection if one is currently available. + for idx, connection in enumerate(self._pool): + if connection.can_handle_request(origin) and connection.is_available(): + self._pool.pop(idx) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + # If the pool is currently full, attempt to close one idle connection. + if len(self._pool) >= self._max_connections: + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle(): + await connection.aclose() + self._pool.pop(idx) + break + + # If the pool is still full, then we cannot acquire a connection. + if len(self._pool) >= self._max_connections: + return False + + # Otherwise create a new connection. + connection = self.create_connection(origin) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + async def _close_expired_connections(self) -> None: + """ + Clean up the connection pool by closing off any connections that have expired. + """ + # Close any connections that have expired their keep-alive time. + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.has_expired(): + await connection.aclose() + self._pool.pop(idx) + + # If the pool size exceeds the maximum number of allowed keep-alive connections, + # then close off idle connections as required. 
+ pool_size = len(self._pool) + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle() and pool_size > self._max_keepalive_connections: + await connection.aclose() + self._pool.pop(idx) + pool_size -= 1 + + async def handle_async_request(self, request: Request) -> Response: + """ + Send an HTTP request, and return an HTTP response. + + This is the core implementation that is called into by `.request()` or `.stream()`. + """ + scheme = request.url.scheme.decode() + if scheme == "": + raise UnsupportedProtocol( + "Request URL is missing an 'http://' or 'https://' protocol." + ) + if scheme not in ("http", "https"): + raise UnsupportedProtocol( + f"Request URL has an unsupported protocol '{scheme}://'." + ) + + status = RequestStatus(request) + + async with self._pool_lock: + self._requests.append(status) + await self._close_expired_connections() + await self._attempt_to_acquire_connection(status) + + while True: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("pool", None) + try: + connection = await status.wait_for_connection(timeout=timeout) + except BaseException as exc: + # If we timeout here, or if the task is cancelled, then make + # sure to remove the request from the queue before bubbling + # up the exception. + async with self._pool_lock: + self._requests.remove(status) + raise exc + + try: + response = await connection.handle_async_request(request) + except ConnectionNotAvailable: + # The ConnectionNotAvailable exception is a special case, that + # indicates we need to retry the request on a new connection. + # + # The most common case where this can occur is when multiple + # requests are queued waiting for a single connection, which + # might end up as an HTTP/2 connection, but which actually ends + # up as HTTP/1.1. + async with self._pool_lock: + # Maintain our position in the request queue, but reset the + # status so that the request becomes queued again. + status.unset_connection() + await self._attempt_to_acquire_connection(status) + except BaseException as exc: + await self.response_closed(status) + raise exc + else: + break + + # When we return the response, we wrap the stream in a special class + # that handles notifying the connection pool once the response + # has been released. + assert isinstance(response.stream, AsyncIterable) + return Response( + status=response.status, + headers=response.headers, + content=ConnectionPoolByteStream(response.stream, self, status), + extensions=response.extensions, + ) + + async def response_closed(self, status: RequestStatus) -> None: + """ + This method acts as a callback once the request/response cycle is complete. + + It is called into from the `ConnectionPoolByteStream.aclose()` method. + """ + assert status.connection is not None + connection = status.connection + + async with self._pool_lock: + # Update the state of the connection pool. + if status in self._requests: + self._requests.remove(status) + + if connection.is_closed() and connection in self._pool: + self._pool.remove(connection) + + # Since we've had a response closed, it's possible we'll now be able + # to service one or more requests that are currently pending. + for status in self._requests: + if status.connection is None: + acquired = await self._attempt_to_acquire_connection(status) + # If we could not acquire a connection for a queued request + # then we don't need to check anymore requests that are + # queued later behind it. + if not acquired: + break + + # Housekeeping. 
+ await self._close_expired_connections() + + async def aclose(self) -> None: + """ + Close any connections in the pool. + """ + async with self._pool_lock: + requests_still_in_flight = len(self._requests) + + for connection in self._pool: + await connection.aclose() + self._pool = [] + self._requests = [] + + if requests_still_in_flight: + raise RuntimeError( + f"The connection pool was closed while {requests_still_in_flight} " + f"HTTP requests/responses were still in-flight." + ) + + async def __aenter__(self) -> "AsyncConnectionPool": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() + + +class ConnectionPoolByteStream: + """ + A wrapper around the response byte stream, that additionally handles + notifying the connection pool when the response has been closed. + """ + + def __init__( + self, + stream: AsyncIterable[bytes], + pool: AsyncConnectionPool, + status: RequestStatus, + ) -> None: + self._stream = stream + self._pool = pool + self._status = status + + async def __aiter__(self) -> AsyncIterator[bytes]: + async for part in self._stream: + yield part + + async def aclose(self) -> None: + try: + if hasattr(self._stream, "aclose"): + await self._stream.aclose() # type: ignore + finally: + await self._pool.response_closed(self._status) diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py b/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py new file mode 100644 index 0000000..a4880f3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py @@ -0,0 +1,306 @@ +import enum +import time +from types import TracebackType +from typing import AsyncIterable, AsyncIterator, List, Optional, Tuple, Type, Union + +import h11 + +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, + map_exceptions, +) +from .._models import Origin, Request, Response +from .._synchronization import AsyncLock +from .._trace import Trace +from ..backends.base import AsyncNetworkStream +from .interfaces import AsyncConnectionInterface + +H11Event = Union[ + h11.Request, + h11.Response, + h11.InformationalResponse, + h11.Data, + h11.EndOfMessage, + h11.ConnectionClosed, +] + + +class HTTPConnectionState(enum.IntEnum): + NEW = 0 + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class AsyncHTTP11Connection(AsyncConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + + def __init__( + self, + origin: Origin, + stream: AsyncNetworkStream, + keepalive_expiry: Optional[float] = None, + ) -> None: + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: Optional[float] = keepalive_expiry + self._expire_at: Optional[float] = None + self._state = HTTPConnectionState.NEW + self._state_lock = AsyncLock() + self._request_count = 0 + self._h11_state = h11.Connection(our_role=h11.CLIENT) + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + async with self._state_lock: + if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE): + self._request_count += 1 + self._state = HTTPConnectionState.ACTIVE + self._expire_at = None + else: + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request} + async with Trace("http11.send_request_headers", 
request, kwargs) as trace: + await self._send_request_headers(**kwargs) + async with Trace("http11.send_request_body", request, kwargs) as trace: + await self._send_request_body(**kwargs) + async with Trace( + "http11.receive_response_headers", request, kwargs + ) as trace: + ( + http_version, + status, + reason_phrase, + headers, + ) = await self._receive_response_headers(**kwargs) + trace.return_value = ( + http_version, + status, + reason_phrase, + headers, + ) + + return Response( + status=status, + headers=headers, + content=HTTP11ConnectionByteStream(self, request), + extensions={ + "http_version": http_version, + "reason_phrase": reason_phrase, + "network_stream": self._network_stream, + }, + ) + except BaseException as exc: + async with Trace("http11.response_closed", request) as trace: + await self._response_closed() + raise exc + + # Sending the request... + + async def _send_request_headers(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): + event = h11.Request( + method=request.method, + target=request.url.target, + headers=request.headers, + ) + await self._send_event(event, timeout=timeout) + + async def _send_request_body(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + assert isinstance(request.stream, AsyncIterable) + async for chunk in request.stream: + event = h11.Data(data=chunk) + await self._send_event(event, timeout=timeout) + + event = h11.EndOfMessage() + await self._send_event(event, timeout=timeout) + + async def _send_event( + self, event: H11Event, timeout: Optional[float] = None + ) -> None: + bytes_to_send = self._h11_state.send(event) + await self._network_stream.write(bytes_to_send, timeout=timeout) + + # Receiving the response... + + async def _receive_response_headers( + self, request: Request + ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Response): + break + + http_version = b"HTTP/" + event.http_version + + # h11 version 0.11+ supports a `raw_items` interface to get the + # raw header casing, rather than the enforced lowercase headers. + headers = event.headers.raw_items() + + return http_version, event.status_code, event.reason, headers + + async def _receive_response_body(self, request: Request) -> AsyncIterator[bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Data): + yield bytes(event.data) + elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): + break + + async def _receive_event(self, timeout: Optional[float] = None) -> H11Event: + while True: + with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): + event = self._h11_state.next_event() + + if event is h11.NEED_DATA: + data = await self._network_stream.read( + self.READ_NUM_BYTES, timeout=timeout + ) + + # If we feed this case through h11 we'll raise an exception like: + # + # httpcore.RemoteProtocolError: can't handle event type + # ConnectionClosed when role=SERVER and state=SEND_RESPONSE + # + # Which is accurate, but not very informative from an end-user + # perspective. 
Instead we handle this case distinctly and treat + # it as a ConnectError. + if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: + msg = "Server disconnected without sending a response." + raise RemoteProtocolError(msg) + + self._h11_state.receive_data(data) + else: + return event + + async def _response_closed(self) -> None: + async with self._state_lock: + if ( + self._h11_state.our_state is h11.DONE + and self._h11_state.their_state is h11.DONE + ): + self._state = HTTPConnectionState.IDLE + self._h11_state.start_next_cycle() + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + else: + await self.aclose() + + # Once the connection is no longer required... + + async def aclose(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._state = HTTPConnectionState.CLOSED + await self._network_stream.aclose() + + # The AsyncConnectionInterface methods provide information about the state of + # the connection, allowing for a connection pooling implementation to + # determine when to reuse and when to close the connection... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + # Note that HTTP/1.1 connections in the "NEW" state are not treated as + # being "available". The control flow which created the connection will + # be able to send an outgoing request, but the connection will not be + # acquired from the connection pool for any other request. + return self._state == HTTPConnectionState.IDLE + + def has_expired(self) -> bool: + now = time.monotonic() + keepalive_expired = self._expire_at is not None and now > self._expire_at + + # If the HTTP connection is idle but the socket is readable, then the + # only valid state is that the socket is about to return b"", indicating + # a server-initiated disconnect. + server_disconnected = ( + self._state == HTTPConnectionState.IDLE + and self._network_stream.get_extra_info("is_readable") + ) + + return keepalive_expired or server_disconnected + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/1.1, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
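+    # Editor's note (sketch, not part of the vendored source): the pool
+    # relies on the interface methods above to decide reuse. An IDLE
+    # connection whose keep-alive has lapsed, or whose socket is readable
+    # while idle (a server-initiated disconnect), reports has_expired()
+    # and is culled by the pool's _close_expired_connections() sweep.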
+ + async def __aenter__(self) -> "AsyncHTTP11Connection": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + async def __aiter__(self) -> AsyncIterator[bytes]: + kwargs = {"request": self._request} + try: + async with Trace("http11.receive_response_body", self._request, kwargs): + async for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. + await self.aclose() + raise exc + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + async with Trace("http11.response_closed", self._request): + await self._connection._response_closed() diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/http2.py b/myenv/lib/python3.9/site-packages/httpcore/_async/http2.py new file mode 100644 index 0000000..4be4789 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/http2.py @@ -0,0 +1,475 @@ +import enum +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import AsyncLock, AsyncSemaphore +from .._trace import Trace +from ..backends.base import AsyncNetworkStream +from .interfaces import AsyncConnectionInterface + + +def has_body_headers(request: Request) -> bool: + return any( + [ + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ] + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class AsyncHTTP2Connection(AsyncConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: AsyncNetworkStream, + keepalive_expiry: typing.Optional[float] = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: typing.Optional[float] = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: typing.Optional[float] = None + self._request_count = 0 + self._init_lock = AsyncLock() + self._state_lock = AsyncLock() + self._read_lock = AsyncLock() + self._write_lock = AsyncLock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + self._events: typing.Dict[int, h2.events.Event] = {} + self._read_exception: typing.Optional[Exception] = None + self._write_exception: typing.Optional[Exception] = None + self._connection_error_event: typing.Optional[h2.events.Event] = None + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. 
+ # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + async with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + async with self._init_lock: + if not self._sent_connection_init: + kwargs = {"request": request} + async with Trace("http2.send_connection_init", request, kwargs): + await self._send_connection_init(**kwargs) + self._sent_connection_init = True + max_streams = self._h2_state.local_settings.max_concurrent_streams + self._max_streams_semaphore = AsyncSemaphore(max_streams) + + await self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + async with Trace("http2.send_request_headers", request, kwargs): + await self._send_request_headers(request=request, stream_id=stream_id) + async with Trace("http2.send_request_body", request, kwargs): + await self._send_request_body(request=request, stream_id=stream_id) + async with Trace( + "http2.receive_response_headers", request, kwargs + ) as trace: + status, headers = await self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={"stream_id": stream_id, "http_version": b"HTTP/2"}, + ) + except Exception as exc: # noqa: PIE786 + kwargs = {"stream_id": stream_id} + async with Trace("http2.response_closed", request, kwargs): + await self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_error_event: + raise RemoteProtocolError(self._connection_error_event) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + async def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? 
+ h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + await self._write_outgoing_data(request) + + # Sending the request... + + async def _send_request_headers(self, request: Request, stream_id: int) -> None: + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. + authority = [v for k, v in request.headers if k.lower() == b"host"][0] + + headers = [ + (b":method", request.method), + (b":authority", authority), + (b":scheme", request.url.scheme), + (b":path", request.url.target), + ] + [ + (k.lower(), v) + for k, v in request.headers + if k.lower() + not in ( + b"host", + b"transfer-encoding", + ) + ] + + self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) + self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) + await self._write_outgoing_data(request) + + async def _send_request_body(self, request: Request, stream_id: int) -> None: + if not has_body_headers(request): + return + + assert isinstance(request.stream, typing.AsyncIterable) + async for data in request.stream: + while data: + max_flow = await self._wait_for_outgoing_flow(request, stream_id) + chunk_size = min(len(data), max_flow) + chunk, data = data[:chunk_size], data[chunk_size:] + self._h2_state.send_data(stream_id, chunk) + await self._write_outgoing_data(request) + + self._h2_state.end_stream(stream_id) + await self._write_outgoing_data(request) + + # Receiving the response... + + async def _receive_response( + self, request: Request, stream_id: int + ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: + while True: + event = await self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + async def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.AsyncIterator[bytes]: + while True: + event = await self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + await self._write_outgoing_data(request) + yield event.data + elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)): + break + + async def _receive_stream_event( + self, request: Request, stream_id: int + ) -> h2.events.Event: + while not self._events.get(stream_id): + await self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + # The StreamReset event applies to a single stream. 
+ if hasattr(event, "error_code"): + raise RemoteProtocolError(event) + return event + + async def _receive_events( + self, request: Request, stream_id: typing.Optional[int] = None + ) -> None: + async with self._read_lock: + if self._connection_error_event is not None: # pragma: nocover + raise RemoteProtocolError(self._connection_error_event) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. + if stream_id is None or not self._events.get(stream_id): + events = await self._read_incoming_data(request) + for event in events: + event_stream_id = getattr(event, "stream_id", 0) + + # The ConnectionTerminatedEvent applies to the entire connection, + # and should be saved so it can be raised on all streams. + if hasattr(event, "error_code") and event_stream_id == 0: + self._connection_error_event = event + raise RemoteProtocolError(event) + + if event_stream_id in self._events: + self._events[event_stream_id].append(event) + + await self._write_outgoing_data(request) + + async def _response_closed(self, stream_id: int) -> None: + await self._max_streams_semaphore.release() + del self._events[stream_id] + async with self._state_lock: + if self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + await self.aclose() + + async def aclose(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + await self._network_stream.aclose() + + # Wrappers around network read/write operations... + + async def _read_incoming_data( + self, request: Request + ) -> typing.List[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = await self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. 
+ self._read_exception = exc + self._connection_error = True + raise exc + + events = self._h2_state.receive_data(data) + + return events + + async def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + async with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + await self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + await self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
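+    # Editor's note (sketch, not part of the vendored source): outgoing
+    # data is paced by _wait_for_outgoing_flow() above, which caps each
+    # chunk at min(local flow-control window, max frame size) and, when
+    # the window is zero, blocks in _receive_events() until the peer's
+    # WindowUpdated frames reopen it (RFC 7540, section 6.9).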
+ + async def __aenter__(self) -> "AsyncHTTP2Connection": + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[types.TracebackType] = None, + ) -> None: + await self.aclose() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: AsyncHTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + async with Trace("http2.receive_response_body", self._request, kwargs): + async for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. + await self.aclose() + raise exc + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + kwargs = {"stream_id": self._stream_id} + async with Trace("http2.response_closed", self._request, kwargs): + await self._connection._response_closed(stream_id=self._stream_id) diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/http_proxy.py b/myenv/lib/python3.9/site-packages/httpcore/_async/http_proxy.py new file mode 100644 index 0000000..48d37cd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/http_proxy.py @@ -0,0 +1,337 @@ +import ssl +from base64 import b64encode +from typing import List, Mapping, Optional, Sequence, Tuple, Union + +from .._exceptions import ProxyError +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, +) +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from ..backends.base import AsyncNetworkBackend +from .connection import AsyncHTTPConnection +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] + + +def merge_headers( + default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, +) -> List[Tuple[bytes, bytes]]: + """ + Append default_headers and override_headers, de-duplicating if a key exists + in both cases. + """ + default_headers = [] if default_headers is None else list(default_headers) + override_headers = [] if override_headers is None else list(override_headers) + has_override = set([key.lower() for key, value in override_headers]) + default_headers = [ + (key, value) + for key, value in default_headers + if key.lower() not in has_override + ] + return default_headers + override_headers + + +def build_auth_header(username: bytes, password: bytes) -> bytes: + userpass = username + b":" + password + return b"Basic " + b64encode(userpass) + + +class AsyncHTTPProxy(AsyncConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. 
+ """ + + def __init__( + self, + proxy_url: Union[URL, bytes, str], + proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + proxy_auth: Any proxy authentication as a two-tuple of + (username, password). May be either bytes or ascii-only str. + proxy_headers: Any HTTP headers to use for the proxy requests. + For example `{"Proxy-Authorization": "Basic :"}`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + authorization = build_auth_header(username, password) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + if origin.scheme == b"http": + return AsyncForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + ) + return AsyncTunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncForwardHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + keepalive_expiry: Optional[float] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + ) -> None: + self._connection = AsyncHTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + ) + self._proxy_origin = proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + + async def handle_async_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return await self._connection.handle_async_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin.scheme == b"http" + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class AsyncTunnelHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: Optional[AsyncNetworkBackend] = None, + ) -> None: + self._connection: AsyncConnectionInterface = AsyncHTTPConnection( + origin=proxy_origin, + 
keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = AsyncLock() + self._connected = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = await self._connection.handle_async_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + await self._connection.aclose() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("connection.start_tls", request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/interfaces.py 
b/myenv/lib/python3.9/site-packages/httpcore/_async/interfaces.py new file mode 100644 index 0000000..c3ffa2a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/interfaces.py @@ -0,0 +1,133 @@ +from contextlib import asynccontextmanager +from typing import AsyncIterator, Optional, Union + +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class AsyncRequestInterface: + async def request( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, AsyncIterator[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> Response: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = await self.handle_async_request(request) + try: + await response.aread() + finally: + await response.aclose() + return response + + @asynccontextmanager + async def stream( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, AsyncIterator[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> AsyncIterator[Response]: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = await self.handle_async_request(request) + try: + yield response + finally: + await response.aclose() + + async def handle_async_request(self, request: Request) -> Response: + raise NotImplementedError() # pragma: nocover + + +class AsyncConnectionInterface(AsyncRequestInterface): + async def aclose(self) -> None: + raise NotImplementedError() # pragma: nocover + + def info(self) -> str: + raise NotImplementedError() # pragma: nocover + + def can_handle_request(self, origin: Origin) -> bool: + raise NotImplementedError() # pragma: nocover + + def is_available(self) -> bool: + """ + Return `True` if the connection is currently able to accept an + outgoing request. + + An HTTP/1.1 connection will only be available if it is currently idle. + + An HTTP/2 connection will be available so long as the stream ID space is + not yet exhausted, and the connection is not in an error state. + + While the connection is being established we may not yet know if it is going + to result in an HTTP/1.1 or HTTP/2 connection. The connection should be + treated as being available, but might ultimately raise `NewConnectionRequired` + required exceptions if multiple requests are attempted over a connection + that ends up being established as HTTP/1.1. + """ + raise NotImplementedError() # pragma: nocover + + def has_expired(self) -> bool: + """ + Return `True` if the connection is in a state where it should be closed. 
+ + This either means that the connection is idle and it has passed the + expiry time on its keep-alive, or that server has sent an EOF. + """ + raise NotImplementedError() # pragma: nocover + + def is_idle(self) -> bool: + """ + Return `True` if the connection is currently idle. + """ + raise NotImplementedError() # pragma: nocover + + def is_closed(self) -> bool: + """ + Return `True` if the connection has been closed. + + Used when a response is closed to determine if the connection may be + returned to the connection pool or not. + """ + raise NotImplementedError() # pragma: nocover diff --git a/myenv/lib/python3.9/site-packages/httpcore/_async/socks_proxy.py b/myenv/lib/python3.9/site-packages/httpcore/_async/socks_proxy.py new file mode 100644 index 0000000..fcba9e5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_async/socks_proxy.py @@ -0,0 +1,336 @@ +import ssl +import typing + +from socksio import socks5 + +from .._exceptions import ConnectionNotAvailable, ProxyError +from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from ..backends.auto import AutoBackend +from ..backends.base import AsyncNetworkBackend, AsyncNetworkStream +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +AUTH_METHODS = { + b"\x00": "NO AUTHENTICATION REQUIRED", + b"\x01": "GSSAPI", + b"\x02": "USERNAME/PASSWORD", + b"\xff": "NO ACCEPTABLE METHODS", +} + +REPLY_CODES = { + b"\x00": "Succeeded", + b"\x01": "General SOCKS server failure", + b"\x02": "Connection not allowed by ruleset", + b"\x03": "Network unreachable", + b"\x04": "Host unreachable", + b"\x05": "Connection refused", + b"\x06": "TTL expired", + b"\x07": "Command not supported", + b"\x08": "Address type not supported", +} + + +async def _init_socks5_connection( + stream: AsyncNetworkStream, + *, + host: bytes, + port: int, + auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, +) -> None: + conn = socks5.SOCKS5Connection() + + # Auth method request + auth_method = ( + socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED + if auth is None + else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD + ) + conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method])) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Auth method response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5AuthReply) + if response.method != auth_method: + requested = AUTH_METHODS.get(auth_method, "UNKNOWN") + responded = AUTH_METHODS.get(response.method, "UNKNOWN") + raise ProxyError( + f"Requested {requested} from proxy server, but got {responded}." 
+ ) + + if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD: + # Username/password request + assert auth is not None + username, password = auth + conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password)) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Username/password response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5UsernamePasswordReply) + if not response.success: + raise ProxyError("Invalid username/password") + + # Connect request + conn.send( + socks5.SOCKS5CommandRequest.from_address( + socks5.SOCKS5Command.CONNECT, (host, port) + ) + ) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Connect response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5Reply) + if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED: + reply_code = REPLY_CODES.get(response.reply_code, "UNKOWN") + raise ProxyError(f"Proxy Server could not connect: {reply_code}.") + + +class AsyncSOCKSProxy(AsyncConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. + """ + + def __init__( + self, + proxy_url: typing.Union[URL, bytes, str], + proxy_auth: typing.Optional[ + typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]] + ] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + max_connections: typing.Optional[int] = 10, + max_keepalive_connections: typing.Optional[int] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: typing.Optional[AsyncNetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + return AsyncSocks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncSocks5Connection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: typing.Optional[AsyncNetworkBackend] = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connect_lock = AsyncLock() + self._connection: typing.Optional[AsyncConnectionInterface] = None + self._connect_failed = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + with Trace("connection.connect_tcp", request, kwargs) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + with Trace( + "connection.setup_socks5_connection", request, kwargs + ) as trace: + await _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace( + "connection.start_tls", request, kwargs + ) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + 
http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or ( + self._http2 and not self._http1 + ): # pragma: nocover + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): # pragma: nocover + raise ConnectionNotAvailable() + + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + if self._connection is not None: + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: # pragma: nocover + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._remote_origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: # pragma: nocover + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/myenv/lib/python3.9/site-packages/httpcore/_exceptions.py b/myenv/lib/python3.9/site-packages/httpcore/_exceptions.py new file mode 100644 index 0000000..f28dcc2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_exceptions.py @@ -0,0 +1,79 @@ +import contextlib +from typing import Dict, Iterator, Type + + +@contextlib.contextmanager +def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: + try: + yield + except Exception as exc: # noqa: PIE786 + for from_exc, to_exc in map.items(): + if isinstance(exc, from_exc): + raise to_exc(exc) + raise # pragma: nocover + + +class ConnectionNotAvailable(Exception): + pass + + +class ProxyError(Exception): + pass + + +class UnsupportedProtocol(Exception): + pass + + +class ProtocolError(Exception): + pass + + +class RemoteProtocolError(ProtocolError): + pass + + +class LocalProtocolError(ProtocolError): + pass + + +# Timeout errors + + +class TimeoutException(Exception): + pass + + +class PoolTimeout(TimeoutException): + pass + + +class ConnectTimeout(TimeoutException): + pass + + +class ReadTimeout(TimeoutException): + pass + + +class WriteTimeout(TimeoutException): + pass + + +# Network errors + + +class NetworkError(Exception): + pass + + +class ConnectError(NetworkError): + pass + + +class ReadError(NetworkError): + pass + + +class WriteError(NetworkError): + pass diff --git 
a/myenv/lib/python3.9/site-packages/httpcore/_models.py b/myenv/lib/python3.9/site-packages/httpcore/_models.py new file mode 100644 index 0000000..170011c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_models.py @@ -0,0 +1,474 @@ +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Iterable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) +from urllib.parse import urlparse + +# Functions for typechecking... + + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] + + +def enforce_bytes(value: Union[bytes, str], *, name: str) -> bytes: + """ + Any arguments that are ultimately represented as bytes can be specified + either as bytes or as strings. + + However we enforce that any string arguments must only contain characters in + the plain ASCII range. chr(0)...chr(127). If you need to use characters + outside that range then be precise, and use a byte-wise argument. + """ + if isinstance(value, str): + try: + return value.encode("ascii") + except UnicodeEncodeError: + raise TypeError(f"{name} strings may not include unicode characters.") + elif isinstance(value, bytes): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be bytes or str, but got {seen_type}.") + + +def enforce_url(value: Union["URL", bytes, str], *, name: str) -> "URL": + """ + Type check for URL parameters. + """ + if isinstance(value, (bytes, str)): + return URL(value) + elif isinstance(value, URL): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.") + + +def enforce_headers( + value: Union[HeadersAsMapping, HeadersAsSequence, None] = None, *, name: str +) -> List[Tuple[bytes, bytes]]: + """ + Convienence function that ensure all items in request or response headers + are either bytes or strings in the plain ASCII range. + """ + if value is None: + return [] + elif isinstance(value, Mapping): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value.items() + ] + elif isinstance(value, Sequence): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value + ] + + seen_type = type(value).__name__ + raise TypeError( + f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}." 
+ ) + + +def enforce_stream( + value: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None], *, name: str +) -> Union[Iterable[bytes], AsyncIterable[bytes]]: + if value is None: + return ByteStream(b"") + elif isinstance(value, bytes): + return ByteStream(value) + return value + + +# * https://tools.ietf.org/html/rfc3986#section-3.2.3 +# * https://url.spec.whatwg.org/#url-miscellaneous +# * https://url.spec.whatwg.org/#scheme-state +DEFAULT_PORTS = { + b"ftp": 21, + b"http": 80, + b"https": 443, + b"ws": 80, + b"wss": 443, +} + + +def include_request_headers( + headers: List[Tuple[bytes, bytes]], + *, + url: "URL", + content: Union[None, bytes, Iterable[bytes], AsyncIterable[bytes]], +) -> List[Tuple[bytes, bytes]]: + headers_set = set([k.lower() for k, v in headers]) + + if b"host" not in headers_set: + default_port = DEFAULT_PORTS.get(url.scheme) + if url.port is None or url.port == default_port: + header_value = url.host + else: + header_value = b"%b:%d" % (url.host, url.port) + headers = [(b"Host", header_value)] + headers + + if ( + content is not None + and b"content-length" not in headers_set + and b"transfer-encoding" not in headers_set + ): + if isinstance(content, bytes): + content_length = str(len(content)).encode("ascii") + headers += [(b"Content-Length", content_length)] + else: + headers += [(b"Transfer-Encoding", b"chunked")] # pragma: nocover + + return headers + + +# Interfaces for byte streams... + + +class ByteStream: + """ + A container for non-streaming content, and that supports both sync and async + stream iteration. + """ + + def __init__(self, content: bytes) -> None: + self._content = content + + def __iter__(self) -> Iterator[bytes]: + yield self._content + + async def __aiter__(self) -> AsyncIterator[bytes]: + yield self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{len(self._content)} bytes]>" + + +class Origin: + def __init__(self, scheme: bytes, host: bytes, port: int) -> None: + self.scheme = scheme + self.host = host + self.port = port + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, Origin) + and self.scheme == other.scheme + and self.host == other.host + and self.port == other.port + ) + + def __str__(self) -> str: + scheme = self.scheme.decode("ascii") + host = self.host.decode("ascii") + port = str(self.port) + return f"{scheme}://{host}:{port}" + + +class URL: + """ + Represents the URL against which an HTTP request may be made. + + The URL may either be specified as a plain string, for convienence: + + ```python + url = httpcore.URL("https://www.example.com/") + ``` + + Or be constructed with explicitily pre-parsed components: + + ```python + url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/') + ``` + + Using this second more explicit style allows integrations that are using + `httpcore` to pass through URLs that have already been parsed in order to use + libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures + that URL parsing is treated identically at both the networking level and at any + higher layers of abstraction. + + The four components are important here, as they allow the URL to be precisely + specified in a pre-parsed format. They also allow certain types of request to + be created that could not otherwise be expressed. + + For example, an HTTP request to `http://www.example.com/` forwarded via a proxy + at `http://localhost:8080`... 
+ + ```python + # Constructs an HTTP request with a complete URL as the target: + # GET https://www.example.com/ HTTP/1.1 + url = httpcore.URL( + scheme=b'http', + host=b'localhost', + port=8080, + target=b'https://www.example.com/' + ) + request = httpcore.Request( + method="GET", + url=url + ) + ``` + + Another example is constructing an `OPTIONS *` request... + + ```python + # Constructs an 'OPTIONS *' HTTP request: + # OPTIONS * HTTP/1.1 + url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*') + request = httpcore.Request(method="OPTIONS", url=url) + ``` + + This kind of request is not possible to formulate with a URL string, + because the `/` delimiter is always used to demark the target from the + host/port portion of the URL. + + For convenience, string-like arguments may be specified either as strings or + as bytes. However, once a request is being issue over-the-wire, the URL + components are always ultimately required to be a bytewise representation. + + In order to avoid any ambiguity over character encodings, when strings are used + as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`. + If you require a bytewise representation that is outside this range you must + handle the character encoding directly, and pass a bytes instance. + """ + + def __init__( + self, + url: Union[bytes, str] = "", + *, + scheme: Union[bytes, str] = b"", + host: Union[bytes, str] = b"", + port: Optional[int] = None, + target: Union[bytes, str] = b"", + ) -> None: + """ + Parameters: + url: The complete URL as a string or bytes. + scheme: The URL scheme as a string or bytes. + Typically either `"http"` or `"https"`. + host: The URL host as a string or bytes. Such as `"www.example.com"`. + port: The port to connect to. Either an integer or `None`. + target: The target of the HTTP request. Such as `"/items?search=red"`. + """ + if url: + parsed = urlparse(enforce_bytes(url, name="url")) + self.scheme = parsed.scheme + self.host = parsed.hostname or b"" + self.port = parsed.port + self.target = (parsed.path or b"/") + ( + b"?" + parsed.query if parsed.query else b"" + ) + else: + self.scheme = enforce_bytes(scheme, name="scheme") + self.host = enforce_bytes(host, name="host") + self.port = port + self.target = enforce_bytes(target, name="target") + + @property + def origin(self) -> Origin: + default_port = {b"http": 80, b"https": 443, b"socks5": 1080}[self.scheme] + return Origin( + scheme=self.scheme, host=self.host, port=self.port or default_port + ) + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, URL) + and other.scheme == self.scheme + and other.host == self.host + and other.port == self.port + and other.target == self.target + ) + + def __bytes__(self) -> bytes: + if self.port is None: + return b"%b://%b%b" % (self.scheme, self.host, self.target) + return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(scheme={self.scheme!r}, " + f"host={self.host!r}, port={self.port!r}, target={self.target!r})" + ) + + +class Request: + """ + An HTTP request. + """ + + def __init__( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> None: + """ + Parameters: + method: The HTTP request method, either as a string or bytes. + For example: `GET`. 
+ url: The request URL, either as a `URL` instance, or as a string or bytes. + For example: `"https://www.example.com".` + headers: The HTTP request headers. + content: The content of the response body. + extensions: A dictionary of optional extra information included on + the request. Possible keys include `"timeout"`, and `"trace"`. + """ + self.method: bytes = enforce_bytes(method, name="method") + self.url: URL = enforce_url(url, name="url") + self.headers: List[Tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = enforce_stream( + content, name="content" + ) + self.extensions = {} if extensions is None else extensions + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.method!r}]>" + + +class Response: + """ + An HTTP response. + """ + + def __init__( + self, + status: int, + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> None: + """ + Parameters: + status: The HTTP status code of the response. For example `200`. + headers: The HTTP response headers. + content: The content of the response body. + extensions: A dictionary of optional extra information included on + the responseself.Possible keys include `"http_version"`, + `"reason_phrase"`, and `"network_stream"`. + """ + self.status: int = status + self.headers: List[Tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = enforce_stream( + content, name="content" + ) + self.extensions: dict = {} if extensions is None else extensions + + self._stream_consumed = False + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + if isinstance(self.stream, Iterable): + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'response.read()' first." + ) + else: + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'await response.aread()' first." + ) + return self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.status}]>" + + # Sync interface... + + def read(self) -> bytes: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an asynchronous response using 'response.read()'. " + "You should use 'await response.aread()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part for part in self.iter_stream()]) + return self._content + + def iter_stream(self) -> Iterator[bytes]: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an asynchronous response using 'for ... in " + "response.iter_stream()'. " + "You should use 'async for ... in response.aiter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'for ... in response.iter_stream()' more than once." + ) + self._stream_consumed = True + for chunk in self.stream: + yield chunk + + def close(self) -> None: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to close an asynchronous response using 'response.close()'. " + "You should use 'await response.aclose()' instead." + ) + if hasattr(self.stream, "close"): + self.stream.close() # type: ignore + + # Async interface... 
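The `Request` and `Response` models above accept either `str` or `bytes` for their string-like arguments and normalise them through `enforce_bytes`, `enforce_headers`, and `enforce_stream`. A minimal construction sketch, assuming the vendored module is importable as `httpcore._models`:

```python
from httpcore._models import Request, Response

# Headers and content may be given as str or bytes; they are normalised to bytes.
request = Request("GET", "https://www.example.com/", headers={"Accept": "*/*"})

# Non-streaming content is wrapped in ByteStream, which supports both the sync
# read() and async aread() accessors.
response = Response(200, headers=[(b"Content-Type", b"text/plain")], content=b"hello")
assert response.read() == b"hello"
print(request.url.origin, response.status)
```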
+ + async def aread(self) -> bytes: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an synchronous response using " + "'await response.aread()'. " + "You should use 'response.read()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part async for part in self.aiter_stream()]) + return self._content + + async def aiter_stream(self) -> AsyncIterator[bytes]: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an synchronous response using 'async for ... in " + "response.aiter_stream()'. " + "You should use 'for ... in response.iter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'async for ... in response.aiter_stream()' " + "more than once." + ) + self._stream_consumed = True + async for chunk in self.stream: + yield chunk + + async def aclose(self) -> None: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to close a synchronous response using " + "'await response.aclose()'. " + "You should use 'response.close()' instead." + ) + if hasattr(self.stream, "aclose"): + await self.stream.aclose() # type: ignore diff --git a/myenv/lib/python3.9/site-packages/httpcore/_ssl.py b/myenv/lib/python3.9/site-packages/httpcore/_ssl.py new file mode 100644 index 0000000..629f002 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_ssl.py @@ -0,0 +1,9 @@ +import ssl + +import certifi # type: ignore + + +def default_ssl_context() -> ssl.SSLContext: + context = ssl.create_default_context() + context.load_verify_locations(certifi.where()) + return context diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/__init__.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/__init__.py new file mode 100644 index 0000000..b476d76 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/__init__.py @@ -0,0 +1,39 @@ +from .connection import HTTPConnection +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .http_proxy import HTTPProxy +from .interfaces import ConnectionInterface + +try: + from .http2 import HTTP2Connection +except ImportError: # pragma: nocover + + class HTTP2Connection: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use http2 support, but the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." + ) + + +try: + from .socks_proxy import SOCKSProxy +except ImportError: # pragma: nocover + + class SOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." 
+ ) + + +__all__ = [ + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", +] diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/connection.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/connection.py new file mode 100644 index 0000000..3312c2c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/connection.py @@ -0,0 +1,208 @@ +import itertools +import ssl +from types import TracebackType +from typing import Iterator, Optional, Type + +from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from ..backends.sync import SyncBackend +from ..backends.base import NetworkBackend, NetworkStream +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. + + +def exponential_backoff(factor: float) -> Iterator[float]: + yield 0 + for n in itertools.count(2): + yield factor * (2 ** (n - 2)) + + +class HTTPConnection(ConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connection: Optional[ConnectionInterface] = None + self._connect_failed: bool = False + self._request_lock = Lock() + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + with self._request_lock: + if self._connection is None: + try: + stream = self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): + raise ConnectionNotAvailable() + + return self._connection.handle_request(request) + + def _connect(self, request: Request) -> NetworkStream: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + } + with Trace( + "connection.connect_tcp", 
request, kwargs + ) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + } + with Trace( + "connection.connect_unix_socket", request, kwargs + ) as trace: + stream = self._network_backend.connect_unix_socket( + **kwargs + ) + trace.return_value = stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + # TRACE 'retry' + self._network_backend.sleep(delay) + else: + break + + if self._origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("connection.start_tls", request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + return stream + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def close(self) -> None: + if self._connection is not None: + self._connection.close() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
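The sync `HTTPConnection` above is normally driven through the `ConnectionPool` defined later in this file set rather than used directly. A minimal end-to-end sketch, assuming the vendored package is importable and that the sync `request()` helper mirrors the async one shown in `interfaces.py` (upstream `httpcore` also re-exports `ConnectionPool` at the top level, but only the `_sync` subpackage appears in this diff):

```python
from httpcore._sync import ConnectionPool

# The pool lazily creates one HTTPConnection per origin and reuses it while idle.
with ConnectionPool(max_connections=10, http2=False) as pool:
    # request() builds a Request, sends it, reads the body, and closes the stream.
    response = pool.request("GET", "https://www.example.com/")
    print(response.status, len(response.content))
```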
+ + def __enter__(self) -> "HTTPConnection": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py new file mode 100644 index 0000000..88b2918 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py @@ -0,0 +1,354 @@ +import ssl +import sys +from types import TracebackType +from typing import Iterable, Iterator, List, Optional, Type + +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Request, Response +from .._synchronization import Event, Lock +from ..backends.sync import SyncBackend +from ..backends.base import NetworkBackend +from .connection import HTTPConnection +from .interfaces import ConnectionInterface, RequestInterface + + +class RequestStatus: + def __init__(self, request: Request): + self.request = request + self.connection: Optional[ConnectionInterface] = None + self._connection_acquired = Event() + + def set_connection(self, connection: ConnectionInterface) -> None: + assert self.connection is None + self.connection = connection + self._connection_acquired.set() + + def unset_connection(self) -> None: + assert self.connection is not None + self.connection = None + self._connection_acquired = Event() + + def wait_for_connection( + self, timeout: Optional[float] = None + ) -> ConnectionInterface: + self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + +class ConnectionPool(RequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish a + connection. + local_address: Local address to connect from. Can also be used to connect + using a particular address family. Using `local_address="0.0.0.0"` + will connect using an `AF_INET` address (IPv4), while using + `local_address="::"` will connect using an `AF_INET6` address (IPv6). 
+ uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + """ + self._ssl_context = ssl_context + + self._max_connections = ( + sys.maxsize if max_connections is None else max_connections + ) + self._max_keepalive_connections = ( + sys.maxsize + if max_keepalive_connections is None + else max_keepalive_connections + ) + self._max_keepalive_connections = min( + self._max_connections, self._max_keepalive_connections + ) + + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._pool: List[ConnectionInterface] = [] + self._requests: List[RequestStatus] = [] + self._pool_lock = Lock() + self._network_backend = ( + SyncBackend() if network_backend is None else network_backend + ) + + def create_connection(self, origin: Origin) -> ConnectionInterface: + return HTTPConnection( + origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + retries=self._retries, + local_address=self._local_address, + uds=self._uds, + network_backend=self._network_backend, + ) + + @property + def connections(self) -> List[ConnectionInterface]: + """ + Return a list of the connections currently in the pool. + + For example: + + ```python + >>> pool.connections + [ + , + , + , + ] + ``` + """ + return list(self._pool) + + def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool: + """ + Attempt to provide a connection that can handle the given origin. + """ + origin = status.request.url.origin + + # If there are queued requests in front of us, then don't acquire a + # connection. We handle requests strictly in order. + waiting = [s for s in self._requests if s.connection is None] + if waiting and waiting[0] is not status: + return False + + # Reuse an existing connection if one is currently available. + for idx, connection in enumerate(self._pool): + if connection.can_handle_request(origin) and connection.is_available(): + self._pool.pop(idx) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + # If the pool is currently full, attempt to close one idle connection. + if len(self._pool) >= self._max_connections: + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle(): + connection.close() + self._pool.pop(idx) + break + + # If the pool is still full, then we cannot acquire a connection. + if len(self._pool) >= self._max_connections: + return False + + # Otherwise create a new connection. + connection = self.create_connection(origin) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + def _close_expired_connections(self) -> None: + """ + Clean up the connection pool by closing off any connections that have expired. + """ + # Close any connections that have expired their keep-alive time. + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.has_expired(): + connection.close() + self._pool.pop(idx) + + # If the pool size exceeds the maximum number of allowed keep-alive connections, + # then close off idle connections as required. 
+ pool_size = len(self._pool) + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle() and pool_size > self._max_keepalive_connections: + connection.close() + self._pool.pop(idx) + pool_size -= 1 + + def handle_request(self, request: Request) -> Response: + """ + Send an HTTP request, and return an HTTP response. + + This is the core implementation that is called into by `.request()` or `.stream()`. + """ + scheme = request.url.scheme.decode() + if scheme == "": + raise UnsupportedProtocol( + "Request URL is missing an 'http://' or 'https://' protocol." + ) + if scheme not in ("http", "https"): + raise UnsupportedProtocol( + f"Request URL has an unsupported protocol '{scheme}://'." + ) + + status = RequestStatus(request) + + with self._pool_lock: + self._requests.append(status) + self._close_expired_connections() + self._attempt_to_acquire_connection(status) + + while True: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("pool", None) + try: + connection = status.wait_for_connection(timeout=timeout) + except BaseException as exc: + # If we timeout here, or if the task is cancelled, then make + # sure to remove the request from the queue before bubbling + # up the exception. + with self._pool_lock: + self._requests.remove(status) + raise exc + + try: + response = connection.handle_request(request) + except ConnectionNotAvailable: + # The ConnectionNotAvailable exception is a special case, that + # indicates we need to retry the request on a new connection. + # + # The most common case where this can occur is when multiple + # requests are queued waiting for a single connection, which + # might end up as an HTTP/2 connection, but which actually ends + # up as HTTP/1.1. + with self._pool_lock: + # Maintain our position in the request queue, but reset the + # status so that the request becomes queued again. + status.unset_connection() + self._attempt_to_acquire_connection(status) + except BaseException as exc: + self.response_closed(status) + raise exc + else: + break + + # When we return the response, we wrap the stream in a special class + # that handles notifying the connection pool once the response + # has been released. + assert isinstance(response.stream, Iterable) + return Response( + status=response.status, + headers=response.headers, + content=ConnectionPoolByteStream(response.stream, self, status), + extensions=response.extensions, + ) + + def response_closed(self, status: RequestStatus) -> None: + """ + This method acts as a callback once the request/response cycle is complete. + + It is called into from the `ConnectionPoolByteStream.close()` method. + """ + assert status.connection is not None + connection = status.connection + + with self._pool_lock: + # Update the state of the connection pool. + if status in self._requests: + self._requests.remove(status) + + if connection.is_closed() and connection in self._pool: + self._pool.remove(connection) + + # Since we've had a response closed, it's possible we'll now be able + # to service one or more requests that are currently pending. + for status in self._requests: + if status.connection is None: + acquired = self._attempt_to_acquire_connection(status) + # If we could not acquire a connection for a queued request + # then we don't need to check anymore requests that are + # queued later behind it. + if not acquired: + break + + # Housekeeping. + self._close_expired_connections() + + def close(self) -> None: + """ + Close any connections in the pool. 
+ """ + with self._pool_lock: + requests_still_in_flight = len(self._requests) + + for connection in self._pool: + connection.close() + self._pool = [] + self._requests = [] + + if requests_still_in_flight: + raise RuntimeError( + f"The connection pool was closed while {requests_still_in_flight} " + f"HTTP requests/responses were still in-flight." + ) + + def __enter__(self) -> "ConnectionPool": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() + + +class ConnectionPoolByteStream: + """ + A wrapper around the response byte stream, that additionally handles + notifying the connection pool when the response has been closed. + """ + + def __init__( + self, + stream: Iterable[bytes], + pool: ConnectionPool, + status: RequestStatus, + ) -> None: + self._stream = stream + self._pool = pool + self._status = status + + def __iter__(self) -> Iterator[bytes]: + for part in self._stream: + yield part + + def close(self) -> None: + try: + if hasattr(self._stream, "close"): + self._stream.close() # type: ignore + finally: + self._pool.response_closed(self._status) diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/http11.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/http11.py new file mode 100644 index 0000000..d7ea275 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/http11.py @@ -0,0 +1,306 @@ +import enum +import time +from types import TracebackType +from typing import Iterable, Iterator, List, Optional, Tuple, Type, Union + +import h11 + +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, + map_exceptions, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock +from .._trace import Trace +from ..backends.base import NetworkStream +from .interfaces import ConnectionInterface + +H11Event = Union[ + h11.Request, + h11.Response, + h11.InformationalResponse, + h11.Data, + h11.EndOfMessage, + h11.ConnectionClosed, +] + + +class HTTPConnectionState(enum.IntEnum): + NEW = 0 + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP11Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: Optional[float] = None, + ) -> None: + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: Optional[float] = keepalive_expiry + self._expire_at: Optional[float] = None + self._state = HTTPConnectionState.NEW + self._state_lock = Lock() + self._request_count = 0 + self._h11_state = h11.Connection(our_role=h11.CLIENT) + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE): + self._request_count += 1 + self._state = HTTPConnectionState.ACTIVE + self._expire_at = None + else: + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request} + with Trace("http11.send_request_headers", request, kwargs) as trace: + self._send_request_headers(**kwargs) + with Trace("http11.send_request_body", request, kwargs) as trace: + self._send_request_body(**kwargs) + with Trace( + "http11.receive_response_headers", request, kwargs + ) as trace: + ( + http_version, + status, 
+ reason_phrase, + headers, + ) = self._receive_response_headers(**kwargs) + trace.return_value = ( + http_version, + status, + reason_phrase, + headers, + ) + + return Response( + status=status, + headers=headers, + content=HTTP11ConnectionByteStream(self, request), + extensions={ + "http_version": http_version, + "reason_phrase": reason_phrase, + "network_stream": self._network_stream, + }, + ) + except BaseException as exc: + with Trace("http11.response_closed", request) as trace: + self._response_closed() + raise exc + + # Sending the request... + + def _send_request_headers(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): + event = h11.Request( + method=request.method, + target=request.url.target, + headers=request.headers, + ) + self._send_event(event, timeout=timeout) + + def _send_request_body(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + assert isinstance(request.stream, Iterable) + for chunk in request.stream: + event = h11.Data(data=chunk) + self._send_event(event, timeout=timeout) + + event = h11.EndOfMessage() + self._send_event(event, timeout=timeout) + + def _send_event( + self, event: H11Event, timeout: Optional[float] = None + ) -> None: + bytes_to_send = self._h11_state.send(event) + self._network_stream.write(bytes_to_send, timeout=timeout) + + # Receiving the response... + + def _receive_response_headers( + self, request: Request + ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = self._receive_event(timeout=timeout) + if isinstance(event, h11.Response): + break + + http_version = b"HTTP/" + event.http_version + + # h11 version 0.11+ supports a `raw_items` interface to get the + # raw header casing, rather than the enforced lowercase headers. + headers = event.headers.raw_items() + + return http_version, event.status_code, event.reason, headers + + def _receive_response_body(self, request: Request) -> Iterator[bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = self._receive_event(timeout=timeout) + if isinstance(event, h11.Data): + yield bytes(event.data) + elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): + break + + def _receive_event(self, timeout: Optional[float] = None) -> H11Event: + while True: + with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): + event = self._h11_state.next_event() + + if event is h11.NEED_DATA: + data = self._network_stream.read( + self.READ_NUM_BYTES, timeout=timeout + ) + + # If we feed this case through h11 we'll raise an exception like: + # + # httpcore.RemoteProtocolError: can't handle event type + # ConnectionClosed when role=SERVER and state=SEND_RESPONSE + # + # Which is accurate, but not very informative from an end-user + # perspective. Instead we handle this case distinctly and treat + # it as a ConnectError. + if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: + msg = "Server disconnected without sending a response." 
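+                    # (This is still surfaced as a RemoteProtocolError, just
+                    # with a clearer message than the one h11 would produce.)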
+ raise RemoteProtocolError(msg) + + self._h11_state.receive_data(data) + else: + return event + + def _response_closed(self) -> None: + with self._state_lock: + if ( + self._h11_state.our_state is h11.DONE + and self._h11_state.their_state is h11.DONE + ): + self._state = HTTPConnectionState.IDLE + self._h11_state.start_next_cycle() + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + else: + self.close() + + # Once the connection is no longer required... + + def close(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._state = HTTPConnectionState.CLOSED + self._network_stream.close() + + # The ConnectionInterface methods provide information about the state of + # the connection, allowing for a connection pooling implementation to + # determine when to reuse and when to close the connection... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + # Note that HTTP/1.1 connections in the "NEW" state are not treated as + # being "available". The control flow which created the connection will + # be able to send an outgoing request, but the connection will not be + # acquired from the connection pool for any other request. + return self._state == HTTPConnectionState.IDLE + + def has_expired(self) -> bool: + now = time.monotonic() + keepalive_expired = self._expire_at is not None and now > self._expire_at + + # If the HTTP connection is idle but the socket is readable, then the + # only valid state is that the socket is about to return b"", indicating + # a server-initiated disconnect. + server_disconnected = ( + self._state == HTTPConnectionState.IDLE + and self._network_stream.get_extra_info("is_readable") + ) + + return keepalive_expired or server_disconnected + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/1.1, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + def __enter__(self) -> "HTTP11Connection": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: HTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + def __iter__(self) -> Iterator[bytes]: + kwargs = {"request": self._request} + try: + with Trace("http11.receive_response_body", self._request, kwargs): + for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
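+            # Note: `close()` calls into `_response_closed()`, which closes
+            # the underlying connection entirely if the h11 state machine has
+            # not reached DONE on both sides, i.e. the body was not fully read.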
+ self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + with Trace("http11.response_closed", self._request): + self._connection._response_closed() diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py new file mode 100644 index 0000000..5bd2de0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py @@ -0,0 +1,475 @@ +import enum +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, Semaphore +from .._trace import Trace +from ..backends.base import NetworkStream +from .interfaces import ConnectionInterface + + +def has_body_headers(request: Request) -> bool: + return any( + [ + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ] + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP2Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: typing.Optional[float] = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: typing.Optional[float] = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: typing.Optional[float] = None + self._request_count = 0 + self._init_lock = Lock() + self._state_lock = Lock() + self._read_lock = Lock() + self._write_lock = Lock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + self._events: typing.Dict[int, h2.events.Event] = {} + self._read_exception: typing.Optional[Exception] = None + self._write_exception: typing.Optional[Exception] = None + self._connection_error_event: typing.Optional[h2.events.Event] = None + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. 
+ raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + with self._init_lock: + if not self._sent_connection_init: + kwargs = {"request": request} + with Trace("http2.send_connection_init", request, kwargs): + self._send_connection_init(**kwargs) + self._sent_connection_init = True + max_streams = self._h2_state.local_settings.max_concurrent_streams + self._max_streams_semaphore = Semaphore(max_streams) + + self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + with Trace("http2.send_request_headers", request, kwargs): + self._send_request_headers(request=request, stream_id=stream_id) + with Trace("http2.send_request_body", request, kwargs): + self._send_request_body(request=request, stream_id=stream_id) + with Trace( + "http2.receive_response_headers", request, kwargs + ) as trace: + status, headers = self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={"stream_id": stream_id, "http_version": b"HTTP/2"}, + ) + except Exception as exc: # noqa: PIE786 + kwargs = {"stream_id": stream_id} + with Trace("http2.response_closed", request, kwargs): + self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_error_event: + raise RemoteProtocolError(self._connection_error_event) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? 
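+                # (SETTINGS_ENABLE_PUSH is defined by RFC 7540, section 6.5.2:
+                # a value of 0 disables server push, 1 enables it.)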
+ h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + self._write_outgoing_data(request) + + # Sending the request... + + def _send_request_headers(self, request: Request, stream_id: int) -> None: + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. + authority = [v for k, v in request.headers if k.lower() == b"host"][0] + + headers = [ + (b":method", request.method), + (b":authority", authority), + (b":scheme", request.url.scheme), + (b":path", request.url.target), + ] + [ + (k.lower(), v) + for k, v in request.headers + if k.lower() + not in ( + b"host", + b"transfer-encoding", + ) + ] + + self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) + self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) + self._write_outgoing_data(request) + + def _send_request_body(self, request: Request, stream_id: int) -> None: + if not has_body_headers(request): + return + + assert isinstance(request.stream, typing.Iterable) + for data in request.stream: + while data: + max_flow = self._wait_for_outgoing_flow(request, stream_id) + chunk_size = min(len(data), max_flow) + chunk, data = data[:chunk_size], data[chunk_size:] + self._h2_state.send_data(stream_id, chunk) + self._write_outgoing_data(request) + + self._h2_state.end_stream(stream_id) + self._write_outgoing_data(request) + + # Receiving the response... + + def _receive_response( + self, request: Request, stream_id: int + ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.Iterator[bytes]: + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + self._write_outgoing_data(request) + yield event.data + elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)): + break + + def _receive_stream_event( + self, request: Request, stream_id: int + ) -> h2.events.Event: + while not self._events.get(stream_id): + self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + # The StreamReset event applies to a single stream. 
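+        # (h2.events.StreamReset is the per-stream event carrying an
+        # `error_code` attribute, which is what this duck-typed check detects.)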
+        if hasattr(event, "error_code"):
+            raise RemoteProtocolError(event)
+        return event
+
+    def _receive_events(
+        self, request: Request, stream_id: typing.Optional[int] = None
+    ) -> None:
+        with self._read_lock:
+            if self._connection_error_event is not None:  # pragma: nocover
+                raise RemoteProtocolError(self._connection_error_event)
+
+            # This conditional is a bit icky. We don't want to block reading if we've
+            # actually got an event to return for a given stream. We need to do that
+            # check *within* the atomic read lock. Though it also needs to be optional,
+            # because when we call it from `_wait_for_outgoing_flow` we *do* want to
+            # block until we have available flow control, even when we have events
+            # pending for the stream ID we're attempting to send on.
+            if stream_id is None or not self._events.get(stream_id):
+                events = self._read_incoming_data(request)
+                for event in events:
+                    event_stream_id = getattr(event, "stream_id", 0)
+
+                    # The ConnectionTerminatedEvent applies to the entire connection,
+                    # and should be saved so it can be raised on all streams.
+                    if hasattr(event, "error_code") and event_stream_id == 0:
+                        self._connection_error_event = event
+                        raise RemoteProtocolError(event)
+
+                    if event_stream_id in self._events:
+                        self._events[event_stream_id].append(event)
+
+        self._write_outgoing_data(request)
+
+    def _response_closed(self, stream_id: int) -> None:
+        self._max_streams_semaphore.release()
+        del self._events[stream_id]
+        with self._state_lock:
+            if self._state == HTTPConnectionState.ACTIVE and not self._events:
+                self._state = HTTPConnectionState.IDLE
+                if self._keepalive_expiry is not None:
+                    now = time.monotonic()
+                    self._expire_at = now + self._keepalive_expiry
+                if self._used_all_stream_ids:  # pragma: nocover
+                    self.close()
+
+    def close(self) -> None:
+        # Note that this method unilaterally closes the connection, and does
+        # not have any kind of locking in place around it.
+        self._h2_state.close_connection()
+        self._state = HTTPConnectionState.CLOSED
+        self._network_stream.close()
+
+    # Wrappers around network read/write operations...
+
+    def _read_incoming_data(
+        self, request: Request
+    ) -> typing.List[h2.events.Event]:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("read", None)
+
+        if self._read_exception is not None:
+            raise self._read_exception  # pragma: nocover
+
+        try:
+            data = self._network_stream.read(self.READ_NUM_BYTES, timeout)
+            if data == b"":
+                raise RemoteProtocolError("Server disconnected")
+        except Exception as exc:
+            # If we get a network error we should:
+            #
+            # 1. Save the exception and just raise it immediately on any future reads.
+            #    (For example, this means that a single read timeout or disconnect will
+            #    immediately close all pending streams. Without requiring multiple
+            #    sequential timeouts.)
+            # 2. Mark the connection as errored, so that we don't accept any other
+            #    incoming requests.
+            self._read_exception = exc
+            self._connection_error = True
+            raise exc
+
+        events = self._h2_state.receive_data(data)
+
+        return events
+
+    def _write_outgoing_data(self, request: Request) -> None:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("write", None)
+
+        with self._write_lock:
+            data_to_send = self._h2_state.data_to_send()
+
+            if self._write_exception is not None:
+                raise self._write_exception  # pragma: nocover
+
+            try:
+                self._network_stream.write(data_to_send, timeout)
+            except Exception as exc:  # pragma: nocover
+                # If we get a network error we should:
+                #
+                # 1.
Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + def __enter__(self) -> "HTTP2Connection": + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[types.TracebackType] = None, + ) -> None: + self.close() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: HTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + with Trace("http2.receive_response_body", self._request, kwargs): + for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
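+            # Note: for HTTP/2, `close()` releases just this stream via
+            # `_response_closed()`; the connection itself stays open unless
+            # the stream ID space has been exhausted.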
+            self.close()
+            raise exc
+
+    def close(self) -> None:
+        if not self._closed:
+            self._closed = True
+            kwargs = {"stream_id": self._stream_id}
+            with Trace("http2.response_closed", self._request, kwargs):
+                self._connection._response_closed(stream_id=self._stream_id)
diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py
new file mode 100644
index 0000000..3dadf9a
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py
@@ -0,0 +1,337 @@
+import ssl
+from base64 import b64encode
+from typing import List, Mapping, Optional, Sequence, Tuple, Union
+
+from .._exceptions import ProxyError
+from .._models import (
+    URL,
+    Origin,
+    Request,
+    Response,
+    enforce_bytes,
+    enforce_headers,
+    enforce_url,
+)
+from .._ssl import default_ssl_context
+from .._synchronization import Lock
+from .._trace import Trace
+from ..backends.base import NetworkBackend
+from .connection import HTTPConnection
+from .connection_pool import ConnectionPool
+from .http11 import HTTP11Connection
+from .interfaces import ConnectionInterface
+
+HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]
+HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]]
+
+
+def merge_headers(
+    default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
+    override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
+) -> List[Tuple[bytes, bytes]]:
+    """
+    Append default_headers and override_headers, de-duplicating if a key exists
+    in both cases.
+    """
+    default_headers = [] if default_headers is None else list(default_headers)
+    override_headers = [] if override_headers is None else list(override_headers)
+    has_override = set([key.lower() for key, value in override_headers])
+    default_headers = [
+        (key, value)
+        for key, value in default_headers
+        if key.lower() not in has_override
+    ]
+    return default_headers + override_headers
+
+
+def build_auth_header(username: bytes, password: bytes) -> bytes:
+    userpass = username + b":" + password
+    return b"Basic " + b64encode(userpass)
+
+
+class HTTPProxy(ConnectionPool):
+    """
+    A connection pool that sends requests via an HTTP proxy.
+    """
+
+    def __init__(
+        self,
+        proxy_url: Union[URL, bytes, str],
+        proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None,
+        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
+        ssl_context: Optional[ssl.SSLContext] = None,
+        max_connections: Optional[int] = 10,
+        max_keepalive_connections: Optional[int] = None,
+        keepalive_expiry: Optional[float] = None,
+        http1: bool = True,
+        http2: bool = False,
+        retries: int = 0,
+        local_address: Optional[str] = None,
+        uds: Optional[str] = None,
+        network_backend: Optional[NetworkBackend] = None,
+    ) -> None:
+        """
+        A connection pool for making HTTP requests.
+
+        Parameters:
+            proxy_url: The URL to use when connecting to the proxy server.
+                For example `"http://127.0.0.1:8080/"`.
+            proxy_auth: Any proxy authentication as a two-tuple of
+                (username, password). May be either bytes or ascii-only str.
+            proxy_headers: Any HTTP headers to use for the proxy requests.
+                For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.
+            ssl_context: An SSL context to use for verifying connections.
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+            max_connections: The maximum number of concurrent HTTP connections that
+                the pool should allow.
Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + authorization = build_auth_header(username, password) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> ConnectionInterface: + if origin.scheme == b"http": + return ForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + ) + return TunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class ForwardHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + keepalive_expiry: Optional[float] = None, + network_backend: Optional[NetworkBackend] = None, + ) -> None: + self._connection = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + ) + self._proxy_origin = proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + + def handle_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return self._connection.handle_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin.scheme 
== b"http" + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class TunnelHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: Optional[NetworkBackend] = None, + ) -> None: + self._connection: ConnectionInterface = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = Lock() + self._connected = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = self._connection.handle_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + self._connection.close() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("connection.start_tls", request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + 
origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return self._connection.handle_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/interfaces.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/interfaces.py new file mode 100644 index 0000000..bf512ab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/interfaces.py @@ -0,0 +1,133 @@ +from contextlib import contextmanager +from typing import Iterator, Optional, Union + +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class RequestInterface: + def request( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> Response: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + response.read() + finally: + response.close() + return response + + @contextmanager + def stream( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: Union[dict, list, None] = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[dict] = None, + ) -> Iterator[Response]: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + yield response + finally: + response.close() + + def handle_request(self, request: Request) -> Response: + raise NotImplementedError() # pragma: nocover + + +class ConnectionInterface(RequestInterface): + def close(self) -> None: + raise NotImplementedError() # pragma: nocover + + def info(self) -> str: + raise NotImplementedError() # pragma: nocover + + def can_handle_request(self, origin: Origin) -> bool: + raise NotImplementedError() # pragma: nocover + + def is_available(self) -> bool: + """ + Return `True` if the connection is currently able to accept an + outgoing request. + + An HTTP/1.1 connection will only be available if it is currently idle. 
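+        (A connection in the NEW state is reserved for the request that
+        created it, and is not handed out for any other request.)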
+
+        An HTTP/2 connection will be available so long as the stream ID space is
+        not yet exhausted, and the connection is not in an error state.
+
+        While the connection is being established we may not yet know if it is going
+        to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+        treated as being available, but might ultimately raise `ConnectionNotAvailable`
+        exceptions if multiple requests are attempted over a connection
+        that ends up being established as HTTP/1.1.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def has_expired(self) -> bool:
+        """
+        Return `True` if the connection is in a state where it should be closed.
+
+        This either means that the connection is idle and it has passed the
+        expiry time on its keep-alive, or that the server has sent an EOF.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_idle(self) -> bool:
+        """
+        Return `True` if the connection is currently idle.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_closed(self) -> bool:
+        """
+        Return `True` if the connection has been closed.
+
+        Used when a response is closed to determine if the connection may be
+        returned to the connection pool or not.
+        """
+        raise NotImplementedError()  # pragma: nocover
diff --git a/myenv/lib/python3.9/site-packages/httpcore/_sync/socks_proxy.py b/myenv/lib/python3.9/site-packages/httpcore/_sync/socks_proxy.py
new file mode 100644
index 0000000..95c004a
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/httpcore/_sync/socks_proxy.py
@@ -0,0 +1,336 @@
+import ssl
+import typing
+
+from socksio import socks5
+
+from .._exceptions import ConnectionNotAvailable, ProxyError
+from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url
+from .._ssl import default_ssl_context
+from .._synchronization import Lock
+from .._trace import Trace
+from ..backends.sync import SyncBackend
+from ..backends.base import NetworkBackend, NetworkStream
+from .connection_pool import ConnectionPool
+from .http11 import HTTP11Connection
+from .interfaces import ConnectionInterface
+
+AUTH_METHODS = {
+    b"\x00": "NO AUTHENTICATION REQUIRED",
+    b"\x01": "GSSAPI",
+    b"\x02": "USERNAME/PASSWORD",
+    b"\xff": "NO ACCEPTABLE METHODS",
+}
+
+REPLY_CODES = {
+    b"\x00": "Succeeded",
+    b"\x01": "General SOCKS server failure",
+    b"\x02": "Connection not allowed by ruleset",
+    b"\x03": "Network unreachable",
+    b"\x04": "Host unreachable",
+    b"\x05": "Connection refused",
+    b"\x06": "TTL expired",
+    b"\x07": "Command not supported",
+    b"\x08": "Address type not supported",
+}
+
+
+def _init_socks5_connection(
+    stream: NetworkStream,
+    *,
+    host: bytes,
+    port: int,
+    auth: typing.Optional[typing.Tuple[bytes, bytes]] = None,
+) -> None:
+    conn = socks5.SOCKS5Connection()
+
+    # Auth method request
+    auth_method = (
+        socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED
+        if auth is None
+        else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD
+    )
+    conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method]))
+    outgoing_bytes = conn.data_to_send()
+    stream.write(outgoing_bytes)
+
+    # Auth method response
+    incoming_bytes = stream.read(max_bytes=4096)
+    response = conn.receive_data(incoming_bytes)
+    assert isinstance(response, socks5.SOCKS5AuthReply)
+    if response.method != auth_method:
+        requested = AUTH_METHODS.get(auth_method, "UNKNOWN")
+        responded = AUTH_METHODS.get(response.method, "UNKNOWN")
+        raise ProxyError(
+            f"Requested {requested} from proxy server, but got {responded}."
+        )
+
+    if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
+        # Username/password request
+        assert auth is not None
+        username, password = auth
+        conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password))
+        outgoing_bytes = conn.data_to_send()
+        stream.write(outgoing_bytes)
+
+        # Username/password response
+        incoming_bytes = stream.read(max_bytes=4096)
+        response = conn.receive_data(incoming_bytes)
+        assert isinstance(response, socks5.SOCKS5UsernamePasswordReply)
+        if not response.success:
+            raise ProxyError("Invalid username/password")
+
+    # Connect request
+    conn.send(
+        socks5.SOCKS5CommandRequest.from_address(
+            socks5.SOCKS5Command.CONNECT, (host, port)
+        )
+    )
+    outgoing_bytes = conn.data_to_send()
+    stream.write(outgoing_bytes)
+
+    # Connect response
+    incoming_bytes = stream.read(max_bytes=4096)
+    response = conn.receive_data(incoming_bytes)
+    assert isinstance(response, socks5.SOCKS5Reply)
+    if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED:
+        reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
+        raise ProxyError(f"Proxy Server could not connect: {reply_code}.")
+
+
+class SOCKSProxy(ConnectionPool):
+    """
+    A connection pool that sends requests via a SOCKS5 proxy.
+    """
+
+    def __init__(
+        self,
+        proxy_url: typing.Union[URL, bytes, str],
+        proxy_auth: typing.Optional[
+            typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]]
+        ] = None,
+        ssl_context: typing.Optional[ssl.SSLContext] = None,
+        max_connections: typing.Optional[int] = 10,
+        max_keepalive_connections: typing.Optional[int] = None,
+        keepalive_expiry: typing.Optional[float] = None,
+        http1: bool = True,
+        http2: bool = False,
+        network_backend: typing.Optional[NetworkBackend] = None,
+    ) -> None:
+        """
+        A connection pool for making HTTP requests.
+
+        Parameters:
+            proxy_url: The URL to use when connecting to the proxy server.
+                For example `"socks5://127.0.0.1:1080/"`.
+            proxy_auth: Any proxy authentication as a two-tuple of
+                (username, password). May be either bytes or ascii-only str.
+            ssl_context: An SSL context to use for verifying connections.
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+            max_connections: The maximum number of concurrent HTTP connections that
+                the pool should allow. Any attempt to send a request on a pool that
+                would exceed this amount will block until a connection is available.
+            max_keepalive_connections: The maximum number of idle HTTP connections
+                that will be maintained in the pool.
+            keepalive_expiry: The duration in seconds that an idle HTTP connection
+                may be maintained for before being expired from the pool.
+            http1: A boolean indicating if HTTP/1.1 requests should be supported
+                by the connection pool. Defaults to True.
+            http2: A boolean indicating if HTTP/2 requests should be supported by
+                the connection pool. Defaults to False.
+            network_backend: A backend instance to use for handling network I/O.
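+
+        A minimal usage sketch (illustrative, not part of the upstream
+        docstring), assuming a SOCKS5 proxy listening on 127.0.0.1:1080:
+
+        ```python
+        proxy = SOCKSProxy("socks5://127.0.0.1:1080/")
+        response = proxy.request("GET", "https://www.example.com/")
+        ```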
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> ConnectionInterface: + return Socks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class Socks5Connection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: typing.Optional[NetworkBackend] = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connect_lock = Lock() + self._connection: typing.Optional[ConnectionInterface] = None + self._connect_failed = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + with Trace("connection.connect_tcp", request, kwargs) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + with Trace( + "connection.setup_socks5_connection", request, kwargs + ) as trace: + _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace( + "connection.start_tls", request, kwargs + ) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() 
== "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or ( + self._http2 and not self._http1 + ): # pragma: nocover + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): # pragma: nocover + raise ConnectionNotAvailable() + + return self._connection.handle_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + if self._connection is not None: + self._connection.close() + + def is_available(self) -> bool: + if self._connection is None: # pragma: nocover + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._remote_origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: # pragma: nocover + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/myenv/lib/python3.9/site-packages/httpcore/_synchronization.py b/myenv/lib/python3.9/site-packages/httpcore/_synchronization.py new file mode 100644 index 0000000..3885ca7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_synchronization.py @@ -0,0 +1,89 @@ +import threading +from types import TracebackType +from typing import Optional, Type + +import anyio + +from ._exceptions import PoolTimeout, map_exceptions + + +class AsyncLock: + def __init__(self) -> None: + self._lock = anyio.Lock() + + async def __aenter__(self) -> "AsyncLock": + await self._lock.acquire() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self._lock.release() + + +class AsyncEvent: + def __init__(self) -> None: + self._event = anyio.Event() + + def set(self) -> None: + self._event.set() + + async def wait(self, timeout: Optional[float] = None) -> None: + exc_map: dict = {TimeoutError: PoolTimeout} + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + await self._event.wait() + + +class AsyncSemaphore: + def __init__(self, bound: int) -> None: + self._semaphore = anyio.Semaphore(initial_value=bound, max_value=bound) + + async def acquire(self) -> None: + await self._semaphore.acquire() + + async def release(self) -> None: + self._semaphore.release() + + +class Lock: + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> 
"Lock": + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self._lock.release() + + +class Event: + def __init__(self) -> None: + self._event = threading.Event() + + def set(self) -> None: + self._event.set() + + def wait(self, timeout: Optional[float] = None) -> None: + if not self._event.wait(timeout=timeout): + raise PoolTimeout() # pragma: nocover + + +class Semaphore: + def __init__(self, bound: int) -> None: + self._semaphore = threading.Semaphore(value=bound) + + def acquire(self) -> None: + self._semaphore.acquire() + + def release(self) -> None: + self._semaphore.release() diff --git a/myenv/lib/python3.9/site-packages/httpcore/_trace.py b/myenv/lib/python3.9/site-packages/httpcore/_trace.py new file mode 100644 index 0000000..c957c2c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_trace.py @@ -0,0 +1,54 @@ +from types import TracebackType +from typing import Any, Optional, Type + +from ._models import Request + + +class Trace: + def __init__( + self, name: str, request: Request, kwargs: Optional[dict] = None + ) -> None: + self.name = name + self.trace = request.extensions.get("trace") + self.kwargs = kwargs or {} + self.return_value: Any = None + + def __enter__(self) -> "Trace": + if self.trace is not None: + info = self.kwargs + self.trace(f"{self.name}.started", info) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self.trace is not None: + if exc_value is None: + info: dict = {"return_value": self.return_value} + self.trace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + self.trace(f"{self.name}.failed", info) + + async def __aenter__(self) -> "Trace": + if self.trace is not None: + info = self.kwargs + await self.trace(f"{self.name}.started", info) + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self.trace is not None: + if exc_value is None: + info: dict = {"return_value": self.return_value} + await self.trace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + await self.trace(f"{self.name}.failed", info) diff --git a/myenv/lib/python3.9/site-packages/httpcore/_utils.py b/myenv/lib/python3.9/site-packages/httpcore/_utils.py new file mode 100644 index 0000000..df5dea8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/_utils.py @@ -0,0 +1,36 @@ +import select +import socket +import sys +import typing + + +def is_socket_readable(sock: typing.Optional[socket.socket]) -> bool: + """ + Return whether a socket, as identifed by its file descriptor, is readable. + "A socket is readable" means that the read buffer isn't empty, i.e. that calling + .recv() on it would immediately return some data. + """ + # NOTE: we want check for readability without actually attempting to read, because + # we don't want to block forever if it's not readable. + + # In the case that the socket no longer exists, or cannot return a file + # descriptor, we treat it as being readable, as if it the next read operation + # on it is ready to return the terminating `b""`. 
+ sock_fd = None if sock is None else sock.fileno() + if sock_fd is None or sock_fd < 0: # pragma: nocover + return True + + # The implementation below was stolen from: + # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478 + # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316 + + # Use select.select on Windows, and when poll is unavailable and select.poll + # everywhere else. (E.g. When eventlet is in use. See #327) + if ( + sys.platform == "win32" or getattr(select, "poll", None) is None + ): # pragma: nocover + rready, _, _ = select.select([sock_fd], [], [], 0) + return bool(rready) + p = select.poll() + p.register(sock_fd, select.POLLIN) + return bool(p.poll(0)) diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/__init__.py b/myenv/lib/python3.9/site-packages/httpcore/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/asyncio.py b/myenv/lib/python3.9/site-packages/httpcore/backends/asyncio.py new file mode 100644 index 0000000..3b8abf1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/asyncio.py @@ -0,0 +1,130 @@ +import ssl +import typing + +import anyio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .._utils import is_socket_readable +from .base import AsyncNetworkBackend, AsyncNetworkStream + + +class AsyncIOStream(AsyncNetworkStream): + def __init__(self, stream: anyio.abc.ByteStream) -> None: + self._stream = stream + + async def read( + self, max_bytes: int, timeout: typing.Optional[float] = None + ) -> bytes: + exc_map = { + TimeoutError: ReadTimeout, + anyio.BrokenResourceError: ReadError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + try: + return await self._stream.receive(max_bytes=max_bytes) + except anyio.EndOfStream: # pragma: nocover + return b"" + + async def write( + self, buffer: bytes, timeout: typing.Optional[float] = None + ) -> None: + if not buffer: + return + + exc_map = { + TimeoutError: WriteTimeout, + anyio.BrokenResourceError: WriteError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + await self._stream.send(item=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> AsyncNetworkStream: + exc_map = { + TimeoutError: ConnectTimeout, + anyio.BrokenResourceError: ConnectError, + } + with map_exceptions(exc_map): + try: + with anyio.fail_after(timeout): + ssl_stream = await anyio.streams.tls.TLSStream.wrap( + self._stream, + ssl_context=ssl_context, + hostname=server_hostname, + standard_compatible=False, + server_side=False, + ) + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return AsyncIOStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object": + return self._stream.extra(anyio.streams.tls.TLSAttribute.ssl_object, None) + if info == "client_addr": + return self._stream.extra(anyio.abc.SocketAttribute.local_address, None) + if info == "server_addr": + return self._stream.extra(anyio.abc.SocketAttribute.remote_address, None) + if info == "socket": + return self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None) + if info == "is_readable": + sock = 
self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None) + return is_socket_readable(sock) + return None + + +class AsyncIOBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + ) -> AsyncNetworkStream: + exc_map = { + TimeoutError: ConnectTimeout, + OSError: ConnectError, + anyio.BrokenResourceError: ConnectError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + stream: anyio.abc.ByteStream = await anyio.connect_tcp( + remote_host=host, + remote_port=port, + local_host=local_address, + ) + return AsyncIOStream(stream) + + async def connect_unix_socket( + self, path: str, timeout: typing.Optional[float] = None + ) -> AsyncNetworkStream: # pragma: nocover + exc_map = { + TimeoutError: ConnectTimeout, + OSError: ConnectError, + anyio.BrokenResourceError: ConnectError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + stream: anyio.abc.ByteStream = await anyio.connect_unix(path) + return AsyncIOStream(stream) + + async def sleep(self, seconds: float) -> None: + await anyio.sleep(seconds) # pragma: nocover diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/auto.py b/myenv/lib/python3.9/site-packages/httpcore/backends/auto.py new file mode 100644 index 0000000..62087d8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/auto.py @@ -0,0 +1,41 @@ +from typing import Optional + +import sniffio + +from .base import AsyncNetworkBackend, AsyncNetworkStream + + +class AutoBackend(AsyncNetworkBackend): + async def _init_backend(self) -> None: + if not (hasattr(self, "_backend")): + backend = sniffio.current_async_library() + if backend == "trio": + from .trio import TrioBackend + + self._backend: AsyncNetworkBackend = TrioBackend() + else: + from .asyncio import AsyncIOBackend + + self._backend = AsyncIOBackend() + + async def connect_tcp( + self, + host: str, + port: int, + timeout: Optional[float] = None, + local_address: Optional[str] = None, + ) -> AsyncNetworkStream: + await self._init_backend() + return await self._backend.connect_tcp( + host, port, timeout=timeout, local_address=local_address + ) + + async def connect_unix_socket( + self, path: str, timeout: Optional[float] = None + ) -> AsyncNetworkStream: # pragma: nocover + await self._init_backend() + return await self._backend.connect_unix_socket(path, timeout=timeout) + + async def sleep(self, seconds: float) -> None: # pragma: nocover + await self._init_backend() + return await self._backend.sleep(seconds) diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/base.py b/myenv/lib/python3.9/site-packages/httpcore/backends/base.py new file mode 100644 index 0000000..6854db6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/base.py @@ -0,0 +1,89 @@ +import ssl +import time +import typing + + +class NetworkStream: + def read(self, max_bytes: int, timeout: typing.Optional[float] = None) -> bytes: + raise NotImplementedError() # pragma: nocover + + def write(self, buffer: bytes, timeout: typing.Optional[float] = None) -> None: + raise NotImplementedError() # pragma: nocover + + def close(self) -> None: + raise NotImplementedError() # pragma: nocover + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> "NetworkStream": + raise NotImplementedError() # pragma: nocover + + def get_extra_info(self, info: str) -> 
typing.Any: + return None # pragma: nocover + + +class NetworkBackend: + def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + ) -> NetworkStream: + raise NotImplementedError() # pragma: nocover + + def connect_unix_socket( + self, path: str, timeout: typing.Optional[float] = None + ) -> NetworkStream: + raise NotImplementedError() # pragma: nocover + + def sleep(self, seconds: float) -> None: + time.sleep(seconds) # pragma: nocover + + +class AsyncNetworkStream: + async def read( + self, max_bytes: int, timeout: typing.Optional[float] = None + ) -> bytes: + raise NotImplementedError() # pragma: nocover + + async def write( + self, buffer: bytes, timeout: typing.Optional[float] = None + ) -> None: + raise NotImplementedError() # pragma: nocover + + async def aclose(self) -> None: + raise NotImplementedError() # pragma: nocover + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> "AsyncNetworkStream": + raise NotImplementedError() # pragma: nocover + + def get_extra_info(self, info: str) -> typing.Any: + return None # pragma: nocover + + +class AsyncNetworkBackend: + async def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + ) -> AsyncNetworkStream: + raise NotImplementedError() # pragma: nocover + + async def connect_unix_socket( + self, path: str, timeout: typing.Optional[float] = None + ) -> AsyncNetworkStream: + raise NotImplementedError() # pragma: nocover + + async def sleep(self, seconds: float) -> None: + raise NotImplementedError() # pragma: nocover diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/mock.py b/myenv/lib/python3.9/site-packages/httpcore/backends/mock.py new file mode 100644 index 0000000..8491f6d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/mock.py @@ -0,0 +1,115 @@ +import ssl +import typing +from typing import Optional + +from .base import AsyncNetworkBackend, AsyncNetworkStream, NetworkBackend, NetworkStream + + +class MockSSLObject: + def __init__(self, http2: bool): + self._http2 = http2 + + def selected_alpn_protocol(self) -> str: + return "h2" if self._http2 else "http/1.1" + + +class MockStream(NetworkStream): + def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: + self._buffer = buffer + self._http2 = http2 + + def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes: + if not self._buffer: + return b"" + return self._buffer.pop(0) + + def write(self, buffer: bytes, timeout: Optional[float] = None) -> None: + pass + + def close(self) -> None: + pass + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: Optional[str] = None, + timeout: Optional[float] = None, + ) -> NetworkStream: + return self + + def get_extra_info(self, info: str) -> typing.Any: + return MockSSLObject(http2=self._http2) if info == "ssl_object" else None + + +class MockBackend(NetworkBackend): + def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: + self._buffer = buffer + self._http2 = http2 + + def connect_tcp( + self, + host: str, + port: int, + timeout: Optional[float] = None, + local_address: Optional[str] = None, + ) -> NetworkStream: + return MockStream(list(self._buffer), http2=self._http2) + + def connect_unix_socket( + self, path: str, timeout: Optional[float] = None + ) -> 
NetworkStream: + return MockStream(list(self._buffer), http2=self._http2) + + def sleep(self, seconds: float) -> None: + pass + + +class AsyncMockStream(AsyncNetworkStream): + def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: + self._buffer = buffer + self._http2 = http2 + + async def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes: + if not self._buffer: + return b"" + return self._buffer.pop(0) + + async def write(self, buffer: bytes, timeout: Optional[float] = None) -> None: + pass + + async def aclose(self) -> None: + pass + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: Optional[str] = None, + timeout: Optional[float] = None, + ) -> AsyncNetworkStream: + return self + + def get_extra_info(self, info: str) -> typing.Any: + return MockSSLObject(http2=self._http2) if info == "ssl_object" else None + + +class AsyncMockBackend(AsyncNetworkBackend): + def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: + self._buffer = buffer + self._http2 = http2 + + async def connect_tcp( + self, + host: str, + port: int, + timeout: Optional[float] = None, + local_address: Optional[str] = None, + ) -> AsyncNetworkStream: + return AsyncMockStream(list(self._buffer), http2=self._http2) + + async def connect_unix_socket( + self, path: str, timeout: Optional[float] = None + ) -> AsyncNetworkStream: + return AsyncMockStream(list(self._buffer), http2=self._http2) + + async def sleep(self, seconds: float) -> None: + pass diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/sync.py b/myenv/lib/python3.9/site-packages/httpcore/backends/sync.py new file mode 100644 index 0000000..18e1dd3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/sync.py @@ -0,0 +1,99 @@ +import socket +import ssl +import typing + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .._utils import is_socket_readable +from .base import NetworkBackend, NetworkStream + + +class SyncStream(NetworkStream): + def __init__(self, sock: socket.socket) -> None: + self._sock = sock + + def read(self, max_bytes: int, timeout: typing.Optional[float] = None) -> bytes: + exc_map = {socket.timeout: ReadTimeout, socket.error: ReadError} + with map_exceptions(exc_map): + self._sock.settimeout(timeout) + return self._sock.recv(max_bytes) + + def write(self, buffer: bytes, timeout: typing.Optional[float] = None) -> None: + if not buffer: + return + + exc_map = {socket.timeout: WriteTimeout, socket.error: WriteError} + with map_exceptions(exc_map): + while buffer: + self._sock.settimeout(timeout) + n = self._sock.send(buffer) + buffer = buffer[n:] + + def close(self) -> None: + self._sock.close() + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> NetworkStream: + exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} + with map_exceptions(exc_map): + try: + self._sock.settimeout(timeout) + sock = ssl_context.wrap_socket( + self._sock, server_hostname=server_hostname + ) + except Exception as exc: # pragma: nocover + self.close() + raise exc + return SyncStream(sock) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket): + return self._sock._sslobj # type: ignore + if info == "client_addr": + return self._sock.getsockname() + if info == 
"server_addr": + return self._sock.getpeername() + if info == "socket": + return self._sock + if info == "is_readable": + return is_socket_readable(self._sock) + return None + + +class SyncBackend(NetworkBackend): + def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + ) -> NetworkStream: + address = (host, port) + source_address = None if local_address is None else (local_address, 0) + exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} + with map_exceptions(exc_map): + sock = socket.create_connection( + address, timeout, source_address=source_address + ) + return SyncStream(sock) + + def connect_unix_socket( + self, path: str, timeout: typing.Optional[float] = None + ) -> NetworkStream: # pragma: nocover + exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} + with map_exceptions(exc_map): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(timeout) + sock.connect(path) + return SyncStream(sock) diff --git a/myenv/lib/python3.9/site-packages/httpcore/backends/trio.py b/myenv/lib/python3.9/site-packages/httpcore/backends/trio.py new file mode 100644 index 0000000..c1d9794 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpcore/backends/trio.py @@ -0,0 +1,142 @@ +import ssl +import typing + +import trio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .base import AsyncNetworkBackend, AsyncNetworkStream + + +class TrioStream(AsyncNetworkStream): + def __init__(self, stream: trio.abc.Stream) -> None: + self._stream = stream + + async def read( + self, max_bytes: int, timeout: typing.Optional[float] = None + ) -> bytes: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map = {trio.TooSlowError: ReadTimeout, trio.BrokenResourceError: ReadError} + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + return await self._stream.receive_some(max_bytes=max_bytes) + + async def write( + self, buffer: bytes, timeout: typing.Optional[float] = None + ) -> None: + if not buffer: + return + + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map = { + trio.TooSlowError: WriteTimeout, + trio.BrokenResourceError: WriteError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + await self._stream.send_all(data=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> AsyncNetworkStream: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + } + ssl_stream = trio.SSLStream( + self._stream, + ssl_context=ssl_context, + server_hostname=server_hostname, + https_compatible=True, + server_side=False, + ) + with map_exceptions(exc_map): + try: + with trio.fail_after(timeout_or_inf): + await ssl_stream.do_handshake() + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return TrioStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._stream, trio.SSLStream): + return self._stream._ssl_object # type: ignore + if info == "client_addr": + return self._get_socket_stream().socket.getsockname() + if info == 
"server_addr": + return self._get_socket_stream().socket.getpeername() + if info == "socket": + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream.socket + if info == "is_readable": + socket = self.get_extra_info("socket") + return socket.is_readable() + return None + + def _get_socket_stream(self) -> trio.SocketStream: + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream + + +class TrioBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + ) -> AsyncNetworkStream: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + # Trio supports 'local_address' from 0.16.1 onwards. + # We only include the keyword argument if a local_address + # argument has been passed. + kwargs: dict = {} if local_address is None else {"local_address": local_address} + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_tcp_stream( + host=host, port=port, **kwargs + ) + return TrioStream(stream) + + async def connect_unix_socket( + self, path: str, timeout: typing.Optional[float] = None + ) -> AsyncNetworkStream: # pragma: nocover + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_unix_socket(path) + return TrioStream(stream) + + async def sleep(self, seconds: float) -> None: + await trio.sleep(seconds) # pragma: nocover diff --git a/myenv/lib/python3.9/site-packages/httpcore/py.typed b/myenv/lib/python3.9/site-packages/httpcore/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/LICENSE new file mode 100644 index 0000000..79a03ca --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2015 MagicStack Inc. http://magic.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/METADATA new file mode 100644 index 0000000..77d6e49 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/METADATA @@ -0,0 +1,135 @@ +Metadata-Version: 2.1 +Name: httptools +Version: 0.3.0 +Summary: A collection of framework independent HTTP protocol utils. +Home-page: https://github.com/MagicStack/httptools +Author: Yury Selivanov +Author-email: yury@magic.io +License: MIT +Platform: macOS +Platform: POSIX +Platform: Windows +Classifier: License :: OSI Approved :: MIT License +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Environment :: Web Environment +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=3.5.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: test +Requires-Dist: Cython (<0.30.0,>=0.29.24) ; extra == 'test' + +![Tests](https://github.com/MagicStack/httptools/workflows/Tests/badge.svg) + +httptools is a Python binding for the nodejs HTTP parser. + +The package is available on PyPI: `pip install httptools`. + + +# APIs + +httptools contains two classes `httptools.HttpRequestParser`, +`httptools.HttpResponseParser` (fulfilled through +[llhttp](https://github.com/nodejs/llhttp)) and a function for +parsing URLs `httptools.parse_url` (through +[http-parse](https://github.com/nodejs/http-parser) for now). +See unittests for examples. + + +```python + +class HttpRequestParser: + + def __init__(self, protocol): + """HttpRequestParser + + protocol -- a Python object with the following methods + (all optional): + + - on_message_begin() + - on_url(url: bytes) + - on_header(name: bytes, value: bytes) + - on_headers_complete() + - on_body(body: bytes) + - on_message_complete() + - on_chunk_header() + - on_chunk_complete() + - on_status(status: bytes) + """ + + def get_http_version(self) -> str: + """Return an HTTP protocol version.""" + + def should_keep_alive(self) -> bool: + """Return ``True`` if keep-alive mode is preferred.""" + + def should_upgrade(self) -> bool: + """Return ``True`` if the parsed request is a valid Upgrade request. + The method exposes a flag set just before on_headers_complete. + Calling this method earlier will only yield `False`. + """ + + def feed_data(self, data: bytes): + """Feed data to the parser. + + Will eventually trigger callbacks on the ``protocol`` + object. + + On HTTP upgrade, this method will raise an + ``HttpParserUpgrade`` exception, with its sole argument + set to the offset of the non-HTTP data in ``data``. 
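+
+        A minimal usage sketch (the ``MyProtocol`` callback object below is
+        hypothetical; any object providing the optional callbacks works)::
+
+            class MyProtocol:
+                def on_url(self, url: bytes):
+                    print("url:", url)
+
+            parser = HttpRequestParser(MyProtocol())
+            parser.feed_data(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")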
+ """ + + def get_method(self) -> bytes: + """Return HTTP request method (GET, HEAD, etc)""" + + +class HttpResponseParser: + + """Has all methods except ``get_method()`` that + HttpRequestParser has.""" + + def get_status_code(self) -> int: + """Return the status code of the HTTP response""" + + +def parse_url(url: bytes): + """Parse URL strings into a structured Python object. + + Returns an instance of ``httptools.URL`` class with the + following attributes: + + - schema: bytes + - host: bytes + - port: int + - path: bytes + - query: bytes + - fragment: bytes + - userinfo: bytes + """ +``` + + +# Development + +1. Clone this repository with + `git clone --recursive git@github.com:MagicStack/httptools.git` + +2. Create a virtual environment with Python 3: + `python3 -m venv envname` + +3. Activate the environment with `source envname/bin/activate` + +4. Install development requirements with `pip install -e .[test]` + +5. Run `make` and `make test`. + + +# License + +MIT. + + diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/RECORD new file mode 100644 index 0000000..a0fc539 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/RECORD @@ -0,0 +1,14 @@ +httptools-0.3.0.dist-info/LICENSE,sha256=9Fc-fLdnZ0X7W402-lSKqT45HPtoct2s1lEwxF6mqS0,1093 +httptools-0.3.0.dist-info/WHEEL,sha256=bRe_g_g-vZInZP5wOdewl-4AeWx8E2_UC3Ffr2csPyk,109 +httptools-0.3.0.dist-info/top_level.txt,sha256=APjJKTbZcj0OQ4fdgf2eTCk82nK1n2BFXOD7ky41MPY,10 +httptools-0.3.0.dist-info/METADATA,sha256=dpBDixuRjdq-QLwIgV5XqxUcLzZeMgM6g7GCpdjZDSg,3597 +httptools/_version.py,sha256=b8pdihIYuOWdO3Rpi5ZLDFTAiW4v4IwK95V14jBWrzw,575 +httptools/__init__.py,sha256=plt3MIbueJdco9Dy7zoH3ksLNeyirqWagat5rwRmAjo,147 +httptools/parser/url_parser.cpython-39-darwin.so,sha256=CWNauH_HaYaaso1U4BpCr0mjNDP5DLZXzBpbLfZBV8U,89080 +httptools/parser/parser.cpython-39-darwin.so,sha256=FHIawJFL9Dcpxc5N2WLQ5r0jHLjZQb9yyt1HQwoqhNI,148944 +httptools/parser/__init__.py,sha256=fWyconPEHZlJojzRwmBKSn4C85OGXmKEwiEcdjHqXO8,166 +httptools/parser/url_parser.c,sha256=gZj4QulPeuygRPfjX4qxOBccjoOsOYQNTZXqaLWatSk,232605 +httptools/parser/errors.py,sha256=ZVrtN1smPIb_opQ2Ud3uCbGlNLMlECYM2-6S7r5LnHs,566 +httptools/parser/parser.c,sha256=QILAMkPuFSa_CQGJJfIZU6fBsxQVWMmpUsSkbdiYk8I,395897 +httptools-0.3.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +httptools-0.3.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/WHEEL new file mode 100644 index 0000000..868d54a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_10_9_x86_64 + diff --git a/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..bef3b40 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools-0.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +httptools diff --git a/myenv/lib/python3.9/site-packages/httptools/__init__.py b/myenv/lib/python3.9/site-packages/httptools/__init__.py new file mode 100644 index 0000000..972053e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/__init__.py @@ -0,0 +1,6 @@ +from . 
import parser +from .parser import * # NOQA + +from ._version import __version__ # NOQA + +__all__ = parser.__all__ + ('__version__',) # NOQA diff --git a/myenv/lib/python3.9/site-packages/httptools/_version.py b/myenv/lib/python3.9/site-packages/httptools/_version.py new file mode 100644 index 0000000..1878dfc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/_version.py @@ -0,0 +1,13 @@ +# This file MUST NOT contain anything but the __version__ assignment. +# +# When making a release, change the value of __version__ +# to an appropriate value, and open a pull request against +# the correct branch (master if making a new feature release). +# The commit message MUST contain a properly formatted release +# log, and the commit must be signed. +# +# The release automation will: build and test the packages for the +# supported platforms, publish the packages on PyPI, merge the PR +# to the target branch, create a Git tag pointing to the commit. + +__version__ = '0.3.0' diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/__init__.py b/myenv/lib/python3.9/site-packages/httptools/parser/__init__.py new file mode 100644 index 0000000..ba371f5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/parser/__init__.py @@ -0,0 +1,5 @@ +from .parser import * # NoQA +from .errors import * # NoQA +from .url_parser import * # NoQA + +__all__ = parser.__all__ + errors.__all__ + url_parser.__all__ # NoQA diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/errors.py b/myenv/lib/python3.9/site-packages/httptools/parser/errors.py new file mode 100644 index 0000000..bc24c46 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/parser/errors.py @@ -0,0 +1,30 @@ +__all__ = ('HttpParserError', + 'HttpParserCallbackError', + 'HttpParserInvalidStatusError', + 'HttpParserInvalidMethodError', + 'HttpParserInvalidURLError', + 'HttpParserUpgrade') + + +class HttpParserError(Exception): + pass + + +class HttpParserCallbackError(HttpParserError): + pass + + +class HttpParserInvalidStatusError(HttpParserError): + pass + + +class HttpParserInvalidMethodError(HttpParserError): + pass + + +class HttpParserInvalidURLError(HttpParserError): + pass + + +class HttpParserUpgrade(Exception): + pass diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/parser.c b/myenv/lib/python3.9/site-packages/httptools/parser/parser.c new file mode 100644 index 0000000..c3c2428 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/parser/parser.c @@ -0,0 +1,10319 @@ +/* Generated by Cython 0.29.24 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "depends": [], + "extra_compile_args": [ + "-O2" + ], + "name": "httptools.parser.parser", + "sources": [ + "httptools/parser/parser.pyx" + ] + }, + "module_name": "httptools.parser.parser" +} +END: Cython Metadata */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. 
+#else +#define CYTHON_ABI "0_29_24" +#define CYTHON_HEX_VERSION 0x001D18F0 +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + 
#define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define 
__Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | 
METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if defined(PyUnicode_IS_READY) + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #else + #define __Pyx_PyUnicode_READY(op) (0) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define 
PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__httptools__parser__parser +#define __PYX_HAVE_API__httptools__parser__parser +/* Early includes */ +#include +#include +#include "pythread.h" +#include +#include "llhttp.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || 
likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), 
obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = 
PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "httptools/parser/parser.pyx", + "stringsource", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/type.pxd", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/bool.pxd", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/complex.pxd", +}; + +/*--- Type declarations ---*/ +struct __pyx_obj_9httptools_6parser_6parser_HttpParser; +struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser; +struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser; + +/* "httptools/parser/parser.pyx":26 + * + * @cython.internal + * cdef class HttpParser: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_obj_9httptools_6parser_6parser_HttpParser { + PyObject_HEAD + struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *__pyx_vtab; + llhttp_t *_cparser; + llhttp_settings_t *_csettings; + PyObject *_current_header_name; + PyObject *_current_header_value; + PyObject *_proto_on_url; + PyObject *_proto_on_status; + PyObject *_proto_on_body; + PyObject *_proto_on_header; + PyObject *_proto_on_headers_complete; + PyObject *_proto_on_message_complete; + PyObject *_proto_on_chunk_header; + PyObject *_proto_on_chunk_complete; + PyObject *_proto_on_message_begin; + PyObject *_last_error; + Py_buffer py_buf; +}; + + +/* "httptools/parser/parser.pyx":215 + * + * + * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<< + * + * def __init__(self, protocol): + */ +struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser __pyx_base; +}; + + +/* "httptools/parser/parser.pyx":229 + * + * + * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<< + * + * def __init__(self, protocol): + */ +struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser __pyx_base; +}; + + + +/* "httptools/parser/parser.pyx":26 + * + * @cython.internal + * cdef class HttpParser: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser { + PyObject *(*_init)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *, llhttp_type_t); + PyObject *(*_maybe_call_on_header)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); + PyObject *(*_on_header_field)(struct 
__pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *); + PyObject *(*_on_header_value)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *); + PyObject *(*_on_headers_complete)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); + PyObject *(*_on_chunk_header)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); + PyObject *(*_on_chunk_complete)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); +}; +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; + + +/* "httptools/parser/parser.pyx":215 + * + * + * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<< + * + * def __init__(self, protocol): + */ + +struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser { + struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_base; +}; +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser; + + +/* "httptools/parser/parser.pyx":229 + * + * + * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<< + * + * def __init__(self, protocol): + */ + +struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser { + struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_base; +}; +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define 
__Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* KeywordStringCheck.proto */ +static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, 
value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* GetAttr.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallNoArg.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); +#else +#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int 
__Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyObjectSetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); +#else +#define 
__Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) +#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) +#endif + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* decode_c_string_utf16.proto */ +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 0; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = -1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} + +/* decode_c_bytes.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/* decode_bytes.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_bytes( + PyObject* string, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + return __Pyx_decode_c_bytes( + PyBytes_AS_STRING(string), PyBytes_GET_SIZE(string), + start, stop, encoding, errors, decode_func); +} + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* GCCDiagnostics.proto */ +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint8_t(uint8_t value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_ptrdiff_t(ptrdiff_t value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__init(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_protocol, llhttp_type_t __pyx_v_mode); /* proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__maybe_call_on_header(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_field(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_field); /* proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_value(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_val); /* proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_headers_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ +static PyObject 
*__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_header(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'cpython.version' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.exc' */ + +/* Module declarations from 'cpython.module' */ + +/* Module declarations from 'cpython.tuple' */ + +/* Module declarations from 'cpython.list' */ + +/* Module declarations from 'cpython.sequence' */ + +/* Module declarations from 'cpython.mapping' */ + +/* Module declarations from 'cpython.iterator' */ + +/* Module declarations from 'cpython.number' */ + +/* Module declarations from 'cpython.int' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.bool' */ +static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; + +/* Module declarations from 'cpython.long' */ + +/* Module declarations from 'cpython.float' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.complex' */ +static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; + +/* Module declarations from 'cpython.string' */ + +/* Module declarations from 'cpython.unicode' */ + +/* Module declarations from 'cpython.dict' */ + +/* Module declarations from 'cpython.instance' */ + +/* Module declarations from 'cpython.function' */ + +/* Module declarations from 'cpython.method' */ + +/* Module declarations from 'cpython.weakref' */ + +/* Module declarations from 'cpython.getargs' */ + +/* Module declarations from 'cpython.pythread' */ + +/* Module declarations from 'cpython.pystate' */ + +/* Module declarations from 'cpython.cobject' */ + +/* Module declarations from 'cpython.oldbuffer' */ + +/* Module declarations from 'cpython.set' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'cpython.bytes' */ + +/* Module declarations from 'cpython.pycapsule' */ + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'httptools.parser.python' */ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'httptools.parser' */ + +/* Module declarations from 'libc.stdint' */ + +/* Module declarations from 'httptools.parser.cparser' */ + +/* Module declarations from 'httptools.parser.parser' */ +static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpParser = 0; +static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpRequestParser = 0; +static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpResponseParser = 0; +static int __pyx_f_9httptools_6parser_6parser_cb_on_message_begin(llhttp_t *); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_url(llhttp_t *, char const *, size_t); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_status(llhttp_t *, char const *, size_t); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_header_field(llhttp_t *, char const *, size_t); /*proto*/ +static int 
__pyx_f_9httptools_6parser_6parser_cb_on_header_value(llhttp_t *, char const *, size_t); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete(llhttp_t *); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_body(llhttp_t *, char const *, size_t); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_message_complete(llhttp_t *); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_header(llhttp_t *); /*proto*/ +static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_complete(llhttp_t *); /*proto*/ +static PyObject *__pyx_f_9httptools_6parser_6parser_parser_error_from_errno(llhttp_t *, llhttp_errno_t); /*proto*/ +#define __Pyx_MODULE_NAME "httptools.parser.parser" +extern int __pyx_module_is_main_httptools__parser__parser; +int __pyx_module_is_main_httptools__parser__parser = 0; + +/* Implementation of 'httptools.parser.parser' */ +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_BaseException; +static const char __pyx_k_[] = "{}.{}"; +static const char __pyx_k_all[] = "__all__"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_errors[] = "errors"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_on_url[] = "on_url"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_context[] = "__context__"; +static const char __pyx_k_on_body[] = "on_body"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_protocol[] = "protocol"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_on_header[] = "on_header"; +static const char __pyx_k_on_status[] = "on_status"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_BaseException[] = "BaseException"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char __pyx_k_HttpParserError[] = "HttpParserError"; +static const char __pyx_k_on_chunk_header[] = "on_chunk_header"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_on_message_begin[] = "on_message_begin"; +static const char __pyx_k_HttpParserUpgrade[] = "HttpParserUpgrade"; +static const char __pyx_k_HttpRequestParser[] = "HttpRequestParser"; +static const char __pyx_k_on_chunk_complete[] = "on_chunk_complete"; +static const char __pyx_k_HttpResponseParser[] = "HttpResponseParser"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_on_headers_complete[] = "on_headers_complete"; +static const char __pyx_k_on_message_complete[] = "on_message_complete"; +static const char __pyx_k_invalid_headers_state[] = "invalid headers state"; +static const char __pyx_k_HttpParserCallbackError[] = "HttpParserCallbackError"; +static const char __pyx_k_HttpParserInvalidURLError[] = "HttpParserInvalidURLError"; +static const char __pyx_k_HttpParserInvalidMethodError[] = "HttpParserInvalidMethodError"; +static const char __pyx_k_HttpParserInvalidStatusError[] = "HttpParserInvalidStatusError"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to 
non-trivial __cinit__"; +static PyObject *__pyx_kp_u_; +static PyObject *__pyx_n_s_BaseException; +static PyObject *__pyx_n_s_HttpParserCallbackError; +static PyObject *__pyx_n_s_HttpParserError; +static PyObject *__pyx_n_s_HttpParserInvalidMethodError; +static PyObject *__pyx_n_s_HttpParserInvalidStatusError; +static PyObject *__pyx_n_s_HttpParserInvalidURLError; +static PyObject *__pyx_n_s_HttpParserUpgrade; +static PyObject *__pyx_n_s_HttpRequestParser; +static PyObject *__pyx_n_u_HttpRequestParser; +static PyObject *__pyx_n_s_HttpResponseParser; +static PyObject *__pyx_n_u_HttpResponseParser; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_n_s_all; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_context; +static PyObject *__pyx_n_s_errors; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_kp_u_invalid_headers_state; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_u_on_body; +static PyObject *__pyx_n_u_on_chunk_complete; +static PyObject *__pyx_n_u_on_chunk_header; +static PyObject *__pyx_n_u_on_header; +static PyObject *__pyx_n_u_on_headers_complete; +static PyObject *__pyx_n_u_on_message_begin; +static PyObject *__pyx_n_u_on_message_complete; +static PyObject *__pyx_n_u_on_status; +static PyObject *__pyx_n_u_on_url; +static PyObject *__pyx_n_s_protocol; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_test; +static int __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static void __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_4get_http_version(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_6should_keep_alive(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_8should_upgrade(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_10feed_data(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_12__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_14__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, PyObject *__pyx_v_protocol); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_2get_method(struct 
__pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +/* Late includes */ + +/* "httptools/parser/parser.pyx":44 + * Py_buffer py_buf + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * self._cparser = \ + * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + */ + +/* Python wrapper */ +static int __pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;} + if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1; + __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "httptools/parser/parser.pyx":45 + * + * def __cinit__(self): + * self._cparser = \ # <<<<<<<<<<<<<<
+ * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + * if self._cparser is NULL: + */ + __pyx_v_self->_cparser = ((llhttp_t *)PyMem_Malloc((sizeof(llhttp_t)))); + + /* "httptools/parser/parser.pyx":47 + * self._cparser = \ + * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + * if self._cparser is NULL: # <<<<<<<<<<<<<< + * raise MemoryError() + * + */ + __pyx_t_1 = ((__pyx_v_self->_cparser == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "httptools/parser/parser.pyx":48 + * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + * if self._cparser is NULL: + * raise MemoryError() # <<<<<<<<<<<<<< + * + * self._csettings = \ + */ + PyErr_NoMemory(); __PYX_ERR(0, 48, __pyx_L1_error) + + /* "httptools/parser/parser.pyx":47 + * self._cparser = \ + * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + * if self._cparser is NULL: # <<<<<<<<<<<<<< + * raise MemoryError() + * + */ + } + + /* "httptools/parser/parser.pyx":50 + * raise MemoryError() + * + * self._csettings = \ # <<<<<<<<<<<<<< + * <cparser.llhttp_settings_t*> PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) + * if self._csettings is NULL: + */ + __pyx_v_self->_csettings = ((llhttp_settings_t *)PyMem_Malloc((sizeof(llhttp_settings_t)))); + + /* "httptools/parser/parser.pyx":52 + * self._csettings = \ + * <cparser.llhttp_settings_t*> PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) + * if self._csettings is NULL: # <<<<<<<<<<<<<< + * raise MemoryError() + * + */ + __pyx_t_1 = ((__pyx_v_self->_csettings == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "httptools/parser/parser.pyx":53 + * <cparser.llhttp_settings_t*> PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) + * if self._csettings is NULL: + * raise MemoryError() # <<<<<<<<<<<<<< + * + * def __dealloc__(self): + */ + PyErr_NoMemory(); __PYX_ERR(0, 53, __pyx_L1_error) + + /* "httptools/parser/parser.pyx":52 + * self._csettings = \ + * <cparser.llhttp_settings_t*> PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) + * if self._csettings is NULL: # <<<<<<<<<<<<<< + * raise MemoryError() + * + */ + } + + /* "httptools/parser/parser.pyx":44 + * Py_buffer py_buf + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * self._cparser = \ + * <cparser.llhttp_t*> PyMem_Malloc(sizeof(cparser.llhttp_t)) + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("httptools.parser.parser.HttpParser.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":55 + * raise MemoryError() + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self._cparser) + * PyMem_Free(self._csettings) + */ + +/* Python wrapper */ +static void __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "httptools/parser/parser.pyx":56 + * + * def __dealloc__(self): + * PyMem_Free(self._cparser) # <<<<<<<<<<<<<< + * PyMem_Free(self._csettings) + * + */ + PyMem_Free(__pyx_v_self->_cparser); + + /* "httptools/parser/parser.pyx":57 + * def __dealloc__(self): + *
PyMem_Free(self._cparser) + * PyMem_Free(self._csettings) # <<<<<<<<<<<<<< + * + * cdef _init(self, protocol, cparser.llhttp_type_t mode): + */ + PyMem_Free(__pyx_v_self->_csettings); + + /* "httptools/parser/parser.pyx":55 + * raise MemoryError() + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self._cparser) + * PyMem_Free(self._csettings) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "httptools/parser/parser.pyx":59 + * PyMem_Free(self._csettings) + * + * cdef _init(self, protocol, cparser.llhttp_type_t mode): # <<<<<<<<<<<<<< + * cparser.llhttp_settings_init(self._csettings) + * + */ + +static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__init(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_protocol, llhttp_type_t __pyx_v_mode) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_init", 0); + + /* "httptools/parser/parser.pyx":60 + * + * cdef _init(self, protocol, cparser.llhttp_type_t mode): + * cparser.llhttp_settings_init(self._csettings) # <<<<<<<<<<<<<< + * + * cparser.llhttp_init(self._cparser, mode, self._csettings) + */ + llhttp_settings_init(__pyx_v_self->_csettings); + + /* "httptools/parser/parser.pyx":62 + * cparser.llhttp_settings_init(self._csettings) + * + * cparser.llhttp_init(self._cparser, mode, self._csettings) # <<<<<<<<<<<<<< + * self._cparser.data = <void*>self + * + */ + llhttp_init(__pyx_v_self->_cparser, __pyx_v_mode, __pyx_v_self->_csettings); + + /* "httptools/parser/parser.pyx":63 + * + * cparser.llhttp_init(self._cparser, mode, self._csettings) + * self._cparser.data = <void*>self # <<<<<<<<<<<<<< + * + * self._current_header_name = None + */ + __pyx_v_self->_cparser->data = ((void *)__pyx_v_self); + + /* "httptools/parser/parser.pyx":65 + * self._cparser.data = <void*>self + * + * self._current_header_name = None # <<<<<<<<<<<<<< + * self._current_header_value = None + * + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_self->_current_header_name); + __Pyx_DECREF(__pyx_v_self->_current_header_name); + __pyx_v_self->_current_header_name = ((PyObject*)Py_None); + + /* "httptools/parser/parser.pyx":66 + * + * self._current_header_name = None + * self._current_header_value = None # <<<<<<<<<<<<<< + * + * self._proto_on_header = getattr(protocol, 'on_header', None) + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_self->_current_header_value); + __Pyx_DECREF(__pyx_v_self->_current_header_value); + __pyx_v_self->_current_header_value = ((PyObject*)Py_None); + + /* "httptools/parser/parser.pyx":68 + * self._current_header_value = None + * + * self._proto_on_header = getattr(protocol, 'on_header', None) # <<<<<<<<<<<<<< + * if self._proto_on_header is not None: + * self._csettings.on_header_field = cb_on_header_field + */ + __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_header, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v_self->_proto_on_header); + __Pyx_DECREF(__pyx_v_self->_proto_on_header); + __pyx_v_self->_proto_on_header = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":69 + * + * self._proto_on_header = getattr(protocol, 'on_header', None) + * if self._proto_on_header is not None: # <<<<<<<<<<<<<< + *
self._csettings.on_header_field = cb_on_header_field + * self._csettings.on_header_value = cb_on_header_value + */ + __pyx_t_2 = (__pyx_v_self->_proto_on_header != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "httptools/parser/parser.pyx":70 + * self._proto_on_header = getattr(protocol, 'on_header', None) + * if self._proto_on_header is not None: + * self._csettings.on_header_field = cb_on_header_field # <<<<<<<<<<<<<< + * self._csettings.on_header_value = cb_on_header_value + * self._proto_on_headers_complete = getattr( + */ + __pyx_v_self->_csettings->on_header_field = __pyx_f_9httptools_6parser_6parser_cb_on_header_field; + + /* "httptools/parser/parser.pyx":71 + * if self._proto_on_header is not None: + * self._csettings.on_header_field = cb_on_header_field + * self._csettings.on_header_value = cb_on_header_value # <<<<<<<<<<<<<< + * self._proto_on_headers_complete = getattr( + * protocol, 'on_headers_complete', None) + */ + __pyx_v_self->_csettings->on_header_value = __pyx_f_9httptools_6parser_6parser_cb_on_header_value; + + /* "httptools/parser/parser.pyx":69 + * + * self._proto_on_header = getattr(protocol, 'on_header', None) + * if self._proto_on_header is not None: # <<<<<<<<<<<<<< + * self._csettings.on_header_field = cb_on_header_field + * self._csettings.on_header_value = cb_on_header_value + */ + } + + /* "httptools/parser/parser.pyx":72 + * self._csettings.on_header_field = cb_on_header_field + * self._csettings.on_header_value = cb_on_header_value + * self._proto_on_headers_complete = getattr( # <<<<<<<<<<<<<< + * protocol, 'on_headers_complete', None) + * self._csettings.on_headers_complete = cb_on_headers_complete + */ + __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_headers_complete, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v_self->_proto_on_headers_complete); + __Pyx_DECREF(__pyx_v_self->_proto_on_headers_complete); + __pyx_v_self->_proto_on_headers_complete = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":74 + * self._proto_on_headers_complete = getattr( + * protocol, 'on_headers_complete', None) + * self._csettings.on_headers_complete = cb_on_headers_complete # <<<<<<<<<<<<<< + * + * self._proto_on_body = getattr(protocol, 'on_body', None) + */ + __pyx_v_self->_csettings->on_headers_complete = __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete; + + /* "httptools/parser/parser.pyx":76 + * self._csettings.on_headers_complete = cb_on_headers_complete + * + * self._proto_on_body = getattr(protocol, 'on_body', None) # <<<<<<<<<<<<<< + * if self._proto_on_body is not None: + * self._csettings.on_body = cb_on_body + */ + __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_body, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v_self->_proto_on_body); + __Pyx_DECREF(__pyx_v_self->_proto_on_body); + __pyx_v_self->_proto_on_body = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":77 + * + * self._proto_on_body = getattr(protocol, 'on_body', None) + * if self._proto_on_body is not None: # <<<<<<<<<<<<<< + * self._csettings.on_body = cb_on_body + * + */ + __pyx_t_3 = (__pyx_v_self->_proto_on_body != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + if (__pyx_t_2) { + + /* "httptools/parser/parser.pyx":78 + * self._proto_on_body = getattr(protocol, 'on_body', None) + * if self._proto_on_body is not None: + * 
__pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":231 + * cdef class HttpResponseParser(HttpParser): + * + * def __init__(self, protocol): # <<<<<<<<<<<<<< + * self._init(protocol, cparser.HTTP_RESPONSE) + * + */ + +/* Python wrapper */ +static int __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_protocol = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_protocol,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_protocol)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 231, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_protocol = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 231, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self), __pyx_v_protocol); + + /* function exit code */ + 
__Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__init__", 0); + + /* "httptools/parser/parser.pyx":232 + * + * def __init__(self, protocol): + * self._init(protocol, cparser.HTTP_RESPONSE) # <<<<<<<<<<<<<< + * + * self._proto_on_status = getattr(protocol, 'on_status', None) + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._init(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self), __pyx_v_protocol, HTTP_RESPONSE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 232, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":234 + * self._init(protocol, cparser.HTTP_RESPONSE) + * + * self._proto_on_status = getattr(protocol, 'on_status', None) # <<<<<<<<<<<<<< + * if self._proto_on_status is not None: + * self._csettings.on_status = cb_on_status + */ + __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_status, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v_self->__pyx_base._proto_on_status); + __Pyx_DECREF(__pyx_v_self->__pyx_base._proto_on_status); + __pyx_v_self->__pyx_base._proto_on_status = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":235 + * + * self._proto_on_status = getattr(protocol, 'on_status', None) + * if self._proto_on_status is not None: # <<<<<<<<<<<<<< + * self._csettings.on_status = cb_on_status + * + */ + __pyx_t_2 = (__pyx_v_self->__pyx_base._proto_on_status != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "httptools/parser/parser.pyx":236 + * self._proto_on_status = getattr(protocol, 'on_status', None) + * if self._proto_on_status is not None: + * self._csettings.on_status = cb_on_status # <<<<<<<<<<<<<< + * + * def get_status_code(self): + */ + __pyx_v_self->__pyx_base._csettings->on_status = __pyx_f_9httptools_6parser_6parser_cb_on_status; + + /* "httptools/parser/parser.pyx":235 + * + * self._proto_on_status = getattr(protocol, 'on_status', None) + * if self._proto_on_status is not None: # <<<<<<<<<<<<<< + * self._csettings.on_status = cb_on_status + * + */ + } + + /* "httptools/parser/parser.pyx":231 + * cdef class HttpResponseParser(HttpParser): + * + * def __init__(self, protocol): # <<<<<<<<<<<<<< + * self._init(protocol, cparser.HTTP_RESPONSE) + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":238 + * self._csettings.on_status = cb_on_status + * + * def get_status_code(self): # <<<<<<<<<<<<<< + * cdef cparser.llhttp_t* parser = self._cparser + * return parser.status_code + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("get_status_code (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self) { + llhttp_t *__pyx_v_parser; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + llhttp_t *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_status_code", 0); + + /* "httptools/parser/parser.pyx":239 + * + * def get_status_code(self): + * cdef cparser.llhttp_t* parser = self._cparser # <<<<<<<<<<<<<< + * return parser.status_code + * + */ + __pyx_t_1 = __pyx_v_self->__pyx_base._cparser; + __pyx_v_parser = __pyx_t_1; + + /* "httptools/parser/parser.pyx":240 + * def get_status_code(self): + * cdef cparser.llhttp_t* parser = self._cparser + * return parser.status_code # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_uint16_t(__pyx_v_parser->status_code); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "httptools/parser/parser.pyx":238 + * self._csettings.on_status = cb_on_status + * + * def get_status_code(self): # <<<<<<<<<<<<<< + * cdef cparser.llhttp_t* parser = self._cparser + * return parser.status_code + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.get_status_code", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree 
fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); 
+ __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":243 + * + * + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_message_begin(llhttp_t *__pyx_v_parser) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_message_begin", 0); + + /* "httptools/parser/parser.pyx":244 + * + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._proto_on_message_begin() + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":245 + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_begin() + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":246 + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: + * pyparser._proto_on_message_begin() # <<<<<<<<<<<<<< + * except BaseException as ex: + * pyparser._last_error = ex + */ + __Pyx_INCREF(__pyx_v_pyparser->_proto_on_message_begin); + __pyx_t_5 = __pyx_v_pyparser->_proto_on_message_begin; __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_1 = (__pyx_t_6) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 246, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":245 + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_begin() + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":251 + * return -1 + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "httptools/parser/parser.pyx":247 + * try: + * pyparser._proto_on_message_begin() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_7) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_begin", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6) < 0) __PYX_ERR(0, 247, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __pyx_v_ex = __pyx_t_5; + /*try:*/ { + + /* "httptools/parser/parser.pyx":248 + * pyparser._proto_on_message_begin() + * except BaseException as ex: + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return -1 + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":249 + * except BaseException as ex: + * pyparser._last_error = ex + * return -1 # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = -1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":247 + * try: + * pyparser._proto_on_message_begin() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_7 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_7; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":245 + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_begin() + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":243 + * + * + * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = <HttpParser>parser.data + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + 
__Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_begin", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":254 + * + * + * cdef int cb_on_url(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_url(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_url", 0); + + /* "httptools/parser/parser.pyx":256 + * cdef int cb_on_url(cparser.llhttp_t* parser, + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._proto_on_url(at[:length]) + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":257 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":258 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._proto_on_url(at[:length]) # <<<<<<<<<<<<<< + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + */ + __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 258, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_pyparser->_proto_on_url); + __pyx_t_6 = __pyx_v_pyparser->_proto_on_url; __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_1 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":257 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":264 + * return cparser.HPE_USER + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "httptools/parser/parser.pyx":259 + * try: + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + * pyparser._last_error = ex + */ + __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_8) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 259, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __pyx_v_ex = __pyx_t_6; + /*try:*/ { + + /* "httptools/parser/parser.pyx":260 + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return cparser.HPE_USER + */ + llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_url` callback error")); + + /* "httptools/parser/parser.pyx":261 + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return cparser.HPE_USER + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":262 + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + * pyparser._last_error = ex + * return cparser.HPE_USER # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = HPE_USER; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":259 + * try: + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + * pyparser._last_error = ex + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_8 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_8; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":257 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_url(at[:length]) + * except BaseException as ex: 
+ */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":254 + * + * + * cdef int cb_on_url(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":267 + * + * + * cdef int cb_on_status(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_status(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_status", 0); + + /* "httptools/parser/parser.pyx":269 + * cdef int cb_on_status(cparser.llhttp_t* parser, + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._proto_on_status(at[:length]) + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":270 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":271 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._proto_on_status(at[:length]) # <<<<<<<<<<<<<< + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + */ + __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 271, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_pyparser->_proto_on_status); + __pyx_t_6 = __pyx_v_pyparser->_proto_on_status; __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + 
__Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 271, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":270 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":277 + * return cparser.HPE_USER + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "httptools/parser/parser.pyx":272 + * try: + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + * pyparser._last_error = ex + */ + __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_8) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 272, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __pyx_v_ex = __pyx_t_6; + /*try:*/ { + + /* "httptools/parser/parser.pyx":273 + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return cparser.HPE_USER + */ + llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_status` callback error")); + + /* "httptools/parser/parser.pyx":274 + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return cparser.HPE_USER + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":275 + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + * pyparser._last_error = ex + * return cparser.HPE_USER # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = HPE_USER; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":272 + * try: + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + * pyparser._last_error = ex + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_8 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_8; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":270 + * const char *at, size_t length) except -1: + * 
cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_status(at[:length]) + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":267 + * + * + * cdef int cb_on_status(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":280 + * + * + * cdef int cb_on_header_field(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_header_field(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_header_field", 0); + + /* "httptools/parser/parser.pyx":282 + * cdef int cb_on_header_field(cparser.llhttp_t* parser, + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._on_header_field(at[:length]) + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":283 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":284 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._on_header_field(at[:length]) # <<<<<<<<<<<<<< + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 284, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_field(__pyx_v_pyparser, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 284, 
__pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "httptools/parser/parser.pyx":283 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":290 + * return cparser.HPE_USER + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "httptools/parser/parser.pyx":285 + * try: + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + * pyparser._last_error = ex + */ + __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_6) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_1, &__pyx_t_7) < 0) __PYX_ERR(0, 285, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_7); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_ex = __pyx_t_1; + /*try:*/ { + + /* "httptools/parser/parser.pyx":286 + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return cparser.HPE_USER + */ + llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_header_field` callback error")); + + /* "httptools/parser/parser.pyx":287 + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return cparser.HPE_USER + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":288 + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + * pyparser._last_error = ex + * return cparser.HPE_USER # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = HPE_USER; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":285 + * try: + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + * pyparser._last_error = ex + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_6 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_6; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":283 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_field(at[:length]) + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + 
__Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":280 + * + * + * cdef int cb_on_header_field(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":293 + * + * + * cdef int cb_on_header_value(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_header_value(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_header_value", 0); + + /* "httptools/parser/parser.pyx":295 + * cdef int cb_on_header_value(cparser.llhttp_t* parser, + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._on_header_value(at[:length]) + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":296 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":297 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._on_header_value(at[:length]) # <<<<<<<<<<<<<< + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 297, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_value(__pyx_v_pyparser, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "httptools/parser/parser.pyx":296 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: + */ + } + + /* 
"httptools/parser/parser.pyx":303 + * return cparser.HPE_USER + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "httptools/parser/parser.pyx":298 + * try: + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + * pyparser._last_error = ex + */ + __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_6) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_1, &__pyx_t_7) < 0) __PYX_ERR(0, 298, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_7); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_ex = __pyx_t_1; + /*try:*/ { + + /* "httptools/parser/parser.pyx":299 + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return cparser.HPE_USER + */ + llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_header_value` callback error")); + + /* "httptools/parser/parser.pyx":300 + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return cparser.HPE_USER + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":301 + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + * pyparser._last_error = ex + * return cparser.HPE_USER # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = HPE_USER; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":298 + * try: + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + * pyparser._last_error = ex + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_6 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_6; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":296 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_header_value(at[:length]) + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":293 + * + * + * cdef int cb_on_header_value(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + + /* function exit code */ + 
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":306 + * + * + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete(llhttp_t *__pyx_v_parser) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_headers_complete", 0); + + /* "httptools/parser/parser.pyx":307 + * + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._on_headers_complete() + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":308 + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_headers_complete() + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":309 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._on_headers_complete() # <<<<<<<<<<<<<< + * except BaseException as ex: + * pyparser._last_error = ex + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_headers_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 309, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":308 + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_headers_complete() + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":314 + * return -1 + * else: + * if pyparser._cparser.upgrade: # <<<<<<<<<<<<<< + * return 1 + * else: + */ + /*else:*/ { + __pyx_t_5 = (__pyx_v_pyparser->_cparser->upgrade != 0); + if (__pyx_t_5) { + + /* "httptools/parser/parser.pyx":315 + * else: + * if pyparser._cparser.upgrade: + * return 1 # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = 1; + goto __pyx_L6_except_return; + + /* "httptools/parser/parser.pyx":314 + * return -1 + * else: + * if pyparser._cparser.upgrade: # <<<<<<<<<<<<<< + * return 1 + * else: + */ + } + + /* "httptools/parser/parser.pyx":317 + * return 1 + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_r = 0; + goto 
__pyx_L6_except_return; + } + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":310 + * try: + * pyparser._on_headers_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_6) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_7, &__pyx_t_8) < 0) __PYX_ERR(0, 310, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_t_7); + __pyx_v_ex = __pyx_t_7; + /*try:*/ { + + /* "httptools/parser/parser.pyx":311 + * pyparser._on_headers_complete() + * except BaseException as ex: + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return -1 + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":312 + * except BaseException as ex: + * pyparser._last_error = ex + * return -1 # <<<<<<<<<<<<<< + * else: + * if pyparser._cparser.upgrade: + */ + __pyx_r = -1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L14_return; + } + + /* "httptools/parser/parser.pyx":310 + * try: + * pyparser._on_headers_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + /*finally:*/ { + __pyx_L14_return: { + __pyx_t_6 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_6; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":308 + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_headers_complete() + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":306 + * + * + * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":320 + * + * + * cdef int cb_on_body(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_body(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject 
*__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_body", 0); + + /* "httptools/parser/parser.pyx":322 + * cdef int cb_on_body(cparser.llhttp_t* parser, + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._proto_on_body(at[:length]) + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":323 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_body(at[:length]) + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":324 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._proto_on_body(at[:length]) # <<<<<<<<<<<<<< + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + */ + __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 324, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_pyparser->_proto_on_body); + __pyx_t_6 = __pyx_v_pyparser->_proto_on_body; __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_1 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":323 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_body(at[:length]) + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":330 + * return cparser.HPE_USER + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "httptools/parser/parser.pyx":325 + * try: + * pyparser._proto_on_body(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + * pyparser._last_error = ex + */ + __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_8) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 325, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __pyx_v_ex = __pyx_t_6; + /*try:*/ { + + /* "httptools/parser/parser.pyx":326 + * pyparser._proto_on_body(at[:length]) + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return cparser.HPE_USER + */ + llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_body` callback error")); + + /* "httptools/parser/parser.pyx":327 + * except BaseException as ex: + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return cparser.HPE_USER + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":328 + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + * pyparser._last_error = ex + * return cparser.HPE_USER # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = HPE_USER; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":325 + * try: + * pyparser._proto_on_body(at[:length]) + * except BaseException as ex: # <<<<<<<<<<<<<< + * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + * pyparser._last_error = ex + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_8 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_8; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":323 + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_body(at[:length]) + * except 
BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":320 + * + * + * cdef int cb_on_body(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< + * const char *at, size_t length) except -1: + * cdef HttpParser pyparser = parser.data + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":333 + * + * + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_message_complete(llhttp_t *__pyx_v_parser) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_message_complete", 0); + + /* "httptools/parser/parser.pyx":334 + * + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._proto_on_message_complete() + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":335 + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_complete() + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":336 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._proto_on_message_complete() # <<<<<<<<<<<<<< + * except BaseException as ex: + * pyparser._last_error = ex + */ + __Pyx_INCREF(__pyx_v_pyparser->_proto_on_message_complete); + __pyx_t_5 = __pyx_v_pyparser->_proto_on_message_complete; __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_1 = (__pyx_t_6) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 336, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":335 + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_complete() + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":341 + * return -1 + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "httptools/parser/parser.pyx":337 + * try: + * pyparser._proto_on_message_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_7) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6) < 0) __PYX_ERR(0, 337, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __pyx_v_ex = __pyx_t_5; + /*try:*/ { + + /* "httptools/parser/parser.pyx":338 + * pyparser._proto_on_message_complete() + * except BaseException as ex: + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return -1 + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":339 + * except BaseException as ex: + * pyparser._last_error = ex + * return -1 # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = -1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":337 + * try: + * pyparser._proto_on_message_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_7 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_7; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":335 + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._proto_on_message_complete() + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":333 + * + * + * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + 
__Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":344 + * + * + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_header(llhttp_t *__pyx_v_parser) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_chunk_header", 0); + + /* "httptools/parser/parser.pyx":345 + * + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._on_chunk_header() + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":346 + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_header() + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":347 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._on_chunk_header() # <<<<<<<<<<<<<< + * except BaseException as ex: + * pyparser._last_error = ex + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_header(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 347, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":346 + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_header() + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":352 + * return -1 + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":348 + * try: + * pyparser._on_chunk_header() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_5) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 348, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); 
+ __Pyx_INCREF(__pyx_t_6); + __pyx_v_ex = __pyx_t_6; + /*try:*/ { + + /* "httptools/parser/parser.pyx":349 + * pyparser._on_chunk_header() + * except BaseException as ex: + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return -1 + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":350 + * except BaseException as ex: + * pyparser._last_error = ex + * return -1 # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = -1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":348 + * try: + * pyparser._on_chunk_header() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_5 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_5; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":346 + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_header() + * except BaseException as ex: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":344 + * + * + * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":355 + * + * + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + +static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_complete(llhttp_t *__pyx_v_parser) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; + PyObject *__pyx_v_ex = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cb_on_chunk_complete", 0); + + /* "httptools/parser/parser.pyx":356 + * + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data # <<<<<<<<<<<<<< + * try: + * pyparser._on_chunk_complete() + */ + __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); + __Pyx_INCREF(__pyx_t_1); + __pyx_v_pyparser = ((struct 
__pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":357 + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_complete() + * except BaseException as ex: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "httptools/parser/parser.pyx":358 + * cdef HttpParser pyparser = parser.data + * try: + * pyparser._on_chunk_complete() # <<<<<<<<<<<<<< + * except BaseException as ex: + * pyparser._last_error = ex + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 358, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":357 + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_complete() + * except BaseException as ex: + */ + } + + /* "httptools/parser/parser.pyx":363 + * return -1 + * else: + * return 0 # <<<<<<<<<<<<<< + * + * + */ + /*else:*/ { + __pyx_r = 0; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":359 + * try: + * pyparser._on_chunk_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); + if (__pyx_t_5) { + __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 359, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + __Pyx_INCREF(__pyx_t_6); + __pyx_v_ex = __pyx_t_6; + /*try:*/ { + + /* "httptools/parser/parser.pyx":360 + * pyparser._on_chunk_complete() + * except BaseException as ex: + * pyparser._last_error = ex # <<<<<<<<<<<<<< + * return -1 + * else: + */ + __Pyx_INCREF(__pyx_v_ex); + __Pyx_GIVEREF(__pyx_v_ex); + __Pyx_GOTREF(__pyx_v_pyparser->_last_error); + __Pyx_DECREF(__pyx_v_pyparser->_last_error); + __pyx_v_pyparser->_last_error = __pyx_v_ex; + + /* "httptools/parser/parser.pyx":361 + * except BaseException as ex: + * pyparser._last_error = ex + * return -1 # <<<<<<<<<<<<<< + * else: + * return 0 + */ + __pyx_r = -1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L13_return; + } + + /* "httptools/parser/parser.pyx":359 + * try: + * pyparser._on_chunk_complete() + * except BaseException as ex: # <<<<<<<<<<<<<< + * pyparser._last_error = ex + * return -1 + */ + /*finally:*/ { + __pyx_L13_return: { + __pyx_t_5 = __pyx_r; + __Pyx_DECREF(__pyx_v_ex); + __pyx_v_ex = NULL; + __pyx_r = __pyx_t_5; + goto __pyx_L6_except_return; + } + } + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "httptools/parser/parser.pyx":357 + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: + * cdef HttpParser pyparser = parser.data + * try: # <<<<<<<<<<<<<< + * pyparser._on_chunk_complete() + * except BaseException as ex: + */ + 
__Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "httptools/parser/parser.pyx":355 + * + * + * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< + * cdef HttpParser pyparser = parser.data + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); + __Pyx_XDECREF(__pyx_v_ex); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/parser.pyx":366 + * + * + * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): # <<<<<<<<<<<<<< + * cdef bytes reason = cparser.llhttp_get_error_reason(parser) + * + */ + +static PyObject *__pyx_f_9httptools_6parser_6parser_parser_error_from_errno(llhttp_t *__pyx_v_parser, llhttp_errno_t __pyx_v_errno) { + PyObject *__pyx_v_reason = 0; + PyObject *__pyx_v_cls = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("parser_error_from_errno", 0); + + /* "httptools/parser/parser.pyx":367 + * + * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): + * cdef bytes reason = cparser.llhttp_get_error_reason(parser) # <<<<<<<<<<<<<< + * + * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, + */ + __pyx_t_1 = __Pyx_PyBytes_FromString(llhttp_get_error_reason(__pyx_v_parser)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 367, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_reason = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":369 + * cdef bytes reason = cparser.llhttp_get_error_reason(parser) + * + * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, # <<<<<<<<<<<<<< + * cparser.HPE_CB_HEADERS_COMPLETE, + * cparser.HPE_CB_MESSAGE_COMPLETE, + */ + switch (__pyx_v_errno) { + case HPE_CB_MESSAGE_BEGIN: + case HPE_CB_HEADERS_COMPLETE: + + /* "httptools/parser/parser.pyx":370 + * + * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, + * cparser.HPE_CB_HEADERS_COMPLETE, # <<<<<<<<<<<<<< + * cparser.HPE_CB_MESSAGE_COMPLETE, + * cparser.HPE_CB_CHUNK_HEADER, + */ + case HPE_CB_MESSAGE_COMPLETE: + + /* "httptools/parser/parser.pyx":371 + * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, + * cparser.HPE_CB_HEADERS_COMPLETE, + * cparser.HPE_CB_MESSAGE_COMPLETE, # <<<<<<<<<<<<<< + * cparser.HPE_CB_CHUNK_HEADER, + * cparser.HPE_CB_CHUNK_COMPLETE, + */ + case HPE_CB_CHUNK_HEADER: + + /* "httptools/parser/parser.pyx":372 + * cparser.HPE_CB_HEADERS_COMPLETE, + * cparser.HPE_CB_MESSAGE_COMPLETE, + * cparser.HPE_CB_CHUNK_HEADER, # <<<<<<<<<<<<<< + * cparser.HPE_CB_CHUNK_COMPLETE, + * cparser.HPE_USER): + */ + case HPE_CB_CHUNK_COMPLETE: + + /* "httptools/parser/parser.pyx":373 + * cparser.HPE_CB_MESSAGE_COMPLETE, + * cparser.HPE_CB_CHUNK_HEADER, + * cparser.HPE_CB_CHUNK_COMPLETE, # <<<<<<<<<<<<<< + * cparser.HPE_USER): + * cls = 
HttpParserCallbackError + */ + case HPE_USER: + + /* "httptools/parser/parser.pyx":375 + * cparser.HPE_CB_CHUNK_COMPLETE, + * cparser.HPE_USER): + * cls = HttpParserCallbackError # <<<<<<<<<<<<<< + * + * elif errno == cparser.HPE_INVALID_STATUS: + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserCallbackError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 375, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_cls = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":369 + * cdef bytes reason = cparser.llhttp_get_error_reason(parser) + * + * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, # <<<<<<<<<<<<<< + * cparser.HPE_CB_HEADERS_COMPLETE, + * cparser.HPE_CB_MESSAGE_COMPLETE, + */ + break; + case HPE_INVALID_STATUS: + + /* "httptools/parser/parser.pyx":378 + * + * elif errno == cparser.HPE_INVALID_STATUS: + * cls = HttpParserInvalidStatusError # <<<<<<<<<<<<<< + * + * elif errno == cparser.HPE_INVALID_METHOD: + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidStatusError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_cls = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":377 + * cls = HttpParserCallbackError + * + * elif errno == cparser.HPE_INVALID_STATUS: # <<<<<<<<<<<<<< + * cls = HttpParserInvalidStatusError + * + */ + break; + case HPE_INVALID_METHOD: + + /* "httptools/parser/parser.pyx":381 + * + * elif errno == cparser.HPE_INVALID_METHOD: + * cls = HttpParserInvalidMethodError # <<<<<<<<<<<<<< + * + * elif errno == cparser.HPE_INVALID_URL: + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidMethodError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 381, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_cls = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":380 + * cls = HttpParserInvalidStatusError + * + * elif errno == cparser.HPE_INVALID_METHOD: # <<<<<<<<<<<<<< + * cls = HttpParserInvalidMethodError + * + */ + break; + case HPE_INVALID_URL: + + /* "httptools/parser/parser.pyx":384 + * + * elif errno == cparser.HPE_INVALID_URL: + * cls = HttpParserInvalidURLError # <<<<<<<<<<<<<< + * + * else: + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 384, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_cls = __pyx_t_1; + __pyx_t_1 = 0; + + /* "httptools/parser/parser.pyx":383 + * cls = HttpParserInvalidMethodError + * + * elif errno == cparser.HPE_INVALID_URL: # <<<<<<<<<<<<<< + * cls = HttpParserInvalidURLError + * + */ + break; + default: + + /* "httptools/parser/parser.pyx":387 + * + * else: + * cls = HttpParserError # <<<<<<<<<<<<<< + * + * return cls(reason.decode('latin-1')) + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 387, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_cls = __pyx_t_1; + __pyx_t_1 = 0; + break; + } + + /* "httptools/parser/parser.pyx":389 + * cls = HttpParserError + * + * return cls(reason.decode('latin-1')) # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_decode_bytes(__pyx_v_reason, 0, PY_SSIZE_T_MAX, NULL, NULL, PyUnicode_DecodeLatin1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_cls); + __pyx_t_3 = __pyx_v_cls; __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 389, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "httptools/parser/parser.pyx":366 + * + * + * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): # <<<<<<<<<<<<<< + * cdef bytes reason = cparser.llhttp_get_error_reason(parser) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("httptools.parser.parser.parser_error_from_errno", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_reason); + __Pyx_XDECREF(__pyx_v_cls); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_vtable_9httptools_6parser_6parser_HttpParser; + +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpParser(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o); + p->__pyx_vtab = __pyx_vtabptr_9httptools_6parser_6parser_HttpParser; + p->_current_header_name = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_current_header_value = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_proto_on_url = Py_None; Py_INCREF(Py_None); + p->_proto_on_status = Py_None; Py_INCREF(Py_None); + p->_proto_on_body = Py_None; Py_INCREF(Py_None); + p->_proto_on_header = Py_None; Py_INCREF(Py_None); + p->_proto_on_headers_complete = Py_None; Py_INCREF(Py_None); + p->_proto_on_message_complete = Py_None; Py_INCREF(Py_None); + p->_proto_on_chunk_header = Py_None; Py_INCREF(Py_None); + p->_proto_on_chunk_complete = Py_None; Py_INCREF(Py_None); + p->_proto_on_message_begin = Py_None; Py_INCREF(Py_None); + p->_last_error = Py_None; Py_INCREF(Py_None); + p->py_buf.obj = NULL; + if (unlikely(__pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(o, __pyx_empty_tuple, NULL) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser(PyObject *o) { + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + 
Py_CLEAR(p->_current_header_name); + Py_CLEAR(p->_current_header_value); + Py_CLEAR(p->_proto_on_url); + Py_CLEAR(p->_proto_on_status); + Py_CLEAR(p->_proto_on_body); + Py_CLEAR(p->_proto_on_header); + Py_CLEAR(p->_proto_on_headers_complete); + Py_CLEAR(p->_proto_on_message_complete); + Py_CLEAR(p->_proto_on_chunk_header); + Py_CLEAR(p->_proto_on_chunk_complete); + Py_CLEAR(p->_proto_on_message_begin); + Py_CLEAR(p->_last_error); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; + if (p->_proto_on_url) { + e = (*v)(p->_proto_on_url, a); if (e) return e; + } + if (p->_proto_on_status) { + e = (*v)(p->_proto_on_status, a); if (e) return e; + } + if (p->_proto_on_body) { + e = (*v)(p->_proto_on_body, a); if (e) return e; + } + if (p->_proto_on_header) { + e = (*v)(p->_proto_on_header, a); if (e) return e; + } + if (p->_proto_on_headers_complete) { + e = (*v)(p->_proto_on_headers_complete, a); if (e) return e; + } + if (p->_proto_on_message_complete) { + e = (*v)(p->_proto_on_message_complete, a); if (e) return e; + } + if (p->_proto_on_chunk_header) { + e = (*v)(p->_proto_on_chunk_header, a); if (e) return e; + } + if (p->_proto_on_chunk_complete) { + e = (*v)(p->_proto_on_chunk_complete, a); if (e) return e; + } + if (p->_proto_on_message_begin) { + e = (*v)(p->_proto_on_message_begin, a); if (e) return e; + } + if (p->_last_error) { + e = (*v)(p->_last_error, a); if (e) return e; + } + if (p->py_buf.obj) { + e = (*v)(p->py_buf.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_9httptools_6parser_6parser_HttpParser(PyObject *o) { + PyObject* tmp; + struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; + tmp = ((PyObject*)p->_proto_on_url); + p->_proto_on_url = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_status); + p->_proto_on_status = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_body); + p->_proto_on_body = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_header); + p->_proto_on_header = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_headers_complete); + p->_proto_on_headers_complete = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_message_complete); + p->_proto_on_message_complete = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_chunk_header); + p->_proto_on_chunk_header = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_chunk_complete); + p->_proto_on_chunk_complete = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_proto_on_message_begin); + p->_proto_on_message_begin = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_last_error); + p->_last_error = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->py_buf.obj); + return 0; +} + +static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpParser[] = { + {"get_http_version", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_5get_http_version, METH_NOARGS, 0}, + {"should_keep_alive", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_7should_keep_alive, METH_NOARGS, 0}, + {"should_upgrade", 
(PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_9should_upgrade, METH_NOARGS, 0}, + {"feed_data", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_11feed_data, METH_O, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_13__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_15__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_9httptools_6parser_6parser_HttpParser = { + PyVarObject_HEAD_INIT(0, 0) + "httptools.parser.parser.HttpParser", /*tp_name*/ + sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpParser), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/ + __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_9httptools_6parser_6parser_HttpParser, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_9httptools_6parser_6parser_HttpParser, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser __pyx_vtable_9httptools_6parser_6parser_HttpRequestParser; + +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *p; + PyObject *o = __pyx_tp_new_9httptools_6parser_6parser_HttpParser(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser*)__pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser; + return o; +} + +static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpRequestParser[] = { + {"get_method", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_3get_method, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_5__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_7__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} 
+}; + +static PyTypeObject __pyx_type_9httptools_6parser_6parser_HttpRequestParser = { + PyVarObject_HEAD_INIT(0, 0) + "httptools.parser.parser.HttpRequestParser", /*tp_name*/ + sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/ + __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_9httptools_6parser_6parser_HttpRequestParser, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_1__init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser __pyx_vtable_9httptools_6parser_6parser_HttpResponseParser; + +static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *p; + PyObject *o = __pyx_tp_new_9httptools_6parser_6parser_HttpParser(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser*)__pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser; + return o; +} + +static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpResponseParser[] = { + {"get_status_code", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_9httptools_6parser_6parser_HttpResponseParser = { + PyVarObject_HEAD_INIT(0, 0) + "httptools.parser.parser.HttpResponseParser", /*tp_name*/ + sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + 
__pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/ + __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_9httptools_6parser_6parser_HttpResponseParser, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_parser(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_parser}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "parser", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, + {&__pyx_n_s_BaseException, __pyx_k_BaseException, sizeof(__pyx_k_BaseException), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserCallbackError, __pyx_k_HttpParserCallbackError, sizeof(__pyx_k_HttpParserCallbackError), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserError, __pyx_k_HttpParserError, sizeof(__pyx_k_HttpParserError), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserInvalidMethodError, __pyx_k_HttpParserInvalidMethodError, sizeof(__pyx_k_HttpParserInvalidMethodError), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserInvalidStatusError, __pyx_k_HttpParserInvalidStatusError, 
sizeof(__pyx_k_HttpParserInvalidStatusError), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserInvalidURLError, __pyx_k_HttpParserInvalidURLError, sizeof(__pyx_k_HttpParserInvalidURLError), 0, 0, 1, 1}, + {&__pyx_n_s_HttpParserUpgrade, __pyx_k_HttpParserUpgrade, sizeof(__pyx_k_HttpParserUpgrade), 0, 0, 1, 1}, + {&__pyx_n_s_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 0, 1, 1}, + {&__pyx_n_u_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 1, 0, 1}, + {&__pyx_n_s_HttpResponseParser, __pyx_k_HttpResponseParser, sizeof(__pyx_k_HttpResponseParser), 0, 0, 1, 1}, + {&__pyx_n_u_HttpResponseParser, __pyx_k_HttpResponseParser, sizeof(__pyx_k_HttpResponseParser), 0, 1, 0, 1}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_context, __pyx_k_context, sizeof(__pyx_k_context), 0, 0, 1, 1}, + {&__pyx_n_s_errors, __pyx_k_errors, sizeof(__pyx_k_errors), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_kp_u_invalid_headers_state, __pyx_k_invalid_headers_state, sizeof(__pyx_k_invalid_headers_state), 0, 1, 0, 0}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_u_on_body, __pyx_k_on_body, sizeof(__pyx_k_on_body), 0, 1, 0, 1}, + {&__pyx_n_u_on_chunk_complete, __pyx_k_on_chunk_complete, sizeof(__pyx_k_on_chunk_complete), 0, 1, 0, 1}, + {&__pyx_n_u_on_chunk_header, __pyx_k_on_chunk_header, sizeof(__pyx_k_on_chunk_header), 0, 1, 0, 1}, + {&__pyx_n_u_on_header, __pyx_k_on_header, sizeof(__pyx_k_on_header), 0, 1, 0, 1}, + {&__pyx_n_u_on_headers_complete, __pyx_k_on_headers_complete, sizeof(__pyx_k_on_headers_complete), 0, 1, 0, 1}, + {&__pyx_n_u_on_message_begin, __pyx_k_on_message_begin, sizeof(__pyx_k_on_message_begin), 0, 1, 0, 1}, + {&__pyx_n_u_on_message_complete, __pyx_k_on_message_complete, sizeof(__pyx_k_on_message_complete), 0, 1, 0, 1}, + {&__pyx_n_u_on_status, __pyx_k_on_status, sizeof(__pyx_k_on_status), 0, 1, 0, 1}, + {&__pyx_n_u_on_url, __pyx_k_on_url, sizeof(__pyx_k_on_url), 0, 1, 0, 1}, + {&__pyx_n_s_protocol, __pyx_k_protocol, sizeof(__pyx_k_protocol), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_MemoryError = 
__Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 48, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_BaseException = __Pyx_GetBuiltinName(__pyx_n_s_BaseException); if (!__pyx_builtin_BaseException) __PYX_ERR(0, 247, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "httptools/parser/parser.pyx":22 + * + * + * __all__ = ('HttpRequestParser', 'HttpResponseParser') # <<<<<<<<<<<<<< + * + * + */ + 
__pyx_tuple__8 = PyTuple_Pack(2, __pyx_n_u_HttpRequestParser, __pyx_n_u_HttpResponseParser); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_9httptools_6parser_6parser_HttpParser = &__pyx_vtable_9httptools_6parser_6parser_HttpParser; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._init = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *, llhttp_type_t))__pyx_f_9httptools_6parser_6parser_10HttpParser__init; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._maybe_call_on_header = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__maybe_call_on_header; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_header_field = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_field; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_header_value = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_value; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_headers_complete = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_headers_complete; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_chunk_header = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_header; + __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_chunk_complete = 
(PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_complete; + if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_9httptools_6parser_6parser_HttpParser.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpParser.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_9httptools_6parser_6parser_HttpParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + __pyx_ptype_9httptools_6parser_6parser_HttpParser = &__pyx_type_9httptools_6parser_6parser_HttpParser; + __pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser = &__pyx_vtable_9httptools_6parser_6parser_HttpRequestParser; + __pyx_vtable_9httptools_6parser_6parser_HttpRequestParser.__pyx_base = *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; + __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_base = __pyx_ptype_9httptools_6parser_6parser_HttpParser; + if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpRequestParser, (PyObject *)&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) + __pyx_ptype_9httptools_6parser_6parser_HttpRequestParser = &__pyx_type_9httptools_6parser_6parser_HttpRequestParser; + __pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser = &__pyx_vtable_9httptools_6parser_6parser_HttpResponseParser; + __pyx_vtable_9httptools_6parser_6parser_HttpResponseParser.__pyx_base = *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; + __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_base = __pyx_ptype_9httptools_6parser_6parser_HttpParser; + if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_getattro == PyObject_GenericGetAttr)) { + 
__pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpResponseParser, (PyObject *)&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) + __pyx_ptype_9httptools_6parser_6parser_HttpResponseParser = &__pyx_type_9httptools_6parser_6parser_HttpResponseParser; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 
+__Pyx_PyMODINIT_FUNC initparser(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initparser(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_parser(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_parser(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_parser(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'parser' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_parser(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + PyEval_InitThreads(); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("parser", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_httptools__parser__parser) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "httptools.parser.parser")) { + if (unlikely(PyDict_SetItemString(modules, "httptools.parser.parser", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "httptools/parser/parser.pyx":11 + * + * + * from .errors import (HttpParserError, # <<<<<<<<<<<<<< + * HttpParserCallbackError, + * HttpParserInvalidStatusError, + */ + __pyx_t_1 = PyList_New(6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_n_s_HttpParserError); + __Pyx_GIVEREF(__pyx_n_s_HttpParserError); + PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_HttpParserError); + __Pyx_INCREF(__pyx_n_s_HttpParserCallbackError); + __Pyx_GIVEREF(__pyx_n_s_HttpParserCallbackError); + PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_HttpParserCallbackError); + __Pyx_INCREF(__pyx_n_s_HttpParserInvalidStatusError); + __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidStatusError); + PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_HttpParserInvalidStatusError); + __Pyx_INCREF(__pyx_n_s_HttpParserInvalidMethodError); + __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidMethodError); + PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_HttpParserInvalidMethodError); + __Pyx_INCREF(__pyx_n_s_HttpParserInvalidURLError); + __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidURLError); + PyList_SET_ITEM(__pyx_t_1, 4, __pyx_n_s_HttpParserInvalidURLError); + __Pyx_INCREF(__pyx_n_s_HttpParserUpgrade); + __Pyx_GIVEREF(__pyx_n_s_HttpParserUpgrade); + PyList_SET_ITEM(__pyx_t_1, 5, __pyx_n_s_HttpParserUpgrade); + __pyx_t_2 = __Pyx_Import(__pyx_n_s_errors, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserError, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserCallbackError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserCallbackError, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidStatusError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidStatusError, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidMethodError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidMethodError, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidURLError, __pyx_t_1) < 0) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserUpgrade); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserUpgrade, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "httptools/parser/parser.pyx":22 + * + * + * __all__ = ('HttpRequestParser', 'HttpResponseParser') # <<<<<<<<<<<<<< + * + * + */ + if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_tuple__8) < 0) __PYX_ERR(0, 22, __pyx_L1_error) + + /* "httptools/parser/parser.pyx":1 + * #cython: language_level=3 # <<<<<<<<<<<<<< + * + * from __future__ import print_function + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init httptools.parser.parser", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init httptools.parser.parser"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* KeywordStringCheck */ +static int __Pyx_CheckKeywordStrings( + PyObject *kwdict, + const char* function_name, + int kw_allowed) +{ + PyObject* key = 0; + Py_ssize_t pos = 0; +#if CYTHON_COMPILING_IN_PYPY + if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0)) + goto invalid_keyword; + return 1; +#else + while (PyDict_Next(kwdict, &pos, &key, 0)) { + #if PY_MAJOR_VERSION < 3 + if (unlikely(!PyString_Check(key))) + #endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } + if ((!kw_allowed) && unlikely(key)) + goto invalid_keyword; + return 1; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + return 0; +#endif +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif + return 0; +} + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; icurexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* GetAttr3 */ +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r = __Pyx_GetAttr(o, n); + return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); +} + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int 
flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallNoArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif +#ifdef __Pyx_CyFunction_USED + if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) +#else + if (likely(PyCFunction_Check(func))) +#endif + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (__Pyx_PyFastCFunction_Check(func)) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject 
*result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if 
(!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + 
return; +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void 
__Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyObjectSetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_setattro)) + return tp->tp_setattro(obj, attr_name, value); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_setattr)) + return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); +#endif + return PyObject_SetAttr(obj, attr_name, value); +} +#endif + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* decode_c_bytes */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + if (unlikely((start < 0) | (stop < 0))) { + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (stop > length) + stop = length; + if (unlikely(stop <= start)) + return __Pyx_NewRef(__pyx_empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + 
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* SetVTable */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + 
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* TypeImport */ +#ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint8_t(uint8_t value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const uint8_t neg_one = (uint8_t) -1, const_zero = (uint8_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(uint8_t) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(uint8_t) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(uint8_t) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(uint8_t) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(uint8_t) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(uint8_t), + little, !is_unsigned); + } +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned 
long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_ptrdiff_t(ptrdiff_t value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const ptrdiff_t neg_one = (ptrdiff_t) -1, const_zero = (ptrdiff_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(ptrdiff_t) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(ptrdiff_t) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(ptrdiff_t) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(ptrdiff_t) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(ptrdiff_t) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(ptrdiff_t), + little, !is_unsigned); + } +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const uint16_t neg_one = (uint16_t) -1, const_zero = (uint16_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(uint16_t) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(uint16_t) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(uint16_t) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(uint16_t) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(uint16_t) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(uint16_t), + little, !is_unsigned); + } +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return 
PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) 
| (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 
* sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << 
PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } 
else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + PyObject *t = PyTuple_GET_ITEM(tuple, i); +#if PY_MAJOR_VERSION < 3 + if (likely(exc_type == t)) return 1; +#endif + if (likely(PyExceptionClass_Check(t))) { + if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; + } else { + } + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(exc_type))) { + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } else if (likely(PyTuple_Check(exc_type))) { + return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); + } else { + } + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + assert(PyExceptionClass_Check(exc_type1)); + assert(PyExceptionClass_Check(exc_type2)); + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ +static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int
__Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/parser.cpython-39-darwin.so b/myenv/lib/python3.9/site-packages/httptools/parser/parser.cpython-39-darwin.so new file mode 100755 index 0000000..89c036c Binary files /dev/null and b/myenv/lib/python3.9/site-packages/httptools/parser/parser.cpython-39-darwin.so differ diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.c b/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.c new file mode 100644 index 0000000..3f56d85 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.c @@ -0,0 +1,5935 @@ +/* Generated by Cython 0.29.24 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "depends": [], + "extra_compile_args": [ + "-O2" + ], + "name": "httptools.parser.url_parser", + "sources": [ + "httptools/parser/url_parser.pyx" + ] + }, + "module_name": "httptools.parser.url_parser" +} +END: Cython Metadata */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. 
+#else +#define CYTHON_ABI "0_29_24" +#define CYTHON_HEX_VERSION 0x001D18F0 +#define CYTHON_FUTURE_DIVISION 1 +#include <stddef.h> +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 +
#define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<typename T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define
__Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS |
METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if defined(PyUnicode_IS_READY) + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #else + #define __Pyx_PyUnicode_READY(op) (0) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define 
PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__httptools__parser__url_parser +#define __PYX_HAVE_API__httptools__parser__url_parser +/* Early includes */ +#include +#include +#include "pythread.h" +#include +#include "http_parser.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + 
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) 
(Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = 
PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "stringsource", + "httptools/parser/url_parser.pyx", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/type.pxd", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/bool.pxd", + ".eggs/Cython-0.29.24-py3.9-macosx-10.9-x86_64.egg/Cython/Includes/cpython/complex.pxd", +}; + +/*--- Type declarations ---*/ +struct __pyx_obj_9httptools_6parser_10url_parser_URL; + +/* "httptools/parser/url_parser.pyx":16 + * + * @cython.freelist(250) + * cdef class URL: # <<<<<<<<<<<<<< + * cdef readonly bytes schema + * cdef readonly bytes host + */ +struct __pyx_obj_9httptools_6parser_10url_parser_URL { + PyObject_HEAD + PyObject *schema; + PyObject *host; + PyObject *port; + PyObject *path; + PyObject *query; + PyObject *fragment; + PyObject *userinfo; +}; + + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), 
__LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, 
value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* IncludeStringH.proto */ +#include + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) 
__Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* GCCDiagnostics.proto */ +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* 
Module declarations from 'cpython.mem' */ + +/* Module declarations from 'cpython.version' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.exc' */ + +/* Module declarations from 'cpython.module' */ + +/* Module declarations from 'cpython.tuple' */ + +/* Module declarations from 'cpython.list' */ + +/* Module declarations from 'cpython.sequence' */ + +/* Module declarations from 'cpython.mapping' */ + +/* Module declarations from 'cpython.iterator' */ + +/* Module declarations from 'cpython.number' */ + +/* Module declarations from 'cpython.int' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.bool' */ +static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; + +/* Module declarations from 'cpython.long' */ + +/* Module declarations from 'cpython.float' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.complex' */ +static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; + +/* Module declarations from 'cpython.string' */ + +/* Module declarations from 'cpython.unicode' */ + +/* Module declarations from 'cpython.dict' */ + +/* Module declarations from 'cpython.instance' */ + +/* Module declarations from 'cpython.function' */ + +/* Module declarations from 'cpython.method' */ + +/* Module declarations from 'cpython.weakref' */ + +/* Module declarations from 'cpython.getargs' */ + +/* Module declarations from 'cpython.pythread' */ + +/* Module declarations from 'cpython.pystate' */ + +/* Module declarations from 'cpython.cobject' */ + +/* Module declarations from 'cpython.oldbuffer' */ + +/* Module declarations from 'cpython.set' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'cpython.bytes' */ + +/* Module declarations from 'cpython.pycapsule' */ + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'httptools.parser' */ + +/* Module declarations from 'libc.stdint' */ + +/* Module declarations from 'httptools.parser.url_cparser' */ + +/* Module declarations from 'httptools.parser.url_parser' */ +static PyTypeObject *__pyx_ptype_9httptools_6parser_10url_parser_URL = 0; +#define __Pyx_MODULE_NAME "httptools.parser.url_parser" +extern int __pyx_module_is_main_httptools__parser__url_parser; +int __pyx_module_is_main_httptools__parser__url_parser = 0; + +/* Implementation of 'httptools.parser.url_parser' */ +static PyObject *__pyx_builtin_TypeError; +static const char __pyx_k_ln[] = "ln"; +static const char __pyx_k_URL[] = "URL"; +static const char __pyx_k_all[] = "__all__"; +static const char __pyx_k_off[] = "off"; +static const char __pyx_k_res[] = "res"; +static const char __pyx_k_url[] = "url"; +static const char __pyx_k_host[] = "host"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_path[] = "path"; +static const char __pyx_k_port[] = "port"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_query[] = "query"; +static const char __pyx_k_errors[] = "errors"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static 
const char __pyx_k_parsed[] = "parsed"; +static const char __pyx_k_py_buf[] = "py_buf"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_result[] = "result"; +static const char __pyx_k_schema[] = "schema"; +static const char __pyx_k_buf_data[] = "buf_data"; +static const char __pyx_k_fragment[] = "fragment"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_userinfo[] = "userinfo"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_parse_url[] = "parse_url"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_invalid_url_r[] = "invalid url {!r}"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_HttpParserInvalidURLError[] = "HttpParserInvalidURLError"; +static const char __pyx_k_httptools_parser_url_parser[] = "httptools.parser.url_parser"; +static const char __pyx_k_URL_schema_r_host_r_port_r_path[] = ""; +static const char __pyx_k_httptools_parser_url_parser_pyx[] = "httptools/parser/url_parser.pyx"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static PyObject *__pyx_n_s_HttpParserInvalidURLError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_n_s_URL; +static PyObject *__pyx_kp_u_URL_schema_r_host_r_port_r_path; +static PyObject *__pyx_n_s_all; +static PyObject *__pyx_n_s_buf_data; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_errors; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_fragment; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_n_s_host; +static PyObject *__pyx_n_s_httptools_parser_url_parser; +static PyObject *__pyx_kp_s_httptools_parser_url_parser_pyx; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_kp_u_invalid_url_r; +static PyObject *__pyx_n_s_ln; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_off; +static PyObject *__pyx_n_s_parse_url; +static PyObject *__pyx_n_u_parse_url; +static PyObject *__pyx_n_s_parsed; +static PyObject *__pyx_n_s_path; +static PyObject *__pyx_n_s_port; +static PyObject *__pyx_n_s_py_buf; +static PyObject *__pyx_n_s_query; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_res; +static PyObject *__pyx_n_s_result; +static PyObject *__pyx_n_s_schema; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_url; +static PyObject *__pyx_n_s_userinfo; +static int __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, PyObject *__pyx_v_schema, PyObject *__pyx_v_host, PyObject *__pyx_v_port, PyObject *__pyx_v_path, PyObject *__pyx_v_query, PyObject *__pyx_v_fragment, PyObject *__pyx_v_userinfo); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(struct 
__pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_5query___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url); /* proto */ +static PyObject *__pyx_tp_new_9httptools_6parser_10url_parser_URL(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_codeobj__5; +/* Late includes */ + +/* "httptools/parser/url_parser.pyx":25 + * cdef readonly bytes userinfo + * + * def __cinit__(self, bytes schema, bytes host, object port, bytes path, # <<<<<<<<<<<<<< + * bytes query, bytes fragment, bytes userinfo): + * + */ + +/* Python wrapper */ +static int __pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_schema = 0; + PyObject *__pyx_v_host = 0; + PyObject *__pyx_v_port = 0; + PyObject *__pyx_v_path = 0; + PyObject *__pyx_v_query = 0; + PyObject *__pyx_v_fragment = 0; + PyObject *__pyx_v_userinfo = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_schema,&__pyx_n_s_host,&__pyx_n_s_port,&__pyx_n_s_path,&__pyx_n_s_query,&__pyx_n_s_fragment,&__pyx_n_s_userinfo,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + 
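+  /* Note: the positional switch above and the keyword branch below together
+   * enforce the Python-level signature URL(schema, host, port, path, query,
+   * fragment, userinfo); all seven arguments are required, which is why every
+   * missing-argument path calls __Pyx_RaiseArgtupleInvalid with exact=1,
+   * num_min=7, num_max=7. */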
CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_schema)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_host)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 1); __PYX_ERR(1, 25, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_port)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 2); __PYX_ERR(1, 25, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_path)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 3); __PYX_ERR(1, 25, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_query)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 4); __PYX_ERR(1, 25, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fragment)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 5); __PYX_ERR(1, 25, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_userinfo)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 6); __PYX_ERR(1, 25, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 25, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_schema = ((PyObject*)values[0]); + __pyx_v_host = ((PyObject*)values[1]); + __pyx_v_port = values[2]; + __pyx_v_path = ((PyObject*)values[3]); + __pyx_v_query = ((PyObject*)values[4]); + __pyx_v_fragment = ((PyObject*)values[5]); + __pyx_v_userinfo = ((PyObject*)values[6]); + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 25, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("httptools.parser.url_parser.URL.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_schema), (&PyBytes_Type), 1, "schema", 1))) __PYX_ERR(1, 25, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_host), (&PyBytes_Type), 1, "host", 1))) __PYX_ERR(1, 25, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_path), (&PyBytes_Type), 1, "path", 1))) __PYX_ERR(1, 25, __pyx_L1_error) + if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_query), (&PyBytes_Type), 1, "query", 1))) __PYX_ERR(1, 26, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_fragment), (&PyBytes_Type), 1, "fragment", 1))) __PYX_ERR(1, 26, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_userinfo), (&PyBytes_Type), 1, "userinfo", 1))) __PYX_ERR(1, 26, __pyx_L1_error) + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self), __pyx_v_schema, __pyx_v_host, __pyx_v_port, __pyx_v_path, __pyx_v_query, __pyx_v_fragment, __pyx_v_userinfo); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, PyObject *__pyx_v_schema, PyObject *__pyx_v_host, PyObject *__pyx_v_port, PyObject *__pyx_v_path, PyObject *__pyx_v_query, PyObject *__pyx_v_fragment, PyObject *__pyx_v_userinfo) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "httptools/parser/url_parser.pyx":28 + * bytes query, bytes fragment, bytes userinfo): + * + * self.schema = schema # <<<<<<<<<<<<<< + * self.host = host + * self.port = port + */ + __Pyx_INCREF(__pyx_v_schema); + __Pyx_GIVEREF(__pyx_v_schema); + __Pyx_GOTREF(__pyx_v_self->schema); + __Pyx_DECREF(__pyx_v_self->schema); + __pyx_v_self->schema = __pyx_v_schema; + + /* "httptools/parser/url_parser.pyx":29 + * + * self.schema = schema + * self.host = host # <<<<<<<<<<<<<< + * self.port = port + * self.path = path + */ + __Pyx_INCREF(__pyx_v_host); + __Pyx_GIVEREF(__pyx_v_host); + __Pyx_GOTREF(__pyx_v_self->host); + __Pyx_DECREF(__pyx_v_self->host); + __pyx_v_self->host = __pyx_v_host; + + /* "httptools/parser/url_parser.pyx":30 + * self.schema = schema + * self.host = host + * self.port = port # <<<<<<<<<<<<<< + * self.path = path + * self.query = query + */ + __Pyx_INCREF(__pyx_v_port); + __Pyx_GIVEREF(__pyx_v_port); + __Pyx_GOTREF(__pyx_v_self->port); + __Pyx_DECREF(__pyx_v_self->port); + __pyx_v_self->port = __pyx_v_port; + + /* "httptools/parser/url_parser.pyx":31 + * self.host = host + * self.port = port + * self.path = path # <<<<<<<<<<<<<< + * self.query = query + * self.fragment = fragment + */ + __Pyx_INCREF(__pyx_v_path); + __Pyx_GIVEREF(__pyx_v_path); + __Pyx_GOTREF(__pyx_v_self->path); + __Pyx_DECREF(__pyx_v_self->path); + __pyx_v_self->path = __pyx_v_path; + + /* "httptools/parser/url_parser.pyx":32 + * self.port = port + * self.path = path + * self.query = query # <<<<<<<<<<<<<< + * self.fragment = fragment + * self.userinfo = userinfo + */ + __Pyx_INCREF(__pyx_v_query); + __Pyx_GIVEREF(__pyx_v_query); + __Pyx_GOTREF(__pyx_v_self->query); + __Pyx_DECREF(__pyx_v_self->query); + __pyx_v_self->query = __pyx_v_query; + + /* "httptools/parser/url_parser.pyx":33 + * self.path = path + * self.query = query + * self.fragment = fragment # <<<<<<<<<<<<<< + * self.userinfo = userinfo + * + */ + __Pyx_INCREF(__pyx_v_fragment); + __Pyx_GIVEREF(__pyx_v_fragment); + __Pyx_GOTREF(__pyx_v_self->fragment); + __Pyx_DECREF(__pyx_v_self->fragment); + __pyx_v_self->fragment = __pyx_v_fragment; + + /* "httptools/parser/url_parser.pyx":34 + * self.query = query + * self.fragment = fragment + * self.userinfo = userinfo # <<<<<<<<<<<<<< + * + * def __repr__(self): + */ + __Pyx_INCREF(__pyx_v_userinfo); + 
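+  /* Every attribute assignment in __cinit__ uses the same reference-counting
+   * pattern: INCREF the incoming value, GIVEREF it to the struct slot, GOTREF
+   * and DECREF the previously stored object, then overwrite the pointer. The
+   * remaining steps for the final slot, userinfo, follow below. */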
__Pyx_GIVEREF(__pyx_v_userinfo); + __Pyx_GOTREF(__pyx_v_self->userinfo); + __Pyx_DECREF(__pyx_v_self->userinfo); + __pyx_v_self->userinfo = __pyx_v_userinfo; + + /* "httptools/parser/url_parser.pyx":25 + * cdef readonly bytes userinfo + * + * def __cinit__(self, bytes schema, bytes host, object port, bytes path, # <<<<<<<<<<<<<< + * bytes query, bytes fragment, bytes userinfo): + * + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":36 + * self.userinfo = userinfo + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return ('' + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "httptools/parser/url_parser.pyx":37 + * + * def __repr__(self): + * return ('' + * .format(self.schema, self.host, self.port, self.path, + */ + __Pyx_XDECREF(__pyx_r); + + /* "httptools/parser/url_parser.pyx":39 + * return ('' + * .format(self.schema, self.host, self.port, self.path, # <<<<<<<<<<<<<< + * self.query, self.fragment, self.userinfo)) + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_URL_schema_r_host_r_port_r_path, __pyx_n_s_format); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "httptools/parser/url_parser.pyx":40 + * 'query: {!r}, fragment: {!r}, userinfo: {!r}>' + * .format(self.schema, self.host, self.port, self.path, + * self.query, self.fragment, self.userinfo)) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = NULL; + __pyx_t_4 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + __pyx_t_4 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_2)) { + PyObject *__pyx_temp[8] = {__pyx_t_3, __pyx_v_self->schema, __pyx_v_self->host, __pyx_v_self->port, __pyx_v_self->path, __pyx_v_self->query, __pyx_v_self->fragment, __pyx_v_self->userinfo}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 7+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_1); + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { + PyObject *__pyx_temp[8] = {__pyx_t_3, __pyx_v_self->schema, __pyx_v_self->host, __pyx_v_self->port, __pyx_v_self->path, __pyx_v_self->query, __pyx_v_self->fragment, __pyx_v_self->userinfo}; 
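+  /* Fast-call branch of URL.__repr__: the seven stored components are passed
+   * to str.format on the template bound to
+   * __pyx_kp_u_URL_schema_r_host_r_port_r_path; in the upstream httptools
+   * source that template reads roughly
+   * '<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, query: {!r},
+   * fragment: {!r}, userinfo: {!r}>'. */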
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 7+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_1); + } else + #endif + { + __pyx_t_5 = PyTuple_New(7+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; + } + __Pyx_INCREF(__pyx_v_self->schema); + __Pyx_GIVEREF(__pyx_v_self->schema); + PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_self->schema); + __Pyx_INCREF(__pyx_v_self->host); + __Pyx_GIVEREF(__pyx_v_self->host); + PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_self->host); + __Pyx_INCREF(__pyx_v_self->port); + __Pyx_GIVEREF(__pyx_v_self->port); + PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_self->port); + __Pyx_INCREF(__pyx_v_self->path); + __Pyx_GIVEREF(__pyx_v_self->path); + PyTuple_SET_ITEM(__pyx_t_5, 3+__pyx_t_4, __pyx_v_self->path); + __Pyx_INCREF(__pyx_v_self->query); + __Pyx_GIVEREF(__pyx_v_self->query); + PyTuple_SET_ITEM(__pyx_t_5, 4+__pyx_t_4, __pyx_v_self->query); + __Pyx_INCREF(__pyx_v_self->fragment); + __Pyx_GIVEREF(__pyx_v_self->fragment); + PyTuple_SET_ITEM(__pyx_t_5, 5+__pyx_t_4, __pyx_v_self->fragment); + __Pyx_INCREF(__pyx_v_self->userinfo); + __Pyx_GIVEREF(__pyx_v_self->userinfo); + PyTuple_SET_ITEM(__pyx_t_5, 6+__pyx_t_4, __pyx_v_self->userinfo); + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "httptools/parser/url_parser.pyx":36 + * self.userinfo = userinfo + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return ('' + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("httptools.parser.url_parser.URL.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":17 + * @cython.freelist(250) + * cdef class URL: + * cdef readonly bytes schema # <<<<<<<<<<<<<< + * cdef readonly bytes host + * cdef readonly object port + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->schema); + __pyx_r = __pyx_v_self->schema; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return 
__pyx_r; +} + +/* "httptools/parser/url_parser.pyx":18 + * cdef class URL: + * cdef readonly bytes schema + * cdef readonly bytes host # <<<<<<<<<<<<<< + * cdef readonly object port + * cdef readonly bytes path + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->host); + __pyx_r = __pyx_v_self->host; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":19 + * cdef readonly bytes schema + * cdef readonly bytes host + * cdef readonly object port # <<<<<<<<<<<<<< + * cdef readonly bytes path + * cdef readonly bytes query + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->port); + __pyx_r = __pyx_v_self->port; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":20 + * cdef readonly bytes host + * cdef readonly object port + * cdef readonly bytes path # <<<<<<<<<<<<<< + * cdef readonly bytes query + * cdef readonly bytes fragment + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + 
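+  /* The readonly property getters (schema, host, port, path, query, fragment,
+   * userinfo) each just INCREF and return the object stored on the struct.
+   * Illustrative Python-level usage (an assumption for documentation only,
+   * not part of the generated file):
+   *
+   *     import httptools
+   *     u = httptools.parse_url(b"https://user@example.com:8080/p?q=1#top")
+   *     # u.schema == b"https", u.host == b"example.com", u.port == 8080,
+   *     # u.path == b"/p", u.query == b"q=1", u.fragment == b"top",
+   *     # u.userinfo == b"user"
+   */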
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->path); + __pyx_r = __pyx_v_self->path; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":21 + * cdef readonly object port + * cdef readonly bytes path + * cdef readonly bytes query # <<<<<<<<<<<<<< + * cdef readonly bytes fragment + * cdef readonly bytes userinfo + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_5query___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_5query___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->query); + __pyx_r = __pyx_v_self->query; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":22 + * cdef readonly bytes path + * cdef readonly bytes query + * cdef readonly bytes fragment # <<<<<<<<<<<<<< + * cdef readonly bytes userinfo + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->fragment); + __pyx_r = __pyx_v_self->fragment; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":23 + * cdef readonly bytes query + * cdef readonly bytes fragment + * cdef readonly bytes userinfo # <<<<<<<<<<<<<< + * + * def __cinit__(self, bytes schema, bytes host, object port, bytes path, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = 
__pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->userinfo); + __pyx_r = __pyx_v_self->userinfo; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(0, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.url_parser.URL.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject 
*__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(0, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("httptools.parser.url_parser.URL.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "httptools/parser/url_parser.pyx":43 + * + * + * def parse_url(url): # <<<<<<<<<<<<<< + * cdef: + * Py_buffer py_buf + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url); /*proto*/ +static PyMethodDef __pyx_mdef_9httptools_6parser_10url_parser_1parse_url = {"parse_url", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_1parse_url, METH_O, 0}; +static PyObject *__pyx_pw_9httptools_6parser_10url_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("parse_url (wrapper)", 0); + __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_parse_url(__pyx_self, ((PyObject *)__pyx_v_url)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9httptools_6parser_10url_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url) { + Py_buffer __pyx_v_py_buf; + char *__pyx_v_buf_data; + struct http_parser_url *__pyx_v_parsed; + int __pyx_v_res; + PyObject *__pyx_v_schema = 0; + PyObject *__pyx_v_host = 0; + PyObject *__pyx_v_port = 0; + PyObject *__pyx_v_path = 0; + PyObject *__pyx_v_query = 0; + PyObject *__pyx_v_fragment = 0; + PyObject *__pyx_v_userinfo = 0; + CYTHON_UNUSED PyObject *__pyx_v_result = 0; + int __pyx_v_off; + int __pyx_v_ln; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int 
__pyx_t_2; + uint16_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + char const *__pyx_t_10; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + PyObject *__pyx_t_13 = NULL; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + PyObject *__pyx_t_16 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("parse_url", 0); + + /* "httptools/parser/url_parser.pyx":49 + * uparser.http_parser_url* parsed + * int res + * bytes schema = None # <<<<<<<<<<<<<< + * bytes host = None + * object port = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_schema = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":50 + * int res + * bytes schema = None + * bytes host = None # <<<<<<<<<<<<<< + * object port = None + * bytes path = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_host = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":51 + * bytes schema = None + * bytes host = None + * object port = None # <<<<<<<<<<<<<< + * bytes path = None + * bytes query = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_port = Py_None; + + /* "httptools/parser/url_parser.pyx":52 + * bytes host = None + * object port = None + * bytes path = None # <<<<<<<<<<<<<< + * bytes query = None + * bytes fragment = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_path = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":53 + * object port = None + * bytes path = None + * bytes query = None # <<<<<<<<<<<<<< + * bytes fragment = None + * bytes userinfo = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_query = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":54 + * bytes path = None + * bytes query = None + * bytes fragment = None # <<<<<<<<<<<<<< + * bytes userinfo = None + * object result = None + */ + __Pyx_INCREF(Py_None); + __pyx_v_fragment = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":55 + * bytes query = None + * bytes fragment = None + * bytes userinfo = None # <<<<<<<<<<<<<< + * object result = None + * int off + */ + __Pyx_INCREF(Py_None); + __pyx_v_userinfo = ((PyObject*)Py_None); + + /* "httptools/parser/url_parser.pyx":56 + * bytes fragment = None + * bytes userinfo = None + * object result = None # <<<<<<<<<<<<<< + * int off + * int ln + */ + __Pyx_INCREF(Py_None); + __pyx_v_result = Py_None; + + /* "httptools/parser/url_parser.pyx":60 + * int ln + * + * parsed = \ # <<<<<<<<<<<<<< + * PyMem_Malloc(sizeof(uparser.http_parser_url)) + * uparser.http_parser_url_init(parsed) + */ + __pyx_v_parsed = ((struct http_parser_url *)PyMem_Malloc((sizeof(struct http_parser_url)))); + + /* "httptools/parser/url_parser.pyx":62 + * parsed = \ + * PyMem_Malloc(sizeof(uparser.http_parser_url)) + * uparser.http_parser_url_init(parsed) # <<<<<<<<<<<<<< + * + * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) + */ + http_parser_url_init(__pyx_v_parsed); + + /* "httptools/parser/url_parser.pyx":64 + * uparser.http_parser_url_init(parsed) + * + * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<< + * try: + * buf_data = py_buf.buf + */ + __pyx_t_1 = PyObject_GetBuffer(__pyx_v_url, (&__pyx_v_py_buf), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 64, __pyx_L1_error) + + /* "httptools/parser/url_parser.pyx":65 + * + * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) + * try: # <<<<<<<<<<<<<< + * buf_data = py_buf.buf + * res = 
uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) + */ + /*try:*/ { + + /* "httptools/parser/url_parser.pyx":66 + * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) + * try: + * buf_data = py_buf.buf # <<<<<<<<<<<<<< + * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) + * + */ + __pyx_v_buf_data = ((char *)__pyx_v_py_buf.buf); + + /* "httptools/parser/url_parser.pyx":67 + * try: + * buf_data = py_buf.buf + * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) # <<<<<<<<<<<<<< + * + * if res == 0: + */ + __pyx_v_res = http_parser_parse_url(__pyx_v_buf_data, __pyx_v_py_buf.len, 0, __pyx_v_parsed); + + /* "httptools/parser/url_parser.pyx":69 + * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) + * + * if res == 0: # <<<<<<<<<<<<<< + * if parsed.field_set & (1 << uparser.UF_SCHEMA): + * off = parsed.field_data[uparser.UF_SCHEMA].off + */ + __pyx_t_2 = ((__pyx_v_res == 0) != 0); + if (likely(__pyx_t_2)) { + + /* "httptools/parser/url_parser.pyx":70 + * + * if res == 0: + * if parsed.field_set & (1 << uparser.UF_SCHEMA): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_SCHEMA].off + * ln = parsed.field_data[uparser.UF_SCHEMA].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_SCHEMA)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":71 + * if res == 0: + * if parsed.field_set & (1 << uparser.UF_SCHEMA): + * off = parsed.field_data[uparser.UF_SCHEMA].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_SCHEMA].len + * schema = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":72 + * if parsed.field_set & (1 << uparser.UF_SCHEMA): + * off = parsed.field_data[uparser.UF_SCHEMA].off + * ln = parsed.field_data[uparser.UF_SCHEMA].len # <<<<<<<<<<<<<< + * schema = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":73 + * off = parsed.field_data[uparser.UF_SCHEMA].off + * ln = parsed.field_data[uparser.UF_SCHEMA].len + * schema = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_HOST): + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 73, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_schema, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":70 + * + * if res == 0: + * if parsed.field_set & (1 << uparser.UF_SCHEMA): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_SCHEMA].off + * ln = parsed.field_data[uparser.UF_SCHEMA].len + */ + } + + /* "httptools/parser/url_parser.pyx":75 + * schema = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_HOST): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_HOST].off + * ln = parsed.field_data[uparser.UF_HOST].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_HOST)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":76 + * + * if parsed.field_set & (1 << uparser.UF_HOST): + * off = parsed.field_data[uparser.UF_HOST].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_HOST].len + * host = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":77 + * if parsed.field_set & 
(1 << uparser.UF_HOST): + * off = parsed.field_data[uparser.UF_HOST].off + * ln = parsed.field_data[uparser.UF_HOST].len # <<<<<<<<<<<<<< + * host = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":78 + * off = parsed.field_data[uparser.UF_HOST].off + * ln = parsed.field_data[uparser.UF_HOST].len + * host = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_PORT): + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 78, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_host, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":75 + * schema = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_HOST): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_HOST].off + * ln = parsed.field_data[uparser.UF_HOST].len + */ + } + + /* "httptools/parser/url_parser.pyx":80 + * host = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_PORT): # <<<<<<<<<<<<<< + * port = parsed.port + * + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_PORT)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":81 + * + * if parsed.field_set & (1 << uparser.UF_PORT): + * port = parsed.port # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_PATH): + */ + __pyx_t_4 = __Pyx_PyInt_From_uint16_t(__pyx_v_parsed->port); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 81, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_port, __pyx_t_4); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":80 + * host = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_PORT): # <<<<<<<<<<<<<< + * port = parsed.port + * + */ + } + + /* "httptools/parser/url_parser.pyx":83 + * port = parsed.port + * + * if parsed.field_set & (1 << uparser.UF_PATH): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_PATH].off + * ln = parsed.field_data[uparser.UF_PATH].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_PATH)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":84 + * + * if parsed.field_set & (1 << uparser.UF_PATH): + * off = parsed.field_data[uparser.UF_PATH].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_PATH].len + * path = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":85 + * if parsed.field_set & (1 << uparser.UF_PATH): + * off = parsed.field_data[uparser.UF_PATH].off + * ln = parsed.field_data[uparser.UF_PATH].len # <<<<<<<<<<<<<< + * path = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":86 + * off = parsed.field_data[uparser.UF_PATH].off + * ln = parsed.field_data[uparser.UF_PATH].len + * path = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_QUERY): + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 86, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_path, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":83 + * port = parsed.port + * + * if parsed.field_set & (1 << 
uparser.UF_PATH): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_PATH].off + * ln = parsed.field_data[uparser.UF_PATH].len + */ + } + + /* "httptools/parser/url_parser.pyx":88 + * path = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_QUERY): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_QUERY].off + * ln = parsed.field_data[uparser.UF_QUERY].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_QUERY)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":89 + * + * if parsed.field_set & (1 << uparser.UF_QUERY): + * off = parsed.field_data[uparser.UF_QUERY].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_QUERY].len + * query = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":90 + * if parsed.field_set & (1 << uparser.UF_QUERY): + * off = parsed.field_data[uparser.UF_QUERY].off + * ln = parsed.field_data[uparser.UF_QUERY].len # <<<<<<<<<<<<<< + * query = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":91 + * off = parsed.field_data[uparser.UF_QUERY].off + * ln = parsed.field_data[uparser.UF_QUERY].len + * query = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_FRAGMENT): + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 91, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_query, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":88 + * path = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_QUERY): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_QUERY].off + * ln = parsed.field_data[uparser.UF_QUERY].len + */ + } + + /* "httptools/parser/url_parser.pyx":93 + * query = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_FRAGMENT): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_FRAGMENT].off + * ln = parsed.field_data[uparser.UF_FRAGMENT].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_FRAGMENT)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":94 + * + * if parsed.field_set & (1 << uparser.UF_FRAGMENT): + * off = parsed.field_data[uparser.UF_FRAGMENT].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_FRAGMENT].len + * fragment = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":95 + * if parsed.field_set & (1 << uparser.UF_FRAGMENT): + * off = parsed.field_data[uparser.UF_FRAGMENT].off + * ln = parsed.field_data[uparser.UF_FRAGMENT].len # <<<<<<<<<<<<<< + * fragment = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":96 + * off = parsed.field_data[uparser.UF_FRAGMENT].off + * ln = parsed.field_data[uparser.UF_FRAGMENT].len + * fragment = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * if parsed.field_set & (1 << uparser.UF_USERINFO): + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 96, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_fragment, 
((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":93 + * query = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_FRAGMENT): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_FRAGMENT].off + * ln = parsed.field_data[uparser.UF_FRAGMENT].len + */ + } + + /* "httptools/parser/url_parser.pyx":98 + * fragment = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_USERINFO): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_USERINFO].off + * ln = parsed.field_data[uparser.UF_USERINFO].len + */ + __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_USERINFO)) != 0); + if (__pyx_t_2) { + + /* "httptools/parser/url_parser.pyx":99 + * + * if parsed.field_set & (1 << uparser.UF_USERINFO): + * off = parsed.field_data[uparser.UF_USERINFO].off # <<<<<<<<<<<<<< + * ln = parsed.field_data[uparser.UF_USERINFO].len + * userinfo = buf_data[off:off+ln] + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).off; + __pyx_v_off = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":100 + * if parsed.field_set & (1 << uparser.UF_USERINFO): + * off = parsed.field_data[uparser.UF_USERINFO].off + * ln = parsed.field_data[uparser.UF_USERINFO].len # <<<<<<<<<<<<<< + * userinfo = buf_data[off:off+ln] + * + */ + __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).len; + __pyx_v_ln = __pyx_t_3; + + /* "httptools/parser/url_parser.pyx":101 + * off = parsed.field_data[uparser.UF_USERINFO].off + * ln = parsed.field_data[uparser.UF_USERINFO].len + * userinfo = buf_data[off:off+ln] # <<<<<<<<<<<<<< + * + * return URL(schema, host, port, path, query, fragment, userinfo) + */ + __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_userinfo, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "httptools/parser/url_parser.pyx":98 + * fragment = buf_data[off:off+ln] + * + * if parsed.field_set & (1 << uparser.UF_USERINFO): # <<<<<<<<<<<<<< + * off = parsed.field_data[uparser.UF_USERINFO].off + * ln = parsed.field_data[uparser.UF_USERINFO].len + */ + } + + /* "httptools/parser/url_parser.pyx":103 + * userinfo = buf_data[off:off+ln] + * + * return URL(schema, host, port, path, query, fragment, userinfo) # <<<<<<<<<<<<<< + * else: + * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_4 = PyTuple_New(7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 103, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_INCREF(__pyx_v_schema); + __Pyx_GIVEREF(__pyx_v_schema); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_schema); + __Pyx_INCREF(__pyx_v_host); + __Pyx_GIVEREF(__pyx_v_host); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_host); + __Pyx_INCREF(__pyx_v_port); + __Pyx_GIVEREF(__pyx_v_port); + PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_port); + __Pyx_INCREF(__pyx_v_path); + __Pyx_GIVEREF(__pyx_v_path); + PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_path); + __Pyx_INCREF(__pyx_v_query); + __Pyx_GIVEREF(__pyx_v_query); + PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_v_query); + __Pyx_INCREF(__pyx_v_fragment); + __Pyx_GIVEREF(__pyx_v_fragment); + PyTuple_SET_ITEM(__pyx_t_4, 5, __pyx_v_fragment); + __Pyx_INCREF(__pyx_v_userinfo); + __Pyx_GIVEREF(__pyx_v_userinfo); + PyTuple_SET_ITEM(__pyx_t_4, 6, __pyx_v_userinfo); + __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_9httptools_6parser_10url_parser_URL), __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(1, 103, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L3_return; + + /* "httptools/parser/url_parser.pyx":69 + * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) + * + * if res == 0: # <<<<<<<<<<<<<< + * if parsed.field_set & (1 << uparser.UF_SCHEMA): + * off = parsed.field_data[uparser.UF_SCHEMA].off + */ + } + + /* "httptools/parser/url_parser.pyx":105 + * return URL(schema, host, port, path, query, fragment, userinfo) + * else: + * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) # <<<<<<<<<<<<<< + * finally: + * PyBuffer_Release(&py_buf) + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 105, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_invalid_url_r, __pyx_n_s_format); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 105, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_6 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_v_url) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_url); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 105, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + __pyx_t_5 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 105, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 105, __pyx_L4_error) + } + } + + /* "httptools/parser/url_parser.pyx":107 + * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) + * finally: + * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<< + * PyMem_Free(parsed) + */ + /*finally:*/ { + __pyx_L4_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13) < 0)) __Pyx_ErrFetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __Pyx_XGOTREF(__pyx_t_13); + __Pyx_XGOTREF(__pyx_t_14); + __Pyx_XGOTREF(__pyx_t_15); + __Pyx_XGOTREF(__pyx_t_16); + __pyx_t_1 = __pyx_lineno; __pyx_t_9 = __pyx_clineno; __pyx_t_10 = __pyx_filename; + { + PyBuffer_Release((&__pyx_v_py_buf)); + + /* "httptools/parser/url_parser.pyx":108 + * finally: + * PyBuffer_Release(&py_buf) + * PyMem_Free(parsed) # <<<<<<<<<<<<<< + */ + PyMem_Free(__pyx_v_parsed); + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_14); + __Pyx_XGIVEREF(__pyx_t_15); + __Pyx_XGIVEREF(__pyx_t_16); + __Pyx_ExceptionReset(__pyx_t_14, __pyx_t_15, __pyx_t_16); + } + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_XGIVEREF(__pyx_t_13); + __Pyx_ErrRestore(__pyx_t_11, __pyx_t_12, __pyx_t_13); + __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; + __pyx_lineno = __pyx_t_1; __pyx_clineno = __pyx_t_9; __pyx_filename = __pyx_t_10; + goto __pyx_L1_error; + } + __pyx_L3_return: { + __pyx_t_16 = __pyx_r; + __pyx_r = 0; + + /* "httptools/parser/url_parser.pyx":107 + * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) + * finally: + * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<< + * PyMem_Free(parsed) + */ + PyBuffer_Release((&__pyx_v_py_buf)); + + /* "httptools/parser/url_parser.pyx":108 + * finally: + * PyBuffer_Release(&py_buf) + * PyMem_Free(parsed) # <<<<<<<<<<<<<< + */ + PyMem_Free(__pyx_v_parsed); + __pyx_r = __pyx_t_16; + __pyx_t_16 = 0; + goto __pyx_L0; + } + } + + /* "httptools/parser/url_parser.pyx":43 + * + * + * def parse_url(url): # <<<<<<<<<<<<<< + * cdef: + * Py_buffer py_buf + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("httptools.parser.url_parser.parse_url", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_schema); + __Pyx_XDECREF(__pyx_v_host); + __Pyx_XDECREF(__pyx_v_port); + __Pyx_XDECREF(__pyx_v_path); + __Pyx_XDECREF(__pyx_v_query); + __Pyx_XDECREF(__pyx_v_fragment); + __Pyx_XDECREF(__pyx_v_userinfo); + 
__Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_freelist_9httptools_6parser_10url_parser_URL[250]; +static int __pyx_freecount_9httptools_6parser_10url_parser_URL = 0; + +static PyObject *__pyx_tp_new_9httptools_6parser_10url_parser_URL(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_obj_9httptools_6parser_10url_parser_URL *p; + PyObject *o; + if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_9httptools_6parser_10url_parser_URL > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)) & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) { + o = (PyObject*)__pyx_freelist_9httptools_6parser_10url_parser_URL[--__pyx_freecount_9httptools_6parser_10url_parser_URL]; + memset(o, 0, sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)); + (void) PyObject_INIT(o, t); + PyObject_GC_Track(o); + } else { + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + } + p = ((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o); + p->schema = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->host = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->port = Py_None; Py_INCREF(Py_None); + p->path = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->query = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->fragment = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->userinfo = ((PyObject*)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_9httptools_6parser_10url_parser_URL(PyObject *o) { + struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->schema); + Py_CLEAR(p->host); + Py_CLEAR(p->port); + Py_CLEAR(p->path); + Py_CLEAR(p->query); + Py_CLEAR(p->fragment); + Py_CLEAR(p->userinfo); + if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_9httptools_6parser_10url_parser_URL < 250) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)) & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) { + __pyx_freelist_9httptools_6parser_10url_parser_URL[__pyx_freecount_9httptools_6parser_10url_parser_URL++] = ((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o); + } else { + (*Py_TYPE(o)->tp_free)(o); + } +} + +static int __pyx_tp_traverse_9httptools_6parser_10url_parser_URL(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; + if (p->port) { + e = (*v)(p->port, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_9httptools_6parser_10url_parser_URL(PyObject *o) { + PyObject* tmp; + struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; + tmp = ((PyObject*)p->port); + p->port = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; 
+} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_schema(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_host(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_port(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_path(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_query(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_fragment(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(o); +} + +static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_userinfo(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(o); +} + +static PyMethodDef __pyx_methods_9httptools_6parser_10url_parser_URL[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_9httptools_6parser_10url_parser_URL[] = { + {(char *)"schema", __pyx_getprop_9httptools_6parser_10url_parser_3URL_schema, 0, (char *)0, 0}, + {(char *)"host", __pyx_getprop_9httptools_6parser_10url_parser_3URL_host, 0, (char *)0, 0}, + {(char *)"port", __pyx_getprop_9httptools_6parser_10url_parser_3URL_port, 0, (char *)0, 0}, + {(char *)"path", __pyx_getprop_9httptools_6parser_10url_parser_3URL_path, 0, (char *)0, 0}, + {(char *)"query", __pyx_getprop_9httptools_6parser_10url_parser_3URL_query, 0, (char *)0, 0}, + {(char *)"fragment", __pyx_getprop_9httptools_6parser_10url_parser_3URL_fragment, 0, (char *)0, 0}, + {(char *)"userinfo", __pyx_getprop_9httptools_6parser_10url_parser_3URL_userinfo, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_9httptools_6parser_10url_parser_URL = { + PyVarObject_HEAD_INIT(0, 0) + "httptools.parser.url_parser.URL", /*tp_name*/ + sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_9httptools_6parser_10url_parser_URL, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + 
__pyx_tp_traverse_9httptools_6parser_10url_parser_URL, /*tp_traverse*/ + __pyx_tp_clear_9httptools_6parser_10url_parser_URL, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_9httptools_6parser_10url_parser_URL, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_9httptools_6parser_10url_parser_URL, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_9httptools_6parser_10url_parser_URL, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_url_parser(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_url_parser}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "url_parser", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_HttpParserInvalidURLError, __pyx_k_HttpParserInvalidURLError, sizeof(__pyx_k_HttpParserInvalidURLError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_n_s_URL, __pyx_k_URL, sizeof(__pyx_k_URL), 0, 0, 1, 1}, + {&__pyx_kp_u_URL_schema_r_host_r_port_r_path, __pyx_k_URL_schema_r_host_r_port_r_path, sizeof(__pyx_k_URL_schema_r_host_r_port_r_path), 0, 1, 0, 0}, + {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, + {&__pyx_n_s_buf_data, __pyx_k_buf_data, sizeof(__pyx_k_buf_data), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_errors, __pyx_k_errors, sizeof(__pyx_k_errors), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fragment, __pyx_k_fragment, sizeof(__pyx_k_fragment), 0, 0, 1, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_n_s_host, __pyx_k_host, sizeof(__pyx_k_host), 0, 0, 1, 1}, + {&__pyx_n_s_httptools_parser_url_parser, __pyx_k_httptools_parser_url_parser, sizeof(__pyx_k_httptools_parser_url_parser), 0, 0, 1, 1}, + {&__pyx_kp_s_httptools_parser_url_parser_pyx, __pyx_k_httptools_parser_url_parser_pyx, sizeof(__pyx_k_httptools_parser_url_parser_pyx), 0, 0, 1, 0}, + 
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_kp_u_invalid_url_r, __pyx_k_invalid_url_r, sizeof(__pyx_k_invalid_url_r), 0, 1, 0, 0}, + {&__pyx_n_s_ln, __pyx_k_ln, sizeof(__pyx_k_ln), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_off, __pyx_k_off, sizeof(__pyx_k_off), 0, 0, 1, 1}, + {&__pyx_n_s_parse_url, __pyx_k_parse_url, sizeof(__pyx_k_parse_url), 0, 0, 1, 1}, + {&__pyx_n_u_parse_url, __pyx_k_parse_url, sizeof(__pyx_k_parse_url), 0, 1, 0, 1}, + {&__pyx_n_s_parsed, __pyx_k_parsed, sizeof(__pyx_k_parsed), 0, 0, 1, 1}, + {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, + {&__pyx_n_s_port, __pyx_k_port, sizeof(__pyx_k_port), 0, 0, 1, 1}, + {&__pyx_n_s_py_buf, __pyx_k_py_buf, sizeof(__pyx_k_py_buf), 0, 0, 1, 1}, + {&__pyx_n_s_query, __pyx_k_query, sizeof(__pyx_k_query), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, + {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, + {&__pyx_n_s_schema, __pyx_k_schema, sizeof(__pyx_k_schema), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_url, __pyx_k_url, sizeof(__pyx_k_url), 0, 0, 1, 1}, + {&__pyx_n_s_userinfo, __pyx_k_userinfo, sizeof(__pyx_k_userinfo), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 2, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "httptools/parser/url_parser.pyx":13 + * from . 
cimport url_cparser as uparser + * + * __all__ = ('parse_url',) # <<<<<<<<<<<<<< + * + * @cython.freelist(250) + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_u_parse_url); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "httptools/parser/url_parser.pyx":43 + * + * + * def parse_url(url): # <<<<<<<<<<<<<< + * cdef: + * Py_buffer py_buf + */ + __pyx_tuple__4 = PyTuple_Pack(15, __pyx_n_s_url, __pyx_n_s_py_buf, __pyx_n_s_buf_data, __pyx_n_s_parsed, __pyx_n_s_res, __pyx_n_s_schema, __pyx_n_s_host, __pyx_n_s_port, __pyx_n_s_path, __pyx_n_s_query, __pyx_n_s_fragment, __pyx_n_s_userinfo, __pyx_n_s_result, __pyx_n_s_off, __pyx_n_s_ln); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + __pyx_codeobj__5 = (PyObject*)__Pyx_PyCode_New(1, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__4, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_httptools_parser_url_parser_pyx, __pyx_n_s_parse_url, 43, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__5)) __PYX_ERR(1, 43, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(1, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + if (PyType_Ready(&__pyx_type_9httptools_6parser_10url_parser_URL) < 0) __PYX_ERR(1, 16, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_9httptools_6parser_10url_parser_URL.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_10url_parser_URL.tp_dictoffset && __pyx_type_9httptools_6parser_10url_parser_URL.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_9httptools_6parser_10url_parser_URL.tp_getattro = 
[Remainder of diff omitted: the commit also adds the `myenv/` virtual environment verbatim — activation scripts and vendored third-party packages, including several thousand lines of machine-generated Cython C from `httptools/parser/url_parser.c` (module init, refnanny, argument parsing, exception handling, and integer conversion runtime support). None of it is hand-written project code.]
*)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+    if (!res) {
+        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+    return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+#if PY_MAJOR_VERSION < 3
+        if (likely(exc_type == t)) return 1;
+#endif
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        }
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+    char ctversion[4], rtversion[4];
+    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %s of module '%.100s' "
+                      "does not match runtime version %s",
+                      ctversion, __Pyx_MODULE_NAME, rtversion);
+        return PyErr_WarnEx(NULL, message, 1);
+    }
+    return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    while (t->p) {
+        #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+        #else
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+        }
+        #endif
+        if (!*t->p)
+            return -1;
+        if (PyObject_Hash(*t->p) == -1)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+    Py_ssize_t ignore;
+    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    char* defenc_c;
+    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+    if (!defenc) return NULL;
+    defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    {
+        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+        char* c;
+        for (c = defenc_c; c < end; c++) {
+            if ((unsigned char) (*c) >= 128) {
+                PyUnicode_AsASCIIString(o);
+                return NULL;
+            }
+        }
+    }
+#endif
+    *length = PyBytes_GET_SIZE(defenc);
+    return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    if (likely(PyUnicode_IS_ASCII(o))) {
+        *length = PyUnicode_GET_LENGTH(o);
+        return PyUnicode_AsUTF8(o);
+    } else {
+        PyUnicode_AsASCIIString(o);
+        return NULL;
+    }
+#else
+    return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+            PyUnicode_Check(o)) {
+        return __Pyx_PyUnicode_AsStringAndSize(o, length);
+    } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+    if (PyByteArray_Check(o)) {
+        *length = PyByteArray_GET_SIZE(o);
+        return PyByteArray_AS_STRING(o);
+    } else
+#endif
+    {
+        char* result;
+        int r = PyBytes_AsStringAndSize(o, &result, length);
+        if (unlikely(r < 0)) {
+            return NULL;
+        } else {
+            return result;
+        }
+    }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+    int is_true = x == Py_True;
+    if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+    else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int
__Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.cpython-39-darwin.so b/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.cpython-39-darwin.so new file mode 100755 index 0000000..0f73120 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/httptools/parser/url_parser.cpython-39-darwin.so differ diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/LICENSE.md b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/LICENSE.md new file mode 100644 index 0000000..ab79d16 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/LICENSE.md @@ -0,0 +1,12 @@ +Copyright © 2019, [Encode OSS Ltd](https://www.encode.io/). +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/METADATA new file mode 100644 index 0000000..7ca65d0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/METADATA @@ -0,0 +1,1145 @@ +Metadata-Version: 2.1 +Name: httpx +Version: 0.23.0 +Summary: The next generation HTTP client. +Home-page: https://github.com/encode/httpx +Author: Tom Christie +Author-email: tom@tomchristie.com +License: BSD +Project-URL: Changelog, https://github.com/encode/httpx/blob/master/CHANGELOG.md +Project-URL: Documentation, https://www.python-httpx.org +Project-URL: Source, https://github.com/encode/httpx +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Framework :: AsyncIO +Classifier: Framework :: Trio +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Requires-Dist: certifi +Requires-Dist: sniffio +Requires-Dist: rfc3986[idna2008] (<2,>=1.3) +Requires-Dist: httpcore (<0.16.0,>=0.15.0) +Provides-Extra: brotli +Requires-Dist: brotlicffi ; (platform_python_implementation != "CPython") and extra == 'brotli' +Requires-Dist: brotli ; (platform_python_implementation == "CPython") and extra == 'brotli' +Provides-Extra: cli +Requires-Dist: click (==8.*) ; extra == 'cli' +Requires-Dist: rich (<13,>=10) ; extra == 'cli' +Requires-Dist: pygments (==2.*) ; extra == 'cli' +Provides-Extra: http2 +Requires-Dist: h2 (<5,>=3) ; extra == 'http2' +Provides-Extra: socks +Requires-Dist: socksio (==1.*) ; extra == 'socks' + +
+[HTTPX logo]
+
+HTTPX - A next-generation HTTP client for Python.
+
+[badges: Test Suite | Package version]
+
+HTTPX is a fully featured HTTP client library for Python 3. It includes **an integrated
+command line client**, has support for both **HTTP/1.1 and HTTP/2**, and provides both **sync
+and async APIs**.
+
+---
+
+Install HTTPX using pip:
+
+```shell
+$ pip install httpx
+```
+
+Now, let's get started:
+
+```pycon
+>>> import httpx
+>>> r = httpx.get('https://www.example.org/')
+>>> r
+<Response [200 OK]>
+>>> r.status_code
+200
+>>> r.headers['content-type']
+'text/html; charset=UTF-8'
+>>> r.text
+'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'
+```
+
+Or, using the command-line client.
+
+```shell
+$ pip install 'httpx[cli]'  # The command line client is an optional dependency.
+```
+
+Which now allows us to use HTTPX directly from the command-line...
+
+[screenshot: `httpx --help`]
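+
+As a plain command (the full option listing is omitted):
+
+```shell
+$ httpx --help
+```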
+ +Sending a request... + +
+[screenshot: `httpx http://httpbin.org/json`]
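+
+Again as a plain command (the JSON response output is omitted):
+
+```shell
+$ httpx http://httpbin.org/json
+```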
+
+## Features
+
+HTTPX builds on the well-established usability of `requests`, and gives you:
+
+* A broadly [requests-compatible API](https://www.python-httpx.org/compatibility/).
+* An integrated command-line client.
+* HTTP/1.1 [and HTTP/2 support](https://www.python-httpx.org/http2/).
+* Standard synchronous interface, but with [async support if you need it](https://www.python-httpx.org/async/).
+* Ability to make requests directly to [WSGI applications](https://www.python-httpx.org/advanced/#calling-into-python-web-apps) or [ASGI applications](https://www.python-httpx.org/async/#calling-into-python-web-apps).
+* Strict timeouts everywhere.
+* Fully type annotated.
+* 100% test coverage.
+
+Plus all the standard features of `requests`...
+
+* International Domains and URLs
+* Keep-Alive & Connection Pooling
+* Sessions with Cookie Persistence
+* Browser-style SSL Verification
+* Basic/Digest Authentication
+* Elegant Key/Value Cookies
+* Automatic Decompression
+* Automatic Content Decoding
+* Unicode Response Bodies
+* Multipart File Uploads
+* HTTP(S) Proxy Support
+* Connection Timeouts
+* Streaming Downloads
+* .netrc Support
+* Chunked Requests
+
+## Installation
+
+Install with pip:
+
+```shell
+$ pip install httpx
+```
+
+Or, to include the optional HTTP/2 support, use:
+
+```shell
+$ pip install httpx[http2]
+```
+
+HTTPX requires Python 3.7+.
+
+## Documentation
+
+Project documentation is available at [https://www.python-httpx.org/](https://www.python-httpx.org/).
+
+For a run-through of all the basics, head over to the [QuickStart](https://www.python-httpx.org/quickstart/).
+
+For more advanced topics, see the [Advanced Usage](https://www.python-httpx.org/advanced/) section, the [async support](https://www.python-httpx.org/async/) section, or the [HTTP/2](https://www.python-httpx.org/http2/) section.
+
+The [Developer Interface](https://www.python-httpx.org/api/) provides a comprehensive API reference.
+
+To find out about tools that integrate with HTTPX, see [Third Party Packages](https://www.python-httpx.org/third_party_packages/).
+
+## Contribute
+
+If you want to contribute to HTTPX, check out the [Contributing Guide](https://www.python-httpx.org/contributing/) to learn how to start.
+
+## Dependencies
+
+The HTTPX project relies on these excellent libraries:
+
+* `httpcore` - The underlying transport implementation for `httpx`.
+  * `h11` - HTTP/1.1 support.
+* `certifi` - SSL certificates.
+* `rfc3986` - URL parsing & normalization.
+  * `idna` - Internationalized domain name support.
+* `sniffio` - Async library autodetection.
+
+As well as these optional installs:
+
+* `h2` - HTTP/2 support. *(Optional, with `httpx[http2]`)*
+* `socksio` - SOCKS proxy support. *(Optional, with `httpx[socks]`)*
+* `rich` - Rich terminal support. *(Optional, with `httpx[cli]`)*
+* `click` - Command line client support. *(Optional, with `httpx[cli]`)*
+* `brotli` or `brotlicffi` - Decoding for "brotli" compressed responses. *(Optional, with `httpx[brotli]`)*
+
+A huge amount of credit is due to `requests` for the API layout that
+much of this work follows, as well as to `urllib3` for plenty of design
+inspiration around the lower-level networking details.
+
+---
+
+HTTPX is BSD licensed code.
+Designed & crafted with care.
+
+— 🦋 —
+ + +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +## 0.23.0 (23rd May, 2022) + +### Changed + +* Drop support for Python 3.6. (#2097) +* Use `utf-8` as the default character set, instead of falling back to `charset-normalizer` for auto-detection. To enable automatic character set detection, see [the documentation](https://www.python-httpx.org/advanced/#character-set-encodings-and-auto-detection). (#2165) + +### Fixed + +* Fix `URL.copy_with` for some oddly formed URL cases. (#2185) +* Digest authentication should use case-insensitive comparison for determining which algorithm is being used. (#2204) +* Fix console markup escaping in command line client. (#1866) +* When files are used in multipart upload, ensure we always seek to the start of the file. (#2065) +* Ensure that `iter_bytes` never yields zero-length chunks. (#2068) +* Preserve `Authorization` header for redirects that are to the same origin, but are an `http`-to-`https` upgrade. (#2074) +* When responses have binary output, don't print the output to the console in the command line client. Use output like `<16086 bytes of binary data>` instead. (#2076) +* Fix display of `--proxies` argument in the command line client help. (#2125) +* Close responses when task cancellations occur during stream reading. (#2156) +* Fix type error on accessing `.request` on `HTTPError` exceptions. (#2158) + +## 0.22.0 (26th January, 2022) + +### Added + +* Support for [the SOCKS5 proxy protocol](https://www.python-httpx.org/advanced/#socks) via [the `socksio` package](https://github.com/sethmlarson/socksio). (#2034) +* Support for custom headers in multipart/form-data requests (#1936) + +### Fixed + +* Don't perform unreliable close/warning on `__del__` with unclosed clients. (#2026) +* Fix `Headers.update(...)` to correctly handle repeated headers (#2038) + +## 0.21.3 (6th January, 2022) + +### Fixed + +* Fix streaming uploads using `SyncByteStream` or `AsyncByteStream`. Regression in 0.21.2. (#2016) + +## 0.21.2 (5th January, 2022) + +### Fixed + +* HTTP/2 support for tunnelled proxy cases. (#2009) +* Improved the speed of large file uploads. (#1948) + +## 0.21.1 (16th November, 2021) + +### Fixed + +* The `response.url` property is now correctly annotated as `URL`, instead of `Optional[URL]`. (#1940) + +## 0.21.0 (15th November, 2021) + +The 0.21.0 release integrates against a newly redesigned `httpcore` backend. + +Both packages ought to automatically update to the required versions, but if you are +seeing any issues, you should ensure that you have `httpx==0.21.*` and `httpcore==0.14.*` installed. + +### Added + +* The command-line client will now display connection information when `-v/--verbose` is used. +* The command-line client will now display server certificate information when `-v/--verbose` is used. +* The command-line client is now able to properly detect if the outgoing request +should be formatted as HTTP/1.1 or HTTP/2, based on the result of the HTTP/2 negotiation. + +### Removed + +* Curio support is no longer currently included. Please get in touch if you require this, so that we can assess priorities. + +## 0.20.0 (13th October, 2021) + +The 0.20.0 release adds an integrated command-line client, and also includes some +design changes. The most notable of these is that redirect responses are no longer +automatically followed, unless specifically requested. 
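+
+For example, opting back in to automatic redirects looks like this (a minimal sketch of the two styles described below):
+
+```python
+import httpx
+
+# Follow redirects for every request sent through this client.
+client = httpx.Client(follow_redirects=True)
+
+# Or opt in for a single request only.
+response = client.get("http://api.github.com/", follow_redirects=True)
+```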
+
+This design decision prioritises a more explicit approach to redirects, in order
+to avoid code that unintentionally issues multiple requests as a result of
+misconfigured URLs.
+
+For example, previously a client configured to send requests to `http://api.github.com/`
+would end up sending every API request twice, as each request would be redirected to `https://api.github.com/`.
+
+If you do want auto-redirect behaviour, you can enable this either by configuring
+the client instance with `Client(follow_redirects=True)`, or on a per-request
+basis, with `.get(..., follow_redirects=True)`.
+
+This change is a classic trade-off between convenience and precision, with no "right"
+answer. See [discussion #1785](https://github.com/encode/httpx/discussions/1785) for more
+context.
+
+The other major design change is an update to the Transport API, which is the low-level
+interface against which requests are sent. Previously this interface used only primitive
+datastructures, like so...
+
+```python
+(status_code, headers, stream, extensions) = transport.handle_request(method, url, headers, stream, extensions)
+try:
+    ...
+finally:
+    stream.close()
+```
+
+Now the interface is much simpler...
+
+```python
+response = transport.handle_request(request)
+try:
+    ...
+finally:
+    response.close()
+```
+
+### Changed
+
+* The `allow_redirects` flag is now `follow_redirects` and defaults to `False`.
+* The `raise_for_status()` method will now raise an exception for any responses
+  except those with 2xx status codes. Previously only 4xx and 5xx status codes
+  would result in an exception.
+* The low-level transport API changes to the much simpler `response = transport.handle_request(request)`.
+* The `client.send()` method no longer accepts a `timeout=...` argument, but the
+  `client.build_request()` does. This is required by the signature change of the
+  Transport API. The request timeout configuration is now stored on the request
+  instance, as `request.extensions['timeout']`.
+
+### Added
+
+* Added the `httpx` command-line client.
+* Response instances now include `.is_informational`, `.is_success`, `.is_redirect`, `.is_client_error`, and `.is_server_error`
+  properties for checking 1xx, 2xx, 3xx, 4xx, and 5xx response types. Note that the behaviour of `.is_redirect` is slightly different in that it now returns True for all 3xx responses, in order to allow for a consistent set of properties onto the different HTTP status code types. The `response.has_redirect_location` property may be used to determine responses with properly formed URL redirects.
+
+### Fixed
+
+* `response.iter_bytes()` no longer raises a ValueError when called on a response with no content. (Pull #1827)
+* The `'wsgi.error'` configuration now defaults to `sys.stderr`, and is corrected to be a `TextIO` interface, not a `BytesIO` interface. Additionally, the WSGITransport now accepts a `wsgi_error` configuration. (Pull #1828)
+* Follow the WSGI spec by properly closing the iterable returned by the application. (Pull #1830)
+
+## 0.19.0 (19th August, 2021)
+
+### Added
+
+* Add support for `Client(allow_redirects=<bool>)`. (Pull #1790)
+* Add automatic character set detection, when no `charset` is included in the response `Content-Type` header. (Pull #1791)
+
+### Changed
+
+* Event hooks are now also called for any additional redirect or auth requests/responses. (Pull #1806)
+* Strictly enforce that upload files must be opened in binary mode. (Pull #1736)
+* Strictly enforce that client instances can only be opened and closed once, and cannot be re-opened. (Pull #1800)
+* Drop `mode` argument from `httpx.Proxy(..., mode=...)`. (Pull #1795)
+
+## 0.18.2 (17th June, 2021)
+
+### Added
+
+* Support for Python 3.10. (Pull #1687)
+* Expose `httpx.USE_CLIENT_DEFAULT`, used as the default to `auth` and `timeout` parameters in request methods. (Pull #1634)
+* Support [HTTP/2 "prior knowledge"](https://python-hyper.org/projects/hyper-h2/en/v2.3.1/negotiating-http2.html#prior-knowledge), using `httpx.Client(http1=False, http2=True)`. (Pull #1624)
+
+### Fixed
+
+* Clean up some cases where warnings were being issued. (Pull #1687)
+* Prefer Content-Length over Transfer-Encoding: chunked for content= cases. (Pull #1619)
+
+## 0.18.1 (29th April, 2021)
+
+### Changed
+
+* Update brotli support to use the `brotlicffi` package. (Pull #1605)
+* Ensure that `Request(..., stream=...)` does not auto-generate any headers on the request instance. (Pull #1607)
+
+### Fixed
+
+* Pass through `timeout=...` in the top-level `httpx.stream()` function. (Pull #1613)
+* Map httpcore transport close exceptions to httpx exceptions. (Pull #1606)
+
+## 0.18.0 (27th April, 2021)
+
+The 0.18.x release series formalises our low-level Transport API, introducing the base classes `httpx.BaseTransport` and `httpx.AsyncBaseTransport`.
+
+See the "[Writing custom transports](https://www.python-httpx.org/advanced/#writing-custom-transports)" documentation and the [`httpx.BaseTransport.handle_request()`](https://github.com/encode/httpx/blob/397aad98fdc8b7580a5fc3e88f1578b4302c6382/httpx/_transports/base.py#L77-L147) docstring for more complete details on implementing custom transports.
+
+Pull request #1522 includes a checklist of differences from the previous `httpcore` transport API, for developers implementing custom transports.
+
+The following API changes have been issuing deprecation warnings since 0.17.0 onwards, and are now fully deprecated...
+
+* You should now use `httpx.codes` consistently instead of `httpx.StatusCodes`.
+* Use `limits=...` instead of `pool_limits=...`.
+* Use `proxies={"http://": ...}` instead of `proxies={"http": ...}` for scheme-specific mounting.
+
+### Changed
+
+* Transport instances now inherit from `httpx.BaseTransport` or `httpx.AsyncBaseTransport`,
+  and should implement either the `handle_request` method or `handle_async_request` method. (Pull #1522, #1550)
+* The `response.ext` property and `Response(ext=...)` argument are now named `extensions`. (Pull #1522)
+* The recommendation to not use `data=` in favour of `content=` has now been escalated to a deprecation warning. (Pull #1573)
+* Drop `Response(on_close=...)` from API, since it was a bit of a leaking implementation detail. (Pull #1572)
+* When using a client instance, cookies should always be set on the client, rather than on a per-request basis. We prefer enforcing a stricter API here because it provides clearer expectations around cookie persistence, particularly when redirects occur. (Pull #1574)
+* The runtime exception `httpx.ResponseClosed` is now named `httpx.StreamClosed`. (#1584)
+* The `httpx.QueryParams` model now presents an immutable interface. There is a discussion on [the design and motivation here](https://github.com/encode/httpx/discussions/1599). Use `client.params = client.params.merge(...)` instead of `client.params.update(...)`. The basic query manipulation methods are `query.set(...)`, `query.add(...)`, and `query.remove()`. (#1600)
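+
+A short sketch of the immutable `QueryParams` style described above (the keys and values here are illustrative):
+
+```python
+import httpx
+
+params = httpx.QueryParams({"page": "1"})
+# Manipulation methods return new instances rather than mutating in place.
+params = params.set("page", "2")
+params = params.add("tag", "python")
+params = params.remove("page")
+print(str(params))  # "tag=python"
+```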
+
+### Added
+
+* The `Request` and `Response` classes can now be serialized using pickle. (#1579)
+* Handle `data={"key": [None|int|float|bool]}` cases. (Pull #1539)
+* Support `httpx.URL(**kwargs)`, for example `httpx.URL(scheme="https", host="www.example.com", path="/")`, or `httpx.URL("https://www.example.com/", username="tom@gmail.com", password="123 456")`. (Pull #1601)
+* Support `url.copy_with(params=...)`. (Pull #1601)
+* Add `url.params` parameter, returning an immutable `QueryParams` instance. (Pull #1601)
+* Support query manipulation methods on the URL class. These are `url.copy_set_param()`, `url.copy_add_param()`, `url.copy_remove_param()`, `url.copy_merge_params()`. (Pull #1601)
+* The `httpx.URL` class now performs port normalization, so `:80` ports are stripped from `http` URLs and `:443` ports are stripped from `https` URLs. (Pull #1603)
+* The `URL.host` property returns unicode strings for internationalized domain names. The `URL.raw_host` property returns byte strings with IDNA escaping applied. (Pull #1590)
+
+### Fixed
+
+* Fix Content-Length for cases of `files=...` where a unicode string is used as the file content. (Pull #1537)
+* Fix some cases of merging relative URLs against `Client(base_url=...)`. (Pull #1532)
+* The `request.content` attribute is now always available except for streaming content, which requires an explicit `.read()`. (Pull #1583)
+
+## 0.17.1 (March 15th, 2021)
+
+### Fixed
+
+* Type annotation on `CertTypes` allows `keyfile` and `password` to be optional. (Pull #1503)
+* Fix httpcore pinned version. (Pull #1495)
+
+## 0.17.0 (February 28th, 2021)
+
+### Added
+
+* Add `httpx.MockTransport()`, allowing you to mock out a transport using pre-determined responses. (Pull #1401, Pull #1449)
+* Add `httpx.HTTPTransport()` and `httpx.AsyncHTTPTransport()` default transports. (Pull #1399)
+* Add mount API support, using `httpx.Client(mounts=...)`. (Pull #1362)
+* Add `chunk_size` parameter to `iter_raw()`, `iter_bytes()`, `iter_text()`. (Pull #1277)
+* Add `keepalive_expiry` parameter to `httpx.Limits()` configuration. (Pull #1398)
+* Add repr to `httpx.Cookies` to display available cookies. (Pull #1411)
+* Add support for `params=` (previously only `params=` was supported). (Pull #1426)
+
+### Fixed
+
+* Add missing `raw_path` to ASGI scope. (Pull #1357)
+* Tweak `create_ssl_context` defaults to use `trust_env=True`. (Pull #1447)
+* Properly URL-escape WSGI `PATH_INFO`. (Pull #1391)
+* Properly set default ports in WSGI transport. (Pull #1469)
+* Properly encode slashes when using `base_url`. (Pull #1407)
+* Properly map exceptions in `request.aclose()`. (Pull #1465)
+
+## 0.16.1 (October 8th, 2020)
+
+### Fixed
+
+* Support literal IPv6 addresses in URLs. (Pull #1349)
+* Force lowercase headers in ASGI scope dictionaries. (Pull #1351)
+
+## 0.16.0 (October 6th, 2020)
+
+### Changed
+
+* Preserve HTTP header casing. (Pull #1338, encode/httpcore#216, python-hyper/h11#104)
+* Drop `response.next()` and `response.anext()` methods in favour of `response.next_request` attribute. (Pull #1339)
+* Closed clients now raise a runtime error if attempting to send a request. (Pull #1346)
+
+### Added
+
+* Add Python 3.9 to officially supported versions.
+* Type annotate `__enter__`/`__exit__`/`__aenter__`/`__aexit__` in a way that supports subclasses of `Client` and `AsyncClient`. (Pull #1336)
+
+## 0.15.5 (October 1st, 2020)
+
+### Added
+
+* Add `response.next_request`. (Pull #1334)
+
+## 0.15.4 (September 25th, 2020)
+
+### Added
+
+* Support direct comparisons between `Headers` and dicts or lists of two-tuples. Eg. `assert response.headers == {"Content-Length": 24}` (Pull #1326)
+
+### Fixed
+
+* Fix automatic `.read()` when `Response` instances are created with `content=`. (Pull #1324)
+
+## 0.15.3 (September 24th, 2020)
+
+### Fixed
+
+* Fixed connection leak in async client due to improper closing of response streams. (Pull #1316)
+
+## 0.15.2 (September 23rd, 2020)
+
+### Fixed
+
+* Fixed `response.elapsed` property. (Pull #1313)
+* Fixed client authentication interaction with `.stream()`. (Pull #1312)
+
+## 0.15.1 (September 23rd, 2020)
+
+### Fixed
+
+* ASGITransport now properly applies URL decoding to the `path` component, as per the ASGI spec. (Pull #1307)
+
+## 0.15.0 (September 22nd, 2020)
+
+### Added
+
+* Added support for curio. (Pull https://github.com/encode/httpcore/pull/168)
+* Added support for event hooks. (Pull #1246)
+* Added support for authentication flows which require either sync or async I/O. (Pull #1217)
+* Added support for monitoring download progress with `response.num_bytes_downloaded`. (Pull #1268)
+* Added `Request(content=...)` for byte content, instead of overloading `Request(data=...)`. (Pull #1266)
+* Added support for all URL components as parameter names when using `url.copy_with(...)`. (Pull #1285)
+* Neater split between automatically populated headers on `Request` instances, vs default `client.headers`. (Pull #1248)
+* Unclosed `AsyncClient` instances will now raise warnings if garbage collected. (Pull #1197)
+* Support `Response(content=..., text=..., html=..., json=...)` for creating usable response instances in code. (Pull #1265, #1297)
+* Support instantiating requests from the low-level transport API. (Pull #1293)
+* Raise errors on invalid URL types. (Pull #1259)
+
+### Changed
+
+* Cleaned up expected behaviour for URL escaping. `url.path` is now URL escaped. (Pull #1285)
+* Cleaned up expected behaviour for bytes vs str in URL components. `url.userinfo` and `url.query` are not URL escaped, and so return bytes. (Pull #1285)
+* Drop `url.authority` property in favour of `url.netloc`, since "authority" was semantically incorrect. (Pull #1285)
+* Drop `url.full_path` property in favour of `url.raw_path`, for better consistency with other parts of the API. (Pull #1285)
+* No longer use the `chardet` library for auto-detecting charsets, instead defaulting to a simpler approach when no charset is specified. (#1269)
+
+### Fixed
+
+* Swapped ordering of redirects and authentication flow. (Pull #1267)
+* `.netrc` lookups should use host, not host+port. (Pull #1298)
+
+### Removed
+
+* The `URLLib3Transport` class no longer exists. We've published it instead as an example of [a custom transport class](https://gist.github.com/florimondmanca/d56764d78d748eb9f73165da388e546e). (Pull #1182)
+* Drop `request.timer` attribute, which was being used internally to set `response.elapsed`. (Pull #1249)
+* Drop `response.decoder` attribute, which was being used internally. (Pull #1276)
+* `Request.prepare()` is now a private method. (Pull #1284)
+* The `Headers.getlist()` method had previously been deprecated in favour of `Headers.get_list()`. It is now fully removed.
+* The `QueryParams.getlist()` method had previously been deprecated in favour of `QueryParams.get_list()`. It is now fully removed.
+* The `URL.is_ssl` property had previously been deprecated in favour of `URL.scheme == "https"`. It is now fully removed.
+* The `httpx.PoolLimits` class had previously been deprecated in favour of `httpx.Limits`. It is now fully removed.
+* The `max_keepalive` setting had previously been deprecated in favour of the more explicit `max_keepalive_connections`. It is now fully removed.
+* The verbose `httpx.Timeout(5.0, connect_timeout=60.0)` style had previously been deprecated in favour of `httpx.Timeout(5.0, connect=60.0)`. It is now fully removed.
+* Support for instantiating a timeout config missing some defaults, such as `httpx.Timeout(connect=60.0)`, had previously been deprecated in favour of enforcing a more explicit style, such as `httpx.Timeout(5.0, connect=60.0)`. This is now strictly enforced.
+
+## 0.14.3 (September 2nd, 2020)
+
+### Added
+
+* `httpx.Response()` may now be instantiated without a `request=...` parameter. Useful for some unit testing cases. (Pull #1238)
+* Add `103 Early Hints` and `425 Too Early` status codes. (Pull #1244)
+
+### Fixed
+
+* `DigestAuth` now handles responses that include multiple 'WWW-Authenticate' headers. (Pull #1240)
+* Call into transport `__enter__`/`__exit__` or `__aenter__`/`__aexit__` when client is used in a context manager style. (Pull #1218)
+
+## 0.14.2 (August 24th, 2020)
+
+### Added
+
+* Support `client.get(..., auth=None)` to bypass the default authentication on a client. (Pull #1115)
+* Support `client.auth = ...` property setter. (Pull #1185)
+* Support `httpx.get(..., proxies=...)` on top-level request functions. (Pull #1198)
+* Display instances with nicer import styles. (Eg. `<httpx.ReadTimeout ...>`) (Pull #1155)
+* Support `cookies=[(key, value)]` list-of-two-tuples style usage. (Pull #1211)
+
+### Fixed
+
+* Ensure that automatically included headers on a request may be modified. (Pull #1205)
+* Allow explicit `Content-Length` header on streaming requests. (Pull #1170)
+* Handle URL quoted usernames and passwords properly. (Pull #1159)
+* Use more consistent default for `HEAD` requests, setting `allow_redirects=True`. (Pull #1183)
+* If a transport error occurs while streaming the response, raise an `httpx` exception, not the underlying `httpcore` exception. (Pull #1190)
+* Include the underlying `httpcore` traceback, when transport exceptions occur. (Pull #1199)
+
+## 0.14.1 (August 11th, 2020)
+
+### Added
+
+* The `httpx.URL(...)` class now raises `httpx.InvalidURL` on invalid URLs, rather than exposing the underlying `rfc3986` exception. If a redirect response includes an invalid 'Location' header, then a `RemoteProtocolError` exception is raised, which will be associated with the request that caused it. (Pull #1163)
+
+### Fixed
+
+* Handling multiple `Set-Cookie` headers became broken in the 0.14.0 release, and is now resolved. (Pull #1156)
+
+## 0.14.0 (August 7th, 2020)
+
+The 0.14 release includes a range of improvements to the public API, intended to prepare for our upcoming 1.0 release.
+
+* Our HTTP/2 support is now fully optional. **You now need to use `pip install httpx[http2]` if you want to include the HTTP/2 dependencies.**
+* Our HSTS support has now been removed. Rewriting URLs from `http` to `https` if the host is on the HSTS list can be beneficial in avoiding roundtrips to incorrectly formed URLs, but on balance we've decided to remove this feature, on the principle of least surprise. Most programmatic clients do not include HSTS support, and for now we're opting to remove our support for it.
+* Our exception hierarchy has been overhauled. Most users will want to stick with their existing `httpx.HTTPError` usage, but we've got a clearer overall structure now. See https://www.python-httpx.org/exceptions/ for more details.
+
+When upgrading you should be aware of the following public API changes. Note that deprecated usages will currently continue to function, but will issue warnings.
+
+* You should now use `httpx.codes` consistently instead of `httpx.StatusCodes`.
+* Usage of `httpx.Timeout()` should now always include an explicit default. Eg. `httpx.Timeout(None, pool=5.0)`.
+* When using `httpx.Timeout()`, we now have more concisely named keyword arguments. Eg. `read=5.0`, instead of `read_timeout=5.0`.
+* Use `httpx.Limits()` instead of `httpx.PoolLimits()`, and `limits=...` instead of `pool_limits=...`.
+* The `httpx.Limits(max_keepalive=...)` argument is now deprecated in favour of a more explicit `httpx.Limits(max_keepalive_connections=...)`.
+* Keys used with `Client(proxies={...})` should now be in the style of `{"http://": ...}`, rather than `{"http": ...}`.
+* The multidict methods `Headers.getlist()` and `QueryParams.getlist()` are deprecated in favour of more consistent `.get_list()` variants.
+* The `URL.is_ssl` property is deprecated in favour of `URL.scheme == "https"`.
+* The `URL.join(relative_url=...)` method is now `URL.join(url=...)`. This change does not support warnings for the deprecated usage style.
+
+One notable aspect of the 0.14.0 release is that it tightens up the public API for `httpx`, by ensuring that several internal attributes and methods have now become strictly private.
+
+The following previously had nominally public names on the client, but were all undocumented and intended solely for internal usage. They are all now replaced with underscored names, and should not be relied on or accessed.
+
+These changes should not affect users who have been working from the `httpx` documentation.
+
+* `.merge_url()`, `.merge_headers()`, `.merge_cookies()`, `.merge_queryparams()`
+* `.build_auth()`, `.build_redirect_request()`
+* `.redirect_method()`, `.redirect_url()`, `.redirect_headers()`, `.redirect_stream()`
+* `.send_handling_redirects()`, `.send_handling_auth()`, `.send_single_request()`
+* `.init_transport()`, `.init_proxy_transport()`
+* `.proxies`, `.transport`, `.netrc`, `.get_proxy_map()`
+
+See pull requests #997, #1065, #1071.
+
+Some areas of API which were already on the deprecation path, and were raising warnings or errors in 0.13.x, have now been escalated to being fully removed.
+
+* Drop `ASGIDispatch`, `WSGIDispatch`, which have been replaced by `ASGITransport`, `WSGITransport`.
+* Drop `dispatch=...` on client, which has been replaced by `transport=...`.
+* Drop `soft_limit`, `hard_limit`, which have been replaced by `max_keepalive` and `max_connections`.
+* Drop `Response.stream` and `Response.raw`, which have been replaced by `.aiter_bytes` and `.aiter_raw`.
+* Drop `proxies=` in favor of `proxies=httpx.Proxy(...)`.
+
+See pull requests #1057, #1058.
+
+### Added
+
+* Added dedicated exception class `httpx.HTTPStatusError` for `.raise_for_status()` exceptions. (Pull #1072)
+* Added `httpx.create_ssl_context()` helper function. (Pull #996)
+* Support for proxy exclusions like `proxies={"https://www.example.com": None}`. (Pull #1099)
+* Support `QueryParams(None)` and `client.params = None`. (Pull #1060)
+
+### Changed
+
+* Use `httpx.codes` consistently in favour of `httpx.StatusCodes`, which is placed into deprecation. (Pull #1088)
+* Usage of `httpx.Timeout()` should now always include an explicit default. Eg. `httpx.Timeout(None, pool=5.0)`. (Pull #1085)
+* Switch to more concise `httpx.Timeout()` keyword arguments. Eg. `read=5.0`, instead of `read_timeout=5.0`. (Pull #1111)
+* Use `httpx.Limits()` instead of `httpx.PoolLimits()`, and `limits=...` instead of `pool_limits=...`. (Pull #1113)
+* Keys used with `Client(proxies={...})` should now be in the style of `{"http://": ...}`, rather than `{"http": ...}`. (Pull #1127)
+* The multidict methods `Headers.getlist` and `QueryParams.getlist` are deprecated in favour of more consistent `.get_list()` variants. (Pull #1089)
+* `URL.port` becomes `Optional[int]`. Now only returns a port if one is explicitly included in the URL string. (Pull #1080)
+* The `URL(..., allow_relative=[bool])` parameter no longer exists. All URL instances may be relative. (Pull #1073)
+* Drop unnecessary `url.full_path = ...` property setter. (Pull #1069)
+* The `URL.join(relative_url=...)` method is now `URL.join(url=...)`. (Pull #1129)
+* The `URL.is_ssl` property is deprecated in favour of `URL.scheme == "https"`. (Pull #1128)
+
+### Fixed
+
+* Add missing `Response.next()` method. (Pull #1055)
+* Ensure all exception classes are exposed as public API. (Pull #1045)
+* Support multiple items with an identical field name in multipart encodings. (Pull #777)
+* Skip HSTS preloading on single-label domains. (Pull #1074)
+* Fixes for `Response.iter_lines()`. (Pull #1033, #1075)
+* Ignore permission errors when accessing `.netrc` files. (Pull #1104)
+* Allow bare hostnames in `HTTP_PROXY` etc... environment variables. (Pull #1120)
+* Setting `app=...` or `transport=...` bypasses any environment based proxy defaults. (Pull #1122)
+* Fix handling of `.base_url` when a path component is included in the base URL. (Pull #1130)
+
+---
+
+## 0.13.3 (May 29th, 2020)
+
+### Fixed
+
+* Include missing keepalive expiry configuration. (Pull #1005)
+* Improved error message when URL redirect has a custom scheme. (Pull #1002)
+
+## 0.13.2 (May 27th, 2020)
+
+### Fixed
+
+* Include explicit "Content-Length: 0" on POST, PUT, PATCH if no request body is used. (Pull #995)
+* Add `http2` option to `httpx.Client`. (Pull #982)
+* Tighten up API typing in places. (Pull #992, #999)
+
+## 0.13.1 (May 22nd, 2020)
+
+### Fixed
+
+* Fix pool options deprecation warning. (Pull #980)
+* Include `httpx.URLLib3ProxyTransport` in top-level API. (Pull #979)
+
+## 0.13.0 (May 22nd, 2020)
+
+This release switches to `httpcore` for all the internal networking, which means:
+
+* We're using the same codebase for both our sync and async clients.
+* HTTP/2 support is now available with the sync client.
+* We no longer have a `urllib3` dependency for our sync client, although there is still an *optional* `URLLib3Transport` class.
+
+It also means we've had to remove our UDS support, since maintaining that would have meant having to push back our work towards a 1.0 release, which isn't a trade-off we wanted to make.
+
+We also now have [a public "Transport API"](https://www.python-httpx.org/advanced/#custom-transports), which you can use to implement custom transports against. This formalises and replaces our previously private "Dispatch API".
+
+### Changed
+
+* Use `httpcore` for underlying HTTP transport. Drop `urllib3` requirement. (Pull #804, #967)
+* Rename pool limit options from `soft_limit`/`hard_limit` to `max_keepalive`/`max_connections`.
(Pull #968) +* The previous private "Dispatch API" has now been promoted to a public "Transport API". When customizing the transport use `transport=...`. The `ASGIDispatch` and `WSGIDispatch` class naming is deprecated in favour of `ASGITransport` and `WSGITransport`. (Pull #963) + +### Added + +* Added `URLLib3Transport` class for optional `urllib3` transport support. (Pull #804, #963) +* Streaming multipart uploads. (Pull #857) +* Logging via HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables +and TRACE level logging. (Pull encode/httpcore#79) + +### Fixed + +* Performance improvement in brotli decoder. (Pull #906) +* Proper warning level of deprecation notice in `Response.stream` and `Response.raw`. (Pull #908) +* Fix support for generator based WSGI apps. (Pull #887) +* Reuse of connections on HTTP/2 in close concurrency situations. (Pull encode/httpcore#81) +* Honor HTTP/2 max concurrent streams settings (Pull encode/httpcore#89, encode/httpcore#90) +* Fix bytes support in multipart uploads. (Pull #974) +* Improve typing support for `files=...`. (Pull #976) + +### Removed + +* Dropped support for `Client(uds=...)` (Pull #804) + +## 0.13.0.dev2 (May 12th, 2020) + +The 0.13.0.dev2 is a *pre-release* version. To install it, use `pip install httpx --pre`. + +### Added + +* Logging via HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables +and TRACE level logging. (HTTPCore Pull #79) + +### Fixed + +* Reuse of connections on HTTP/2 in close concurrency situations. (HTTPCore Pull #81) +* When using an `app=` observe neater disconnect behaviour instead of sending empty body messages. (Pull #919) + +## 0.13.0.dev1 (May 6th, 2020) + +The 0.13.0.dev1 is a *pre-release* version. To install it, use `pip install httpx --pre`. + +### Fixed + +* Passing `http2` flag to proxy dispatchers. (Pull #934) +* Use [`httpcore` v0.8.3](https://github.com/encode/httpcore/releases/tag/0.8.3) +which addresses problems in handling of headers when using proxies. + +## 0.13.0.dev0 (April 30th, 2020) + +The 0.13.0.dev0 is a *pre-release* version. To install it, use `pip install httpx --pre`. + +This release switches to `httpcore` for all the internal networking, which means: + +* We're using the same codebase for both our sync and async clients. +* HTTP/2 support is now available with the sync client. +* We no longer have a `urllib3` dependency for our sync client, although there is still an *optional* `URLLib3Dispatcher` class. + +It also means we've had to remove our UDS support, since maintaining that would have meant having to push back our work towards a 1.0 release, which isn't a trade-off we wanted to make. + +### Changed + +* Use `httpcore` for underlying HTTP transport. Drop `urllib3` requirement. (Pull #804) + +### Added + +* Added `URLLib3Dispatcher` class for optional `urllib3` transport support. (Pull #804) +* Streaming multipart uploads. (Pull #857) + +### Fixed + +* Performance improvement in brotli decoder. (Pull #906) +* Proper warning level of deprecation notice in `Response.stream` and `Response.raw`. (Pull #908) +* Fix support for generator based WSGI apps. (Pull #887) + +### Removed + +* Dropped support for `Client(uds=...)` (Pull #804) + +--- + +## 0.12.1 (March 19th, 2020) + +### Fixed + +* Resolved packaging issue, where additional files were being included. + +## 0.12.0 (March 9th, 2020) + +The 0.12 release tightens up the API expectations for `httpx` by switching to private module names to enforce better clarity around public API. 
+ +All imports of `httpx` should import from the top-level package only, such as `from httpx import Request`, rather than importing from privately namespaced modules such as `from httpx._models import Request`. + +### Added + +* Support making response body available to auth classes with `.requires_response_body`. (Pull #803) +* Export `NetworkError` exception. (Pull #814) +* Add support for `NO_PROXY` environment variable. (Pull #835) + +### Changed + +* Switched to private module names. (Pull #785) +* Drop redirect looping detection and the `RedirectLoop` exception, instead using `TooManyRedirects`. (Pull #819) +* Drop `backend=...` parameter on `AsyncClient`, in favour of always autodetecting `trio`/`asyncio`. (Pull #791) + +### Fixed + +* Support basic auth credentials in proxy URLs. (Pull #780) +* Fix `httpx.Proxy(url, mode="FORWARD_ONLY")` configuration. (Pull #788) +* Fallback to setting headers as UTF-8 if no encoding is specified. (Pull #820) +* Close proxy dispatches classes on client close. (Pull #826) +* Support custom `cert` parameters even if `verify=False`. (Pull #796) +* Don't support invalid dict-of-dicts form data in `data=...`. (Pull #811) + +--- + +## 0.11.1 (January 17th, 2020) + +### Fixed + +* Fixed usage of `proxies=...` on `Client()`. (Pull #763) +* Support both `zlib` and `deflate` style encodings on `Content-Encoding: deflate`. (Pull #758) +* Fix for streaming a redirect response body with `allow_redirects=False`. (Pull #766) +* Handle redirect with malformed Location headers missing host. (Pull #774) + +## 0.11.0 (January 9th, 2020) + +The 0.11 release reintroduces our sync support, so that `httpx` now supports both a standard thread-concurrency API, and an async API. + +Existing async `httpx` users that are upgrading to 0.11 should ensure that: + +* Async codebases should always use a client instance to make requests, instead of the top-level API. +* The async client is named as `httpx.AsyncClient()`, instead of `httpx.Client()`. +* When instantiating proxy configurations use the `httpx.Proxy()` class, instead of the previous `httpx.HTTPProxy()`. This new configuration class works for configuring both sync and async clients. + +We believe the API is now pretty much stable, and are aiming for a 1.0 release sometime on or before April 2020. + +### Changed + +- Top level API such as `httpx.get(url, ...)`, `httpx.post(url, ...)`, `httpx.request(method, url, ...)` becomes synchronous. +- Added `httpx.Client()` for synchronous clients, with `httpx.AsyncClient` being used for async clients. +- Switched to `proxies=httpx.Proxy(...)` for proxy configuration. +- Network connection errors are wrapped in `httpx.NetworkError`, rather than exposing lower-level exception types directly. + +### Removed + +- The `request.url.origin` property and `httpx.Origin` class are no longer available. +- The per-request `cert`, `verify`, and `trust_env` arguments are escalated from raising errors if used, to no longer being available. These arguments should be used on a per-client instance instead, or in the top-level API. +- The `stream` argument has escalated from raising an error when used, to no longer being available. Use the `client.stream(...)` or `httpx.stream()` streaming API instead. + +### Fixed + +- Redirect loop detection matches against `(method, url)` rather than `url`. (Pull #734) + +--- + +## 0.10.1 (December 31st, 2019) + +### Fixed + +- Fix issue with concurrent connection acquiry. (Pull #700) +- Fix write error on closing HTTP/2 connections. 
(Pull #699) + +## 0.10.0 (December 29th, 2019) + +The 0.10.0 release makes some changes that will allow us to support both sync and async interfaces. + +In particular with streaming responses the `response.read()` method becomes `response.aread()`, and the `response.close()` method becomes `response.aclose()`. + +If following redirects explicitly the `response.next()` method becomes `response.anext()`. + +### Fixed + +- End HTTP/2 streams immediately on no-body requests, rather than sending an empty body message. (Pull #682) +- Improve typing for `Response.request`: switch from `Optional[Request]` to `Request`. (Pull #666) +- `Response.elapsed` now reflects the entire download time. (Pull #687, #692) + +### Changed + +- Added `AsyncClient` as a synonym for `Client`. (Pull #680) +- Switch to `response.aread()` for conditionally reading streaming responses. (Pull #674) +- Switch to `response.aclose()` and `client.aclose()` for explicit closing. (Pull #674, #675) +- Switch to `response.anext()` for resolving the next redirect response. (Pull #676) + +### Removed + +- When using a client instance, the per-request usage of `verify`, `cert`, and `trust_env` have now escalated from raising a warning to raising an error. You should set these arguments on the client instead. (Pull #617) +- Removed the undocumented `request.read()`, since end users should not require it. + +--- + +## 0.9.5 (December 20th, 2019) + +### Fixed + +- Fix Host header and HSTS rewrites when an explicit `:80` port is included in URL. (Pull #649) +- Query Params on the URL string are merged with any `params=...` argument. (Pull #653) +- More robust behavior when closing connections. (Pull #640) +- More robust behavior when handling HTTP/2 headers with trailing whitespace. (Pull #637) +- Allow any explicit `Content-Type` header to take precedence over the encoding default. (Pull #633) + +## 0.9.4 (December 12th, 2019) + +### Fixed + +- Added expiry to Keep-Alive connections, resolving issues with acquiring connections. (Pull #627) +- Increased flow control windows on HTTP/2, resolving download speed issues. (Pull #629) + +## 0.9.3 (December 7th, 2019) + +### Fixed + +- Fixed HTTP/2 with autodetection backend. (Pull #614) + +## 0.9.2 (December 7th, 2019) + +* Released due to packaging build artifact. + +## 0.9.1 (December 6th, 2019) + +* Released due to packaging build artifact. + +## 0.9.0 (December 6th, 2019) + +The 0.9 releases brings some major new features, including: + +* A new streaming API. +* Autodetection of either asyncio or trio. +* Nicer timeout configuration. +* HTTP/2 support off by default, but can be enabled. + +We've also removed all private types from the top-level package export. + +In order to ensure you are only ever working with public API you should make +sure to only import the top-level package eg. `import httpx`, rather than +importing modules within the package. + +### Added + +- Added concurrency backend autodetection. (Pull #585) +- Added `Client(backend='trio')` and `Client(backend='asyncio')` API. (Pull #585) +- Added `response.stream_lines()` API. (Pull #575) +- Added `response.is_error` API. (Pull #574) +- Added support for `timeout=Timeout(5.0, connect_timeout=60.0)` styles. (Pull #593) + +### Fixed + +- Requests or Clients with `timeout=None` now correctly always disable timeouts. (Pull #592) +- Request 'Authorization' headers now have priority over `.netrc` authentication info. (Commit 095b691) +- Files without a filename no longer set a Content-Type in multipart data. 
(Commit ed94950) + +### Changed + +- Added `httpx.stream()` API. Using `stream=True` now results in a warning. (Pull #600, #610) +- HTTP/2 support is switched to "off by default", but can be enabled explicitly. (Pull #584) +- Switched to `Client(http2=True)` API from `Client(http_versions=["HTTP/1.1", "HTTP/2"])`. (Pull #586) +- Removed all private types from the top-level package export. (Pull #608) +- The SSL configuration settings of `verify`, `cert`, and `trust_env` now raise warnings if used per-request when using a Client instance. They should always be set on the Client instance itself. (Pull #597) +- Use plain strings "TUNNEL_ONLY" or "FORWARD_ONLY" on the HTTPProxy `proxy_mode` argument. The `HTTPProxyMode` enum still exists, but its usage will raise warnings. (#610) +- Pool timeouts are now on the timeout configuration, not the pool limits configuration. (Pull #563) +- The timeout configuration is now named `httpx.Timeout(...)`, not `httpx.TimeoutConfig(...)`. The old version currently remains as a synonym for backwards compatibility. (Pull #591) + +--- + +## 0.8.0 (November 27, 2019) + +### Removed + +- The synchronous API has been removed, in order to allow us to fundamentally change how we approach supporting both sync and async variants. (See #588 for more details.) + +--- + +## 0.7.8 (November 17, 2019) + +### Added + +- Add support for proxy tunnels for Python 3.6 + asyncio. (Pull #521) + +## 0.7.7 (November 15, 2019) + +### Fixed + +- Resolve an issue with cookies behavior on redirect requests. (Pull #529) + +### Added + +- Add request/response DEBUG logs. (Pull #502) +- Use TRACE log level for low level info. (Pull #500) + +## 0.7.6 (November 2, 2019) + +### Removed + +- Drop `proxies` parameter from the high-level API. (Pull #485) + +### Fixed + +- Tweak multipart files: omit null filenames, add support for `str` file contents. (Pull #482) +- Cache NETRC authentication per-client. (Pull #400) +- Rely on `getproxies` for all proxy environment variables. (Pull #470) +- Wait for the `asyncio` stream to close when closing a connection. (Pull #494) + +## 0.7.5 (October 10, 2019) + +### Added + +- Allow lists of values to be passed to `params`. (Pull #386) +- `ASGIDispatch`, `WSGIDispatch` are now available in the `httpx.dispatch` namespace. (Pull #407) +- `HTTPError` is now available in the `httpx` namespace. (Pull #421) +- Add support for `start_tls()` to the Trio concurrency backend. (Pull #467) + +### Fixed + +- Username and password are no longer included in the `Host` header when basic authentication + credentials are supplied via the URL. (Pull #417) + +### Removed + +- The `.delete()` function no longer has `json`, `data`, or `files` parameters + to match the expected semantics of the `DELETE` method. (Pull #408) +- Removed the `trio` extra. Trio support is detected automatically. (Pull #390) + +## 0.7.4 (September 25, 2019) + +### Added + +- Add Trio concurrency backend. (Pull #276) +- Add `params` parameter to `Client` for setting default query parameters. (Pull #372) +- Add support for `SSL_CERT_FILE` and `SSL_CERT_DIR` environment variables. (Pull #307) +- Add debug logging to calls into ASGI apps. (Pull #371) +- Add debug logging to SSL configuration. (Pull #378) + +### Fixed + +- Fix a bug when using `Client` without timeouts in Python 3.6. (Pull #383) +- Propagate `Client` configuration to HTTP proxies. (Pull #377) + +## 0.7.3 (September 20, 2019) + +### Added + +- HTTP Proxy support. (Pulls #259, #353) +- Add Digest authentication. 
(Pull #332) +- Add `.build_request()` method to `Client` and `AsyncClient`. (Pull #319) +- Add `.elapsed` property on responses. (Pull #351) +- Add support for `SSLKEYLOGFILE` in Python 3.8b4+. (Pull #301) + +### Removed + +- Drop NPN support for HTTP version negotiation. (Pull #314) + +### Fixed + +- Fix distribution of type annotations for mypy (Pull #361). +- Set `Host` header when redirecting cross-origin. (Pull #321) +- Drop `Content-Length` headers on `GET` redirects. (Pull #310) +- Raise `KeyError` if header isn't found in `Headers`. (Pull #324) +- Raise `NotRedirectResponse` in `response.next()` if there is no redirection to perform. (Pull #297) +- Fix bug in calculating the HTTP/2 maximum frame size. (Pull #153) + +## 0.7.2 (August 28, 2019) + +- Enforce using `httpx.AsyncioBackend` for the synchronous client. (Pull #232) +- `httpx.ConnectionPool` will properly release a dropped connection. (Pull #230) +- Remove the `raise_app_exceptions` argument from `Client`. (Pull #238) +- `DecodeError` will no longer be raised for an empty body encoded with Brotli. (Pull #237) +- Added `http_versions` parameter to `Client`. (Pull #250) +- Only use HTTP/1.1 on short-lived connections like `httpx.get()`. (Pull #284) +- Convert `Client.cookies` and `Client.headers` when set as a property. (Pull #274) +- Setting `HTTPX_DEBUG=1` enables debug logging on all requests. (Pull #277) + +## 0.7.1 (August 18, 2019) + +- Include files with source distribution to be installable. (Pull #233) + +## 0.7.0 (August 17, 2019) + +- Add the `trust_env` property to `BaseClient`. (Pull #187) +- Add the `links` property to `BaseResponse`. (Pull #211) +- Accept `ssl.SSLContext` instances into `SSLConfig(verify=...)`. (Pull #215) +- Add `Response.stream_text()` with incremental encoding detection. (Pull #183) +- Properly updated the `Host` header when a redirect changes the origin. (Pull #199) +- Ignore invalid `Content-Encoding` headers. (Pull #196) +- Use `~/.netrc` and `~/_netrc` files by default when `trust_env=True`. (Pull #189) +- Create exception base class `HTTPError` with `request` and `response` properties. (Pull #162) +- Add HSTS preload list checking within `BaseClient` to upgrade HTTP URLs to HTTPS. (Pull #184) +- Switch IDNA encoding from IDNA 2003 to IDNA 2008. (Pull #161) +- Expose base classes for alternate concurrency backends. (Pull #178) +- Improve Multipart parameter encoding. (Pull #167) +- Add the `headers` property to `BaseClient`. (Pull #159) +- Add support for Google's `brotli` library. (Pull #156) +- Remove deprecated TLS versions (TLSv1 and TLSv1.1) from default `SSLConfig`. (Pull #155) +- Fix `URL.join(...)` to work similarly to RFC 3986 URL joining. (Pull #144) + +--- + +## 0.6.8 (July 25, 2019) + +- Check for disconnections when searching for an available + connection in `ConnectionPool.keepalive_connections` (Pull #145) +- Allow string comparison for `URL` objects (Pull #139) +- Add HTTP status codes 418 and 451 (Pull #135) +- Add support for client certificate passwords (Pull #118) +- Enable post-handshake client cert authentication for TLSv1.3 (Pull #118) +- Disable using `commonName` for hostname checking for OpenSSL 1.1.0+ (Pull #118) +- Detect encoding for `Response.json()` (Pull #116) + +## 0.6.7 (July 8, 2019) + +- Check for connection aliveness on re-acquiry (Pull #111) + +## 0.6.6 (July 3, 2019) + +- Improve `USER_AGENT` (Pull #110) +- Add `Connection: keep-alive` by default to HTTP/1.1 connections. (Pull #110) + +## 0.6.5 (June 27, 2019) + +- Include `Host` header by default. 
(Pull #109) +- Improve HTTP protocol detection. (Pull #107) + +## 0.6.4 (June 25, 2019) + +- Implement read and write timeouts (Pull #104) + +## 0.6.3 (June 24, 2019) + +- Handle early connection closes (Pull #103) + +## 0.6.2 (June 23, 2019) + +- Use urllib3's `DEFAULT_CIPHERS` for the `SSLConfig` object. (Pull #100) + +## 0.6.1 (June 21, 2019) + +- Add support for setting a `base_url` on the `Client`. + +## 0.6.0 (June 21, 2019) + +- Honor `local_flow_control_window` for HTTP/2 connections (Pull #98) + + diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/RECORD new file mode 100644 index 0000000..eb640b1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/RECORD @@ -0,0 +1,32 @@ +../../../bin/httpx,sha256=QcKfM-fkjnr3_I8fP0pWdXHdn2-1KruTxl7xaMwkMQY,245 +httpx/__init__.py,sha256=GHtP9_yAnMm1lWiaMcrKZpDdFCXZHYk5cpqB_PmcuUE,3181 +httpx/__version__.py,sha256=MF2vNzMmwODNv9hDqn3z9n6s0bcTjjeVKPBkfrWYxlg,108 +httpx/_api.py,sha256=cVU9ErzaXve5rqoPoSHr9yJbovHtICrcxR7yBoNSeOw,13011 +httpx/_auth.py,sha256=q66GwQRkvk5FhTh9-5ujgLPUBRhhAoF7FusvccUEWws,10250 +httpx/_client.py,sha256=mv17TOUDnkLBHJJ6gjrnKdDwTU8miOEs0aCIMEuc210,67940 +httpx/_compat.py,sha256=IRU0J9d5bQrULQOtfTvM2Qxl5O0t0yzkGzbClLvmzPQ,1547 +httpx/_config.py,sha256=qg-rnH8RhRJOg_Rl0zaCmY-KMXKuMkgloosp_1yWb3U,12329 +httpx/_content.py,sha256=CX8wi5xD3xP5FBHNvIBihkaBBt8-qVvT0IQPeyABJjA,7957 +httpx/_decoders.py,sha256=78zHx-qjiECUel4QIqs784OSEOJY92RAYgyxMHsz56U,10208 +httpx/_exceptions.py,sha256=5n8ZKXee3AmXBzWzGtZFzvwsLpzfILpcOAqjilIjLg4,7879 +httpx/_main.py,sha256=Xtbnk1yyJ2QwzIJ3sutcBdQBmMCItLDVAZRKpo0vpHY,15508 +httpx/_models.py,sha256=KlYlXS9uvx4WyN_axQSzzswoN6GHoTMS0Q43W8vDEgI,42106 +httpx/_multipart.py,sha256=3cC9YwiFkhyZWHVTezhs4jI4LVG-vLEJ1E181zvzGWM,7901 +httpx/_status_codes.py,sha256=9p71cQ6jrGSq_kVHzoTR8Q3MOevq6z3WKA1kYl2SPWY,5586 +httpx/_types.py,sha256=xJGcb-L0L7Xm2vI_baRdmf7M9nF6RcjHD4EUDhOxy9E,3984 +httpx/_urls.py,sha256=dqsMdgZs5-rAToBy-kFIhRVDHXIPMdOruS3WxKMMHG0,27883 +httpx/_utils.py,sha256=4gvRr2bSR1l7swk_-SIcWEceO5FxescOhVN6pCoRaZA,16901 +httpx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +httpx/_transports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +httpx/_transports/asgi.py,sha256=4VI5ID_LjapQpF_pXhy8RPwh966y83ZIizqXV-wDzUI,4959 +httpx/_transports/base.py,sha256=mS6d9wr5pyz1umUKh3kp260LQXsZ4WJLsuMHwzUNRiU,2508 +httpx/_transports/default.py,sha256=xafKg7eajxSR8RNsM70OgMh5IIUnFPMwXvzK3m_HKRQ,12653 +httpx/_transports/mock.py,sha256=opEbEyPD9cQ8LntlQRhfBhZ8fVJ_dEodV7WNp0wgIYg,923 +httpx/_transports/wsgi.py,sha256=RVcf6gbmFjutfIingDG74TTZUfRJljuG4ZKTHdk1HpM,4405 +httpx-0.23.0.dist-info/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508 +httpx-0.23.0.dist-info/METADATA,sha256=xD26jQUfrxwZSSncib6WE3byJIbN79uatkxYdifzdTo,52005 +httpx-0.23.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +httpx-0.23.0.dist-info/entry_points.txt,sha256=RfPz07Z-6okb4DXupAwHIQFyjq2Io_W7nlac92f4kqM,36 +httpx-0.23.0.dist-info/top_level.txt,sha256=8QYqFolXm27kV0x-8K8V5t-uZskSHKtq8jZVxGwtIq4,24 +httpx-0.23.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +httpx-0.23.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/entry_points.txt new file mode 100644 index 0000000..fef52b6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +httpx=httpx:main + diff --git a/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/top_level.txt new file mode 100644 index 0000000..c180eb2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx-0.23.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +httpx +httpx/_transports diff --git a/myenv/lib/python3.9/site-packages/httpx/__init__.py b/myenv/lib/python3.9/site-packages/httpx/__init__.py new file mode 100644 index 0000000..b93ca92 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/__init__.py @@ -0,0 +1,137 @@ +from .__version__ import __description__, __title__, __version__ +from ._api import delete, get, head, options, patch, post, put, request, stream +from ._auth import Auth, BasicAuth, DigestAuth +from ._client import USE_CLIENT_DEFAULT, AsyncClient, Client +from ._config import Limits, Proxy, Timeout, create_ssl_context +from ._content import ByteStream +from ._exceptions import ( + CloseError, + ConnectError, + ConnectTimeout, + CookieConflict, + DecodingError, + HTTPError, + HTTPStatusError, + InvalidURL, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + RequestError, + RequestNotRead, + ResponseNotRead, + StreamClosed, + StreamConsumed, + StreamError, + TimeoutException, + TooManyRedirects, + TransportError, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import Cookies, Headers, Request, Response +from ._status_codes import codes +from ._transports.asgi import ASGITransport +from ._transports.base import AsyncBaseTransport, BaseTransport +from ._transports.default import AsyncHTTPTransport, HTTPTransport +from ._transports.mock import MockTransport +from ._transports.wsgi import WSGITransport +from ._types import AsyncByteStream, SyncByteStream +from ._urls import URL, QueryParams + +try: + from ._main import main +except ImportError: # pragma: nocover + + def main() -> None: # type: ignore + import sys + + print( + "The httpx command line client could not run because the required " + "dependencies were not installed.\nMake sure you've installed " + "everything with: pip install 'httpx[cli]'" + ) + sys.exit(1) + + +__all__ = [ + "__description__", + "__title__", + "__version__", + "ASGITransport", + "AsyncBaseTransport", + "AsyncByteStream", + "AsyncClient", + "AsyncHTTPTransport", + "Auth", + "BaseTransport", + "BasicAuth", + "ByteStream", + "Client", + "CloseError", + "codes", + "ConnectError", + "ConnectTimeout", + "CookieConflict", + "Cookies", + "create_ssl_context", + "DecodingError", + "delete", + "DigestAuth", + "get", + "head", + "Headers", + "HTTPError", + "HTTPStatusError", + "HTTPTransport", + "InvalidURL", + "Limits", + "LocalProtocolError", + "main", + "MockTransport", + "NetworkError", + "options", + "patch", + "PoolTimeout", + "post", + "ProtocolError", + "Proxy", + "ProxyError", + "put", + "QueryParams", + "ReadError", + "ReadTimeout", + "RemoteProtocolError", + 
"request", + "Request", + "RequestError", + "RequestNotRead", + "Response", + "ResponseNotRead", + "stream", + "StreamClosed", + "StreamConsumed", + "StreamError", + "SyncByteStream", + "Timeout", + "TimeoutException", + "TooManyRedirects", + "TransportError", + "UnsupportedProtocol", + "URL", + "USE_CLIENT_DEFAULT", + "WriteError", + "WriteTimeout", + "WSGITransport", +] + + +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + setattr(__locals[__name], "__module__", "httpx") # noqa diff --git a/myenv/lib/python3.9/site-packages/httpx/__version__.py b/myenv/lib/python3.9/site-packages/httpx/__version__.py new file mode 100644 index 0000000..68831d0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/__version__.py @@ -0,0 +1,3 @@ +__title__ = "httpx" +__description__ = "A next generation HTTP client, for Python 3." +__version__ = "0.23.0" diff --git a/myenv/lib/python3.9/site-packages/httpx/_api.py b/myenv/lib/python3.9/site-packages/httpx/_api.py new file mode 100644 index 0000000..571289c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_api.py @@ -0,0 +1,445 @@ +import typing +from contextlib import contextmanager + +from ._client import Client +from ._config import DEFAULT_TIMEOUT_CONFIG +from ._models import Response +from ._types import ( + AuthTypes, + CertTypes, + CookieTypes, + HeaderTypes, + ProxiesTypes, + QueryParamTypes, + RequestContent, + RequestData, + RequestFiles, + TimeoutTypes, + URLTypes, + VerifyTypes, +) + + +def request( + method: str, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + trust_env: bool = True, +) -> Response: + """ + Sends an HTTP request. + + **Parameters:** + + * **method** - HTTP method for the new `Request` object: `GET`, `OPTIONS`, + `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`. + * **url** - URL for the new `Request` object. + * **params** - *(optional)* Query parameters to include in the URL, as a + string, dictionary, or sequence of two-tuples. + * **content** - *(optional)* Binary content to include in the body of the + request, as bytes or a byte iterator. + * **data** - *(optional)* Form data to include in the body of the request, + as a dictionary. + * **files** - *(optional)* A dictionary of upload files to include in the + body of the request. + * **json** - *(optional)* A JSON serializable object to include in the body + of the request. + * **headers** - *(optional)* Dictionary of HTTP headers to include in the + request. + * **cookies** - *(optional)* Dictionary of Cookie items to include in the + request. + * **auth** - *(optional)* An authentication class to use when sending the + request. + * **proxies** - *(optional)* A dictionary mapping proxy keys to proxy URLs. + * **timeout** - *(optional)* The timeout configuration to use when sending + the request. + * **follow_redirects** - *(optional)* Enables or disables HTTP redirects. 
+ * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to + verify the identity of requested hosts. Either `True` (default CA bundle), + a path to an SSL certificate file, an `ssl.SSLContext`, or `False` + (which will disable verification). + * **cert** - *(optional)* An SSL certificate used by the requested host + to authenticate the client. Either a path to an SSL certificate file, or + two-tuple of (certificate file, key file), or a three-tuple of (certificate + file, key file, password). + * **trust_env** - *(optional)* Enables or disables usage of environment + variables for configuration. + + **Returns:** `Response` + + Usage: + + ``` + >>> import httpx + >>> response = httpx.request('GET', 'https://httpbin.org/get') + >>> response + + ``` + """ + with Client( + cookies=cookies, + proxies=proxies, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) as client: + return client.request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + auth=auth, + follow_redirects=follow_redirects, + ) + + +@contextmanager +def stream( + method: str, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + trust_env: bool = True, +) -> typing.Iterator[Response]: + """ + Alternative to `httpx.request()` that streams the response body + instead of loading it into memory at once. + + **Parameters**: See `httpx.request`. + + See also: [Streaming Responses][0] + + [0]: /quickstart#streaming-responses + """ + with Client( + cookies=cookies, + proxies=proxies, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) as client: + with client.stream( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + auth=auth, + follow_redirects=follow_redirects, + ) as response: + yield response + + +def get( + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `GET` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `GET` requests should not include a request body. 
+ """ + return request( + "GET", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def options( + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends an `OPTIONS` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `OPTIONS` requests should not include a request body. + """ + return request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def head( + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `HEAD` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `HEAD` requests should not include a request body. + """ + return request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def post( + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `POST` request. + + **Parameters**: See `httpx.request`. 
+ """ + return request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def put( + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `PUT` request. + + **Parameters**: See `httpx.request`. + """ + return request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def patch( + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `PATCH` request. + + **Parameters**: See `httpx.request`. + """ + return request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def delete( + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Optional[AuthTypes] = None, + proxies: typing.Optional[ProxiesTypes] = None, + follow_redirects: bool = False, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `DELETE` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `DELETE` requests should not include a request body. 
+ """ + return request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxies=proxies, + follow_redirects=follow_redirects, + cert=cert, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) diff --git a/myenv/lib/python3.9/site-packages/httpx/_auth.py b/myenv/lib/python3.9/site-packages/httpx/_auth.py new file mode 100644 index 0000000..2b00b49 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_auth.py @@ -0,0 +1,304 @@ +import hashlib +import os +import re +import time +import typing +from base64 import b64encode +from urllib.request import parse_http_list + +from ._exceptions import ProtocolError +from ._models import Request, Response +from ._utils import to_bytes, to_str, unquote + + +class Auth: + """ + Base class for all authentication schemes. + + To implement a custom authentication scheme, subclass `Auth` and override + the `.auth_flow()` method. + + If the authentication scheme does I/O such as disk access or network calls, or uses + synchronization primitives such as locks, you should override `.sync_auth_flow()` + and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized + implementations that will be used by `Client` and `AsyncClient` respectively. + """ + + requires_request_body = False + requires_response_body = False + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + """ + Execute the authentication flow. + + To dispatch a request, `yield` it: + + ``` + yield request + ``` + + The client will `.send()` the response back into the flow generator. You can + access it like so: + + ``` + response = yield request + ``` + + A `return` (or reaching the end of the generator) will result in the + client returning the last response obtained from the server. + + You can dispatch as many requests as is necessary. + """ + yield request + + def sync_auth_flow( + self, request: Request + ) -> typing.Generator[Request, Response, None]: + """ + Execute the authentication flow synchronously. + + By default, this defers to `.auth_flow()`. You should override this method + when the authentication scheme does I/O and/or uses concurrency primitives. + """ + if self.requires_request_body: + request.read() + + flow = self.auth_flow(request) + request = next(flow) + + while True: + response = yield request + if self.requires_response_body: + response.read() + + try: + request = flow.send(response) + except StopIteration: + break + + async def async_auth_flow( + self, request: Request + ) -> typing.AsyncGenerator[Request, Response]: + """ + Execute the authentication flow asynchronously. + + By default, this defers to `.auth_flow()`. You should override this method + when the authentication scheme does I/O and/or uses concurrency primitives. + """ + if self.requires_request_body: + await request.aread() + + flow = self.auth_flow(request) + request = next(flow) + + while True: + response = yield request + if self.requires_response_body: + await response.aread() + + try: + request = flow.send(response) + except StopIteration: + break + + +class FunctionAuth(Auth): + """ + Allows the 'auth' argument to be passed as a simple callable function, + that takes the request, and returns a new, modified request. 
+ """ + + def __init__(self, func: typing.Callable[[Request], Request]) -> None: + self._func = func + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + yield self._func(request) + + +class BasicAuth(Auth): + """ + Allows the 'auth' argument to be passed as a (username, password) pair, + and uses HTTP Basic authentication. + """ + + def __init__( + self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] + ): + self._auth_header = self._build_auth_header(username, password) + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + request.headers["Authorization"] = self._auth_header + yield request + + def _build_auth_header( + self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] + ) -> str: + userpass = b":".join((to_bytes(username), to_bytes(password))) + token = b64encode(userpass).decode() + return f"Basic {token}" + + +class DigestAuth(Auth): + _ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable] = { + "MD5": hashlib.md5, + "MD5-SESS": hashlib.md5, + "SHA": hashlib.sha1, + "SHA-SESS": hashlib.sha1, + "SHA-256": hashlib.sha256, + "SHA-256-SESS": hashlib.sha256, + "SHA-512": hashlib.sha512, + "SHA-512-SESS": hashlib.sha512, + } + + def __init__( + self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] + ) -> None: + self._username = to_bytes(username) + self._password = to_bytes(password) + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + response = yield request + + if response.status_code != 401 or "www-authenticate" not in response.headers: + # If the response is not a 401 then we don't + # need to build an authenticated request. + return + + for auth_header in response.headers.get_list("www-authenticate"): + if auth_header.lower().startswith("digest "): + break + else: + # If the response does not include a 'WWW-Authenticate: Digest ...' + # header, then we don't need to build an authenticated request. + return + + challenge = self._parse_challenge(request, response, auth_header) + request.headers["Authorization"] = self._build_auth_header(request, challenge) + yield request + + def _parse_challenge( + self, request: Request, response: Response, auth_header: str + ) -> "_DigestAuthChallenge": + """ + Returns a challenge from a Digest WWW-Authenticate header. + These take the form of: + `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"` + """ + scheme, _, fields = auth_header.partition(" ") + + # This method should only ever have been called with a Digest auth header. 
+ assert scheme.lower() == "digest" + + header_dict: typing.Dict[str, str] = {} + for field in parse_http_list(fields): + key, value = field.strip().split("=", 1) + header_dict[key] = unquote(value) + + try: + realm = header_dict["realm"].encode() + nonce = header_dict["nonce"].encode() + algorithm = header_dict.get("algorithm", "MD5") + opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None + qop = header_dict["qop"].encode() if "qop" in header_dict else None + return _DigestAuthChallenge( + realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop + ) + except KeyError as exc: + message = "Malformed Digest WWW-Authenticate header" + raise ProtocolError(message, request=request) from exc + + def _build_auth_header( + self, request: Request, challenge: "_DigestAuthChallenge" + ) -> str: + hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()] + + def digest(data: bytes) -> bytes: + return hash_func(data).hexdigest().encode() + + A1 = b":".join((self._username, challenge.realm, self._password)) + + path = request.url.raw_path + A2 = b":".join((request.method.encode(), path)) + # TODO: implement auth-int + HA2 = digest(A2) + + nonce_count = 1 # TODO: implement nonce counting + nc_value = b"%08x" % nonce_count + cnonce = self._get_client_nonce(nonce_count, challenge.nonce) + + HA1 = digest(A1) + if challenge.algorithm.lower().endswith("-sess"): + HA1 = digest(b":".join((HA1, challenge.nonce, cnonce))) + + qop = self._resolve_qop(challenge.qop, request=request) + if qop is None: + digest_data = [HA1, challenge.nonce, HA2] + else: + digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2] + key_digest = b":".join(digest_data) + + format_args = { + "username": self._username, + "realm": challenge.realm, + "nonce": challenge.nonce, + "uri": path, + "response": digest(b":".join((HA1, key_digest))), + "algorithm": challenge.algorithm.encode(), + } + if challenge.opaque: + format_args["opaque"] = challenge.opaque + if qop: + format_args["qop"] = b"auth" + format_args["nc"] = nc_value + format_args["cnonce"] = cnonce + + return "Digest " + self._get_header_value(format_args) + + def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes: + s = str(nonce_count).encode() + s += nonce + s += time.ctime().encode() + s += os.urandom(8) + + return hashlib.sha1(s).hexdigest()[:16].encode() + + def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str: + NON_QUOTED_FIELDS = ("algorithm", "qop", "nc") + QUOTED_TEMPLATE = '{}="{}"' + NON_QUOTED_TEMPLATE = "{}={}" + + header_value = "" + for i, (field, value) in enumerate(header_fields.items()): + if i > 0: + header_value += ", " + template = ( + QUOTED_TEMPLATE + if field not in NON_QUOTED_FIELDS + else NON_QUOTED_TEMPLATE + ) + header_value += template.format(field, to_str(value)) + + return header_value + + def _resolve_qop( + self, qop: typing.Optional[bytes], request: Request + ) -> typing.Optional[bytes]: + if qop is None: + return None + qops = re.split(b", ?", qop) + if b"auth" in qops: + return b"auth" + + if qops == [b"auth-int"]: + raise NotImplementedError("Digest auth-int support is not yet implemented") + + message = f'Unexpected qop value "{qop!r}" in digest auth' + raise ProtocolError(message, request=request) + + +class _DigestAuthChallenge(typing.NamedTuple): + realm: bytes + nonce: bytes + algorithm: str + opaque: typing.Optional[bytes] + qop: typing.Optional[bytes] diff --git a/myenv/lib/python3.9/site-packages/httpx/_client.py 
b/myenv/lib/python3.9/site-packages/httpx/_client.py new file mode 100644 index 0000000..2ed3f49 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_client.py @@ -0,0 +1,2000 @@ +import datetime +import enum +import typing +import warnings +from contextlib import asynccontextmanager, contextmanager +from types import TracebackType + +from .__version__ import __version__ +from ._auth import Auth, BasicAuth, FunctionAuth +from ._config import ( + DEFAULT_LIMITS, + DEFAULT_MAX_REDIRECTS, + DEFAULT_TIMEOUT_CONFIG, + Limits, + Proxy, + Timeout, +) +from ._decoders import SUPPORTED_DECODERS +from ._exceptions import ( + InvalidURL, + RemoteProtocolError, + TooManyRedirects, + request_context, +) +from ._models import Cookies, Headers, Request, Response +from ._status_codes import codes +from ._transports.asgi import ASGITransport +from ._transports.base import AsyncBaseTransport, BaseTransport +from ._transports.default import AsyncHTTPTransport, HTTPTransport +from ._transports.wsgi import WSGITransport +from ._types import ( + AsyncByteStream, + AuthTypes, + CertTypes, + CookieTypes, + HeaderTypes, + ProxiesTypes, + QueryParamTypes, + RequestContent, + RequestData, + RequestFiles, + SyncByteStream, + TimeoutTypes, + URLTypes, + VerifyTypes, +) +from ._urls import URL, QueryParams +from ._utils import ( + NetRCInfo, + Timer, + URLPattern, + get_environment_proxies, + get_logger, + is_https_redirect, + same_origin, +) + +# The type annotation for @classmethod and context managers here follows PEP 484 +# https://www.python.org/dev/peps/pep-0484/#annotating-instance-and-class-methods +T = typing.TypeVar("T", bound="Client") +U = typing.TypeVar("U", bound="AsyncClient") + + +class UseClientDefault: + """ + For some parameters such as `auth=...` and `timeout=...` we need to be able + to indicate the default "unset" state, in a way that is distinctly different + to using `None`. + + The default "unset" state indicates that whatever default is set on the + client should be used. This is different to setting `None`, which + explicitly disables the parameter, possibly overriding a client default. + + For example we use `timeout=USE_CLIENT_DEFAULT` in the `request()` signature. + Omitting the `timeout` parameter will send a request using whatever default + timeout has been configured on the client. Including `timeout=None` will + ensure no timeout is used. + + Note that user code shouldn't need to use the `USE_CLIENT_DEFAULT` constant, + but it is used internally when a parameter is not included. + """ + + +USE_CLIENT_DEFAULT = UseClientDefault() + + +logger = get_logger(__name__) + +USER_AGENT = f"python-httpx/{__version__}" +ACCEPT_ENCODING = ", ".join( + [key for key in SUPPORTED_DECODERS.keys() if key != "identity"] +) + + +class ClientState(enum.Enum): + # UNOPENED: + # The client has been instantiated, but has not been used to send a request, + # or been opened by entering the context of a `with` block. + UNOPENED = 1 + # OPENED: + # The client has either sent a request, or is within a `with` block. + OPENED = 2 + # CLOSED: + # The client has either exited the `with` block, or `close()` has + # been called explicitly. + CLOSED = 3 + + +class BoundSyncStream(SyncByteStream): + """ + A byte stream that is bound to a given response instance, and that + ensures the `response.elapsed` is set once the response is closed. 
+ """ + + def __init__( + self, stream: SyncByteStream, response: Response, timer: Timer + ) -> None: + self._stream = stream + self._response = response + self._timer = timer + + def __iter__(self) -> typing.Iterator[bytes]: + for chunk in self._stream: + yield chunk + + def close(self) -> None: + seconds = self._timer.sync_elapsed() + self._response.elapsed = datetime.timedelta(seconds=seconds) + self._stream.close() + + +class BoundAsyncStream(AsyncByteStream): + """ + An async byte stream that is bound to a given response instance, and that + ensures the `response.elapsed` is set once the response is closed. + """ + + def __init__( + self, stream: AsyncByteStream, response: Response, timer: Timer + ) -> None: + self._stream = stream + self._response = response + self._timer = timer + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + async for chunk in self._stream: + yield chunk + + async def aclose(self) -> None: + seconds = await self._timer.async_elapsed() + self._response.elapsed = datetime.timedelta(seconds=seconds) + await self._stream.aclose() + + +class BaseClient: + def __init__( + self, + *, + auth: typing.Optional[AuthTypes] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + max_redirects: int = DEFAULT_MAX_REDIRECTS, + event_hooks: typing.Optional[ + typing.Mapping[str, typing.List[typing.Callable]] + ] = None, + base_url: URLTypes = "", + trust_env: bool = True, + default_encoding: typing.Union[str, typing.Callable[[bytes], str]] = "utf-8", + ): + event_hooks = {} if event_hooks is None else event_hooks + + self._base_url = self._enforce_trailing_slash(URL(base_url)) + + self._auth = self._build_auth(auth) + self._params = QueryParams(params) + self.headers = Headers(headers) + self._cookies = Cookies(cookies) + self._timeout = Timeout(timeout) + self.follow_redirects = follow_redirects + self.max_redirects = max_redirects + self._event_hooks = { + "request": list(event_hooks.get("request", [])), + "response": list(event_hooks.get("response", [])), + } + self._trust_env = trust_env + self._default_encoding = default_encoding + self._netrc = NetRCInfo() + self._state = ClientState.UNOPENED + + @property + def is_closed(self) -> bool: + """ + Check if the client being closed + """ + return self._state == ClientState.CLOSED + + @property + def trust_env(self) -> bool: + return self._trust_env + + def _enforce_trailing_slash(self, url: URL) -> URL: + if url.raw_path.endswith(b"/"): + return url + return url.copy_with(raw_path=url.raw_path + b"/") + + def _get_proxy_map( + self, proxies: typing.Optional[ProxiesTypes], allow_env_proxies: bool + ) -> typing.Dict[str, typing.Optional[Proxy]]: + if proxies is None: + if allow_env_proxies: + return { + key: None if url is None else Proxy(url=url) + for key, url in get_environment_proxies().items() + } + return {} + if isinstance(proxies, dict): + new_proxies = {} + for key, value in proxies.items(): + proxy = Proxy(url=value) if isinstance(value, (str, URL)) else value + new_proxies[str(key)] = proxy + return new_proxies + else: + proxy = Proxy(url=proxies) if isinstance(proxies, (str, URL)) else proxies + return {"all://": proxy} + + @property + def timeout(self) -> Timeout: + return self._timeout + + @timeout.setter + def timeout(self, timeout: TimeoutTypes) -> None: + self._timeout = Timeout(timeout) + + @property + def 
event_hooks(self) -> typing.Dict[str, typing.List[typing.Callable]]: + return self._event_hooks + + @event_hooks.setter + def event_hooks( + self, event_hooks: typing.Dict[str, typing.List[typing.Callable]] + ) -> None: + self._event_hooks = { + "request": list(event_hooks.get("request", [])), + "response": list(event_hooks.get("response", [])), + } + + @property + def auth(self) -> typing.Optional[Auth]: + """ + Authentication class used when none is passed at the request-level. + + See also [Authentication][0]. + + [0]: /quickstart/#authentication + """ + return self._auth + + @auth.setter + def auth(self, auth: AuthTypes) -> None: + self._auth = self._build_auth(auth) + + @property + def base_url(self) -> URL: + """ + Base URL to use when sending requests with relative URLs. + """ + return self._base_url + + @base_url.setter + def base_url(self, url: URLTypes) -> None: + self._base_url = self._enforce_trailing_slash(URL(url)) + + @property + def headers(self) -> Headers: + """ + HTTP headers to include when sending requests. + """ + return self._headers + + @headers.setter + def headers(self, headers: HeaderTypes) -> None: + client_headers = Headers( + { + b"Accept": b"*/*", + b"Accept-Encoding": ACCEPT_ENCODING.encode("ascii"), + b"Connection": b"keep-alive", + b"User-Agent": USER_AGENT.encode("ascii"), + } + ) + client_headers.update(headers) + self._headers = client_headers + + @property + def cookies(self) -> Cookies: + """ + Cookie values to include when sending requests. + """ + return self._cookies + + @cookies.setter + def cookies(self, cookies: CookieTypes) -> None: + self._cookies = Cookies(cookies) + + @property + def params(self) -> QueryParams: + """ + Query parameters to include in the URL when sending requests. + """ + return self._params + + @params.setter + def params(self, params: QueryParamTypes) -> None: + self._params = QueryParams(params) + + def build_request( + self, + method: str, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Request: + """ + Build and return a request instance. + + * The `params`, `headers` and `cookies` arguments + are merged with any values set on the client. + * The `url` argument is merged with any `base_url` set on the client. + + See also: [Request instances][0] + + [0]: /advanced/#request-instances + """ + url = self._merge_url(url) + headers = self._merge_headers(headers) + cookies = self._merge_cookies(cookies) + params = self._merge_queryparams(params) + extensions = {} if extensions is None else extensions + if "timeout" not in extensions: + timeout = ( + self.timeout + if isinstance(timeout, UseClientDefault) + else Timeout(timeout) + ) + extensions["timeout"] = timeout.as_dict() + return Request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + extensions=extensions, + ) + + def _merge_url(self, url: URLTypes) -> URL: + """ + Merge a URL argument together with any 'base_url' on the client, + to create the URL used for the outgoing request. 
+ """ + merge_url = URL(url) + if merge_url.is_relative_url: + # To merge URLs we always append to the base URL. To get this + # behaviour correct we always ensure the base URL ends in a '/' + # separator, and strip any leading '/' from the merge URL. + # + # So, eg... + # + # >>> client = Client(base_url="https://www.example.com/subpath") + # >>> client.base_url + # URL('https://www.example.com/subpath/') + # >>> client.build_request("GET", "/path").url + # URL('https://www.example.com/subpath/path') + merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/") + return self.base_url.copy_with(raw_path=merge_raw_path) + return merge_url + + def _merge_cookies( + self, cookies: typing.Optional[CookieTypes] = None + ) -> typing.Optional[CookieTypes]: + """ + Merge a cookies argument together with any cookies on the client, + to create the cookies used for the outgoing request. + """ + if cookies or self.cookies: + merged_cookies = Cookies(self.cookies) + merged_cookies.update(cookies) + return merged_cookies + return cookies + + def _merge_headers( + self, headers: typing.Optional[HeaderTypes] = None + ) -> typing.Optional[HeaderTypes]: + """ + Merge a headers argument together with any headers on the client, + to create the headers used for the outgoing request. + """ + merged_headers = Headers(self.headers) + merged_headers.update(headers) + return merged_headers + + def _merge_queryparams( + self, params: typing.Optional[QueryParamTypes] = None + ) -> typing.Optional[QueryParamTypes]: + """ + Merge a queryparams argument together with any queryparams on the client, + to create the queryparams used for the outgoing request. + """ + if params or self.params: + merged_queryparams = QueryParams(self.params) + return merged_queryparams.merge(params) + return params + + def _build_auth(self, auth: typing.Optional[AuthTypes]) -> typing.Optional[Auth]: + if auth is None: + return None + elif isinstance(auth, tuple): + return BasicAuth(username=auth[0], password=auth[1]) + elif isinstance(auth, Auth): + return auth + elif callable(auth): + return FunctionAuth(func=auth) + else: + raise TypeError(f'Invalid "auth" argument: {auth!r}') + + def _build_request_auth( + self, + request: Request, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + ) -> Auth: + auth = ( + self._auth if isinstance(auth, UseClientDefault) else self._build_auth(auth) + ) + + if auth is not None: + return auth + + username, password = request.url.username, request.url.password + if username or password: + return BasicAuth(username=username, password=password) + + if self.trust_env and "Authorization" not in request.headers: + credentials = self._netrc.get_credentials(request.url.host) + if credentials is not None: + return BasicAuth(username=credentials[0], password=credentials[1]) + + return Auth() + + def _build_redirect_request(self, request: Request, response: Response) -> Request: + """ + Given a request and a redirect response, return a new request that + should be used to effect the redirect. 
+ """ + method = self._redirect_method(request, response) + url = self._redirect_url(request, response) + headers = self._redirect_headers(request, url, method) + stream = self._redirect_stream(request, method) + cookies = Cookies(self.cookies) + return Request( + method=method, + url=url, + headers=headers, + cookies=cookies, + stream=stream, + extensions=request.extensions, + ) + + def _redirect_method(self, request: Request, response: Response) -> str: + """ + When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.SEE_OTHER and method != "HEAD": + method = "GET" + + # Do what the browsers do, despite standards... + # Turn 302s into GETs. + if response.status_code == codes.FOUND and method != "HEAD": + method = "GET" + + # If a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in 'requests' issue 1704. + if response.status_code == codes.MOVED_PERMANENTLY and method == "POST": + method = "GET" + + return method + + def _redirect_url(self, request: Request, response: Response) -> URL: + """ + Return the URL for the redirect to follow. + """ + location = response.headers["Location"] + + try: + url = URL(location) + except InvalidURL as exc: + raise RemoteProtocolError( + f"Invalid URL in location header: {exc}.", request=request + ) from None + + # Handle malformed 'Location' headers that are "absolute" form, have no host. + # See: https://github.com/encode/httpx/issues/771 + if url.scheme and not url.host: + url = url.copy_with(host=request.url.host) + + # Facilitate relative 'Location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + if url.is_relative_url: + url = request.url.join(url) + + # Attach previous fragment if needed (RFC 7231 7.1.2) + if request.url.fragment and not url.fragment: + url = url.copy_with(fragment=request.url.fragment) + + return url + + def _redirect_headers(self, request: Request, url: URL, method: str) -> Headers: + """ + Return the headers that should be used for the redirect request. + """ + headers = Headers(request.headers) + + if not same_origin(url, request.url): + if not is_https_redirect(request.url, url): + # Strip Authorization headers when responses are redirected + # away from the origin. (Except for direct HTTP to HTTPS redirects.) + headers.pop("Authorization", None) + + # Update the Host header. + headers["Host"] = url.netloc.decode("ascii") + + if method != request.method and method == "GET": + # If we've switch to a 'GET' request, then strip any headers which + # are only relevant to the request body. + headers.pop("Content-Length", None) + headers.pop("Transfer-Encoding", None) + + # We should use the client cookie store to determine any cookie header, + # rather than whatever was on the original outgoing request. + headers.pop("Cookie", None) + + return headers + + def _redirect_stream( + self, request: Request, method: str + ) -> typing.Optional[typing.Union[SyncByteStream, AsyncByteStream]]: + """ + Return the body that should be used for the redirect request. + """ + if method != request.method and method == "GET": + return None + + return request.stream + + +class Client(BaseClient): + """ + An HTTP client, with connection pooling, HTTP/2, redirects, cookie persistence, etc. 
+
+ Usage:
+
+ ```python
+ >>> client = httpx.Client()
+ >>> response = client.get('https://example.org')
+ ```
+
+ **Parameters:**
+
+ * **auth** - *(optional)* An authentication class to use when sending
+ requests.
+ * **params** - *(optional)* Query parameters to include in request URLs, as
+ a string, dictionary, or sequence of two-tuples.
+ * **headers** - *(optional)* Dictionary of HTTP headers to include when
+ sending requests.
+ * **cookies** - *(optional)* Dictionary of Cookie items to include when
+ sending requests.
+ * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to
+ verify the identity of requested hosts. Either `True` (default CA bundle),
+ a path to an SSL certificate file, an `ssl.SSLContext`, or `False`
+ (which will disable verification).
+ * **cert** - *(optional)* An SSL certificate used by the requested host
+ to authenticate the client. Either a path to an SSL certificate file, or
+ two-tuple of (certificate file, key file), or a three-tuple of (certificate
+ file, key file, password).
+ * **proxies** - *(optional)* A dictionary mapping proxy keys to proxy
+ URLs.
+ * **timeout** - *(optional)* The timeout configuration to use when sending
+ requests.
+ * **limits** - *(optional)* The limits configuration to use.
+ * **max_redirects** - *(optional)* The maximum number of redirect responses
+ that should be followed.
+ * **base_url** - *(optional)* A URL to use as the base when building
+ request URLs.
+ * **transport** - *(optional)* A transport class to use for sending requests
+ over the network.
+ * **app** - *(optional)* A WSGI application to send requests to,
+ rather than sending actual network requests.
+ * **trust_env** - *(optional)* Enables or disables usage of environment
+ variables for configuration.
+ * **default_encoding** - *(optional)* The default encoding to use for decoding
+ response text, if no charset information is included in a response Content-Type
+ header. Set to a callable for automatic character set detection. Default: "utf-8".
+ """
+
+ def __init__(
+ self,
+ *,
+ auth: typing.Optional[AuthTypes] = None,
+ params: typing.Optional[QueryParamTypes] = None,
+ headers: typing.Optional[HeaderTypes] = None,
+ cookies: typing.Optional[CookieTypes] = None,
+ verify: VerifyTypes = True,
+ cert: typing.Optional[CertTypes] = None,
+ http1: bool = True,
+ http2: bool = False,
+ proxies: typing.Optional[ProxiesTypes] = None,
+ mounts: typing.Optional[typing.Mapping[str, BaseTransport]] = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ follow_redirects: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ max_redirects: int = DEFAULT_MAX_REDIRECTS,
+ event_hooks: typing.Optional[
+ typing.Mapping[str, typing.List[typing.Callable]]
+ ] = None,
+ base_url: URLTypes = "",
+ transport: typing.Optional[BaseTransport] = None,
+ app: typing.Optional[typing.Callable] = None,
+ trust_env: bool = True,
+ default_encoding: typing.Union[str, typing.Callable[[bytes], str]] = "utf-8",
+ ):
+ super().__init__(
+ auth=auth,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ timeout=timeout,
+ follow_redirects=follow_redirects,
+ max_redirects=max_redirects,
+ event_hooks=event_hooks,
+ base_url=base_url,
+ trust_env=trust_env,
+ default_encoding=default_encoding,
+ )
+
+ if http2:
+ try:
+ import h2 # noqa
+ except ImportError: # pragma: nocover
+ raise ImportError(
+ "Using http2=True, but the 'h2' package is not installed. "
+ "Make sure to install httpx using `pip install httpx[http2]`."
+ ) from None + + allow_env_proxies = trust_env and app is None and transport is None + proxy_map = self._get_proxy_map(proxies, allow_env_proxies) + + self._transport = self._init_transport( + verify=verify, + cert=cert, + http1=http1, + http2=http2, + limits=limits, + transport=transport, + app=app, + trust_env=trust_env, + ) + self._mounts: typing.Dict[URLPattern, typing.Optional[BaseTransport]] = { + URLPattern(key): None + if proxy is None + else self._init_proxy_transport( + proxy, + verify=verify, + cert=cert, + http1=http1, + http2=http2, + limits=limits, + trust_env=trust_env, + ) + for key, proxy in proxy_map.items() + } + if mounts is not None: + self._mounts.update( + {URLPattern(key): transport for key, transport in mounts.items()} + ) + + self._mounts = dict(sorted(self._mounts.items())) + + def _init_transport( + self, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + transport: typing.Optional[BaseTransport] = None, + app: typing.Optional[typing.Callable] = None, + trust_env: bool = True, + ) -> BaseTransport: + if transport is not None: + return transport + + if app is not None: + return WSGITransport(app=app) + + return HTTPTransport( + verify=verify, + cert=cert, + http1=http1, + http2=http2, + limits=limits, + trust_env=trust_env, + ) + + def _init_proxy_transport( + self, + proxy: Proxy, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + trust_env: bool = True, + ) -> BaseTransport: + return HTTPTransport( + verify=verify, + cert=cert, + http1=http1, + http2=http2, + limits=limits, + trust_env=trust_env, + proxy=proxy, + ) + + def _transport_for_url(self, url: URL) -> BaseTransport: + """ + Returns the transport instance that should be used for a given URL. + This will either be the standard connection pool, or a proxy. + """ + for pattern, transport in self._mounts.items(): + if pattern.matches(url): + return self._transport if transport is None else transport + + return self._transport + + def request( + self, + method: str, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Build and send a request. + + Equivalent to: + + ```python + request = client.build_request(...) + response = client.send(request, ...) + ``` + + See `Client.build_request()`, `Client.send()` and + [Merging of configuration][0] for how the various parameters + are merged with client-level configuration. + + [0]: /advanced/#merging-of-configuration + """ + if cookies is not None: + message = ( + "Setting per-request cookies=<...> is being deprecated, because " + "the expected behaviour on cookie persistence is ambiguous. Set " + "cookies directly on the client instance instead." 
+ ) + warnings.warn(message, DeprecationWarning) + + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + return self.send(request, auth=auth, follow_redirects=follow_redirects) + + @contextmanager + def stream( + self, + method: str, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> typing.Iterator[Response]: + """ + Alternative to `httpx.request()` that streams the response body + instead of loading it into memory at once. + + **Parameters**: See `httpx.request`. + + See also: [Streaming Responses][0] + + [0]: /quickstart#streaming-responses + """ + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + response = self.send( + request=request, + auth=auth, + follow_redirects=follow_redirects, + stream=True, + ) + try: + yield response + finally: + response.close() + + def send( + self, + request: Request, + *, + stream: bool = False, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + ) -> Response: + """ + Send a request. + + The request is sent as-is, unmodified. + + Typically you'll want to build one with `Client.build_request()` + so that any client-level configuration is merged into the request, + but passing an explicit `httpx.Request()` is supported as well. 
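+
+ A minimal sketch (the URL is illustrative):
+
+ ```python
+ import httpx
+
+ with httpx.Client() as client:
+     request = client.build_request("GET", "https://example.org")
+     response = client.send(request)
+     print(response.status_code)
+ ```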
+ + See also: [Request instances][0] + + [0]: /advanced/#request-instances + """ + if self._state == ClientState.CLOSED: + raise RuntimeError("Cannot send a request, as the client has been closed.") + + self._state = ClientState.OPENED + follow_redirects = ( + self.follow_redirects + if isinstance(follow_redirects, UseClientDefault) + else follow_redirects + ) + + auth = self._build_request_auth(request, auth) + + response = self._send_handling_auth( + request, + auth=auth, + follow_redirects=follow_redirects, + history=[], + ) + try: + if not stream: + response.read() + + return response + + except BaseException as exc: + response.close() + raise exc + + def _send_handling_auth( + self, + request: Request, + auth: Auth, + follow_redirects: bool, + history: typing.List[Response], + ) -> Response: + auth_flow = auth.sync_auth_flow(request) + try: + request = next(auth_flow) + + while True: + response = self._send_handling_redirects( + request, + follow_redirects=follow_redirects, + history=history, + ) + try: + try: + next_request = auth_flow.send(response) + except StopIteration: + return response + + response.history = list(history) + response.read() + request = next_request + history.append(response) + + except BaseException as exc: + response.close() + raise exc + finally: + auth_flow.close() + + def _send_handling_redirects( + self, + request: Request, + follow_redirects: bool, + history: typing.List[Response], + ) -> Response: + while True: + if len(history) > self.max_redirects: + raise TooManyRedirects( + "Exceeded maximum allowed redirects.", request=request + ) + + for hook in self._event_hooks["request"]: + hook(request) + + response = self._send_single_request(request) + try: + for hook in self._event_hooks["response"]: + hook(response) + response.history = list(history) + + if not response.has_redirect_location: + return response + + request = self._build_redirect_request(request, response) + history = history + [response] + + if follow_redirects: + response.read() + else: + response.next_request = request + return response + + except BaseException as exc: + response.close() + raise exc + + def _send_single_request(self, request: Request) -> Response: + """ + Sends a single request, without handling any redirections. + """ + transport = self._transport_for_url(request.url) + timer = Timer() + timer.sync_start() + + if not isinstance(request.stream, SyncByteStream): + raise RuntimeError( + "Attempted to send an async request with a sync Client instance." 
+ ) + + with request_context(request=request): + response = transport.handle_request(request) + + assert isinstance(response.stream, SyncByteStream) + + response.request = request + response.stream = BoundSyncStream( + response.stream, response=response, timer=timer + ) + self.cookies.extract_cookies(response) + response.default_encoding = self._default_encoding + + status = f"{response.status_code} {response.reason_phrase}" + response_line = f"{response.http_version} {status}" + logger.debug( + 'HTTP Request: %s %s "%s"', request.method, request.url, response_line + ) + + return response + + def get( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `GET` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "GET", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def options( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send an `OPTIONS` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def head( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `HEAD` request. + + **Parameters**: See `httpx.request`. 
+ """ + return self.request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def post( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `POST` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def put( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `PUT` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def patch( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `PATCH` request. + + **Parameters**: See `httpx.request`. 
+ """ + return self.request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def delete( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `DELETE` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def close(self) -> None: + """ + Close transport and proxies. + """ + if self._state != ClientState.CLOSED: + self._state = ClientState.CLOSED + + self._transport.close() + for transport in self._mounts.values(): + if transport is not None: + transport.close() + + def __enter__(self: T) -> T: + if self._state != ClientState.UNOPENED: + msg = { + ClientState.OPENED: "Cannot open a client instance more than once.", + ClientState.CLOSED: "Cannot reopen a client instance, once it has been closed.", + }[self._state] + raise RuntimeError(msg) + + self._state = ClientState.OPENED + + self._transport.__enter__() + for transport in self._mounts.values(): + if transport is not None: + transport.__enter__() + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + self._state = ClientState.CLOSED + + self._transport.__exit__(exc_type, exc_value, traceback) + for transport in self._mounts.values(): + if transport is not None: + transport.__exit__(exc_type, exc_value, traceback) + + +class AsyncClient(BaseClient): + """ + An asynchronous HTTP client, with connection pooling, HTTP/2, redirects, + cookie persistence, etc. + + Usage: + + ```python + >>> async with httpx.AsyncClient() as client: + >>> response = await client.get('https://example.org') + ``` + + **Parameters:** + + * **auth** - *(optional)* An authentication class to use when sending + requests. + * **params** - *(optional)* Query parameters to include in request URLs, as + a string, dictionary, or sequence of two-tuples. + * **headers** - *(optional)* Dictionary of HTTP headers to include when + sending requests. + * **cookies** - *(optional)* Dictionary of Cookie items to include when + sending requests. + * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to + verify the identity of requested hosts. Either `True` (default CA bundle), + a path to an SSL certificate file, or `False` (disable verification). + * **cert** - *(optional)* An SSL certificate used by the requested host + to authenticate the client. Either a path to an SSL certificate file, or + two-tuple of (certificate file, key file), or a three-tuple of (certificate + file, key file, password). + * **http2** - *(optional)* A boolean indicating if HTTP/2 support should be + enabled. Defaults to `False`. + * **proxies** - *(optional)* A dictionary mapping HTTP protocols to proxy + URLs. 
+ * **timeout** - *(optional)* The timeout configuration to use when sending + requests. + * **limits** - *(optional)* The limits configuration to use. + * **max_redirects** - *(optional)* The maximum number of redirect responses + that should be followed. + * **base_url** - *(optional)* A URL to use as the base when building + request URLs. + * **transport** - *(optional)* A transport class to use for sending requests + over the network. + * **app** - *(optional)* An ASGI application to send requests to, + rather than sending actual network requests. + * **trust_env** - *(optional)* Enables or disables usage of environment + variables for configuration. + * **default_encoding** - *(optional)* The default encoding to use for decoding + response text, if no charset information is included in a response Content-Type + header. Set to a callable for automatic character set detection. Default: "utf-8". + """ + + def __init__( + self, + *, + auth: typing.Optional[AuthTypes] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + http1: bool = True, + http2: bool = False, + proxies: typing.Optional[ProxiesTypes] = None, + mounts: typing.Optional[typing.Mapping[str, AsyncBaseTransport]] = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + limits: Limits = DEFAULT_LIMITS, + max_redirects: int = DEFAULT_MAX_REDIRECTS, + event_hooks: typing.Optional[ + typing.Mapping[str, typing.List[typing.Callable]] + ] = None, + base_url: URLTypes = "", + transport: typing.Optional[AsyncBaseTransport] = None, + app: typing.Optional[typing.Callable] = None, + trust_env: bool = True, + default_encoding: str = "utf-8", + ): + super().__init__( + auth=auth, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + follow_redirects=follow_redirects, + max_redirects=max_redirects, + event_hooks=event_hooks, + base_url=base_url, + trust_env=trust_env, + default_encoding=default_encoding, + ) + + if http2: + try: + import h2 # noqa + except ImportError: # pragma: nocover + raise ImportError( + "Using http2=True, but the 'h2' package is not installed. " + "Make sure to install httpx using `pip install httpx[http2]`." 
+ ) from None
+
+ allow_env_proxies = trust_env and app is None and transport is None
+ proxy_map = self._get_proxy_map(proxies, allow_env_proxies)
+
+ self._transport = self._init_transport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ transport=transport,
+ app=app,
+ trust_env=trust_env,
+ )
+
+ self._mounts: typing.Dict[URLPattern, typing.Optional[AsyncBaseTransport]] = {
+ URLPattern(key): None
+ if proxy is None
+ else self._init_proxy_transport(
+ proxy,
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+ for key, proxy in proxy_map.items()
+ }
+ if mounts is not None:
+ self._mounts.update(
+ {URLPattern(key): transport for key, transport in mounts.items()}
+ )
+ self._mounts = dict(sorted(self._mounts.items()))
+
+ def _init_transport(
+ self,
+ verify: VerifyTypes = True,
+ cert: typing.Optional[CertTypes] = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ transport: typing.Optional[AsyncBaseTransport] = None,
+ app: typing.Optional[typing.Callable] = None,
+ trust_env: bool = True,
+ ) -> AsyncBaseTransport:
+ if transport is not None:
+ return transport
+
+ if app is not None:
+ return ASGITransport(app=app)
+
+ return AsyncHTTPTransport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+
+ def _init_proxy_transport(
+ self,
+ proxy: Proxy,
+ verify: VerifyTypes = True,
+ cert: typing.Optional[CertTypes] = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ trust_env: bool = True,
+ ) -> AsyncBaseTransport:
+ return AsyncHTTPTransport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ proxy=proxy,
+ )
+
+ def _transport_for_url(self, url: URL) -> AsyncBaseTransport:
+ """
+ Returns the transport instance that should be used for a given URL.
+ This will either be the standard connection pool, or a proxy.
+ """
+ for pattern, transport in self._mounts.items():
+ if pattern.matches(url):
+ return self._transport if transport is None else transport
+
+ return self._transport
+
+ async def request(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: typing.Optional[RequestContent] = None,
+ data: typing.Optional[RequestData] = None,
+ files: typing.Optional[RequestFiles] = None,
+ json: typing.Optional[typing.Any] = None,
+ params: typing.Optional[QueryParamTypes] = None,
+ headers: typing.Optional[HeaderTypes] = None,
+ cookies: typing.Optional[CookieTypes] = None,
+ auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT,
+ follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ extensions: typing.Optional[dict] = None,
+ ) -> Response:
+ """
+ Build and send a request.
+
+ Equivalent to:
+
+ ```python
+ request = client.build_request(...)
+ response = await client.send(request, ...)
+ ```
+
+ See `AsyncClient.build_request()`, `AsyncClient.send()`
+ and [Merging of configuration][0] for how the various parameters
+ are merged with client-level configuration.
+ + [0]: /advanced/#merging-of-configuration + """ + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + return await self.send(request, auth=auth, follow_redirects=follow_redirects) + + @asynccontextmanager + async def stream( + self, + method: str, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> typing.AsyncIterator[Response]: + """ + Alternative to `httpx.request()` that streams the response body + instead of loading it into memory at once. + + **Parameters**: See `httpx.request`. + + See also: [Streaming Responses][0] + + [0]: /quickstart#streaming-responses + """ + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + response = await self.send( + request=request, + auth=auth, + follow_redirects=follow_redirects, + stream=True, + ) + try: + yield response + finally: + await response.aclose() + + async def send( + self, + request: Request, + *, + stream: bool = False, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + ) -> Response: + """ + Send a request. + + The request is sent as-is, unmodified. + + Typically you'll want to build one with `AsyncClient.build_request()` + so that any client-level configuration is merged into the request, + but passing an explicit `httpx.Request()` is supported as well. 
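+
+ A minimal sketch (the URL is illustrative):
+
+ ```python
+ import asyncio
+
+ import httpx
+
+ async def main() -> None:
+     async with httpx.AsyncClient() as client:
+         request = client.build_request("GET", "https://example.org")
+         response = await client.send(request)
+         print(response.status_code)
+
+ asyncio.run(main())
+ ```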
+
+ See also: [Request instances][0]
+
+ [0]: /advanced/#request-instances
+ """
+ if self._state == ClientState.CLOSED:
+ raise RuntimeError("Cannot send a request, as the client has been closed.")
+
+ self._state = ClientState.OPENED
+ follow_redirects = (
+ self.follow_redirects
+ if isinstance(follow_redirects, UseClientDefault)
+ else follow_redirects
+ )
+
+ auth = self._build_request_auth(request, auth)
+
+ response = await self._send_handling_auth(
+ request,
+ auth=auth,
+ follow_redirects=follow_redirects,
+ history=[],
+ )
+ try:
+ if not stream:
+ await response.aread()
+
+ return response
+
+ except BaseException as exc: # pragma: no cover
+ await response.aclose()
+ raise exc
+
+ async def _send_handling_auth(
+ self,
+ request: Request,
+ auth: Auth,
+ follow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ auth_flow = auth.async_auth_flow(request)
+ try:
+ request = await auth_flow.__anext__()
+
+ while True:
+ response = await self._send_handling_redirects(
+ request,
+ follow_redirects=follow_redirects,
+ history=history,
+ )
+ try:
+ try:
+ next_request = await auth_flow.asend(response)
+ except StopAsyncIteration:
+ return response
+
+ response.history = list(history)
+ await response.aread()
+ request = next_request
+ history.append(response)
+
+ except BaseException as exc:
+ await response.aclose()
+ raise exc
+ finally:
+ await auth_flow.aclose()
+
+ async def _send_handling_redirects(
+ self,
+ request: Request,
+ follow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ while True:
+ if len(history) > self.max_redirects:
+ raise TooManyRedirects(
+ "Exceeded maximum allowed redirects.", request=request
+ )
+
+ for hook in self._event_hooks["request"]:
+ await hook(request)
+
+ response = await self._send_single_request(request)
+ try:
+ for hook in self._event_hooks["response"]:
+ await hook(response)
+
+ response.history = list(history)
+
+ if not response.has_redirect_location:
+ return response
+
+ request = self._build_redirect_request(request, response)
+ history = history + [response]
+
+ if follow_redirects:
+ await response.aread()
+ else:
+ response.next_request = request
+ return response
+
+ except BaseException as exc:
+ await response.aclose()
+ raise exc
+
+ async def _send_single_request(self, request: Request) -> Response:
+ """
+ Sends a single request, without handling any redirections.
+ """
+ transport = self._transport_for_url(request.url)
+ timer = Timer()
+ await timer.async_start()
+
+ if not isinstance(request.stream, AsyncByteStream):
+ raise RuntimeError(
+ "Attempted to send a sync request with an AsyncClient instance."
+ ) + + with request_context(request=request): + response = await transport.handle_async_request(request) + + assert isinstance(response.stream, AsyncByteStream) + response.request = request + response.stream = BoundAsyncStream( + response.stream, response=response, timer=timer + ) + self.cookies.extract_cookies(response) + response.default_encoding = self._default_encoding + + status = f"{response.status_code} {response.reason_phrase}" + response_line = f"{response.http_version} {status}" + logger.debug( + 'HTTP Request: %s %s "%s"', request.method, request.url, response_line + ) + + return response + + async def get( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault, None] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `GET` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "GET", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def options( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send an `OPTIONS` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def head( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `HEAD` request. + + **Parameters**: See `httpx.request`. 
+ """ + return await self.request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def post( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `POST` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def put( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `PUT` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def patch( + self, + url: URLTypes, + *, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `PATCH` request. + + **Parameters**: See `httpx.request`. 
+ """ + return await self.request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def delete( + self, + url: URLTypes, + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT, + timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT, + extensions: typing.Optional[dict] = None, + ) -> Response: + """ + Send a `DELETE` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def aclose(self) -> None: + """ + Close transport and proxies. + """ + if self._state != ClientState.CLOSED: + self._state = ClientState.CLOSED + + await self._transport.aclose() + for proxy in self._mounts.values(): + if proxy is not None: + await proxy.aclose() + + async def __aenter__(self: U) -> U: + if self._state != ClientState.UNOPENED: + msg = { + ClientState.OPENED: "Cannot open a client instance more than once.", + ClientState.CLOSED: "Cannot reopen a client instance, once it has been closed.", + }[self._state] + raise RuntimeError(msg) + + self._state = ClientState.OPENED + + await self._transport.__aenter__() + for proxy in self._mounts.values(): + if proxy is not None: + await proxy.__aenter__() + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + self._state = ClientState.CLOSED + + await self._transport.__aexit__(exc_type, exc_value, traceback) + for proxy in self._mounts.values(): + if proxy is not None: + await proxy.__aexit__(exc_type, exc_value, traceback) diff --git a/myenv/lib/python3.9/site-packages/httpx/_compat.py b/myenv/lib/python3.9/site-packages/httpx/_compat.py new file mode 100644 index 0000000..6d96ad2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_compat.py @@ -0,0 +1,40 @@ +""" +The _compat module is used for code which requires branching between different +Python environments. It is excluded from the code coverage checks. +""" +import ssl +import sys + +# Brotli support is optional +# The C bindings in `brotli` are recommended for CPython. +# The CFFI bindings in `brotlicffi` are recommended for PyPy and everything else. +try: + import brotlicffi as brotli +except ImportError: # pragma: nocover + try: + import brotli + except ImportError: + brotli = None + +if sys.version_info >= (3, 10) or ( + sys.version_info >= (3, 7) and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0, 7) +): + + def set_minimum_tls_version_1_2(context: ssl.SSLContext) -> None: + # The OP_NO_SSL* and OP_NO_TLS* become deprecated in favor of + # 'SSLContext.minimum_version' from Python 3.7 onwards, however + # this attribute is not available unless the ssl module is compiled + # with OpenSSL 1.1.0g or newer. 
+ # https://docs.python.org/3.10/library/ssl.html#ssl.SSLContext.minimum_version + # https://docs.python.org/3.7/library/ssl.html#ssl.SSLContext.minimum_version + context.minimum_version = ssl.TLSVersion.TLSv1_2 + +else: + + def set_minimum_tls_version_1_2(context: ssl.SSLContext) -> None: + # If 'minimum_version' isn't available, we configure these options with + # the older deprecated variants. + context.options |= ssl.OP_NO_SSLv2 + context.options |= ssl.OP_NO_SSLv3 + context.options |= ssl.OP_NO_TLSv1 + context.options |= ssl.OP_NO_TLSv1_1 diff --git a/myenv/lib/python3.9/site-packages/httpx/_config.py b/myenv/lib/python3.9/site-packages/httpx/_config.py new file mode 100644 index 0000000..d164e4c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_config.py @@ -0,0 +1,363 @@ +import os +import ssl +import typing +from pathlib import Path + +import certifi + +from ._compat import set_minimum_tls_version_1_2 +from ._models import URL, Headers +from ._types import CertTypes, HeaderTypes, TimeoutTypes, URLTypes, VerifyTypes +from ._utils import get_ca_bundle_from_env, get_logger + +DEFAULT_CIPHERS = ":".join( + [ + "ECDHE+AESGCM", + "ECDHE+CHACHA20", + "DHE+AESGCM", + "DHE+CHACHA20", + "ECDH+AESGCM", + "DH+AESGCM", + "ECDH+AES", + "DH+AES", + "RSA+AESGCM", + "RSA+AES", + "!aNULL", + "!eNULL", + "!MD5", + "!DSS", + ] +) + + +logger = get_logger(__name__) + + +class UnsetType: + pass # pragma: nocover + + +UNSET = UnsetType() + + +def create_ssl_context( + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + trust_env: bool = True, + http2: bool = False, +) -> ssl.SSLContext: + return SSLConfig( + cert=cert, verify=verify, trust_env=trust_env, http2=http2 + ).ssl_context + + +class SSLConfig: + """ + SSL Configuration. + """ + + DEFAULT_CA_BUNDLE_PATH = Path(certifi.where()) + + def __init__( + self, + *, + cert: typing.Optional[CertTypes] = None, + verify: VerifyTypes = True, + trust_env: bool = True, + http2: bool = False, + ): + self.cert = cert + self.verify = verify + self.trust_env = trust_env + self.http2 = http2 + self.ssl_context = self.load_ssl_context() + + def load_ssl_context(self) -> ssl.SSLContext: + logger.trace( + f"load_ssl_context " + f"verify={self.verify!r} " + f"cert={self.cert!r} " + f"trust_env={self.trust_env!r} " + f"http2={self.http2!r}" + ) + + if self.verify: + return self.load_ssl_context_verify() + return self.load_ssl_context_no_verify() + + def load_ssl_context_no_verify(self) -> ssl.SSLContext: + """ + Return an SSL context for unverified connections. + """ + context = self._create_default_ssl_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + self._load_client_certs(context) + return context + + def load_ssl_context_verify(self) -> ssl.SSLContext: + """ + Return an SSL context for verified connections. + """ + if self.trust_env and self.verify is True: + ca_bundle = get_ca_bundle_from_env() + if ca_bundle is not None: + self.verify = ca_bundle + + if isinstance(self.verify, ssl.SSLContext): + # Allow passing in our own SSLContext object that's pre-configured. 
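+ # An illustrative sketch of that usage (not exercised here):
+ #
+ #     ctx = ssl.create_default_context()
+ #     client = httpx.Client(verify=ctx)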
+ context = self.verify + self._load_client_certs(context) + return context + elif isinstance(self.verify, bool): + ca_bundle_path = self.DEFAULT_CA_BUNDLE_PATH + elif Path(self.verify).exists(): + ca_bundle_path = Path(self.verify) + else: + raise IOError( + "Could not find a suitable TLS CA certificate bundle, " + "invalid path: {}".format(self.verify) + ) + + context = self._create_default_ssl_context() + context.verify_mode = ssl.CERT_REQUIRED + context.check_hostname = True + + # Signal to server support for PHA in TLS 1.3. Raises an + # AttributeError if only read-only access is implemented. + try: + context.post_handshake_auth = True # type: ignore + except AttributeError: # pragma: nocover + pass + + # Disable using 'commonName' for SSLContext.check_hostname + # when the 'subjectAltName' extension isn't available. + try: + context.hostname_checks_common_name = False # type: ignore + except AttributeError: # pragma: nocover + pass + + if ca_bundle_path.is_file(): + logger.trace(f"load_verify_locations cafile={ca_bundle_path!s}") + context.load_verify_locations(cafile=str(ca_bundle_path)) + elif ca_bundle_path.is_dir(): + logger.trace(f"load_verify_locations capath={ca_bundle_path!s}") + context.load_verify_locations(capath=str(ca_bundle_path)) + + self._load_client_certs(context) + + return context + + def _create_default_ssl_context(self) -> ssl.SSLContext: + """ + Creates the default SSLContext object that's used for both verified + and unverified connections. + """ + context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + set_minimum_tls_version_1_2(context) + context.options |= ssl.OP_NO_COMPRESSION + context.set_ciphers(DEFAULT_CIPHERS) + + if ssl.HAS_ALPN: + alpn_idents = ["http/1.1", "h2"] if self.http2 else ["http/1.1"] + context.set_alpn_protocols(alpn_idents) + + if hasattr(context, "keylog_filename"): # pragma: nocover (Available in 3.8+) + keylogfile = os.environ.get("SSLKEYLOGFILE") + if keylogfile and self.trust_env: + context.keylog_filename = keylogfile # type: ignore + + return context + + def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None: + """ + Loads client certificates into our SSLContext object + """ + if self.cert is not None: + if isinstance(self.cert, str): + ssl_context.load_cert_chain(certfile=self.cert) + elif isinstance(self.cert, tuple) and len(self.cert) == 2: + ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) + elif isinstance(self.cert, tuple) and len(self.cert) == 3: + ssl_context.load_cert_chain( + certfile=self.cert[0], + keyfile=self.cert[1], + password=self.cert[2], # type: ignore + ) + + +class Timeout: + """ + Timeout configuration. + + **Usage**: + + Timeout(None) # No timeouts. + Timeout(5.0) # 5s timeout on all operations. + Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts. + Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere. + Timeout(5.0, pool=None) # No timeout on acquiring connection from pool. + # 5s timeout elsewhere. + """ + + def __init__( + self, + timeout: typing.Union[TimeoutTypes, UnsetType] = UNSET, + *, + connect: typing.Union[None, float, UnsetType] = UNSET, + read: typing.Union[None, float, UnsetType] = UNSET, + write: typing.Union[None, float, UnsetType] = UNSET, + pool: typing.Union[None, float, UnsetType] = UNSET, + ): + if isinstance(timeout, Timeout): + # Passed as a single explicit Timeout. 
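+ # e.g. (illustrative) `Timeout(Timeout(5.0))` copies the
+ # connect/read/write/pool values from the given instance.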
+ assert connect is UNSET + assert read is UNSET + assert write is UNSET + assert pool is UNSET + self.connect = timeout.connect # type: typing.Optional[float] + self.read = timeout.read # type: typing.Optional[float] + self.write = timeout.write # type: typing.Optional[float] + self.pool = timeout.pool # type: typing.Optional[float] + elif isinstance(timeout, tuple): + # Passed as a tuple. + self.connect = timeout[0] + self.read = timeout[1] + self.write = None if len(timeout) < 3 else timeout[2] + self.pool = None if len(timeout) < 4 else timeout[3] + elif not ( + isinstance(connect, UnsetType) + or isinstance(read, UnsetType) + or isinstance(write, UnsetType) + or isinstance(pool, UnsetType) + ): + self.connect = connect + self.read = read + self.write = write + self.pool = pool + else: + if isinstance(timeout, UnsetType): + raise ValueError( + "httpx.Timeout must either include a default, or set all " + "four parameters explicitly." + ) + self.connect = timeout if isinstance(connect, UnsetType) else connect + self.read = timeout if isinstance(read, UnsetType) else read + self.write = timeout if isinstance(write, UnsetType) else write + self.pool = timeout if isinstance(pool, UnsetType) else pool + + def as_dict(self) -> typing.Dict[str, typing.Optional[float]]: + return { + "connect": self.connect, + "read": self.read, + "write": self.write, + "pool": self.pool, + } + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, self.__class__) + and self.connect == other.connect + and self.read == other.read + and self.write == other.write + and self.pool == other.pool + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + if len({self.connect, self.read, self.write, self.pool}) == 1: + return f"{class_name}(timeout={self.connect})" + return ( + f"{class_name}(connect={self.connect}, " + f"read={self.read}, write={self.write}, pool={self.pool})" + ) + + +class Limits: + """ + Configuration for limits to various client behaviors. + + **Parameters:** + + * **max_connections** - The maximum number of concurrent connections that may be + established. + * **max_keepalive_connections** - Allow the connection pool to maintain + keep-alive connections below this point. Should be less than or equal + to `max_connections`. + * **keepalive_expiry** - Time limit on idle keep-alive connections in seconds. 
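+
+ A usage sketch (the values are illustrative):
+
+ ```python
+ limits = httpx.Limits(max_connections=100, max_keepalive_connections=20)
+ client = httpx.Client(limits=limits)
+ ```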
+ """ + + def __init__( + self, + *, + max_connections: typing.Optional[int] = None, + max_keepalive_connections: typing.Optional[int] = None, + keepalive_expiry: typing.Optional[float] = 5.0, + ): + self.max_connections = max_connections + self.max_keepalive_connections = max_keepalive_connections + self.keepalive_expiry = keepalive_expiry + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, self.__class__) + and self.max_connections == other.max_connections + and self.max_keepalive_connections == other.max_keepalive_connections + and self.keepalive_expiry == other.keepalive_expiry + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + return ( + f"{class_name}(max_connections={self.max_connections}, " + f"max_keepalive_connections={self.max_keepalive_connections}, " + f"keepalive_expiry={self.keepalive_expiry})" + ) + + +class Proxy: + def __init__( + self, + url: URLTypes, + *, + auth: typing.Optional[typing.Tuple[str, str]] = None, + headers: typing.Optional[HeaderTypes] = None, + ): + url = URL(url) + headers = Headers(headers) + + if url.scheme not in ("http", "https", "socks5"): + raise ValueError(f"Unknown scheme for proxy URL {url!r}") + + if url.username or url.password: + # Remove any auth credentials from the URL. + auth = (url.username, url.password) + url = url.copy_with(username=None, password=None) + + self.url = url + self.auth = auth + self.headers = headers + + @property + def raw_auth(self) -> typing.Optional[typing.Tuple[bytes, bytes]]: + # The proxy authentication as raw bytes. + return ( + None + if self.auth is None + else (self.auth[0].encode("utf-8"), self.auth[1].encode("utf-8")) + ) + + def __repr__(self) -> str: + # The authentication is represented with the password component masked. + auth = (self.auth[0], "********") if self.auth else None + + # Build a nice concise representation. 
+ url_str = f"{str(self.url)!r}" + auth_str = f", auth={auth!r}" if auth else "" + headers_str = f", headers={dict(self.headers)!r}" if self.headers else "" + return f"Proxy({url_str}{auth_str}{headers_str})" + + +DEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0) +DEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20) +DEFAULT_MAX_REDIRECTS = 20 diff --git a/myenv/lib/python3.9/site-packages/httpx/_content.py b/myenv/lib/python3.9/site-packages/httpx/_content.py new file mode 100644 index 0000000..24a967d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_content.py @@ -0,0 +1,238 @@ +import inspect +import warnings +from json import dumps as json_dumps +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Dict, + Iterable, + Iterator, + Optional, + Tuple, + Union, +) +from urllib.parse import urlencode + +from ._exceptions import StreamClosed, StreamConsumed +from ._multipart import MultipartStream +from ._types import ( + AsyncByteStream, + RequestContent, + RequestData, + RequestFiles, + ResponseContent, + SyncByteStream, +) +from ._utils import peek_filelike_length, primitive_value_to_str + + +class ByteStream(AsyncByteStream, SyncByteStream): + def __init__(self, stream: bytes) -> None: + self._stream = stream + + def __iter__(self) -> Iterator[bytes]: + yield self._stream + + async def __aiter__(self) -> AsyncIterator[bytes]: + yield self._stream + + +class IteratorByteStream(SyncByteStream): + CHUNK_SIZE = 65_536 + + def __init__(self, stream: Iterable[bytes]): + self._stream = stream + self._is_stream_consumed = False + self._is_generator = inspect.isgenerator(stream) + + def __iter__(self) -> Iterator[bytes]: + if self._is_stream_consumed and self._is_generator: + raise StreamConsumed() + + self._is_stream_consumed = True + if hasattr(self._stream, "read") and not isinstance( + self._stream, SyncByteStream + ): + # File-like interfaces should use 'read' directly. + chunk = self._stream.read(self.CHUNK_SIZE) # type: ignore + while chunk: + yield chunk + chunk = self._stream.read(self.CHUNK_SIZE) # type: ignore + else: + # Otherwise iterate. + for part in self._stream: + yield part + + +class AsyncIteratorByteStream(AsyncByteStream): + CHUNK_SIZE = 65_536 + + def __init__(self, stream: AsyncIterable[bytes]): + self._stream = stream + self._is_stream_consumed = False + self._is_generator = inspect.isasyncgen(stream) + + async def __aiter__(self) -> AsyncIterator[bytes]: + if self._is_stream_consumed and self._is_generator: + raise StreamConsumed() + + self._is_stream_consumed = True + if hasattr(self._stream, "aread") and not isinstance( + self._stream, AsyncByteStream + ): + # File-like interfaces should use 'aread' directly. + chunk = await self._stream.aread(self.CHUNK_SIZE) # type: ignore + while chunk: + yield chunk + chunk = await self._stream.aread(self.CHUNK_SIZE) # type: ignore + else: + # Otherwise iterate. + async for part in self._stream: + yield part + + +class UnattachedStream(AsyncByteStream, SyncByteStream): + """ + If a request or response is serialized using pickle, then it is no longer + attached to a stream for I/O purposes. Any stream operations should result + in `httpx.StreamClosed`. 
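+
+ A sketch of the resulting behaviour (illustrative):
+
+ ```python
+ stream = UnattachedStream()
+ for chunk in stream:  # raises httpx.StreamClosed
+     ...
+ ```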
+ """ + + def __iter__(self) -> Iterator[bytes]: + raise StreamClosed() + + async def __aiter__(self) -> AsyncIterator[bytes]: + raise StreamClosed() + yield b"" # pragma: nocover + + +def encode_content( + content: Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] +) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: + + if isinstance(content, (bytes, str)): + body = content.encode("utf-8") if isinstance(content, str) else content + content_length = len(body) + headers = {"Content-Length": str(content_length)} if body else {} + return headers, ByteStream(body) + + elif isinstance(content, Iterable): + content_length_or_none = peek_filelike_length(content) + + if content_length_or_none is None: + headers = {"Transfer-Encoding": "chunked"} + else: + headers = {"Content-Length": str(content_length_or_none)} + return headers, IteratorByteStream(content) # type: ignore + + elif isinstance(content, AsyncIterable): + headers = {"Transfer-Encoding": "chunked"} + return headers, AsyncIteratorByteStream(content) + + raise TypeError(f"Unexpected type for 'content', {type(content)!r}") + + +def encode_urlencoded_data( + data: dict, +) -> Tuple[Dict[str, str], ByteStream]: + plain_data = [] + for key, value in data.items(): + if isinstance(value, (list, tuple)): + plain_data.extend([(key, primitive_value_to_str(item)) for item in value]) + else: + plain_data.append((key, primitive_value_to_str(value))) + body = urlencode(plain_data, doseq=True).encode("utf-8") + content_length = str(len(body)) + content_type = "application/x-www-form-urlencoded" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_multipart_data( + data: dict, files: RequestFiles, boundary: Optional[bytes] = None +) -> Tuple[Dict[str, str], MultipartStream]: + multipart = MultipartStream(data=data, files=files, boundary=boundary) + headers = multipart.get_headers() + return headers, multipart + + +def encode_text(text: str) -> Tuple[Dict[str, str], ByteStream]: + body = text.encode("utf-8") + content_length = str(len(body)) + content_type = "text/plain; charset=utf-8" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_html(html: str) -> Tuple[Dict[str, str], ByteStream]: + body = html.encode("utf-8") + content_length = str(len(body)) + content_type = "text/html; charset=utf-8" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_json(json: Any) -> Tuple[Dict[str, str], ByteStream]: + body = json_dumps(json).encode("utf-8") + content_length = str(len(body)) + content_type = "application/json" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_request( + content: Optional[RequestContent] = None, + data: Optional[RequestData] = None, + files: Optional[RequestFiles] = None, + json: Optional[Any] = None, + boundary: Optional[bytes] = None, +) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: + """ + Handles encoding the given `content`, `data`, `files`, and `json`, + returning a two-tuple of (, ). + """ + if data is not None and not isinstance(data, dict): + # We prefer to separate `content=` + # for raw request content, and `data=
` for url encoded or + # multipart form content. + # + # However for compat with requests, we *do* still support + # `data=` usages. We deal with that case here, treating it + # as if `content=<...>` had been supplied instead. + message = "Use 'content=<...>' to upload raw bytes/text content." + warnings.warn(message, DeprecationWarning) + return encode_content(data) + + if content is not None: + return encode_content(content) + elif files: + return encode_multipart_data(data or {}, files, boundary) + elif data: + return encode_urlencoded_data(data) + elif json is not None: + return encode_json(json) + + return {}, ByteStream(b"") + + +def encode_response( + content: Optional[ResponseContent] = None, + text: Optional[str] = None, + html: Optional[str] = None, + json: Optional[Any] = None, +) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: + """ + Handles encoding the given `content`, returning a two-tuple of + (, ). + """ + if content is not None: + return encode_content(content) + elif text is not None: + return encode_text(text) + elif html is not None: + return encode_html(html) + elif json is not None: + return encode_json(json) + + return {}, ByteStream(b"") diff --git a/myenv/lib/python3.9/site-packages/httpx/_decoders.py b/myenv/lib/python3.9/site-packages/httpx/_decoders.py new file mode 100644 index 0000000..69c0369 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_decoders.py @@ -0,0 +1,333 @@ +""" +Handlers for Content-Encoding. + +See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding +""" +import codecs +import io +import typing +import zlib + +from ._compat import brotli +from ._exceptions import DecodingError + + +class ContentDecoder: + def decode(self, data: bytes) -> bytes: + raise NotImplementedError() # pragma: nocover + + def flush(self) -> bytes: + raise NotImplementedError() # pragma: nocover + + +class IdentityDecoder(ContentDecoder): + """ + Handle unencoded data. + """ + + def decode(self, data: bytes) -> bytes: + return data + + def flush(self) -> bytes: + return b"" + + +class DeflateDecoder(ContentDecoder): + """ + Handle 'deflate' decoding. + + See: https://stackoverflow.com/questions/1838699 + """ + + def __init__(self) -> None: + self.first_attempt = True + self.decompressor = zlib.decompressobj() + + def decode(self, data: bytes) -> bytes: + was_first_attempt = self.first_attempt + self.first_attempt = False + try: + return self.decompressor.decompress(data) + except zlib.error as exc: + if was_first_attempt: + self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS) + return self.decode(data) + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + try: + return self.decompressor.flush() + except zlib.error as exc: # pragma: nocover + raise DecodingError(str(exc)) from exc + + +class GZipDecoder(ContentDecoder): + """ + Handle 'gzip' decoding. + + See: https://stackoverflow.com/questions/1838699 + """ + + def __init__(self) -> None: + self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16) + + def decode(self, data: bytes) -> bytes: + try: + return self.decompressor.decompress(data) + except zlib.error as exc: + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + try: + return self.decompressor.flush() + except zlib.error as exc: # pragma: nocover + raise DecodingError(str(exc)) from exc + + +class BrotliDecoder(ContentDecoder): + """ + Handle 'brotli' decoding. + + Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/ + or `pip install brotli`. 
See https://github.com/google/brotli + Supports both 'brotlipy' and 'Brotli' packages since they share an import + name. The top branches are for 'brotlipy' and bottom branches for 'Brotli' + """ + + def __init__(self) -> None: + if brotli is None: # pragma: nocover + raise ImportError( + "Using 'BrotliDecoder', but neither of the 'brotlicffi' or 'brotli' " + "packages have been installed. " + "Make sure to install httpx using `pip install httpx[brotli]`." + ) from None + + self.decompressor = brotli.Decompressor() + self.seen_data = False + if hasattr(self.decompressor, "decompress"): + # The 'brotlicffi' package. + self._decompress = self.decompressor.decompress # pragma: nocover + else: + # The 'brotli' package. + self._decompress = self.decompressor.process # pragma: nocover + + def decode(self, data: bytes) -> bytes: + if not data: + return b"" + self.seen_data = True + try: + return self._decompress(data) + except brotli.error as exc: + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + if not self.seen_data: + return b"" + try: + if hasattr(self.decompressor, "finish"): + # Only available in the 'brotlicffi' package. + + # As the decompressor decompresses eagerly, this + # will never actually emit any data. However, it will potentially throw + # errors if a truncated or damaged data stream has been used. + self.decompressor.finish() # pragma: nocover + return b"" + except brotli.error as exc: # pragma: nocover + raise DecodingError(str(exc)) from exc + + +class MultiDecoder(ContentDecoder): + """ + Handle the case where multiple encodings have been applied. + """ + + def __init__(self, children: typing.Sequence[ContentDecoder]) -> None: + """ + 'children' should be a sequence of decoders in the order in which + each was applied. + """ + # Note that we reverse the order for decoding. + self.children = list(reversed(children)) + + def decode(self, data: bytes) -> bytes: + for child in self.children: + data = child.decode(data) + return data + + def flush(self) -> bytes: + data = b"" + for child in self.children: + data = child.decode(data) + child.flush() + return data + + +class ByteChunker: + """ + Handles returning byte content in fixed-size chunks. + """ + + def __init__(self, chunk_size: typing.Optional[int] = None) -> None: + self._buffer = io.BytesIO() + self._chunk_size = chunk_size + + def decode(self, content: bytes) -> typing.List[bytes]: + if self._chunk_size is None: + return [content] if content else [] + + self._buffer.write(content) + if self._buffer.tell() >= self._chunk_size: + value = self._buffer.getvalue() + chunks = [ + value[i : i + self._chunk_size] + for i in range(0, len(value), self._chunk_size) + ] + if len(chunks[-1]) == self._chunk_size: + self._buffer.seek(0) + self._buffer.truncate() + return chunks + else: + self._buffer.seek(0) + self._buffer.write(chunks[-1]) + self._buffer.truncate() + return chunks[:-1] + else: + return [] + + def flush(self) -> typing.List[bytes]: + value = self._buffer.getvalue() + self._buffer.seek(0) + self._buffer.truncate() + return [value] if value else [] + + +class TextChunker: + """ + Handles returning text content in fixed-size chunks. 
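+
+    For example (an illustrative sketch):
+
+    ```
+    chunker = TextChunker(chunk_size=4)
+    chunker.decode("hello world")  # -> ["hell", "o wo"]
+    chunker.flush()                # -> ["rld"]
+    ```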
+ """ + + def __init__(self, chunk_size: typing.Optional[int] = None) -> None: + self._buffer = io.StringIO() + self._chunk_size = chunk_size + + def decode(self, content: str) -> typing.List[str]: + if self._chunk_size is None: + return [content] + + self._buffer.write(content) + if self._buffer.tell() >= self._chunk_size: + value = self._buffer.getvalue() + chunks = [ + value[i : i + self._chunk_size] + for i in range(0, len(value), self._chunk_size) + ] + if len(chunks[-1]) == self._chunk_size: + self._buffer.seek(0) + self._buffer.truncate() + return chunks + else: + self._buffer.seek(0) + self._buffer.write(chunks[-1]) + self._buffer.truncate() + return chunks[:-1] + else: + return [] + + def flush(self) -> typing.List[str]: + value = self._buffer.getvalue() + self._buffer.seek(0) + self._buffer.truncate() + return [value] if value else [] + + +class TextDecoder: + """ + Handles incrementally decoding bytes into text + """ + + def __init__(self, encoding: str = "utf-8"): + self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace") + + def decode(self, data: bytes) -> str: + return self.decoder.decode(data) + + def flush(self) -> str: + return self.decoder.decode(b"", True) + + +class LineDecoder: + """ + Handles incrementally reading lines from text. + + Uses universal line decoding, supporting any of `\n`, `\r`, or `\r\n` + as line endings, normalizing to `\n`. + """ + + def __init__(self) -> None: + self.buffer = "" + + def decode(self, text: str) -> typing.List[str]: + lines = [] + + if text and self.buffer and self.buffer[-1] == "\r": + if text.startswith("\n"): + # Handle the case where we have an "\r\n" split across + # our previous input, and our new chunk. + lines.append(self.buffer[:-1] + "\n") + self.buffer = "" + text = text[1:] + else: + # Handle the case where we have "\r" at the end of our + # previous input. + lines.append(self.buffer[:-1] + "\n") + self.buffer = "" + + while text: + num_chars = len(text) + for idx in range(num_chars): + char = text[idx] + next_char = None if idx + 1 == num_chars else text[idx + 1] + if char == "\n": + lines.append(self.buffer + text[: idx + 1]) + self.buffer = "" + text = text[idx + 1 :] + break + elif char == "\r" and next_char == "\n": + lines.append(self.buffer + text[:idx] + "\n") + self.buffer = "" + text = text[idx + 2 :] + break + elif char == "\r" and next_char is not None: + lines.append(self.buffer + text[:idx] + "\n") + self.buffer = "" + text = text[idx + 1 :] + break + elif next_char is None: + self.buffer += text + text = "" + break + + return lines + + def flush(self) -> typing.List[str]: + if self.buffer.endswith("\r"): + # Handle the case where we had a trailing '\r', which could have + # been a '\r\n' pair. 
+ lines = [self.buffer[:-1] + "\n"] + elif self.buffer: + lines = [self.buffer] + else: + lines = [] + self.buffer = "" + return lines + + +SUPPORTED_DECODERS = { + "identity": IdentityDecoder, + "gzip": GZipDecoder, + "deflate": DeflateDecoder, + "br": BrotliDecoder, +} + + +if brotli is None: + SUPPORTED_DECODERS.pop("br") # pragma: nocover diff --git a/myenv/lib/python3.9/site-packages/httpx/_exceptions.py b/myenv/lib/python3.9/site-packages/httpx/_exceptions.py new file mode 100644 index 0000000..1941250 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_exceptions.py @@ -0,0 +1,343 @@ +""" +Our exception hierarchy: + +* HTTPError + x RequestError + + TransportError + - TimeoutException + · ConnectTimeout + · ReadTimeout + · WriteTimeout + · PoolTimeout + - NetworkError + · ConnectError + · ReadError + · WriteError + · CloseError + - ProtocolError + · LocalProtocolError + · RemoteProtocolError + - ProxyError + - UnsupportedProtocol + + DecodingError + + TooManyRedirects + x HTTPStatusError +* InvalidURL +* CookieConflict +* StreamError + x StreamConsumed + x StreamClosed + x ResponseNotRead + x RequestNotRead +""" +import contextlib +import typing + +if typing.TYPE_CHECKING: + from ._models import Request, Response # pragma: nocover + + +class HTTPError(Exception): + """ + Base class for `RequestError` and `HTTPStatusError`. + + Useful for `try...except` blocks when issuing a request, + and then calling `.raise_for_status()`. + + For example: + + ``` + try: + response = httpx.get("https://www.example.com") + response.raise_for_status() + except httpx.HTTPError as exc: + print(f"HTTP Exception for {exc.request.url} - {exc}") + ``` + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + self._request: typing.Optional["Request"] = None + + @property + def request(self) -> "Request": + if self._request is None: + raise RuntimeError("The .request property has not been set.") + return self._request + + @request.setter + def request(self, request: "Request") -> None: + self._request = request + + +class RequestError(HTTPError): + """ + Base class for all exceptions that may occur when issuing a `.request()`. + """ + + def __init__( + self, message: str, *, request: typing.Optional["Request"] = None + ) -> None: + super().__init__(message) + # At the point an exception is raised we won't typically have a request + # instance to associate it with. + # + # The 'request_context' context manager is used within the Client and + # Response methods in order to ensure that any raised exceptions + # have a `.request` property set on them. + self._request = request + + +class TransportError(RequestError): + """ + Base class for all exceptions that occur at the level of the Transport API. + """ + + +# Timeout exceptions... + + +class TimeoutException(TransportError): + """ + The base class for timeout errors. + + An operation has timed out. + """ + + +class ConnectTimeout(TimeoutException): + """ + Timed out while connecting to the host. + """ + + +class ReadTimeout(TimeoutException): + """ + Timed out while receiving data from the host. + """ + + +class WriteTimeout(TimeoutException): + """ + Timed out while sending data to the host. + """ + + +class PoolTimeout(TimeoutException): + """ + Timed out waiting to acquire a connection from the pool. + """ + + +# Core networking exceptions... + + +class NetworkError(TransportError): + """ + The base class for network-related errors. + + An error occurred while interacting with the network. 
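+
+    For example (an illustrative sketch; the URL is a placeholder):
+
+    ```
+    try:
+        response = httpx.get("https://www.example.com")
+    except httpx.NetworkError as exc:
+        print(f"Network error while requesting {exc.request.url}")
+    ```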
+ """ + + +class ReadError(NetworkError): + """ + Failed to receive data from the network. + """ + + +class WriteError(NetworkError): + """ + Failed to send data through the network. + """ + + +class ConnectError(NetworkError): + """ + Failed to establish a connection. + """ + + +class CloseError(NetworkError): + """ + Failed to close a connection. + """ + + +# Other transport exceptions... + + +class ProxyError(TransportError): + """ + An error occurred while establishing a proxy connection. + """ + + +class UnsupportedProtocol(TransportError): + """ + Attempted to make a request to an unsupported protocol. + + For example issuing a request to `ftp://www.example.com`. + """ + + +class ProtocolError(TransportError): + """ + The protocol was violated. + """ + + +class LocalProtocolError(ProtocolError): + """ + A protocol was violated by the client. + + For example if the user instantiated a `Request` instance explicitly, + failed to include the mandatory `Host:` header, and then issued it directly + using `client.send()`. + """ + + +class RemoteProtocolError(ProtocolError): + """ + The protocol was violated by the server. + + For example, returning malformed HTTP. + """ + + +# Other request exceptions... + + +class DecodingError(RequestError): + """ + Decoding of the response failed, due to a malformed encoding. + """ + + +class TooManyRedirects(RequestError): + """ + Too many redirects. + """ + + +# Client errors + + +class HTTPStatusError(HTTPError): + """ + The response had an error HTTP status of 4xx or 5xx. + + May be raised when calling `response.raise_for_status()` + """ + + def __init__( + self, message: str, *, request: "Request", response: "Response" + ) -> None: + super().__init__(message) + self.request = request + self.response = response + + +class InvalidURL(Exception): + """ + URL is improperly formed or cannot be parsed. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +class CookieConflict(Exception): + """ + Attempted to lookup a cookie by name, but multiple cookies existed. + + Can occur when calling `response.cookies.get(...)`. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +# Stream exceptions... + +# These may occur as the result of a programming error, by accessing +# the request/response stream in an invalid manner. + + +class StreamError(RuntimeError): + """ + The base class for stream exceptions. + + The developer made an error in accessing the request stream in + an invalid way. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +class StreamConsumed(StreamError): + """ + Attempted to read or stream content, but the content has already + been streamed. + """ + + def __init__(self) -> None: + message = ( + "Attempted to read or stream some content, but the content has " + "already been streamed. For requests, this could be due to passing " + "a generator as request content, and then receiving a redirect " + "response or a secondary request as part of an authentication flow." + "For responses, this could be due to attempting to stream the response " + "content more than once." + ) + super().__init__(message) + + +class StreamClosed(StreamError): + """ + Attempted to read or stream response content, but the request has been + closed. + """ + + def __init__(self) -> None: + message = ( + "Attempted to read or stream content, but the stream has " "been closed." 
+ ) + super().__init__(message) + + +class ResponseNotRead(StreamError): + """ + Attempted to access streaming response content, without having called `read()`. + """ + + def __init__(self) -> None: + message = "Attempted to access streaming response content, without having called `read()`." + super().__init__(message) + + +class RequestNotRead(StreamError): + """ + Attempted to access streaming request content, without having called `read()`. + """ + + def __init__(self) -> None: + message = "Attempted to access streaming request content, without having called `read()`." + super().__init__(message) + + +@contextlib.contextmanager +def request_context( + request: typing.Optional["Request"] = None, +) -> typing.Iterator[None]: + """ + A context manager that can be used to attach the given request context + to any `RequestError` exceptions that are raised within the block. + """ + try: + yield + except RequestError as exc: + if request is not None: + exc.request = request + raise exc diff --git a/myenv/lib/python3.9/site-packages/httpx/_main.py b/myenv/lib/python3.9/site-packages/httpx/_main.py new file mode 100644 index 0000000..ebcb652 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_main.py @@ -0,0 +1,497 @@ +import functools +import json +import sys +import typing + +import click +import httpcore +import pygments.lexers +import pygments.util +import rich.console +import rich.markup +import rich.progress +import rich.syntax +import rich.table + +from ._client import Client +from ._exceptions import RequestError +from ._models import Response +from ._status_codes import codes + + +def print_help() -> None: + console = rich.console.Console() + + console.print("[bold]HTTPX :butterfly:", justify="center") + console.print() + console.print("A next generation HTTP client.", justify="center") + console.print() + console.print( + "Usage: [bold]httpx[/bold] [cyan] [OPTIONS][/cyan] ", justify="left" + ) + console.print() + + table = rich.table.Table.grid(padding=1, pad_edge=True) + table.add_column("Parameter", no_wrap=True, justify="left", style="bold") + table.add_column("Description") + table.add_row( + "-m, --method [cyan]METHOD", + "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n" + "[Default: GET, or POST if a request body is included]", + ) + table.add_row( + "-p, --params [cyan] ...", + "Query parameters to include in the request URL.", + ) + table.add_row( + "-c, --content [cyan]TEXT", "Byte content to include in the request body." + ) + table.add_row( + "-d, --data [cyan] ...", "Form data to include in the request body." + ) + table.add_row( + "-f, --files [cyan] ...", + "Form files to include in the request body.", + ) + table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.") + table.add_row( + "-h, --headers [cyan] ...", + "Include additional HTTP headers in the request.", + ) + table.add_row( + "--cookies [cyan] ...", "Cookies to include in the request." + ) + table.add_row( + "--auth [cyan]", + "Username and password to include in the request. Specify '-' for the password to use " + "a password prompt. Note that using --verbose/-v will expose the Authorization " + "header, including the password encoding in a trivially reversible format.", + ) + + table.add_row( + "--proxies [cyan]URL", + "Send the request via a proxy. 
Should be the URL giving the proxy address.", + ) + + table.add_row( + "--timeout [cyan]FLOAT", + "Timeout value to use for network operations, such as establishing the connection, " + "reading some data, etc... [Default: 5.0]", + ) + + table.add_row("--follow-redirects", "Automatically follow redirects.") + table.add_row("--no-verify", "Disable SSL verification.") + table.add_row( + "--http2", "Send the request using HTTP/2, if the remote server supports it." + ) + + table.add_row( + "--download [cyan]FILE", + "Save the response content as a file, rather than displaying it.", + ) + + table.add_row("-v, --verbose", "Verbose output. Show request as well as response.") + table.add_row("--help", "Show this message and exit.") + console.print(table) + + +def get_lexer_for_response(response: Response) -> str: + content_type = response.headers.get("Content-Type") + if content_type is not None: + mime_type, _, _ = content_type.partition(";") + try: + return pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name + except pygments.util.ClassNotFound: # pragma: nocover + pass + return "" # pragma: nocover + + +def format_request_headers(request: httpcore.Request, http2: bool = False) -> str: + version = "HTTP/2" if http2 else "HTTP/1.1" + headers = [ + (name.lower() if http2 else name, value) for name, value in request.headers + ] + method = request.method.decode("ascii") + target = request.url.target.decode("ascii") + lines = [f"{method} {target} {version}"] + [ + f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers + ] + return "\n".join(lines) + + +def format_response_headers( + http_version: bytes, + status: int, + reason_phrase: typing.Optional[bytes], + headers: typing.List[typing.Tuple[bytes, bytes]], +) -> str: + version = http_version.decode("ascii") + reason = ( + codes.get_reason_phrase(status) + if reason_phrase is None + else reason_phrase.decode("ascii") + ) + lines = [f"{version} {status} {reason}"] + [ + f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers + ] + return "\n".join(lines) + + +def print_request_headers(request: httpcore.Request, http2: bool = False) -> None: + console = rich.console.Console() + http_text = format_request_headers(request, http2=http2) + syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True) + console.print(syntax) + syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True) + console.print(syntax) + + +def print_response_headers( + http_version: bytes, + status: int, + reason_phrase: typing.Optional[bytes], + headers: typing.List[typing.Tuple[bytes, bytes]], +) -> None: + console = rich.console.Console() + http_text = format_response_headers(http_version, status, reason_phrase, headers) + syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True) + console.print(syntax) + syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True) + console.print(syntax) + + +def print_response(response: Response) -> None: + console = rich.console.Console() + lexer_name = get_lexer_for_response(response) + if lexer_name: + if lexer_name.lower() == "json": + try: + data = response.json() + text = json.dumps(data, indent=4) + except ValueError: # pragma: nocover + text = response.text + else: + text = response.text + + syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True) + console.print(syntax) + else: + console.print(f"<{len(response.content)} bytes of binary data>") + + +def format_certificate(cert: dict) 
-> str: # pragma: nocover + lines = [] + for key, value in cert.items(): + if isinstance(value, (list, tuple)): + lines.append(f"* {key}:") + for item in value: + if key in ("subject", "issuer"): + for sub_item in item: + lines.append(f"* {sub_item[0]}: {sub_item[1]!r}") + elif isinstance(item, tuple) and len(item) == 2: + lines.append(f"* {item[0]}: {item[1]!r}") + else: + lines.append(f"* {item!r}") + else: + lines.append(f"* {key}: {value!r}") + return "\n".join(lines) + + +def trace(name: str, info: dict, verbose: bool = False) -> None: + console = rich.console.Console() + if name == "connection.connect_tcp.started" and verbose: + host = info["host"] + console.print(f"* Connecting to {host!r}") + elif name == "connection.connect_tcp.complete" and verbose: + stream = info["return_value"] + server_addr = stream.get_extra_info("server_addr") + console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}") + elif name == "connection.start_tls.complete" and verbose: # pragma: nocover + stream = info["return_value"] + ssl_object = stream.get_extra_info("ssl_object") + version = ssl_object.version() + cipher = ssl_object.cipher() + server_cert = ssl_object.getpeercert() + alpn = ssl_object.selected_alpn_protocol() + console.print(f"* SSL established using {version!r} / {cipher[0]!r}") + console.print(f"* Selected ALPN protocol: {alpn!r}") + if server_cert: + console.print("* Server certificate:") + console.print(format_certificate(server_cert)) + elif name == "http11.send_request_headers.started" and verbose: + request = info["request"] + print_request_headers(request, http2=False) + elif name == "http2.send_request_headers.started" and verbose: # pragma: nocover + request = info["request"] + print_request_headers(request, http2=True) + elif name == "http11.receive_response_headers.complete": + http_version, status, reason_phrase, headers = info["return_value"] + print_response_headers(http_version, status, reason_phrase, headers) + elif name == "http2.receive_response_headers.complete": # pragma: nocover + status, headers = info["return_value"] + http_version = b"HTTP/2" + reason_phrase = None + print_response_headers(http_version, status, reason_phrase, headers) + + +def download_response(response: Response, download: typing.BinaryIO) -> None: + console = rich.console.Console() + console.print() + content_length = response.headers.get("Content-Length") + with rich.progress.Progress( + "[progress.description]{task.description}", + "[progress.percentage]{task.percentage:>3.0f}%", + rich.progress.BarColumn(bar_width=None), + rich.progress.DownloadColumn(), + rich.progress.TransferSpeedColumn(), + ) as progress: + description = f"Downloading [bold]{rich.markup.escape(download.name)}" + download_task = progress.add_task( + description, + total=int(content_length or 0), + start=content_length is not None, + ) + for chunk in response.iter_bytes(): + download.write(chunk) + progress.update(download_task, completed=response.num_bytes_downloaded) + + +def validate_json( + ctx: click.Context, + param: typing.Union[click.Option, click.Parameter], + value: typing.Any, +) -> typing.Any: + if value is None: + return None + + try: + return json.loads(value) + except json.JSONDecodeError: # pragma: nocover + raise click.BadParameter("Not valid JSON") + + +def validate_auth( + ctx: click.Context, + param: typing.Union[click.Option, click.Parameter], + value: typing.Any, +) -> typing.Any: + if value == (None, None): + return None + + username, password = value + if password == "-": # pragma: 
nocover + password = click.prompt("Password", hide_input=True) + return (username, password) + + +def handle_help( + ctx: click.Context, + param: typing.Union[click.Option, click.Parameter], + value: typing.Any, +) -> None: + if not value or ctx.resilient_parsing: + return + + print_help() + ctx.exit() + + +@click.command(add_help_option=False) +@click.argument("url", type=str) +@click.option( + "--method", + "-m", + "method", + type=str, + help=( + "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. " + "[Default: GET, or POST if a request body is included]" + ), +) +@click.option( + "--params", + "-p", + "params", + type=(str, str), + multiple=True, + help="Query parameters to include in the request URL.", +) +@click.option( + "--content", + "-c", + "content", + type=str, + help="Byte content to include in the request body.", +) +@click.option( + "--data", + "-d", + "data", + type=(str, str), + multiple=True, + help="Form data to include in the request body.", +) +@click.option( + "--files", + "-f", + "files", + type=(str, click.File(mode="rb")), + multiple=True, + help="Form files to include in the request body.", +) +@click.option( + "--json", + "-j", + "json", + type=str, + callback=validate_json, + help="JSON data to include in the request body.", +) +@click.option( + "--headers", + "-h", + "headers", + type=(str, str), + multiple=True, + help="Include additional HTTP headers in the request.", +) +@click.option( + "--cookies", + "cookies", + type=(str, str), + multiple=True, + help="Cookies to include in the request.", +) +@click.option( + "--auth", + "auth", + type=(str, str), + default=(None, None), + callback=validate_auth, + help=( + "Username and password to include in the request. " + "Specify '-' for the password to use a password prompt. " + "Note that using --verbose/-v will expose the Authorization header, " + "including the password encoding in a trivially reversible format." + ), +) +@click.option( + "--proxies", + "proxies", + type=str, + default=None, + help="Send the request via a proxy. Should be the URL giving the proxy address.", +) +@click.option( + "--timeout", + "timeout", + type=float, + default=5.0, + help=( + "Timeout value to use for network operations, such as establishing the " + "connection, reading some data, etc... [Default: 5.0]" + ), +) +@click.option( + "--follow-redirects", + "follow_redirects", + is_flag=True, + default=False, + help="Automatically follow redirects.", +) +@click.option( + "--no-verify", + "verify", + is_flag=True, + default=True, + help="Disable SSL verification.", +) +@click.option( + "--http2", + "http2", + type=bool, + is_flag=True, + default=False, + help="Send the request using HTTP/2, if the remote server supports it.", +) +@click.option( + "--download", + type=click.File("wb"), + help="Save the response content as a file, rather than displaying it.", +) +@click.option( + "--verbose", + "-v", + type=bool, + is_flag=True, + default=False, + help="Verbose. 
Show request as well as response.", +) +@click.option( + "--help", + is_flag=True, + is_eager=True, + expose_value=False, + callback=handle_help, + help="Show this message and exit.", +) +def main( + url: str, + method: str, + params: typing.List[typing.Tuple[str, str]], + content: str, + data: typing.List[typing.Tuple[str, str]], + files: typing.List[typing.Tuple[str, click.File]], + json: str, + headers: typing.List[typing.Tuple[str, str]], + cookies: typing.List[typing.Tuple[str, str]], + auth: typing.Optional[typing.Tuple[str, str]], + proxies: str, + timeout: float, + follow_redirects: bool, + verify: bool, + http2: bool, + download: typing.Optional[typing.BinaryIO], + verbose: bool, +) -> None: + """ + An HTTP command line client. + Sends a request and displays the response. + """ + if not method: + method = "POST" if content or data or files or json else "GET" + + try: + with Client( + proxies=proxies, + timeout=timeout, + verify=verify, + http2=http2, + ) as client: + with client.stream( + method, + url, + params=list(params), + content=content, + data=dict(data), + files=files, # type: ignore + json=json, + headers=headers, + cookies=dict(cookies), + auth=auth, + follow_redirects=follow_redirects, + extensions={"trace": functools.partial(trace, verbose=verbose)}, + ) as response: + if download is not None: + download_response(response, download) + else: + response.read() + if response.content: + print_response(response) + + except RequestError as exc: + console = rich.console.Console() + console.print(f"[red]{type(exc).__name__}[/red]: {exc}") + sys.exit(1) + + sys.exit(0 if response.is_success else 1) diff --git a/myenv/lib/python3.9/site-packages/httpx/_models.py b/myenv/lib/python3.9/site-packages/httpx/_models.py new file mode 100644 index 0000000..cff6929 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_models.py @@ -0,0 +1,1196 @@ +import cgi +import datetime +import email.message +import json as jsonlib +import typing +import urllib.request +from collections.abc import MutableMapping +from http.cookiejar import Cookie, CookieJar + +from ._content import ByteStream, UnattachedStream, encode_request, encode_response +from ._decoders import ( + SUPPORTED_DECODERS, + ByteChunker, + ContentDecoder, + IdentityDecoder, + LineDecoder, + MultiDecoder, + TextChunker, + TextDecoder, +) +from ._exceptions import ( + CookieConflict, + HTTPStatusError, + RequestNotRead, + ResponseNotRead, + StreamClosed, + StreamConsumed, + request_context, +) +from ._status_codes import codes +from ._types import ( + AsyncByteStream, + CookieTypes, + HeaderTypes, + QueryParamTypes, + RawURL, + RequestContent, + RequestData, + RequestFiles, + ResponseContent, + SyncByteStream, +) +from ._urls import URL +from ._utils import ( + guess_json_utf, + is_known_encoding, + normalize_header_key, + normalize_header_value, + obfuscate_sensitive_headers, + parse_header_links, +) + + +class Headers(typing.MutableMapping[str, str]): + """ + HTTP headers, as a case-insensitive multi-dict. 
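+
+    For example (an illustrative sketch):
+
+    ```
+    headers = httpx.Headers({"Content-Type": "application/json"})
+    headers["content-type"]  # case-insensitive lookup -> "application/json"
+    ```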
+ """ + + def __init__( + self, + headers: typing.Optional[HeaderTypes] = None, + encoding: typing.Optional[str] = None, + ) -> None: + if headers is None: + self._list = [] # type: typing.List[typing.Tuple[bytes, bytes, bytes]] + elif isinstance(headers, Headers): + self._list = list(headers._list) + elif isinstance(headers, dict): + self._list = [ + ( + normalize_header_key(k, lower=False, encoding=encoding), + normalize_header_key(k, lower=True, encoding=encoding), + normalize_header_value(v, encoding), + ) + for k, v in headers.items() + ] + else: + self._list = [ + ( + normalize_header_key(k, lower=False, encoding=encoding), + normalize_header_key(k, lower=True, encoding=encoding), + normalize_header_value(v, encoding), + ) + for k, v in headers + ] + + self._encoding = encoding + + @property + def encoding(self) -> str: + """ + Header encoding is mandated as ascii, but we allow fallbacks to utf-8 + or iso-8859-1. + """ + if self._encoding is None: + for encoding in ["ascii", "utf-8"]: + for key, value in self.raw: + try: + key.decode(encoding) + value.decode(encoding) + except UnicodeDecodeError: + break + else: + # The else block runs if 'break' did not occur, meaning + # all values fitted the encoding. + self._encoding = encoding + break + else: + # The ISO-8859-1 encoding covers all 256 code points in a byte, + # so will never raise decode errors. + self._encoding = "iso-8859-1" + return self._encoding + + @encoding.setter + def encoding(self, value: str) -> None: + self._encoding = value + + @property + def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: + """ + Returns a list of the raw header items, as byte pairs. + """ + return [(raw_key, value) for raw_key, _, value in self._list] + + def keys(self) -> typing.KeysView[str]: + return {key.decode(self.encoding): None for _, key, value in self._list}.keys() + + def values(self) -> typing.ValuesView[str]: + values_dict: typing.Dict[str, str] = {} + for _, key, value in self._list: + str_key = key.decode(self.encoding) + str_value = value.decode(self.encoding) + if str_key in values_dict: + values_dict[str_key] += f", {str_value}" + else: + values_dict[str_key] = str_value + return values_dict.values() + + def items(self) -> typing.ItemsView[str, str]: + """ + Return `(key, value)` items of headers. Concatenate headers + into a single comma separated value when a key occurs multiple times. + """ + values_dict: typing.Dict[str, str] = {} + for _, key, value in self._list: + str_key = key.decode(self.encoding) + str_value = value.decode(self.encoding) + if str_key in values_dict: + values_dict[str_key] += f", {str_value}" + else: + values_dict[str_key] = str_value + return values_dict.items() + + def multi_items(self) -> typing.List[typing.Tuple[str, str]]: + """ + Return a list of `(key, value)` pairs of headers. Allow multiple + occurrences of the same key without concatenating into a single + comma separated value. + """ + return [ + (key.decode(self.encoding), value.decode(self.encoding)) + for _, key, value in self._list + ] + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Return a header value. If multiple occurrences of the header occur + then concatenate them together with commas. + """ + try: + return self[key] + except KeyError: + return default + + def get_list(self, key: str, split_commas: bool = False) -> typing.List[str]: + """ + Return a list of all header values for a given key. 
+ If `split_commas=True` is passed, then any comma separated header + values are split into multiple return strings. + """ + get_header_key = key.lower().encode(self.encoding) + + values = [ + item_value.decode(self.encoding) + for _, item_key, item_value in self._list + if item_key.lower() == get_header_key + ] + + if not split_commas: + return values + + split_values = [] + for value in values: + split_values.extend([item.strip() for item in value.split(",")]) + return split_values + + def update(self, headers: typing.Optional[HeaderTypes] = None) -> None: # type: ignore + headers = Headers(headers) + for key in headers.keys(): + if key in self: + self.pop(key) + self._list.extend(headers._list) + + def copy(self) -> "Headers": + return Headers(self, encoding=self.encoding) + + def __getitem__(self, key: str) -> str: + """ + Return a single header value. + + If there are multiple headers with the same key, then we concatenate + them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2 + """ + normalized_key = key.lower().encode(self.encoding) + + items = [ + header_value.decode(self.encoding) + for _, header_key, header_value in self._list + if header_key == normalized_key + ] + + if items: + return ", ".join(items) + + raise KeyError(key) + + def __setitem__(self, key: str, value: str) -> None: + """ + Set the header `key` to `value`, removing any duplicate entries. + Retains insertion order. + """ + set_key = key.encode(self._encoding or "utf-8") + set_value = value.encode(self._encoding or "utf-8") + lookup_key = set_key.lower() + + found_indexes = [ + idx + for idx, (_, item_key, _) in enumerate(self._list) + if item_key == lookup_key + ] + + for idx in reversed(found_indexes[1:]): + del self._list[idx] + + if found_indexes: + idx = found_indexes[0] + self._list[idx] = (set_key, lookup_key, set_value) + else: + self._list.append((set_key, lookup_key, set_value)) + + def __delitem__(self, key: str) -> None: + """ + Remove the header `key`. 
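+
+        For example (an illustrative sketch):
+
+        ```
+        del headers["Accept"]  # removes every occurrence; KeyError if absent
+        ```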
+ """ + del_key = key.lower().encode(self.encoding) + + pop_indexes = [ + idx + for idx, (_, item_key, _) in enumerate(self._list) + if item_key.lower() == del_key + ] + + if not pop_indexes: + raise KeyError(key) + + for idx in reversed(pop_indexes): + del self._list[idx] + + def __contains__(self, key: typing.Any) -> bool: + header_key = key.lower().encode(self.encoding) + return header_key in [key for _, key, _ in self._list] + + def __iter__(self) -> typing.Iterator[typing.Any]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._list) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_headers = Headers(other) + except ValueError: + return False + + self_list = [(key, value) for _, key, value in self._list] + other_list = [(key, value) for _, key, value in other_headers._list] + return sorted(self_list) == sorted(other_list) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + + encoding_str = "" + if self.encoding != "ascii": + encoding_str = f", encoding={self.encoding!r}" + + as_list = list(obfuscate_sensitive_headers(self.multi_items())) + as_dict = dict(as_list) + + no_duplicate_keys = len(as_dict) == len(as_list) + if no_duplicate_keys: + return f"{class_name}({as_dict!r}{encoding_str})" + return f"{class_name}({as_list!r}{encoding_str})" + + +class Request: + def __init__( + self, + method: typing.Union[str, bytes], + url: typing.Union["URL", str, RawURL], + *, + params: typing.Optional[QueryParamTypes] = None, + headers: typing.Optional[HeaderTypes] = None, + cookies: typing.Optional[CookieTypes] = None, + content: typing.Optional[RequestContent] = None, + data: typing.Optional[RequestData] = None, + files: typing.Optional[RequestFiles] = None, + json: typing.Optional[typing.Any] = None, + stream: typing.Union[SyncByteStream, AsyncByteStream, None] = None, + extensions: typing.Optional[dict] = None, + ): + self.method = ( + method.decode("ascii").upper() + if isinstance(method, bytes) + else method.upper() + ) + self.url = URL(url) + if params is not None: + self.url = self.url.copy_merge_params(params=params) + self.headers = Headers(headers) + self.extensions = {} if extensions is None else extensions + + if cookies: + Cookies(cookies).set_cookie_header(self) + + if stream is None: + headers, stream = encode_request(content, data, files, json) + self._prepare(headers) + self.stream = stream + # Load the request body, except for streaming content. + if isinstance(stream, ByteStream): + self.read() + else: + # There's an important distinction between `Request(content=...)`, + # and `Request(stream=...)`. + # + # Using `content=...` implies automatically populated `Host` and content + # headers, of either `Content-Length: ...` or `Transfer-Encoding: chunked`. + # + # Using `stream=...` will not automatically include *any* auto-populated headers. + # + # As an end-user you don't really need `stream=...`. It's only + # useful when: + # + # * Preserving the request stream when copying requests, eg for redirects. + # * Creating request instances on the *server-side* of the transport API. + self.stream = stream + + def _prepare(self, default_headers: typing.Dict[str, str]) -> None: + for key, value in default_headers.items(): + # Ignore Transfer-Encoding if the Content-Length has been set explicitly. 
+ if key.lower() == "transfer-encoding" and "Content-Length" in self.headers: + continue + self.headers.setdefault(key, value) + + auto_headers: typing.List[typing.Tuple[bytes, bytes]] = [] + + has_host = "Host" in self.headers + has_content_length = ( + "Content-Length" in self.headers or "Transfer-Encoding" in self.headers + ) + + if not has_host and self.url.host: + auto_headers.append((b"Host", self.url.netloc)) + if not has_content_length and self.method in ("POST", "PUT", "PATCH"): + auto_headers.append((b"Content-Length", b"0")) + + self.headers = Headers(auto_headers + self.headers.raw) + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + raise RequestNotRead() + return self._content + + def read(self) -> bytes: + """ + Read and return the request content. + """ + if not hasattr(self, "_content"): + assert isinstance(self.stream, typing.Iterable) + self._content = b"".join(self.stream) + if not isinstance(self.stream, ByteStream): + # If a streaming request has been read entirely into memory, then + # we can replace the stream with a raw bytes implementation, + # to ensure that any non-replayable streams can still be used. + self.stream = ByteStream(self._content) + return self._content + + async def aread(self) -> bytes: + """ + Read and return the request content. + """ + if not hasattr(self, "_content"): + assert isinstance(self.stream, typing.AsyncIterable) + self._content = b"".join([part async for part in self.stream]) + if not isinstance(self.stream, ByteStream): + # If a streaming request has been read entirely into memory, then + # we can replace the stream with a raw bytes implementation, + # to ensure that any non-replayable streams can still be used. + self.stream = ByteStream(self._content) + return self._content + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + url = str(self.url) + return f"<{class_name}({self.method!r}, {url!r})>" + + def __getstate__(self) -> typing.Dict[str, typing.Any]: + return { + name: value + for name, value in self.__dict__.items() + if name not in ["extensions", "stream"] + } + + def __setstate__(self, state: typing.Dict[str, typing.Any]) -> None: + for name, value in state.items(): + setattr(self, name, value) + self.extensions = {} + self.stream = UnattachedStream() + + +class Response: + def __init__( + self, + status_code: int, + *, + headers: typing.Optional[HeaderTypes] = None, + content: typing.Optional[ResponseContent] = None, + text: typing.Optional[str] = None, + html: typing.Optional[str] = None, + json: typing.Any = None, + stream: typing.Union[SyncByteStream, AsyncByteStream, None] = None, + request: typing.Optional[Request] = None, + extensions: typing.Optional[dict] = None, + history: typing.Optional[typing.List["Response"]] = None, + default_encoding: typing.Union[str, typing.Callable[[bytes], str]] = "utf-8", + ): + self.status_code = status_code + self.headers = Headers(headers) + + self._request: typing.Optional[Request] = request + + # When follow_redirects=False and a redirect is received, + # the client will set `response.next_request`. 
+ self.next_request: typing.Optional[Request] = None + + self.extensions = {} if extensions is None else extensions + self.history = [] if history is None else list(history) + + self.is_closed = False + self.is_stream_consumed = False + + self.default_encoding = default_encoding + + if stream is None: + headers, stream = encode_response(content, text, html, json) + self._prepare(headers) + self.stream = stream + if isinstance(stream, ByteStream): + # Load the response body, except for streaming content. + self.read() + else: + # There's an important distinction between `Response(content=...)`, + # and `Response(stream=...)`. + # + # Using `content=...` implies automatically populated content headers, + # of either `Content-Length: ...` or `Transfer-Encoding: chunked`. + # + # Using `stream=...` will not automatically include any content headers. + # + # As an end-user you don't really need `stream=...`. It's only + # useful when creating response instances having received a stream + # from the transport API. + self.stream = stream + + self._num_bytes_downloaded = 0 + + def _prepare(self, default_headers: typing.Dict[str, str]) -> None: + for key, value in default_headers.items(): + # Ignore Transfer-Encoding if the Content-Length has been set explicitly. + if key.lower() == "transfer-encoding" and "content-length" in self.headers: + continue + self.headers.setdefault(key, value) + + @property + def elapsed(self) -> datetime.timedelta: + """ + Returns the time taken for the complete request/response + cycle to complete. + """ + if not hasattr(self, "_elapsed"): + raise RuntimeError( + "'.elapsed' may only be accessed after the response " + "has been read or closed." + ) + return self._elapsed + + @elapsed.setter + def elapsed(self, elapsed: datetime.timedelta) -> None: + self._elapsed = elapsed + + @property + def request(self) -> Request: + """ + Returns the request instance associated to the current response. + """ + if self._request is None: + raise RuntimeError( + "The request instance has not been set on this response." + ) + return self._request + + @request.setter + def request(self, value: Request) -> None: + self._request = value + + @property + def http_version(self) -> str: + try: + return self.extensions["http_version"].decode("ascii", errors="ignore") + except KeyError: + return "HTTP/1.1" + + @property + def reason_phrase(self) -> str: + try: + return self.extensions["reason_phrase"].decode("ascii", errors="ignore") + except KeyError: + return codes.get_reason_phrase(self.status_code) + + @property + def url(self) -> URL: + """ + Returns the URL for which the request was made. + """ + return self.request.url + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + raise ResponseNotRead() + return self._content + + @property + def text(self) -> str: + if not hasattr(self, "_text"): + content = self.content + if not content: + self._text = "" + else: + decoder = TextDecoder(encoding=self.encoding or "utf-8") + self._text = "".join([decoder.decode(self.content), decoder.flush()]) + return self._text + + @property + def encoding(self) -> typing.Optional[str]: + """ + Return an encoding to use for decoding the byte content into text. + The priority for determining this is given by... + + * `.encoding = <>` has been set explicitly. + * The encoding as specified by the charset parameter in the Content-Type header. 
+ * The encoding as determined by `default_encoding`, which may either be + a string like "utf-8" indicating the encoding to use, or may be a callable + which enables charset autodetection. + """ + if not hasattr(self, "_encoding"): + encoding = self.charset_encoding + if encoding is None or not is_known_encoding(encoding): + if isinstance(self.default_encoding, str): + encoding = self.default_encoding + elif hasattr(self, "_content"): + encoding = self.default_encoding(self._content) + self._encoding = encoding or "utf-8" + return self._encoding + + @encoding.setter + def encoding(self, value: str) -> None: + self._encoding = value + + @property + def charset_encoding(self) -> typing.Optional[str]: + """ + Return the encoding, as specified by the Content-Type header. + """ + content_type = self.headers.get("Content-Type") + if content_type is None: + return None + + _, params = cgi.parse_header(content_type) + if "charset" not in params: + return None + + return params["charset"].strip("'\"") + + def _get_content_decoder(self) -> ContentDecoder: + """ + Returns a decoder instance which can be used to decode the raw byte + content, depending on the Content-Encoding used in the response. + """ + if not hasattr(self, "_decoder"): + decoders: typing.List[ContentDecoder] = [] + values = self.headers.get_list("content-encoding", split_commas=True) + for value in values: + value = value.strip().lower() + try: + decoder_cls = SUPPORTED_DECODERS[value] + decoders.append(decoder_cls()) + except KeyError: + continue + + if len(decoders) == 1: + self._decoder = decoders[0] + elif len(decoders) > 1: + self._decoder = MultiDecoder(children=decoders) + else: + self._decoder = IdentityDecoder() + + return self._decoder + + @property + def is_informational(self) -> bool: + """ + A property which is `True` for 1xx status codes, `False` otherwise. + """ + return codes.is_informational(self.status_code) + + @property + def is_success(self) -> bool: + """ + A property which is `True` for 2xx status codes, `False` otherwise. + """ + return codes.is_success(self.status_code) + + @property + def is_redirect(self) -> bool: + """ + A property which is `True` for 3xx status codes, `False` otherwise. + + Note that not all responses with a 3xx status code indicate a URL redirect. + + Use `response.has_redirect_location` to determine responses with a properly + formed URL redirection. + """ + return codes.is_redirect(self.status_code) + + @property + def is_client_error(self) -> bool: + """ + A property which is `True` for 4xx status codes, `False` otherwise. + """ + return codes.is_client_error(self.status_code) + + @property + def is_server_error(self) -> bool: + """ + A property which is `True` for 5xx status codes, `False` otherwise. + """ + return codes.is_server_error(self.status_code) + + @property + def is_error(self) -> bool: + """ + A property which is `True` for 4xx and 5xx status codes, `False` otherwise. + """ + return codes.is_error(self.status_code) + + @property + def has_redirect_location(self) -> bool: + """ + Returns True for 3xx responses with a properly formed URL redirection, + `False` otherwise. + """ + return ( + self.status_code + in ( + # 301 (Cacheable redirect. Method may change to GET.) + codes.MOVED_PERMANENTLY, + # 302 (Uncacheable redirect. Method may change to GET.) + codes.FOUND, + # 303 (Client should make a GET or HEAD request.) + codes.SEE_OTHER, + # 307 (Equiv. 302, but retain method) + codes.TEMPORARY_REDIRECT, + # 308 (Equiv. 
301, but retain method) + codes.PERMANENT_REDIRECT, + ) + and "Location" in self.headers + ) + + def raise_for_status(self) -> None: + """ + Raise the `HTTPStatusError` if one occurred. + """ + request = self._request + if request is None: + raise RuntimeError( + "Cannot call `raise_for_status` as the request " + "instance has not been set on this response." + ) + + if self.is_success: + return + + if self.has_redirect_location: + message = ( + "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n" + "Redirect location: '{0.headers[location]}'\n" + "For more information check: https://httpstatuses.com/{0.status_code}" + ) + else: + message = ( + "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n" + "For more information check: https://httpstatuses.com/{0.status_code}" + ) + + status_class = self.status_code // 100 + error_types = { + 1: "Informational response", + 3: "Redirect response", + 4: "Client error", + 5: "Server error", + } + error_type = error_types.get(status_class, "Invalid status code") + message = message.format(self, error_type=error_type) + raise HTTPStatusError(message, request=request, response=self) + + def json(self, **kwargs: typing.Any) -> typing.Any: + if self.charset_encoding is None and self.content and len(self.content) > 3: + encoding = guess_json_utf(self.content) + if encoding is not None: + return jsonlib.loads(self.content.decode(encoding), **kwargs) + return jsonlib.loads(self.text, **kwargs) + + @property + def cookies(self) -> "Cookies": + if not hasattr(self, "_cookies"): + self._cookies = Cookies() + self._cookies.extract_cookies(self) + return self._cookies + + @property + def links(self) -> typing.Dict[typing.Optional[str], typing.Dict[str, str]]: + """ + Returns the parsed header links of the response, if any + """ + header = self.headers.get("link") + ldict = {} + if header: + links = parse_header_links(header) + for link in links: + key = link.get("rel") or link.get("url") + ldict[key] = link + return ldict + + @property + def num_bytes_downloaded(self) -> int: + return self._num_bytes_downloaded + + def __repr__(self) -> str: + return f"" + + def __getstate__(self) -> typing.Dict[str, typing.Any]: + return { + name: value + for name, value in self.__dict__.items() + if name not in ["extensions", "stream", "is_closed", "_decoder"] + } + + def __setstate__(self, state: typing.Dict[str, typing.Any]) -> None: + for name, value in state.items(): + setattr(self, name, value) + self.is_closed = True + self.extensions = {} + self.stream = UnattachedStream() + + def read(self) -> bytes: + """ + Read and return the response content. + """ + if not hasattr(self, "_content"): + self._content = b"".join(self.iter_bytes()) + return self._content + + def iter_bytes( + self, chunk_size: typing.Optional[int] = None + ) -> typing.Iterator[bytes]: + """ + A byte-iterator over the decoded response content. + This allows us to handle gzip, deflate, and brotli encoded responses. 
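+
+        For example (an illustrative sketch; the URL is a placeholder):
+
+        ```
+        with httpx.stream("GET", "https://www.example.com") as response:
+            for chunk in response.iter_bytes(chunk_size=8192):
+                ...
+        ```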
+ """ + if hasattr(self, "_content"): + chunk_size = len(self._content) if chunk_size is None else chunk_size + for i in range(0, len(self._content), max(chunk_size, 1)): + yield self._content[i : i + chunk_size] + else: + decoder = self._get_content_decoder() + chunker = ByteChunker(chunk_size=chunk_size) + with request_context(request=self._request): + for raw_bytes in self.iter_raw(): + decoded = decoder.decode(raw_bytes) + for chunk in chunker.decode(decoded): + yield chunk + decoded = decoder.flush() + for chunk in chunker.decode(decoded): + yield chunk # pragma: nocover + for chunk in chunker.flush(): + yield chunk + + def iter_text( + self, chunk_size: typing.Optional[int] = None + ) -> typing.Iterator[str]: + """ + A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + decoder = TextDecoder(encoding=self.encoding or "utf-8") + chunker = TextChunker(chunk_size=chunk_size) + with request_context(request=self._request): + for byte_content in self.iter_bytes(): + text_content = decoder.decode(byte_content) + for chunk in chunker.decode(text_content): + yield chunk + text_content = decoder.flush() + for chunk in chunker.decode(text_content): + yield chunk + for chunk in chunker.flush(): + yield chunk + + def iter_lines(self) -> typing.Iterator[str]: + decoder = LineDecoder() + with request_context(request=self._request): + for text in self.iter_text(): + for line in decoder.decode(text): + yield line + for line in decoder.flush(): + yield line + + def iter_raw( + self, chunk_size: typing.Optional[int] = None + ) -> typing.Iterator[bytes]: + """ + A byte-iterator over the raw response content. + """ + if self.is_stream_consumed: + raise StreamConsumed() + if self.is_closed: + raise StreamClosed() + if not isinstance(self.stream, SyncByteStream): + raise RuntimeError("Attempted to call a sync iterator on an async stream.") + + self.is_stream_consumed = True + self._num_bytes_downloaded = 0 + chunker = ByteChunker(chunk_size=chunk_size) + + with request_context(request=self._request): + for raw_stream_bytes in self.stream: + self._num_bytes_downloaded += len(raw_stream_bytes) + for chunk in chunker.decode(raw_stream_bytes): + yield chunk + + for chunk in chunker.flush(): + yield chunk + + self.close() + + def close(self) -> None: + """ + Close the response and release the connection. + Automatically called if the response body is read to completion. + """ + if not isinstance(self.stream, SyncByteStream): + raise RuntimeError("Attempted to call an sync close on an async stream.") + + if not self.is_closed: + self.is_closed = True + with request_context(request=self._request): + self.stream.close() + + async def aread(self) -> bytes: + """ + Read and return the response content. + """ + if not hasattr(self, "_content"): + self._content = b"".join([part async for part in self.aiter_bytes()]) + return self._content + + async def aiter_bytes( + self, chunk_size: typing.Optional[int] = None + ) -> typing.AsyncIterator[bytes]: + """ + A byte-iterator over the decoded response content. + This allows us to handle gzip, deflate, and brotli encoded responses. 
+ """ + if hasattr(self, "_content"): + chunk_size = len(self._content) if chunk_size is None else chunk_size + for i in range(0, len(self._content), max(chunk_size, 1)): + yield self._content[i : i + chunk_size] + else: + decoder = self._get_content_decoder() + chunker = ByteChunker(chunk_size=chunk_size) + with request_context(request=self._request): + async for raw_bytes in self.aiter_raw(): + decoded = decoder.decode(raw_bytes) + for chunk in chunker.decode(decoded): + yield chunk + decoded = decoder.flush() + for chunk in chunker.decode(decoded): + yield chunk # pragma: nocover + for chunk in chunker.flush(): + yield chunk + + async def aiter_text( + self, chunk_size: typing.Optional[int] = None + ) -> typing.AsyncIterator[str]: + """ + A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + decoder = TextDecoder(encoding=self.encoding or "utf-8") + chunker = TextChunker(chunk_size=chunk_size) + with request_context(request=self._request): + async for byte_content in self.aiter_bytes(): + text_content = decoder.decode(byte_content) + for chunk in chunker.decode(text_content): + yield chunk + text_content = decoder.flush() + for chunk in chunker.decode(text_content): + yield chunk + for chunk in chunker.flush(): + yield chunk + + async def aiter_lines(self) -> typing.AsyncIterator[str]: + decoder = LineDecoder() + with request_context(request=self._request): + async for text in self.aiter_text(): + for line in decoder.decode(text): + yield line + for line in decoder.flush(): + yield line + + async def aiter_raw( + self, chunk_size: typing.Optional[int] = None + ) -> typing.AsyncIterator[bytes]: + """ + A byte-iterator over the raw response content. + """ + if self.is_stream_consumed: + raise StreamConsumed() + if self.is_closed: + raise StreamClosed() + if not isinstance(self.stream, AsyncByteStream): + raise RuntimeError("Attempted to call an async iterator on an sync stream.") + + self.is_stream_consumed = True + self._num_bytes_downloaded = 0 + chunker = ByteChunker(chunk_size=chunk_size) + + with request_context(request=self._request): + async for raw_stream_bytes in self.stream: + self._num_bytes_downloaded += len(raw_stream_bytes) + for chunk in chunker.decode(raw_stream_bytes): + yield chunk + + for chunk in chunker.flush(): + yield chunk + + await self.aclose() + + async def aclose(self) -> None: + """ + Close the response and release the connection. + Automatically called if the response body is read to completion. + """ + if not isinstance(self.stream, AsyncByteStream): + raise RuntimeError("Attempted to call an async close on an sync stream.") + + if not self.is_closed: + self.is_closed = True + with request_context(request=self._request): + await self.stream.aclose() + + +class Cookies(MutableMapping): + """ + HTTP Cookies, as a mutable mapping. + """ + + def __init__(self, cookies: typing.Optional[CookieTypes] = None) -> None: + if cookies is None or isinstance(cookies, dict): + self.jar = CookieJar() + if isinstance(cookies, dict): + for key, value in cookies.items(): + self.set(key, value) + elif isinstance(cookies, list): + self.jar = CookieJar() + for key, value in cookies: + self.set(key, value) + elif isinstance(cookies, Cookies): + self.jar = CookieJar() + for cookie in cookies.jar: + self.jar.set_cookie(cookie) + else: + self.jar = cookies + + def extract_cookies(self, response: Response) -> None: + """ + Loads any cookies based on the response `Set-Cookie` headers. 
+ """ + urllib_response = self._CookieCompatResponse(response) + urllib_request = self._CookieCompatRequest(response.request) + + self.jar.extract_cookies(urllib_response, urllib_request) # type: ignore + + def set_cookie_header(self, request: Request) -> None: + """ + Sets an appropriate 'Cookie:' HTTP header on the `Request`. + """ + urllib_request = self._CookieCompatRequest(request) + self.jar.add_cookie_header(urllib_request) + + def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None: + """ + Set a cookie value by name. May optionally include domain and path. + """ + kwargs = { + "version": 0, + "name": name, + "value": value, + "port": None, + "port_specified": False, + "domain": domain, + "domain_specified": bool(domain), + "domain_initial_dot": domain.startswith("."), + "path": path, + "path_specified": bool(path), + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + cookie = Cookie(**kwargs) # type: ignore + self.jar.set_cookie(cookie) + + def get( # type: ignore + self, + name: str, + default: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + path: typing.Optional[str] = None, + ) -> typing.Optional[str]: + """ + Get a cookie by name. May optionally include domain and path + in order to specify exactly which cookie to retrieve. + """ + value = None + for cookie in self.jar: + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if value is not None: + message = f"Multiple cookies exist with name={name}" + raise CookieConflict(message) + value = cookie.value + + if value is None: + return default + return value + + def delete( + self, + name: str, + domain: typing.Optional[str] = None, + path: typing.Optional[str] = None, + ) -> None: + """ + Delete a cookie by name. May optionally include domain and path + in order to specify exactly which cookie to delete. + """ + if domain is not None and path is not None: + return self.jar.clear(domain, path, name) + + remove = [ + cookie + for cookie in self.jar + if cookie.name == name + and (domain is None or cookie.domain == domain) + and (path is None or cookie.path == path) + ] + + for cookie in remove: + self.jar.clear(cookie.domain, cookie.path, cookie.name) + + def clear( + self, domain: typing.Optional[str] = None, path: typing.Optional[str] = None + ) -> None: + """ + Delete all cookies. Optionally include a domain and path in + order to only delete a subset of all the cookies. 
+ """ + args = [] + if domain is not None: + args.append(domain) + if path is not None: + assert domain is not None + args.append(path) + self.jar.clear(*args) + + def update(self, cookies: typing.Optional[CookieTypes] = None) -> None: # type: ignore + cookies = Cookies(cookies) + for cookie in cookies.jar: + self.jar.set_cookie(cookie) + + def __setitem__(self, name: str, value: str) -> None: + return self.set(name, value) + + def __getitem__(self, name: str) -> str: + value = self.get(name) + if value is None: + raise KeyError(name) + return value + + def __delitem__(self, name: str) -> None: + return self.delete(name) + + def __len__(self) -> int: + return len(self.jar) + + def __iter__(self) -> typing.Iterator[str]: + return (cookie.name for cookie in self.jar) + + def __bool__(self) -> bool: + for _ in self.jar: + return True + return False + + def __repr__(self) -> str: + cookies_repr = ", ".join( + [ + f"" + for cookie in self.jar + ] + ) + + return f"" + + class _CookieCompatRequest(urllib.request.Request): + """ + Wraps a `Request` instance up in a compatibility interface suitable + for use with `CookieJar` operations. + """ + + def __init__(self, request: Request) -> None: + super().__init__( + url=str(request.url), + headers=dict(request.headers), + method=request.method, + ) + self.request = request + + def add_unredirected_header(self, key: str, value: str) -> None: + super().add_unredirected_header(key, value) + self.request.headers[key] = value + + class _CookieCompatResponse: + """ + Wraps a `Request` instance up in a compatibility interface suitable + for use with `CookieJar` operations. + """ + + def __init__(self, response: Response): + self.response = response + + def info(self) -> email.message.Message: + info = email.message.Message() + for key, value in self.response.headers.multi_items(): + # Note that setting `info[key]` here is an "append" operation, + # not a "replace" operation. + # https://docs.python.org/3/library/email.compat32-message.html#email.message.Message.__setitem__ + info[key] = value + return info diff --git a/myenv/lib/python3.9/site-packages/httpx/_multipart.py b/myenv/lib/python3.9/site-packages/httpx/_multipart.py new file mode 100644 index 0000000..d42f5cb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_multipart.py @@ -0,0 +1,236 @@ +import binascii +import io +import os +import typing +from pathlib import Path + +from ._types import ( + AsyncByteStream, + FileContent, + FileTypes, + RequestFiles, + SyncByteStream, +) +from ._utils import ( + format_form_param, + guess_content_type, + peek_filelike_length, + primitive_value_to_str, + to_bytes, +) + + +class DataField: + """ + A single form field item, within a multipart form field. + """ + + def __init__( + self, name: str, value: typing.Union[str, bytes, int, float, None] + ) -> None: + if not isinstance(name, str): + raise TypeError( + f"Invalid type for name. Expected str, got {type(name)}: {name!r}" + ) + if value is not None and not isinstance(value, (str, bytes, int, float)): + raise TypeError( + f"Invalid type for value. 
Expected primitive type, got {type(value)}: {value!r}" + ) + self.name = name + self.value: typing.Union[str, bytes] = ( + value if isinstance(value, bytes) else primitive_value_to_str(value) + ) + + def render_headers(self) -> bytes: + if not hasattr(self, "_headers"): + name = format_form_param("name", self.name) + self._headers = b"".join( + [b"Content-Disposition: form-data; ", name, b"\r\n\r\n"] + ) + + return self._headers + + def render_data(self) -> bytes: + if not hasattr(self, "_data"): + self._data = to_bytes(self.value) + + return self._data + + def get_length(self) -> int: + headers = self.render_headers() + data = self.render_data() + return len(headers) + len(data) + + def render(self) -> typing.Iterator[bytes]: + yield self.render_headers() + yield self.render_data() + + +class FileField: + """ + A single file field item, within a multipart form field. + """ + + CHUNK_SIZE = 64 * 1024 + + def __init__(self, name: str, value: FileTypes) -> None: + self.name = name + + fileobj: FileContent + + headers: typing.Dict[str, str] = {} + content_type: typing.Optional[str] = None + + # This large tuple based API largely mirror's requests' API + # It would be good to think of better APIs for this that we could include in httpx 2.0 + # since variable length tuples (especially of 4 elements) are quite unwieldly + if isinstance(value, tuple): + if len(value) == 2: + # neither the 3rd parameter (content_type) nor the 4th (headers) was included + filename, fileobj = value # type: ignore + elif len(value) == 3: + filename, fileobj, content_type = value # type: ignore + else: + # all 4 parameters included + filename, fileobj, content_type, headers = value # type: ignore + else: + filename = Path(str(getattr(value, "name", "upload"))).name + fileobj = value + + if content_type is None: + content_type = guess_content_type(filename) + + has_content_type_header = any("content-type" in key.lower() for key in headers) + if content_type is not None and not has_content_type_header: + # note that unlike requests, we ignore the content_type + # provided in the 3rd tuple element if it is also included in the headers + # requests does the opposite (it overwrites the header with the 3rd tuple element) + headers["Content-Type"] = content_type + + if isinstance(fileobj, (str, io.StringIO)): + raise TypeError(f"Expected bytes or bytes-like object got: {type(fileobj)}") + + self.filename = filename + self.file = fileobj + self.headers = headers + + def get_length(self) -> int: + headers = self.render_headers() + + if isinstance(self.file, (str, bytes)): + return len(headers) + len(to_bytes(self.file)) + + # Let's do our best not to read `file` into memory. + file_length = peek_filelike_length(self.file) + if file_length is None: + # As a last resort, read file and cache contents for later. 
+ assert not hasattr(self, "_data") + self._data = to_bytes(self.file.read()) + file_length = len(self._data) + + return len(headers) + file_length + + def render_headers(self) -> bytes: + if not hasattr(self, "_headers"): + parts = [ + b"Content-Disposition: form-data; ", + format_form_param("name", self.name), + ] + if self.filename: + filename = format_form_param("filename", self.filename) + parts.extend([b"; ", filename]) + for header_name, header_value in self.headers.items(): + key, val = f"\r\n{header_name}: ".encode(), header_value.encode() + parts.extend([key, val]) + parts.append(b"\r\n\r\n") + self._headers = b"".join(parts) + + return self._headers + + def render_data(self) -> typing.Iterator[bytes]: + if isinstance(self.file, (str, bytes)): + yield to_bytes(self.file) + return + + if hasattr(self, "_data"): + # Already rendered. + yield self._data + return + + if hasattr(self.file, "seek"): + self.file.seek(0) + + chunk = self.file.read(self.CHUNK_SIZE) + while chunk: + yield to_bytes(chunk) + chunk = self.file.read(self.CHUNK_SIZE) + + def render(self) -> typing.Iterator[bytes]: + yield self.render_headers() + yield from self.render_data() + + +class MultipartStream(SyncByteStream, AsyncByteStream): + """ + Request content as streaming multipart encoded form data. + """ + + def __init__( + self, data: dict, files: RequestFiles, boundary: typing.Optional[bytes] = None + ) -> None: + if boundary is None: + boundary = binascii.hexlify(os.urandom(16)) + + self.boundary = boundary + self.content_type = "multipart/form-data; boundary=%s" % boundary.decode( + "ascii" + ) + self.fields = list(self._iter_fields(data, files)) + + def _iter_fields( + self, data: dict, files: RequestFiles + ) -> typing.Iterator[typing.Union[FileField, DataField]]: + for name, value in data.items(): + if isinstance(value, list): + for item in value: + yield DataField(name=name, value=item) + else: + yield DataField(name=name, value=value) + + file_items = files.items() if isinstance(files, typing.Mapping) else files + for name, value in file_items: + yield FileField(name=name, value=value) + + def iter_chunks(self) -> typing.Iterator[bytes]: + for field in self.fields: + yield b"--%s\r\n" % self.boundary + yield from field.render() + yield b"\r\n" + yield b"--%s--\r\n" % self.boundary + + def iter_chunks_lengths(self) -> typing.Iterator[int]: + boundary_length = len(self.boundary) + # Follow closely what `.iter_chunks()` does. + for field in self.fields: + yield 2 + boundary_length + 2 + yield field.get_length() + yield 2 + yield 2 + boundary_length + 4 + + def get_content_length(self) -> int: + return sum(self.iter_chunks_lengths()) + + # Content stream interface. 
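The tuple shapes parsed by `FileField` above correspond to the public `files=` argument. A minimal sketch (the URL and field names are placeholders):

```python
import httpx

# Accepted shapes mirror FileField.__init__: (filename, content),
# (filename, content, content_type), or (filename, content, content_type, headers).
files = {"upload": ("report.txt", b"hello world", "text/plain")}
response = httpx.post("https://example.org/upload", data={"kind": "demo"}, files=files)
```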
+ + def get_headers(self) -> typing.Dict[str, str]: + content_length = str(self.get_content_length()) + content_type = self.content_type + return {"Content-Length": content_length, "Content-Type": content_type} + + def __iter__(self) -> typing.Iterator[bytes]: + for chunk in self.iter_chunks(): + yield chunk + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + for chunk in self.iter_chunks(): + yield chunk diff --git a/myenv/lib/python3.9/site-packages/httpx/_status_codes.py b/myenv/lib/python3.9/site-packages/httpx/_status_codes.py new file mode 100644 index 0000000..e500441 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_status_codes.py @@ -0,0 +1,158 @@ +from enum import IntEnum + + +class codes(IntEnum): + """HTTP status codes and reason phrases + + Status codes from the following RFCs are all observed: + + * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616 + * RFC 6585: Additional HTTP Status Codes + * RFC 3229: Delta encoding in HTTP + * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518 + * RFC 5842: Binding Extensions to WebDAV + * RFC 7238: Permanent Redirect + * RFC 2295: Transparent Content Negotiation in HTTP + * RFC 2774: An HTTP Extension Framework + * RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2) + * RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0) + * RFC 7725: An HTTP Status Code to Report Legal Obstacles + * RFC 8297: An HTTP Status Code for Indicating Hints + * RFC 8470: Using Early Data in HTTP + """ + + def __new__(cls, value: int, phrase: str = "") -> "codes": + obj = int.__new__(cls, value) # type: ignore + obj._value_ = value + + obj.phrase = phrase # type: ignore + return obj + + def __str__(self) -> str: + return str(self.value) + + @classmethod + def get_reason_phrase(cls, value: int) -> str: + try: + return codes(value).phrase # type: ignore + except ValueError: + return "" + + @classmethod + def is_informational(cls, value: int) -> bool: + """ + Returns `True` for 1xx status codes, `False` otherwise. + """ + return 100 <= value <= 199 + + @classmethod + def is_success(cls, value: int) -> bool: + """ + Returns `True` for 2xx status codes, `False` otherwise. + """ + return 200 <= value <= 299 + + @classmethod + def is_redirect(cls, value: int) -> bool: + """ + Returns `True` for 3xx status codes, `False` otherwise. + """ + return 300 <= value <= 399 + + @classmethod + def is_client_error(cls, value: int) -> bool: + """ + Returns `True` for 4xx status codes, `False` otherwise. + """ + return 400 <= value <= 499 + + @classmethod + def is_server_error(cls, value: int) -> bool: + """ + Returns `True` for 5xx status codes, `False` otherwise. + """ + return 500 <= value <= 599 + + @classmethod + def is_error(cls, value: int) -> bool: + """ + Returns `True` for 4xx or 5xx status codes, `False` otherwise. 
+ """ + return 400 <= value <= 599 + + # informational + CONTINUE = 100, "Continue" + SWITCHING_PROTOCOLS = 101, "Switching Protocols" + PROCESSING = 102, "Processing" + EARLY_HINTS = 103, "Early Hints" + + # success + OK = 200, "OK" + CREATED = 201, "Created" + ACCEPTED = 202, "Accepted" + NON_AUTHORITATIVE_INFORMATION = 203, "Non-Authoritative Information" + NO_CONTENT = 204, "No Content" + RESET_CONTENT = 205, "Reset Content" + PARTIAL_CONTENT = 206, "Partial Content" + MULTI_STATUS = 207, "Multi-Status" + ALREADY_REPORTED = 208, "Already Reported" + IM_USED = 226, "IM Used" + + # redirection + MULTIPLE_CHOICES = 300, "Multiple Choices" + MOVED_PERMANENTLY = 301, "Moved Permanently" + FOUND = 302, "Found" + SEE_OTHER = 303, "See Other" + NOT_MODIFIED = 304, "Not Modified" + USE_PROXY = 305, "Use Proxy" + TEMPORARY_REDIRECT = 307, "Temporary Redirect" + PERMANENT_REDIRECT = 308, "Permanent Redirect" + + # client error + BAD_REQUEST = 400, "Bad Request" + UNAUTHORIZED = 401, "Unauthorized" + PAYMENT_REQUIRED = 402, "Payment Required" + FORBIDDEN = 403, "Forbidden" + NOT_FOUND = 404, "Not Found" + METHOD_NOT_ALLOWED = 405, "Method Not Allowed" + NOT_ACCEPTABLE = 406, "Not Acceptable" + PROXY_AUTHENTICATION_REQUIRED = 407, "Proxy Authentication Required" + REQUEST_TIMEOUT = 408, "Request Timeout" + CONFLICT = 409, "Conflict" + GONE = 410, "Gone" + LENGTH_REQUIRED = 411, "Length Required" + PRECONDITION_FAILED = 412, "Precondition Failed" + REQUEST_ENTITY_TOO_LARGE = 413, "Request Entity Too Large" + REQUEST_URI_TOO_LONG = 414, "Request-URI Too Long" + UNSUPPORTED_MEDIA_TYPE = 415, "Unsupported Media Type" + REQUESTED_RANGE_NOT_SATISFIABLE = 416, "Requested Range Not Satisfiable" + EXPECTATION_FAILED = 417, "Expectation Failed" + IM_A_TEAPOT = 418, "I'm a teapot" + MISDIRECTED_REQUEST = 421, "Misdirected Request" + UNPROCESSABLE_ENTITY = 422, "Unprocessable Entity" + LOCKED = 423, "Locked" + FAILED_DEPENDENCY = 424, "Failed Dependency" + TOO_EARLY = 425, "Too Early" + UPGRADE_REQUIRED = 426, "Upgrade Required" + PRECONDITION_REQUIRED = 428, "Precondition Required" + TOO_MANY_REQUESTS = 429, "Too Many Requests" + REQUEST_HEADER_FIELDS_TOO_LARGE = 431, "Request Header Fields Too Large" + UNAVAILABLE_FOR_LEGAL_REASONS = 451, "Unavailable For Legal Reasons" + + # server errors + INTERNAL_SERVER_ERROR = 500, "Internal Server Error" + NOT_IMPLEMENTED = 501, "Not Implemented" + BAD_GATEWAY = 502, "Bad Gateway" + SERVICE_UNAVAILABLE = 503, "Service Unavailable" + GATEWAY_TIMEOUT = 504, "Gateway Timeout" + HTTP_VERSION_NOT_SUPPORTED = 505, "HTTP Version Not Supported" + VARIANT_ALSO_NEGOTIATES = 506, "Variant Also Negotiates" + INSUFFICIENT_STORAGE = 507, "Insufficient Storage" + LOOP_DETECTED = 508, "Loop Detected" + NOT_EXTENDED = 510, "Not Extended" + NETWORK_AUTHENTICATION_REQUIRED = 511, "Network Authentication Required" + + +# Include lower-case styles for `requests` compatibility. 
+for code in codes: + setattr(codes, code._name_.lower(), int(code)) diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/__init__.py b/myenv/lib/python3.9/site-packages/httpx/_transports/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/asgi.py b/myenv/lib/python3.9/site-packages/httpx/_transports/asgi.py new file mode 100644 index 0000000..711a6f6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_transports/asgi.py @@ -0,0 +1,163 @@ +import typing + +import sniffio + +from .._models import Request, Response +from .._types import AsyncByteStream +from .base import AsyncBaseTransport + +if typing.TYPE_CHECKING: # pragma: no cover + import asyncio + + import trio + + Event = typing.Union[asyncio.Event, trio.Event] + + +def create_event() -> "Event": + if sniffio.current_async_library() == "trio": + import trio + + return trio.Event() + else: + import asyncio + + return asyncio.Event() + + +class ASGIResponseStream(AsyncByteStream): + def __init__(self, body: typing.List[bytes]) -> None: + self._body = body + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + yield b"".join(self._body) + + +class ASGITransport(AsyncBaseTransport): + """ + A custom AsyncTransport that handles sending requests directly to an ASGI app. + The simplest way to use this functionality is to use the `app` argument. + + ``` + client = httpx.AsyncClient(app=app) + ``` + + Alternatively, you can setup the transport instance explicitly. + This allows you to include any additional configuration arguments specific + to the ASGITransport class: + + ``` + transport = httpx.ASGITransport( + app=app, + root_path="/submount", + client=("1.2.3.4", 123) + ) + client = httpx.AsyncClient(transport=transport) + ``` + + Arguments: + + * `app` - The ASGI application. + * `raise_app_exceptions` - Boolean indicating if exceptions in the application + should be raised. Default to `True`. Can be set to `False` for use cases + such as testing the content of a client 500 response. + * `root_path` - The root path on which the ASGI application should be mounted. + * `client` - A two-tuple indicating the client IP and port of incoming requests. + ``` + """ + + def __init__( + self, + app: typing.Callable, + raise_app_exceptions: bool = True, + root_path: str = "", + client: typing.Tuple[str, int] = ("127.0.0.1", 123), + ) -> None: + self.app = app + self.raise_app_exceptions = raise_app_exceptions + self.root_path = root_path + self.client = client + + async def handle_async_request( + self, + request: Request, + ) -> Response: + assert isinstance(request.stream, AsyncByteStream) + + # ASGI scope. + scope = { + "type": "http", + "asgi": {"version": "3.0"}, + "http_version": "1.1", + "method": request.method, + "headers": [(k.lower(), v) for (k, v) in request.headers.raw], + "scheme": request.url.scheme, + "path": request.url.path, + "raw_path": request.url.raw_path, + "query_string": request.url.query, + "server": (request.url.host, request.url.port), + "client": self.client, + "root_path": self.root_path, + } + + # Request. + request_body_chunks = request.stream.__aiter__() + request_complete = False + + # Response. + status_code = None + response_headers = None + body_parts = [] + response_started = False + response_complete = create_event() + + # ASGI callables. 
+ + async def receive() -> dict: + nonlocal request_complete + + if request_complete: + await response_complete.wait() + return {"type": "http.disconnect"} + + try: + body = await request_body_chunks.__anext__() + except StopAsyncIteration: + request_complete = True + return {"type": "http.request", "body": b"", "more_body": False} + return {"type": "http.request", "body": body, "more_body": True} + + async def send(message: dict) -> None: + nonlocal status_code, response_headers, response_started + + if message["type"] == "http.response.start": + assert not response_started + + status_code = message["status"] + response_headers = message.get("headers", []) + response_started = True + + elif message["type"] == "http.response.body": + assert not response_complete.is_set() + body = message.get("body", b"") + more_body = message.get("more_body", False) + + if body and request.method != "HEAD": + body_parts.append(body) + + if not more_body: + response_complete.set() + + try: + await self.app(scope, receive, send) + except Exception: # noqa: PIE-786 + if self.raise_app_exceptions or not response_complete.is_set(): + raise + + assert response_complete.is_set() + assert status_code is not None + assert response_headers is not None + + stream = ASGIResponseStream(body_parts) + + return Response(status_code, headers=response_headers, stream=stream) diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/base.py b/myenv/lib/python3.9/site-packages/httpx/_transports/base.py new file mode 100644 index 0000000..ffba6a4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_transports/base.py @@ -0,0 +1,82 @@ +import typing +from types import TracebackType + +from .._models import Request, Response + +T = typing.TypeVar("T", bound="BaseTransport") +A = typing.TypeVar("A", bound="AsyncBaseTransport") + + +class BaseTransport: + def __enter__(self: T) -> T: + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + self.close() + + def handle_request(self, request: Request) -> Response: + """ + Send a single HTTP request and return a response. + + Developers shouldn't typically ever need to call into this API directly, + since the Client class provides all the higher level user-facing API + niceties. + + In order to properly release any network resources, the response + stream should *either* be consumed immediately, with a call to + `response.stream.read()`, or else the `handle_request` call should + be followed with a try/finally block to ensuring the stream is + always closed. + + Example usage: + + with httpx.HTTPTransport() as transport: + req = httpx.Request( + method=b"GET", + url=(b"https", b"www.example.com", 443, b"/"), + headers=[(b"Host", b"www.example.com")], + ) + resp = transport.handle_request(req) + body = resp.stream.read() + print(resp.status_code, resp.headers, body) + + + Takes a `Request` instance as the only argument. + + Returns a `Response` instance. + """ + raise NotImplementedError( + "The 'handle_request' method must be implemented." 
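A minimal sketch of wiring `ASGITransport` to a client, with a trivial inline ASGI app (the app body and `base_url` are illustrative):

```python
import httpx

async def app(scope, receive, send):
    # A deliberately trivial ASGI app, for illustration only.
    assert scope["type"] == "http"
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})

transport = httpx.ASGITransport(app=app)
client = httpx.AsyncClient(transport=transport, base_url="http://testserver")
# `await client.get("/")` now routes through the transport above and returns 200 / b"ok".
```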
+ ) # pragma: nocover + + def close(self) -> None: + pass + + +class AsyncBaseTransport: + async def __aenter__(self: A) -> A: + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + await self.aclose() + + async def handle_async_request( + self, + request: Request, + ) -> Response: + raise NotImplementedError( + "The 'handle_async_request' method must be implemented." + ) # pragma: nocover + + async def aclose(self) -> None: + pass diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/default.py b/myenv/lib/python3.9/site-packages/httpx/_transports/default.py new file mode 100644 index 0000000..2086454 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_transports/default.py @@ -0,0 +1,365 @@ +""" +Custom transports, with nicely configured defaults. + +The following additional keyword arguments are currently supported by httpcore... + +* uds: str +* local_address: str +* retries: int + +Example usages... + +# Disable HTTP/2 on a single specific domain. +mounts = { + "all://": httpx.HTTPTransport(http2=True), + "all://*example.org": httpx.HTTPTransport() +} + +# Using advanced httpcore configuration, with connection retries. +transport = httpx.HTTPTransport(retries=1) +client = httpx.Client(transport=transport) + +# Using advanced httpcore configuration, with unix domain sockets. +transport = httpx.HTTPTransport(uds="socket.uds") +client = httpx.Client(transport=transport) +""" +import contextlib +import typing +from types import TracebackType + +import httpcore + +from .._config import DEFAULT_LIMITS, Limits, Proxy, create_ssl_context +from .._exceptions import ( + ConnectError, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from .._models import Request, Response +from .._types import AsyncByteStream, CertTypes, SyncByteStream, VerifyTypes +from .base import AsyncBaseTransport, BaseTransport + +T = typing.TypeVar("T", bound="HTTPTransport") +A = typing.TypeVar("A", bound="AsyncHTTPTransport") + + +@contextlib.contextmanager +def map_httpcore_exceptions() -> typing.Iterator[None]: + try: + yield + except Exception as exc: # noqa: PIE-786 + mapped_exc = None + + for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): + if not isinstance(exc, from_exc): + continue + # We want to map to the most specific exception we can find. + # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to + # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
+ if mapped_exc is None or issubclass(to_exc, mapped_exc): + mapped_exc = to_exc + + if mapped_exc is None: # pragma: nocover + raise + + message = str(exc) + raise mapped_exc(message) from exc + + +HTTPCORE_EXC_MAP = { + httpcore.TimeoutException: TimeoutException, + httpcore.ConnectTimeout: ConnectTimeout, + httpcore.ReadTimeout: ReadTimeout, + httpcore.WriteTimeout: WriteTimeout, + httpcore.PoolTimeout: PoolTimeout, + httpcore.NetworkError: NetworkError, + httpcore.ConnectError: ConnectError, + httpcore.ReadError: ReadError, + httpcore.WriteError: WriteError, + httpcore.ProxyError: ProxyError, + httpcore.UnsupportedProtocol: UnsupportedProtocol, + httpcore.ProtocolError: ProtocolError, + httpcore.LocalProtocolError: LocalProtocolError, + httpcore.RemoteProtocolError: RemoteProtocolError, +} + + +class ResponseStream(SyncByteStream): + def __init__(self, httpcore_stream: typing.Iterable[bytes]): + self._httpcore_stream = httpcore_stream + + def __iter__(self) -> typing.Iterator[bytes]: + with map_httpcore_exceptions(): + for part in self._httpcore_stream: + yield part + + def close(self) -> None: + if hasattr(self._httpcore_stream, "close"): + self._httpcore_stream.close() # type: ignore + + +class HTTPTransport(BaseTransport): + def __init__( + self, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + trust_env: bool = True, + proxy: typing.Optional[Proxy] = None, + uds: typing.Optional[str] = None, + local_address: typing.Optional[str] = None, + retries: int = 0, + ) -> None: + ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env) + + if proxy is None: + self._pool = httpcore.ConnectionPool( + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + uds=uds, + local_address=local_address, + retries=retries, + ) + elif proxy.url.scheme in ("http", "https"): + self._pool = httpcore.HTTPProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + proxy_headers=proxy.headers.raw, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + ) + elif proxy.url.scheme == "socks5": + try: + import socksio # noqa + except ImportError: # pragma: nocover + raise ImportError( + "Using SOCKS proxy, but the 'socksio' package is not installed. " + "Make sure to install httpx using `pip install httpx[socks]`." + ) from None + + self._pool = httpcore.SOCKSProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + ) + else: # pragma: nocover + raise ValueError( + f"Proxy protocol must be either 'http', 'https', or 'socks5', but got {proxy.url.scheme!r}." + ) + + def __enter__(self: T) -> T: # Use generics for subclass support. 
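The practical effect of `map_httpcore_exceptions` is that callers only ever catch `httpx` exception types. A sketch (the URL and the tiny timeout are placeholders chosen to force a failure):

```python
import httpx

try:
    httpx.get("https://example.org", timeout=0.001)
except httpx.TimeoutException as exc:
    # The underlying httpcore exception was re-raised as its most
    # specific httpx counterpart via HTTPCORE_EXC_MAP.
    print(f"Timed out: {exc!r}")
```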
+ self._pool.__enter__() + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + with map_httpcore_exceptions(): + self._pool.__exit__(exc_type, exc_value, traceback) + + def handle_request( + self, + request: Request, + ) -> Response: + assert isinstance(request.stream, SyncByteStream) + + req = httpcore.Request( + method=request.method, + url=httpcore.URL( + scheme=request.url.raw_scheme, + host=request.url.raw_host, + port=request.url.port, + target=request.url.raw_path, + ), + headers=request.headers.raw, + content=request.stream, + extensions=request.extensions, + ) + with map_httpcore_exceptions(): + resp = self._pool.handle_request(req) + + assert isinstance(resp.stream, typing.Iterable) + + return Response( + status_code=resp.status, + headers=resp.headers, + stream=ResponseStream(resp.stream), + extensions=resp.extensions, + ) + + def close(self) -> None: + self._pool.close() + + +class AsyncResponseStream(AsyncByteStream): + def __init__(self, httpcore_stream: typing.AsyncIterable[bytes]): + self._httpcore_stream = httpcore_stream + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + with map_httpcore_exceptions(): + async for part in self._httpcore_stream: + yield part + + async def aclose(self) -> None: + if hasattr(self._httpcore_stream, "aclose"): + await self._httpcore_stream.aclose() # type: ignore + + +class AsyncHTTPTransport(AsyncBaseTransport): + def __init__( + self, + verify: VerifyTypes = True, + cert: typing.Optional[CertTypes] = None, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + trust_env: bool = True, + proxy: typing.Optional[Proxy] = None, + uds: typing.Optional[str] = None, + local_address: typing.Optional[str] = None, + retries: int = 0, + ) -> None: + ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env) + + if proxy is None: + self._pool = httpcore.AsyncConnectionPool( + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + uds=uds, + local_address=local_address, + retries=retries, + ) + elif proxy.url.scheme in ("http", "https"): + self._pool = httpcore.AsyncHTTPProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + proxy_headers=proxy.headers.raw, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + ) + elif proxy.url.scheme == "socks5": + try: + import socksio # noqa + except ImportError: # pragma: nocover + raise ImportError( + "Using SOCKS proxy, but the 'socksio' package is not installed. " + "Make sure to install httpx using `pip install httpx[socks]`." 
+ ) from None + + self._pool = httpcore.AsyncSOCKSProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + ) + else: # pragma: nocover + raise ValueError( + f"Proxy protocol must be either 'http', 'https', or 'socks5', but got {proxy.url.scheme!r}." + ) + + async def __aenter__(self: A) -> A: # Use generics for subclass support. + await self._pool.__aenter__() + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + with map_httpcore_exceptions(): + await self._pool.__aexit__(exc_type, exc_value, traceback) + + async def handle_async_request( + self, + request: Request, + ) -> Response: + assert isinstance(request.stream, AsyncByteStream) + + req = httpcore.Request( + method=request.method, + url=httpcore.URL( + scheme=request.url.raw_scheme, + host=request.url.raw_host, + port=request.url.port, + target=request.url.raw_path, + ), + headers=request.headers.raw, + content=request.stream, + extensions=request.extensions, + ) + with map_httpcore_exceptions(): + resp = await self._pool.handle_async_request(req) + + assert isinstance(resp.stream, typing.AsyncIterable) + + return Response( + status_code=resp.status, + headers=resp.headers, + stream=AsyncResponseStream(resp.stream), + extensions=resp.extensions, + ) + + async def aclose(self) -> None: + await self._pool.aclose() diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/mock.py b/myenv/lib/python3.9/site-packages/httpx/_transports/mock.py new file mode 100644 index 0000000..f61aee7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_transports/mock.py @@ -0,0 +1,34 @@ +import asyncio +import typing + +from .._models import Request, Response +from .base import AsyncBaseTransport, BaseTransport + + +class MockTransport(AsyncBaseTransport, BaseTransport): + def __init__(self, handler: typing.Callable) -> None: + self.handler = handler + + def handle_request( + self, + request: Request, + ) -> Response: + request.read() + return self.handler(request) + + async def handle_async_request( + self, + request: Request, + ) -> Response: + await request.aread() + response = self.handler(request) + + # Allow handler to *optionally* be an `async` function. + # If it is, then the `response` variable need to be awaited to actually + # return the result. 
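A minimal usage sketch for `MockTransport`, defined just above (the handler and hostname are illustrative):

```python
import httpx

def handler(request: httpx.Request) -> httpx.Response:
    # Echo the request path back as JSON; no network I/O happens.
    return httpx.Response(200, json={"path": request.url.path})

client = httpx.Client(transport=httpx.MockTransport(handler))
assert client.get("http://testserver/ping").json() == {"path": "/ping"}
```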
+
+ # https://simonwillison.net/2020/Sep/2/await-me-maybe/
+ if asyncio.iscoroutine(response):
+ response = await response
+
+ return response
diff --git a/myenv/lib/python3.9/site-packages/httpx/_transports/wsgi.py b/myenv/lib/python3.9/site-packages/httpx/_transports/wsgi.py
new file mode 100644
index 0000000..3dedf49
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/httpx/_transports/wsgi.py
@@ -0,0 +1,134 @@
+import io
+import itertools
+import sys
+import typing
+
+from .._models import Request, Response
+from .._types import SyncByteStream
+from .base import BaseTransport
+
+
+def _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:
+ body = iter(body)
+ for chunk in body:
+ if chunk:
+ return itertools.chain([chunk], body)
+ return []
+
+
+class WSGIByteStream(SyncByteStream):
+ def __init__(self, result: typing.Iterable[bytes]) -> None:
+ self._close = getattr(result, "close", None)
+ self._result = _skip_leading_empty_chunks(result)
+
+ def __iter__(self) -> typing.Iterator[bytes]:
+ for part in self._result:
+ yield part
+
+ def close(self) -> None:
+ if self._close is not None:
+ self._close()
+
+
+class WSGITransport(BaseTransport):
+ """
+ A custom transport that handles sending requests directly to a WSGI app.
+ The simplest way to use this functionality is to use the `app` argument.
+
+ ```
+ client = httpx.Client(app=app)
+ ```
+
+ Alternatively, you can set up the transport instance explicitly.
+ This allows you to include any additional configuration arguments specific
+ to the WSGITransport class:
+
+ ```
+ transport = httpx.WSGITransport(
+ app=app,
+ script_name="/submount",
+ remote_addr="1.2.3.4"
+ )
+ client = httpx.Client(transport=transport)
+ ```
+
+ Arguments:
+
+ * `app` - The WSGI application.
+ * `raise_app_exceptions` - Boolean indicating if exceptions in the application
+ should be raised. Defaults to `True`. Can be set to `False` for use cases
+ such as testing the content of a client 500 response.
+ * `script_name` - The root path on which the WSGI application should be mounted.
+ * `remote_addr` - A string indicating the client IP of incoming requests.
+ ``` + """ + + def __init__( + self, + app: typing.Callable, + raise_app_exceptions: bool = True, + script_name: str = "", + remote_addr: str = "127.0.0.1", + wsgi_errors: typing.Optional[typing.TextIO] = None, + ) -> None: + self.app = app + self.raise_app_exceptions = raise_app_exceptions + self.script_name = script_name + self.remote_addr = remote_addr + self.wsgi_errors = wsgi_errors + + def handle_request(self, request: Request) -> Response: + request.read() + wsgi_input = io.BytesIO(request.content) + + port = request.url.port or {"http": 80, "https": 443}[request.url.scheme] + environ = { + "wsgi.version": (1, 0), + "wsgi.url_scheme": request.url.scheme, + "wsgi.input": wsgi_input, + "wsgi.errors": self.wsgi_errors or sys.stderr, + "wsgi.multithread": True, + "wsgi.multiprocess": False, + "wsgi.run_once": False, + "REQUEST_METHOD": request.method, + "SCRIPT_NAME": self.script_name, + "PATH_INFO": request.url.path, + "QUERY_STRING": request.url.query.decode("ascii"), + "SERVER_NAME": request.url.host, + "SERVER_PORT": str(port), + "REMOTE_ADDR": self.remote_addr, + } + for header_key, header_value in request.headers.raw: + key = header_key.decode("ascii").upper().replace("-", "_") + if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"): + key = "HTTP_" + key + environ[key] = header_value.decode("ascii") + + seen_status = None + seen_response_headers = None + seen_exc_info = None + + def start_response( + status: str, response_headers: list, exc_info: typing.Any = None + ) -> None: + nonlocal seen_status, seen_response_headers, seen_exc_info + seen_status = status + seen_response_headers = response_headers + seen_exc_info = exc_info + + result = self.app(environ, start_response) + + stream = WSGIByteStream(result) + + assert seen_status is not None + assert seen_response_headers is not None + if seen_exc_info and self.raise_app_exceptions: + raise seen_exc_info[1] + + status_code = int(seen_status.split()[0]) + headers = [ + (key.encode("ascii"), value.encode("ascii")) + for key, value in seen_response_headers + ] + + return Response(status_code, headers=headers, stream=stream) diff --git a/myenv/lib/python3.9/site-packages/httpx/_types.py b/myenv/lib/python3.9/site-packages/httpx/_types.py new file mode 100644 index 0000000..c4063fe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_types.py @@ -0,0 +1,151 @@ +""" +Type definitions for type checking purposes. 
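For completeness, a minimal sketch of `WSGITransport` with a trivial inline WSGI app (the app body and `base_url` are illustrative):

```python
import httpx

def app(environ, start_response):
    # A deliberately trivial WSGI app, for illustration only.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok"]

client = httpx.Client(transport=httpx.WSGITransport(app=app), base_url="http://testserver")
assert client.get("/").text == "ok"
```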
+""" + +import ssl +from http.cookiejar import CookieJar +from typing import ( + IO, + TYPE_CHECKING, + AsyncIterable, + AsyncIterator, + Callable, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +if TYPE_CHECKING: # pragma: no cover + from ._auth import Auth # noqa: F401 + from ._config import Proxy, Timeout # noqa: F401 + from ._models import Cookies, Headers, Request # noqa: F401 + from ._urls import URL, QueryParams # noqa: F401 + + +PrimitiveData = Optional[Union[str, int, float, bool]] + +RawURL = Tuple[bytes, bytes, Optional[int], bytes] + +URLTypes = Union["URL", str] + +QueryParamTypes = Union[ + "QueryParams", + Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]], + List[Tuple[str, PrimitiveData]], + Tuple[Tuple[str, PrimitiveData], ...], + str, + bytes, +] + +HeaderTypes = Union[ + "Headers", + Dict[str, str], + Dict[bytes, bytes], + Sequence[Tuple[str, str]], + Sequence[Tuple[bytes, bytes]], +] + +CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]] + +CertTypes = Union[ + # certfile + str, + # (certfile, keyfile) + Tuple[str, Optional[str]], + # (certfile, keyfile, password) + Tuple[str, Optional[str], Optional[str]], +] +VerifyTypes = Union[str, bool, ssl.SSLContext] +TimeoutTypes = Union[ + Optional[float], + Tuple[Optional[float], Optional[float], Optional[float], Optional[float]], + "Timeout", +] +ProxiesTypes = Union[URLTypes, "Proxy", Dict[URLTypes, Union[None, URLTypes, "Proxy"]]] + +AuthTypes = Union[ + Tuple[Union[str, bytes], Union[str, bytes]], + Callable[["Request"], "Request"], + "Auth", +] + +RequestContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] +ResponseContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] + +RequestData = dict + +FileContent = Union[IO[bytes], bytes] +FileTypes = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], +] +RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]] + + +class SyncByteStream: + def __iter__(self) -> Iterator[bytes]: + raise NotImplementedError( + "The '__iter__' method must be implemented." + ) # pragma: nocover + yield b"" # pragma: nocover + + def close(self) -> None: + """ + Subclasses can override this method to release any network resources + after a request/response cycle is complete. + + Streaming cases should use a `try...finally` block to ensure that + the stream `close()` method is always called. + + Example: + + status_code, headers, stream, extensions = transport.handle_request(...) + try: + ... + finally: + stream.close() + """ + + def read(self) -> bytes: + """ + Simple cases can use `.read()` as a convenience method for consuming + the entire stream and then closing it. + + Example: + + status_code, headers, stream, extensions = transport.handle_request(...) + body = stream.read() + """ + try: + return b"".join([part for part in self]) + finally: + self.close() + + +class AsyncByteStream: + async def __aiter__(self) -> AsyncIterator[bytes]: + raise NotImplementedError( + "The '__aiter__' method must be implemented." 
+ ) # pragma: nocover + yield b"" # pragma: nocover + + async def aclose(self) -> None: + pass + + async def aread(self) -> bytes: + try: + return b"".join([part async for part in self]) + finally: + await self.aclose() diff --git a/myenv/lib/python3.9/site-packages/httpx/_urls.py b/myenv/lib/python3.9/site-packages/httpx/_urls.py new file mode 100644 index 0000000..8beeacf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_urls.py @@ -0,0 +1,779 @@ +import typing +from urllib.parse import parse_qs, quote, unquote, urlencode + +import idna +import rfc3986 +import rfc3986.exceptions + +from ._exceptions import InvalidURL +from ._types import PrimitiveData, QueryParamTypes, RawURL, URLTypes +from ._utils import primitive_value_to_str + + +class URL: + """ + url = httpx.URL("HTTPS://jo%40email.com:a%20secret@müller.de:1234/pa%20th?search=ab#anchorlink") + + assert url.scheme == "https" + assert url.username == "jo@email.com" + assert url.password == "a secret" + assert url.userinfo == b"jo%40email.com:a%20secret" + assert url.host == "müller.de" + assert url.raw_host == b"xn--mller-kva.de" + assert url.port == 1234 + assert url.netloc == b"xn--mller-kva.de:1234" + assert url.path == "/pa th" + assert url.query == b"?search=ab" + assert url.raw_path == b"/pa%20th?search=ab" + assert url.fragment == "anchorlink" + + The components of a URL are broken down like this: + + https://jo%40email.com:a%20secret@müller.de:1234/pa%20th?search=ab#anchorlink + [scheme] [ username ] [password] [ host ][port][ path ] [ query ] [fragment] + [ userinfo ] [ netloc ][ raw_path ] + + Note that: + + * `url.scheme` is normalized to always be lowercased. + + * `url.host` is normalized to always be lowercased. Internationalized domain + names are represented in unicode, without IDNA encoding applied. For instance: + + url = httpx.URL("http://中国.icom.museum") + assert url.host == "中国.icom.museum" + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.host == "中国.icom.museum" + + * `url.raw_host` is normalized to always be lowercased, and is IDNA encoded. + + url = httpx.URL("http://中国.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + + * `url.port` is either None or an integer. URLs that include the default port for + "http", "https", "ws", "wss", and "ftp" schemes have their port normalized to `None`. + + assert httpx.URL("http://example.com") == httpx.URL("http://example.com:80") + assert httpx.URL("http://example.com").port is None + assert httpx.URL("http://example.com:80").port is None + + * `url.userinfo` is raw bytes, without URL escaping. Usually you'll want to work with + `url.username` and `url.password` instead, which handle the URL escaping. + + * `url.raw_path` is raw bytes of both the path and query, without URL escaping. + This portion is used as the target when constructing HTTP requests. Usually you'll + want to work with `url.path` instead. + + * `url.query` is raw bytes, without URL escaping. A URL query string portion can only + be properly URL escaped when decoding the parameter names and values themselves. 
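`RequestContent` above admits plain byte iterables, so request bodies can be streamed without subclassing the stream types. A sketch (the URL is a placeholder):

```python
import typing

import httpx

def upload_chunks() -> typing.Iterator[bytes]:
    yield b"part-1, "
    yield b"part-2"

# httpx wraps the iterable in a SyncByteStream internally and
# streams the body chunk by chunk.
response = httpx.post("https://example.org/upload", content=upload_chunks())
```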
+ """ + + def __init__( + self, url: typing.Union["URL", str, RawURL] = "", **kwargs: typing.Any + ) -> None: + if isinstance(url, (str, tuple)): + if isinstance(url, tuple): + raw_scheme, raw_host, port, raw_path = url + scheme = raw_scheme.decode("ascii") + host = raw_host.decode("ascii") + if host and ":" in host and host[0] != "[": + # it's an IPv6 address, so it should be enclosed in "[" and "]" + # ref: https://tools.ietf.org/html/rfc2732#section-2 + # ref: https://tools.ietf.org/html/rfc3986#section-3.2.2 + host = f"[{host}]" + port_str = "" if port is None else f":{port}" + path = raw_path.decode("ascii") + url = f"{scheme}://{host}{port_str}{path}" + + try: + self._uri_reference = rfc3986.iri_reference(url).encode() + except rfc3986.exceptions.InvalidAuthority as exc: + raise InvalidURL(message=str(exc)) from None + + if self.is_absolute_url: + # We don't want to normalize relative URLs, since doing so + # removes any leading `../` portion. + self._uri_reference = self._uri_reference.normalize() + elif isinstance(url, URL): + self._uri_reference = url._uri_reference + else: + raise TypeError( + f"Invalid type for url. Expected str or httpx.URL, got {type(url)}: {url!r}" + ) + + # Perform port normalization, following the WHATWG spec for default ports. + # + # See: + # * https://tools.ietf.org/html/rfc3986#section-3.2.3 + # * https://url.spec.whatwg.org/#url-miscellaneous + # * https://url.spec.whatwg.org/#scheme-state + default_port = { + "ftp": ":21", + "http": ":80", + "https": ":443", + "ws": ":80", + "wss": ":443", + }.get(self._uri_reference.scheme, "") + authority = self._uri_reference.authority or "" + if default_port and authority.endswith(default_port): + authority = authority[: -len(default_port)] + self._uri_reference = self._uri_reference.copy_with(authority=authority) + + if kwargs: + self._uri_reference = self.copy_with(**kwargs)._uri_reference + + @property + def scheme(self) -> str: + """ + The URL scheme, such as "http", "https". + Always normalised to lowercase. + """ + return self._uri_reference.scheme or "" + + @property + def raw_scheme(self) -> bytes: + """ + The raw bytes representation of the URL scheme, such as b"http", b"https". + Always normalised to lowercase. + """ + return self.scheme.encode("ascii") + + @property + def userinfo(self) -> bytes: + """ + The URL userinfo as a raw bytestring. + For example: b"jo%40email.com:a%20secret". + """ + userinfo = self._uri_reference.userinfo or "" + return userinfo.encode("ascii") + + @property + def username(self) -> str: + """ + The URL username as a string, with URL decoding applied. + For example: "jo@email.com" + """ + userinfo = self._uri_reference.userinfo or "" + return unquote(userinfo.partition(":")[0]) + + @property + def password(self) -> str: + """ + The URL password as a string, with URL decoding applied. + For example: "a secret" + """ + userinfo = self._uri_reference.userinfo or "" + return unquote(userinfo.partition(":")[2]) + + @property + def host(self) -> str: + """ + The URL host as a string. + Always normalized to lowercase, with IDNA hosts decoded into unicode. 
+
+ Examples:
+
+ url = httpx.URL("http://www.EXAMPLE.org")
+ assert url.host == "www.example.org"
+
+ url = httpx.URL("http://中国.icom.museum")
+ assert url.host == "中国.icom.museum"
+
+ url = httpx.URL("http://xn--fiqs8s.icom.museum")
+ assert url.host == "中国.icom.museum"
+
+ url = httpx.URL("https://[::ffff:192.168.0.1]")
+ assert url.host == "::ffff:192.168.0.1"
+ """
+ host: str = self._uri_reference.host or ""
+
+ if host and ":" in host and host[0] == "[":
+ # it's an IPv6 address
+ host = host.lstrip("[").rstrip("]")
+
+ if host.startswith("xn--"):
+ host = idna.decode(host)
+
+ return host
+
+ @property
+ def raw_host(self) -> bytes:
+ """
+ The raw bytes representation of the URL host.
+ Always normalized to lowercase, and IDNA encoded.
+
+ Examples:
+
+ url = httpx.URL("http://www.EXAMPLE.org")
+ assert url.raw_host == b"www.example.org"
+
+ url = httpx.URL("http://中国.icom.museum")
+ assert url.raw_host == b"xn--fiqs8s.icom.museum"
+
+ url = httpx.URL("http://xn--fiqs8s.icom.museum")
+ assert url.raw_host == b"xn--fiqs8s.icom.museum"
+
+ url = httpx.URL("https://[::ffff:192.168.0.1]")
+ assert url.raw_host == b"::ffff:192.168.0.1"
+ """
+ host: str = self._uri_reference.host or ""
+
+ if host and ":" in host and host[0] == "[":
+ # it's an IPv6 address
+ host = host.lstrip("[").rstrip("]")
+
+ return host.encode("ascii")
+
+ @property
+ def port(self) -> typing.Optional[int]:
+ """
+ The URL port as an integer.
+
+ Note that the URL class performs port normalization as per the WHATWG spec.
+ Default ports for "http", "https", "ws", "wss", and "ftp" schemes are always
+ treated as `None`.
+
+ For example:
+
+ assert httpx.URL("http://www.example.com") == httpx.URL("http://www.example.com:80")
+ assert httpx.URL("http://www.example.com:80").port is None
+ """
+ port = self._uri_reference.port
+ return int(port) if port else None
+
+ @property
+ def netloc(self) -> bytes:
+ """
+ Either `<host>` or `<host>:<port>` as bytes.
+ Always normalized to lowercase, and IDNA encoded.
+
+ This property may be used for generating the value of a request
+ "Host" header.
+ """
+ host = self._uri_reference.host or ""
+ port = self._uri_reference.port
+ netloc = host.encode("ascii")
+ if port:
+ netloc = netloc + b":" + port.encode("ascii")
+ return netloc
+
+ @property
+ def path(self) -> str:
+ """
+ The URL path as a string. Excluding the query string, and URL decoded.
+
+ For example:
+
+ url = httpx.URL("https://example.com/pa%20th")
+ assert url.path == "/pa th"
+ """
+ path = self._uri_reference.path or "/"
+ return unquote(path)
+
+ @property
+ def query(self) -> bytes:
+ """
+ The URL query string, as raw bytes, excluding the leading b"?".
+
+ This is necessarily a bytewise interface, because we cannot
+ perform URL decoding of this representation until we've parsed
+ the keys and values into a QueryParams instance.
+
+ For example:
+
+ url = httpx.URL("https://example.com/?filter=some%20search%20terms")
+ assert url.query == b"filter=some%20search%20terms"
+ """
+ query = self._uri_reference.query or ""
+ return query.encode("ascii")
+
+ @property
+ def params(self) -> "QueryParams":
+ """
+ The URL query parameters, neatly parsed and packaged into an immutable
+ multidict representation.
+ """
+ return QueryParams(self._uri_reference.query)
+
+ @property
+ def raw_path(self) -> bytes:
+ """
+ The complete URL path and query string as raw bytes.
+ Used as the target when constructing HTTP requests.
+ + For example: + + GET /users?search=some%20text HTTP/1.1 + Host: www.example.org + Connection: close + """ + path = self._uri_reference.path or "/" + if self._uri_reference.query is not None: + path += "?" + self._uri_reference.query + return path.encode("ascii") + + @property + def fragment(self) -> str: + """ + The URL fragments, as used in HTML anchors. + As a string, without the leading '#'. + """ + return unquote(self._uri_reference.fragment or "") + + @property + def raw(self) -> RawURL: + """ + The URL in the raw representation used by the low level + transport API. See `BaseTransport.handle_request`. + + Provides the (scheme, host, port, target) for the outgoing request. + """ + return ( + self.raw_scheme, + self.raw_host, + self.port, + self.raw_path, + ) + + @property + def is_absolute_url(self) -> bool: + """ + Return `True` for absolute URLs such as 'http://example.com/path', + and `False` for relative URLs such as '/path'. + """ + # We don't use `.is_absolute` from `rfc3986` because it treats + # URLs with a fragment portion as not absolute. + # What we actually care about is if the URL provides + # a scheme and hostname to which connections should be made. + return bool(self._uri_reference.scheme and self._uri_reference.host) + + @property + def is_relative_url(self) -> bool: + """ + Return `False` for absolute URLs such as 'http://example.com/path', + and `True` for relative URLs such as '/path'. + """ + return not self.is_absolute_url + + def copy_with(self, **kwargs: typing.Any) -> "URL": + """ + Copy this URL, returning a new URL with some components altered. + Accepts the same set of parameters as the components that are made + available via properties on the `URL` class. + + For example: + + url = httpx.URL("https://www.example.com").copy_with(username="jo@gmail.com", password="a secret") + assert url == "https://jo%40email.com:a%20secret@www.example.com" + """ + allowed = { + "scheme": str, + "username": str, + "password": str, + "userinfo": bytes, + "host": str, + "port": int, + "netloc": bytes, + "path": str, + "query": bytes, + "raw_path": bytes, + "fragment": str, + "params": object, + } + + # Step 1 + # ====== + # + # Perform type checking for all supported keyword arguments. + for key, value in kwargs.items(): + if key not in allowed: + message = f"{key!r} is an invalid keyword argument for copy_with()" + raise TypeError(message) + if value is not None and not isinstance(value, allowed[key]): + expected = allowed[key].__name__ + seen = type(value).__name__ + message = f"Argument {key!r} must be {expected} but got {seen}" + raise TypeError(message) + + # Step 2 + # ====== + # + # Consolidate "username", "password", "userinfo", "host", "port" and "netloc" + # into a single "authority" keyword, for `rfc3986`. + if "username" in kwargs or "password" in kwargs: + # Consolidate "username" and "password" into "userinfo". + username = quote(kwargs.pop("username", self.username) or "") + password = quote(kwargs.pop("password", self.password) or "") + userinfo = f"{username}:{password}" if password else username + kwargs["userinfo"] = userinfo.encode("ascii") + + if "host" in kwargs or "port" in kwargs: + # Consolidate "host" and "port" into "netloc". + host = kwargs.pop("host", self.host) or "" + port = kwargs.pop("port", self.port) + + if host and ":" in host and host[0] != "[": + # IPv6 addresses need to be escaped within square brackets. 
+ host = f"[{host}]"
+
+ kwargs["netloc"] = (
+ f"{host}:{port}".encode("ascii")
+ if port is not None
+ else host.encode("ascii")
+ )
+
+ if "userinfo" in kwargs or "netloc" in kwargs:
+ # Consolidate "userinfo" and "netloc" into authority.
+ userinfo = (kwargs.pop("userinfo", self.userinfo) or b"").decode("ascii")
+ netloc = (kwargs.pop("netloc", self.netloc) or b"").decode("ascii")
+ authority = f"{userinfo}@{netloc}" if userinfo else netloc
+ kwargs["authority"] = authority
+
+ # Step 3
+ # ======
+ #
+ # Wrangle any "path", "query", "raw_path" and "params" keywords into
+ # "query" and "path" keywords for `rfc3986`.
+ if "raw_path" in kwargs:
+ # If "raw_path" is included, then split it into "path" and "query" components.
+ raw_path = kwargs.pop("raw_path") or b""
+ path, has_query, query = raw_path.decode("ascii").partition("?")
+ kwargs["path"] = path
+ kwargs["query"] = query if has_query else None
+
+ else:
+ if kwargs.get("path") is not None:
+ # Ensure `kwargs["path"] = <url quoted str>` for `rfc3986`.
+ kwargs["path"] = quote(kwargs["path"])
+
+ if kwargs.get("query") is not None:
+ # Ensure `kwargs["query"] = <str>` for `rfc3986`.
+ #
+ # Note that `.copy_with(query=None)` and `.copy_with(query=b"")`
+ # are subtly different. The `None` style will not include an empty
+ # trailing "?" character.
+ kwargs["query"] = kwargs["query"].decode("ascii")
+
+ if "params" in kwargs:
+ # Replace any "params" keyword with the raw "query" instead.
+ #
+ # Ensure that empty params use `kwargs["query"] = None` rather
+ # than `kwargs["query"] = ""`, so that generated URLs do not
+ # include an empty trailing "?".
+ params = kwargs.pop("params")
+ kwargs["query"] = None if not params else str(QueryParams(params))
+
+ # Step 4
+ # ======
+ #
+ # Ensure any fragment component is quoted.
+ if kwargs.get("fragment") is not None:
+ kwargs["fragment"] = quote(kwargs["fragment"])
+
+ # Step 5
+ # ======
+ #
+ # At this point kwargs may include keys for "scheme", "authority", "path",
+ # "query" and "fragment". Together these constitute the entire URL.
+ #
+ # See https://tools.ietf.org/html/rfc3986#section-3
+ #
+ # foo://example.com:8042/over/there?name=ferret#nose
+ # \_/ \______________/\_________/ \_________/ \__/
+ # | | | | |
+ # scheme authority path query fragment
+ new_url = URL(self)
+ new_url._uri_reference = self._uri_reference.copy_with(**kwargs)
+ if new_url.is_absolute_url:
+ new_url._uri_reference = new_url._uri_reference.normalize()
+ return URL(new_url)
+
+ def copy_set_param(self, key: str, value: typing.Any = None) -> "URL":
+ return self.copy_with(params=self.params.set(key, value))
+
+ def copy_add_param(self, key: str, value: typing.Any = None) -> "URL":
+ return self.copy_with(params=self.params.add(key, value))
+
+ def copy_remove_param(self, key: str) -> "URL":
+ return self.copy_with(params=self.params.remove(key))
+
+ def copy_merge_params(self, params: QueryParamTypes) -> "URL":
+ return self.copy_with(params=self.params.merge(params))
+
+ def join(self, url: URLTypes) -> "URL":
+ """
+ Return an absolute URL, using this URL as the base.
+
+ E.g.
+
+ url = httpx.URL("https://www.example.com/test")
+ url = url.join("/new/path")
+ assert url == "https://www.example.com/new/path"
+ """
+ if self.is_relative_url:
+ # Workaround to handle relative URLs, which otherwise raise
+ # rfc3986.exceptions.ResolutionError when used as an argument
+ # in `.resolve_with`.
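+ # Editorial sketch of the workaround, with illustrative values:
+ # URL("/foo/bar").join("baz") is resolved as if the base were
+ # "http://example.com/foo/bar", giving ".../foo/baz", and the
+ # temporary scheme and host are then stripped again, leaving "/foo/baz".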
+ return ( + self.copy_with(scheme="http", host="example.com") + .join(url) + .copy_with(scheme=None, host=None) + ) + + # We drop any fragment portion, because RFC 3986 strictly + # treats URLs with a fragment portion as not being absolute URLs. + base_uri = self._uri_reference.copy_with(fragment=None) + relative_url = URL(url) + return URL(relative_url._uri_reference.resolve_with(base_uri).unsplit()) + + def __hash__(self) -> int: + return hash(str(self)) + + def __eq__(self, other: typing.Any) -> bool: + return isinstance(other, (URL, str)) and str(self) == str(URL(other)) + + def __str__(self) -> str: + return self._uri_reference.unsplit() + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + url_str = str(self) + if self._uri_reference.userinfo: + # Mask any password component in the URL representation, to lower the + # risk of unintended leakage, such as in debug information and logging. + username = quote(self.username) + url_str = ( + rfc3986.urlparse(url_str) + .copy_with(userinfo=f"{username}:[secure]") + .unsplit() + ) + return f"{class_name}({url_str!r})" + + +class QueryParams(typing.Mapping[str, str]): + """ + URL query parameters, as a multi-dict. + """ + + def __init__( + self, *args: typing.Optional[QueryParamTypes], **kwargs: typing.Any + ) -> None: + assert len(args) < 2, "Too many arguments." + assert not (args and kwargs), "Cannot mix named and unnamed arguments." + + value = args[0] if args else kwargs + + items: typing.Sequence[typing.Tuple[str, PrimitiveData]] + if value is None or isinstance(value, (str, bytes)): + value = value.decode("ascii") if isinstance(value, bytes) else value + self._dict = parse_qs(value) + elif isinstance(value, QueryParams): + self._dict = {k: list(v) for k, v in value._dict.items()} + else: + dict_value: typing.Dict[typing.Any, typing.List[typing.Any]] = {} + if isinstance(value, (list, tuple)): + # Convert list inputs like: + # [("a", "123"), ("a", "456"), ("b", "789")] + # To a dict representation, like: + # {"a": ["123", "456"], "b": ["789"]} + for item in value: + dict_value.setdefault(item[0], []).append(item[1]) + else: + # Convert dict inputs like: + # {"a": "123", "b": ["456", "789"]} + # To dict inputs where values are always lists, like: + # {"a": ["123"], "b": ["456", "789"]} + dict_value = { + k: list(v) if isinstance(v, (list, tuple)) else [v] + for k, v in value.items() + } + + # Ensure that keys and values are neatly coerced to strings. + # We coerce values `True` and `False` to JSON-like "true" and "false" + # representations, and coerce `None` values to the empty string. + self._dict = { + str(k): [primitive_value_to_str(item) for item in v] + for k, v in dict_value.items() + } + + def keys(self) -> typing.KeysView: + """ + Return all the keys in the query params. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.keys()) == ["a", "b"] + """ + return self._dict.keys() + + def values(self) -> typing.ValuesView: + """ + Return all the values in the query params. If a key occurs more than once + only the first item for that key is returned. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.values()) == ["123", "789"] + """ + return {k: v[0] for k, v in self._dict.items()}.values() + + def items(self) -> typing.ItemsView: + """ + Return all items in the query params. If a key occurs more than once + only the first item for that key is returned. 
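+
+ (Editorial note: use `.multi_items()` when every value of a repeated
+ key is needed.)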
+
+ Usage:
+
+ q = httpx.QueryParams("a=123&a=456&b=789")
+ assert list(q.items()) == [("a", "123"), ("b", "789")]
+ """
+ return {k: v[0] for k, v in self._dict.items()}.items()
+
+ def multi_items(self) -> typing.List[typing.Tuple[str, str]]:
+ """
+ Return all items in the query params. Allow duplicate keys to occur.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123&a=456&b=789")
+ assert list(q.multi_items()) == [("a", "123"), ("a", "456"), ("b", "789")]
+ """
+ multi_items: typing.List[typing.Tuple[str, str]] = []
+ for k, v in self._dict.items():
+ multi_items.extend([(k, i) for i in v])
+ return multi_items
+
+ def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
+ """
+ Get a value from the query param for a given key. If the key occurs
+ more than once, then only the first value is returned.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123&a=456&b=789")
+ assert q.get("a") == "123"
+ """
+ if key in self._dict:
+ return self._dict[str(key)][0]
+ return default
+
+ def get_list(self, key: str) -> typing.List[str]:
+ """
+ Get all values from the query param for a given key.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123&a=456&b=789")
+ assert q.get_list("a") == ["123", "456"]
+ """
+ return list(self._dict.get(str(key), []))
+
+ def set(self, key: str, value: typing.Any = None) -> "QueryParams":
+ """
+ Return a new QueryParams instance, setting the value of a key.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123")
+ q = q.set("a", "456")
+ assert q == httpx.QueryParams("a=456")
+ """
+ q = QueryParams()
+ q._dict = dict(self._dict)
+ q._dict[str(key)] = [primitive_value_to_str(value)]
+ return q
+
+ def add(self, key: str, value: typing.Any = None) -> "QueryParams":
+ """
+ Return a new QueryParams instance, setting or appending the value of a key.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123")
+ q = q.add("a", "456")
+ assert q == httpx.QueryParams("a=123&a=456")
+ """
+ q = QueryParams()
+ q._dict = dict(self._dict)
+ q._dict[str(key)] = q.get_list(key) + [primitive_value_to_str(value)]
+ return q
+
+ def remove(self, key: str) -> "QueryParams":
+ """
+ Return a new QueryParams instance, removing the value of a key.
+
+ Usage:
+
+ q = httpx.QueryParams("a=123")
+ q = q.remove("a")
+ assert q == httpx.QueryParams("")
+ """
+ q = QueryParams()
+ q._dict = dict(self._dict)
+ q._dict.pop(str(key), None)
+ return q
+
+ def merge(self, params: typing.Optional[QueryParamTypes] = None) -> "QueryParams":
+ """
+ Return a new QueryParams instance, updated with the given params.
+ Values for keys that occur in both are taken from the given params.
+ + Usage: + + q = httpx.QueryParams("a=123") + q = q.merge({"b": "456"}) + assert q == httpx.QueryParams("a=123&b=456") + + q = httpx.QueryParams("a=123") + q = q.merge({"a": "456", "b": "789"}) + assert q == httpx.QueryParams("a=456&b=789") + """ + q = QueryParams(params) + q._dict = {**self._dict, **q._dict} + return q + + def __getitem__(self, key: typing.Any) -> str: + return self._dict[key][0] + + def __contains__(self, key: typing.Any) -> bool: + return key in self._dict + + def __iter__(self) -> typing.Iterator[typing.Any]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._dict) + + def __bool__(self) -> bool: + return bool(self._dict) + + def __hash__(self) -> int: + return hash(str(self)) + + def __eq__(self, other: typing.Any) -> bool: + if not isinstance(other, self.__class__): + return False + return sorted(self.multi_items()) == sorted(other.multi_items()) + + def __str__(self) -> str: + return urlencode(self.multi_items()) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + query_string = str(self) + return f"{class_name}({query_string!r})" + + def update(self, params: typing.Optional[QueryParamTypes] = None) -> None: + raise RuntimeError( + "QueryParams are immutable since 0.18.0. " + "Use `q = q.merge(...)` to create an updated copy." + ) + + def __setitem__(self, key: str, value: str) -> None: + raise RuntimeError( + "QueryParams are immutable since 0.18.0. " + "Use `q = q.set(key, value)` to create an updated copy." + ) diff --git a/myenv/lib/python3.9/site-packages/httpx/_utils.py b/myenv/lib/python3.9/site-packages/httpx/_utils.py new file mode 100644 index 0000000..e01c050 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/httpx/_utils.py @@ -0,0 +1,522 @@ +import codecs +import logging +import mimetypes +import netrc +import os +import re +import sys +import time +import typing +from pathlib import Path +from urllib.request import getproxies + +import sniffio + +from ._types import PrimitiveData + +if typing.TYPE_CHECKING: # pragma: no cover + from ._models import URL + + +_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"} +_HTML5_FORM_ENCODING_REPLACEMENTS.update( + {chr(c): "%{:02X}".format(c) for c in range(0x1F + 1) if c != 0x1B} +) +_HTML5_FORM_ENCODING_RE = re.compile( + r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()]) +) + + +def normalize_header_key( + value: typing.Union[str, bytes], + lower: bool, + encoding: typing.Optional[str] = None, +) -> bytes: + """ + Coerce str/bytes into a strictly byte-wise HTTP header key. + """ + if isinstance(value, bytes): + bytes_value = value + else: + bytes_value = value.encode(encoding or "ascii") + + return bytes_value.lower() if lower else bytes_value + + +def normalize_header_value( + value: typing.Union[str, bytes], encoding: typing.Optional[str] = None +) -> bytes: + """ + Coerce str/bytes into a strictly byte-wise HTTP header value. + """ + if isinstance(value, bytes): + return value + return value.encode(encoding or "ascii") + + +def primitive_value_to_str(value: "PrimitiveData") -> str: + """ + Coerce a primitive data type into a string value. + + Note that we prefer JSON-style 'true'/'false' for boolean values here. + """ + if value is True: + return "true" + elif value is False: + return "false" + elif value is None: + return "" + return str(value) + + +def is_known_encoding(encoding: str) -> bool: + """ + Return `True` if `encoding` is a known codec. 
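+
+ Illustrative example (editorial addition):
+
+ assert is_known_encoding("utf-8")
+ assert not is_known_encoding("no-such-codec")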
+ """ + try: + codecs.lookup(encoding) + except LookupError: + return False + return True + + +def format_form_param(name: str, value: typing.Union[str, bytes]) -> bytes: + """ + Encode a name/value pair within a multipart form. + """ + if isinstance(value, bytes): + value = value.decode() + + def replacer(match: typing.Match[str]) -> str: + return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)] + + value = _HTML5_FORM_ENCODING_RE.sub(replacer, value) + return f'{name}="{value}"'.encode() + + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = b"\x00" +_null2 = _null * 2 +_null3 = _null * 3 + + +def guess_json_utf(data: bytes) -> typing.Optional[str]: + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls and from their location and count + # determine the encoding. Also detect a BOM, if present. + sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return "utf-32" # BOM included + if sample[:3] == codecs.BOM_UTF8: + return "utf-8-sig" # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return "utf-16" # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return "utf-8" + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" + if sample[1::2] == _null2: # 2nd and 4th are null + return "utf-16-le" + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return "utf-32-be" + if sample[1:] == _null3: + return "utf-32-le" + # Did not detect a valid UTF-32 ascii-range character + return None + + +class NetRCInfo: + def __init__(self, files: typing.Optional[typing.List[str]] = None) -> None: + if files is None: + files = [os.getenv("NETRC", ""), "~/.netrc", "~/_netrc"] + self.netrc_files = files + + @property + def netrc_info(self) -> typing.Optional[netrc.netrc]: + if not hasattr(self, "_netrc_info"): + self._netrc_info = None + for file_path in self.netrc_files: + expanded_path = Path(file_path).expanduser() + try: + if expanded_path.is_file(): + self._netrc_info = netrc.netrc(str(expanded_path)) + break + except (netrc.NetrcParseError, IOError): # pragma: nocover + # Issue while reading the netrc file, ignore... 
+ pass
+ return self._netrc_info
+
+ def get_credentials(self, host: str) -> typing.Optional[typing.Tuple[str, str]]:
+ if self.netrc_info is None:
+ return None
+
+ auth_info = self.netrc_info.authenticators(host)
+ if auth_info is None or auth_info[2] is None:
+ return None
+ return (auth_info[0], auth_info[2])
+
+
+def get_ca_bundle_from_env() -> typing.Optional[str]:
+ if "SSL_CERT_FILE" in os.environ:
+ ssl_file = Path(os.environ["SSL_CERT_FILE"])
+ if ssl_file.is_file():
+ return str(ssl_file)
+ if "SSL_CERT_DIR" in os.environ:
+ ssl_path = Path(os.environ["SSL_CERT_DIR"])
+ if ssl_path.is_dir():
+ return str(ssl_path)
+ return None
+
+
+def parse_header_links(value: str) -> typing.List[typing.Dict[str, str]]:
+ """
+ Returns a list of parsed link headers, for more info see:
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link
+ The generic syntax of those is:
+ Link: < uri-reference >; param1=value1; param2="value2"
+ So for instance:
+ Link: '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;'
+ would return
+ [
+ {"url": "http:/.../front.jpeg", "type": "image/jpeg"},
+ {"url": "http://.../back.jpeg"},
+ ]
+ :param value: HTTP Link entity-header field
+ :return: list of parsed link headers
+ """
+ links: typing.List[typing.Dict[str, str]] = []
+ replace_chars = " '\""
+ value = value.strip(replace_chars)
+ if not value:
+ return links
+ for val in re.split(", *<", value):
+ try:
+ url, params = val.split(";", 1)
+ except ValueError:
+ url, params = val, ""
+ link = {"url": url.strip("<> '\"")}
+ for param in params.split(";"):
+ try:
+ key, value = param.split("=")
+ except ValueError:
+ break
+ link[key.strip(replace_chars)] = value.strip(replace_chars)
+ links.append(link)
+ return links
+
+
+SENSITIVE_HEADERS = {"authorization", "proxy-authorization"}
+
+
+def obfuscate_sensitive_headers(
+ items: typing.Iterable[typing.Tuple[typing.AnyStr, typing.AnyStr]]
+) -> typing.Iterator[typing.Tuple[typing.AnyStr, typing.AnyStr]]:
+ for k, v in items:
+ if to_str(k.lower()) in SENSITIVE_HEADERS:
+ v = to_bytes_or_str("[secure]", match_type_of=v)
+ yield k, v
+
+
+_LOGGER_INITIALIZED = False
+TRACE_LOG_LEVEL = 5
+
+
+class Logger(logging.Logger):
+ # Stub for type checkers.
+ def trace(self, message: str, *args: typing.Any, **kwargs: typing.Any) -> None:
+ ... # pragma: nocover
+
+
+def get_logger(name: str) -> Logger:
+ """
+ Get a `logging.Logger` instance, and optionally
+ set up debug logging based on the HTTPX_LOG_LEVEL environment variable.
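+
+ Editorial usage sketch (assumes HTTPX_LOG_LEVEL is set to "TRACE"):
+
+ logger = get_logger(__name__)
+ logger.trace("connection established")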
+ """ + global _LOGGER_INITIALIZED + + if not _LOGGER_INITIALIZED: + _LOGGER_INITIALIZED = True + logging.addLevelName(TRACE_LOG_LEVEL, "TRACE") + + log_level = os.environ.get("HTTPX_LOG_LEVEL", "").upper() + if log_level in ("DEBUG", "TRACE"): + logger = logging.getLogger("httpx") + logger.setLevel(logging.DEBUG if log_level == "DEBUG" else TRACE_LOG_LEVEL) + handler = logging.StreamHandler(sys.stderr) + handler.setFormatter( + logging.Formatter( + fmt="%(levelname)s [%(asctime)s] %(name)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + ) + logger.addHandler(handler) + + logger = logging.getLogger(name) + + def trace(message: str, *args: typing.Any, **kwargs: typing.Any) -> None: + logger.log(TRACE_LOG_LEVEL, message, *args, **kwargs) + + logger.trace = trace # type: ignore + + return typing.cast(Logger, logger) + + +def port_or_default(url: "URL") -> typing.Optional[int]: + if url.port is not None: + return url.port + return {"http": 80, "https": 443}.get(url.scheme) + + +def same_origin(url: "URL", other: "URL") -> bool: + """ + Return 'True' if the given URLs share the same origin. + """ + return ( + url.scheme == other.scheme + and url.host == other.host + and port_or_default(url) == port_or_default(other) + ) + + +def is_https_redirect(url: "URL", location: "URL") -> bool: + """ + Return 'True' if 'location' is a HTTPS upgrade of 'url' + """ + if url.host != location.host: + return False + + return ( + url.scheme == "http" + and port_or_default(url) == 80 + and location.scheme == "https" + and port_or_default(location) == 443 + ) + + +def get_environment_proxies() -> typing.Dict[str, typing.Optional[str]]: + """Gets proxy information from the environment""" + + # urllib.request.getproxies() falls back on System + # Registry and Config for proxies on Windows and macOS. + # We don't want to propagate non-HTTP proxies into + # our configuration such as 'TRAVIS_APT_PROXY'. + proxy_info = getproxies() + mounts: typing.Dict[str, typing.Optional[str]] = {} + + for scheme in ("http", "https", "all"): + if proxy_info.get(scheme): + hostname = proxy_info[scheme] + mounts[f"{scheme}://"] = ( + hostname if "://" in hostname else f"http://{hostname}" + ) + + no_proxy_hosts = [host.strip() for host in proxy_info.get("no", "").split(",")] + for hostname in no_proxy_hosts: + # See https://curl.haxx.se/libcurl/c/CURLOPT_NOPROXY.html for details + # on how names in `NO_PROXY` are handled. + if hostname == "*": + # If NO_PROXY=* is used or if "*" occurs as any one of the comma + # separated hostnames, then we should just bypass any information + # from HTTP_PROXY, HTTPS_PROXY, ALL_PROXY, and always ignore + # proxies. + return {} + elif hostname: + # NO_PROXY=.google.com is marked as "all://*.google.com, + # which disables "www.google.com" but not "google.com" + # NO_PROXY=google.com is marked as "all://*google.com, + # which disables "www.google.com" and "google.com". 
+ # (But not "wwwgoogle.com")
+ mounts[f"all://*{hostname}"] = None
+
+ return mounts
+
+
+def to_bytes(value: typing.Union[str, bytes], encoding: str = "utf-8") -> bytes:
+ return value.encode(encoding) if isinstance(value, str) else value
+
+
+def to_str(value: typing.Union[str, bytes], encoding: str = "utf-8") -> str:
+ return value if isinstance(value, str) else value.decode(encoding)
+
+
+def to_bytes_or_str(value: str, match_type_of: typing.AnyStr) -> typing.AnyStr:
+ return value if isinstance(match_type_of, str) else value.encode()
+
+
+def unquote(value: str) -> str:
+ return value[1:-1] if value[0] == value[-1] == '"' else value
+
+
+def guess_content_type(filename: typing.Optional[str]) -> typing.Optional[str]:
+ if filename:
+ return mimetypes.guess_type(filename)[0] or "application/octet-stream"
+ return None
+
+
+def peek_filelike_length(stream: typing.Any) -> typing.Optional[int]:
+ """
+ Given a file-like stream object, return its length in number of bytes
+ without reading it into memory.
+ """
+ try:
+ # Is it an actual file?
+ fd = stream.fileno()
+ # Yup, seems to be an actual file.
+ length = os.fstat(fd).st_size
+ except (AttributeError, OSError):
+ # No... Maybe it's something that supports random access, like `io.BytesIO`?
+ try:
+ # Assuming so, go to end of stream to figure out its length,
+ # then put it back in place.
+ offset = stream.tell()
+ length = stream.seek(0, os.SEEK_END)
+ stream.seek(offset)
+ except (AttributeError, OSError):
+ # Not even that? Sorry, we're doomed...
+ return None
+
+ return length
+
+
+class Timer:
+ async def _get_time(self) -> float:
+ library = sniffio.current_async_library()
+ if library == "trio":
+ import trio
+
+ return trio.current_time()
+ elif library == "curio": # pragma: nocover
+ import curio
+
+ return await curio.clock()
+
+ import asyncio
+
+ return asyncio.get_event_loop().time()
+
+ def sync_start(self) -> None:
+ self.started = time.perf_counter()
+
+ async def async_start(self) -> None:
+ self.started = await self._get_time()
+
+ def sync_elapsed(self) -> float:
+ now = time.perf_counter()
+ return now - self.started
+
+ async def async_elapsed(self) -> float:
+ now = await self._get_time()
+ return now - self.started
+
+
+class URLPattern:
+ """
+ A utility class currently used for making lookups against proxy keys...
+
+ # Wildcard matching...
+ >>> pattern = URLPattern("all://")
+ >>> pattern.matches(httpx.URL("http://example.com"))
+ True
+
+ # With scheme matching...
+ >>> pattern = URLPattern("https://")
+ >>> pattern.matches(httpx.URL("https://example.com"))
+ True
+ >>> pattern.matches(httpx.URL("http://example.com"))
+ False
+
+ # With domain matching...
+ >>> pattern = URLPattern("https://example.com")
+ >>> pattern.matches(httpx.URL("https://example.com"))
+ True
+ >>> pattern.matches(httpx.URL("http://example.com"))
+ False
+ >>> pattern.matches(httpx.URL("https://other.com"))
+ False
+
+ # Wildcard scheme, with domain matching...
+ >>> pattern = URLPattern("all://example.com")
+ >>> pattern.matches(httpx.URL("https://example.com"))
+ True
+ >>> pattern.matches(httpx.URL("http://example.com"))
+ True
+ >>> pattern.matches(httpx.URL("https://other.com"))
+ False
+
+ # With port matching...
+ >>> pattern = URLPattern("https://example.com:1234") + >>> pattern.matches(httpx.URL("https://example.com:1234")) + True + >>> pattern.matches(httpx.URL("https://example.com")) + False + """ + + def __init__(self, pattern: str) -> None: + from ._models import URL + + if pattern and ":" not in pattern: + raise ValueError( + f"Proxy keys should use proper URL forms rather " + f"than plain scheme strings. " + f'Instead of "{pattern}", use "{pattern}://"' + ) + + url = URL(pattern) + self.pattern = pattern + self.scheme = "" if url.scheme == "all" else url.scheme + self.host = "" if url.host == "*" else url.host + self.port = url.port + if not url.host or url.host == "*": + self.host_regex: typing.Optional[typing.Pattern[str]] = None + elif url.host.startswith("*."): + # *.example.com should match "www.example.com", but not "example.com" + domain = re.escape(url.host[2:]) + self.host_regex = re.compile(f"^.+\\.{domain}$") + elif url.host.startswith("*"): + # *example.com should match "www.example.com" and "example.com" + domain = re.escape(url.host[1:]) + self.host_regex = re.compile(f"^(.+\\.)?{domain}$") + else: + # example.com should match "example.com" but not "www.example.com" + domain = re.escape(url.host) + self.host_regex = re.compile(f"^{domain}$") + + def matches(self, other: "URL") -> bool: + if self.scheme and self.scheme != other.scheme: + return False + if ( + self.host + and self.host_regex is not None + and not self.host_regex.match(other.host) + ): + return False + if self.port is not None and self.port != other.port: + return False + return True + + @property + def priority(self) -> tuple: + """ + The priority allows URLPattern instances to be sortable, so that + we can match from most specific to least specific. + """ + # URLs with a port should take priority over URLs without a port. + port_priority = 0 if self.port is not None else 1 + # Longer hostnames should match first. + host_priority = -len(self.host) + # Longer schemes should match first. + scheme_priority = -len(self.scheme) + return (port_priority, host_priority, scheme_priority) + + def __hash__(self) -> int: + return hash(self.pattern) + + def __lt__(self, other: "URLPattern") -> bool: + return self.priority < other.priority + + def __eq__(self, other: typing.Any) -> bool: + return isinstance(other, URLPattern) and self.pattern == other.pattern diff --git a/myenv/lib/python3.9/site-packages/httpx/py.typed b/myenv/lib/python3.9/site-packages/httpx/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/LICENSE.md b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/LICENSE.md new file mode 100644 index 0000000..b6f8732 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/LICENSE.md @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2013-2021, Kim Davies +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/METADATA b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/METADATA new file mode 100644 index 0000000..6446805 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/METADATA @@ -0,0 +1,236 @@ +Metadata-Version: 2.1 +Name: idna +Version: 3.3 +Summary: Internationalized Domain Names in Applications (IDNA) +Home-page: https://github.com/kjd/idna +Author: Kim Davies +Author-email: kim@cynosure.com.au +License: BSD-3-Clause +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: Name Service (DNS) +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Requires-Python: >=3.5 +License-File: LICENSE.md + +Internationalized Domain Names in Applications (IDNA) +===================================================== + +Support for the Internationalised Domain Names in Applications +(IDNA) protocol as specified in `RFC 5891 `_. +This is the latest version of the protocol and is sometimes referred to as +“IDNA 2008”. + +This library also provides support for Unicode Technical Standard 46, +`Unicode IDNA Compatibility Processing `_. + +This acts as a suitable replacement for the “encodings.idna” module that +comes with the Python standard library, but which only supports the +older superseded IDNA specification (`RFC 3490 `_). + +Basic functions are simply executed: + +.. 
code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + + +Installation +------------ + +To install this library, you can use pip: + +.. code-block:: bash + + $ pip install idna + +Alternatively, you can install the package using the bundled setup script: + +.. code-block:: bash + + $ python setup.py install + + +Usage +----- + +For typical usage, the ``encode`` and ``decode`` functions will take a domain +name argument and perform a conversion to A-labels or U-labels respectively. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + +You may use the codec encoding and decoding methods using the +``idna.codec`` module: + +.. code-block:: pycon + + >>> import idna.codec + >>> print('домен.испытание'.encode('idna')) + b'xn--d1acufc.xn--80akhbyknj4f' + >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna')) + домен.испытание + +Conversions can be applied at a per-label basis using the ``ulabel`` or ``alabel`` +functions if necessary: + +.. code-block:: pycon + + >>> idna.alabel('测试') + b'xn--0zwm56d' + +Compatibility Mapping (UTS #46) ++++++++++++++++++++++++++++++++ + +As described in `RFC 5895 `_, the IDNA +specification does not normalize input from different potential ways a user +may input a domain name. This functionality, known as a “mapping”, is +considered by the specification to be a local user-interface issue distinct +from IDNA conversion functionality. + +This library provides one such mapping, that was developed by the Unicode +Consortium. Known as `Unicode IDNA Compatibility Processing `_, +it provides for both a regular mapping for typical applications, as well as +a transitional mapping to help migrate from older IDNA 2003 applications. + +For example, “Königsgäßchen” is not a permissible label as *LATIN CAPITAL +LETTER K* is not allowed (nor are capital letters in general). UTS 46 will +convert this into lower case prior to applying the IDNA conversion. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('Königsgäßchen') + ... + idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed + >>> idna.encode('Königsgäßchen', uts46=True) + b'xn--knigsgchen-b4a3dun' + >>> print(idna.decode('xn--knigsgchen-b4a3dun')) + königsgäßchen + +Transitional processing provides conversions to help transition from the older +2003 standard to the current standard. For example, in the original IDNA +specification, the *LATIN SMALL LETTER SHARP S* (ß) was converted into two +*LATIN SMALL LETTER S* (ss), whereas in the current IDNA specification this +conversion is not performed. + +.. code-block:: pycon + + >>> idna.encode('Königsgäßchen', uts46=True, transitional=True) + 'xn--knigsgsschen-lcb0w' + +Implementors should use transitional processing with caution, only in rare +cases where conversion from legacy labels to current labels must be performed +(i.e. IDNA implementations that pre-date 2008). For typical applications +that just need to convert labels, transitional processing is unlikely to be +beneficial and could produce unexpected incompatible results. + +``encodings.idna`` Compatibility +++++++++++++++++++++++++++++++++ + +Function calls from the Python built-in ``encodings.idna`` module are +mapped to their IDNA 2008 equivalents using the ``idna.compat`` module. 
+Simply substitute the ``import`` clause in your code to refer to the +new module name. + +Exceptions +---------- + +All errors raised during the conversion following the specification should +raise an exception derived from the ``idna.IDNAError`` base class. + +More specific exceptions that may be generated as ``idna.IDNABidiError`` +when the error reflects an illegal combination of left-to-right and +right-to-left characters in a label; ``idna.InvalidCodepoint`` when +a specific codepoint is an illegal character in an IDN label (i.e. +INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is +illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ +but the contextual requirements are not satisfied.) + +Building and Diagnostics +------------------------ + +The IDNA and UTS 46 functionality relies upon pre-calculated lookup +tables for performance. These tables are derived from computing against +eligibility criteria in the respective standards. These tables are +computed using the command-line script ``tools/idna-data``. + +This tool will fetch relevant codepoint data from the Unicode repository +and perform the required calculations to identify eligibility. There are +three main modes: + +* ``idna-data make-libdata``. Generates ``idnadata.py`` and ``uts46data.py``, + the pre-calculated lookup tables using for IDNA and UTS 46 conversions. Implementors + who wish to track this library against a different Unicode version may use this tool + to manually generate a different version of the ``idnadata.py`` and ``uts46data.py`` + files. + +* ``idna-data make-table``. Generate a table of the IDNA disposition + (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix B.1 of RFC + 5892 and the pre-computed tables published by `IANA `_. + +* ``idna-data U+0061``. Prints debugging output on the various properties + associated with an individual Unicode codepoint (in this case, U+0061), that are + used to assess the IDNA and UTS 46 status of a codepoint. This is helpful in debugging + or analysis. + +The tool accepts a number of arguments, described using ``idna-data -h``. Most notably, +the ``--version`` argument allows the specification of the version of Unicode to use +in computing the table data. For example, ``idna-data --version 9.0.0 make-libdata`` +will generate library data against Unicode 9.0.0. + + +Additional Notes +---------------- + +* **Packages**. The latest tagged release version is published in the + `Python Package Index `_. + +* **Version support**. This library supports Python 3.5 and higher. As this library + serves as a low-level toolkit for a variety of applications, many of which strive + for broad compatibility with older Python versions, there is no rush to remove + older intepreter support. Removing support for older versions should be well + justified in that the maintenance burden has become too high. + +* **Python 2**. Python 2 is supported by version 2.x of this library. While active + development of the version 2.x series has ended, notable issues being corrected + may be backported to 2.x. Use "idna<3" in your requirements file if you need this + library for a Python 2 application. + +* **Testing**. The library has a test suite based on each rule of the IDNA specification, as + well as tests that are provided as part of the Unicode Technical Standard 46, + `Unicode IDNA Compatibility Processing `_. + +* **Emoji**. It is an occasional request to support emoji domains in this library. 
Encoding + of symbols like emoji is expressly prohibited by the technical standard IDNA 2008 and + emoji domains are broadly phased out across the domain industry due to associated security + risks. For now, applications that wish need to support these non-compliant labels may + wish to consider trying the encode/decode operation in this library first, and then falling + back to using `encodings.idna`. See `the Github project `_ + for more discussion. + diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/RECORD b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/RECORD new file mode 100644 index 0000000..969358c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/RECORD @@ -0,0 +1,15 @@ +idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849 +idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374 +idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321 +idna/core.py,sha256=RFIkY-HhFZaDoBEFjGwyGd_vWI04uOAQjnzueMWqwOU,12795 +idna/idnadata.py,sha256=fzMzkCea2xieVxcrjngJ-2pLsKQNejPCZFlBajIuQdw,44025 +idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881 +idna/package_data.py,sha256=szxQhV0ZD0nKJ84Kuobw3l8q4_KeCyXjFRdpwIpKZmw,21 +idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +idna/uts46data.py,sha256=o-D7V-a0fOLZNd7tvxof6MYfUd0TBZzE2bLR5XO67xU,204400 +idna-3.3.dist-info/LICENSE.md,sha256=otbk2UC9JNvnuWRc3hmpeSzFHbeuDVrNMBrIYMqj6DY,1523 +idna-3.3.dist-info/METADATA,sha256=BdqiAf8ou4x1nzIHp2_sDfXWjl7BrSUGpOeVzbYHQuQ,9765 +idna-3.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +idna-3.3.dist-info/top_level.txt,sha256=jSag9sEDqvSPftxOQy-ABfGV_RSy7oFh4zZJpODV8k0,5 +idna-3.3.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +idna-3.3.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/top_level.txt new file mode 100644 index 0000000..c40472e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna-3.3.dist-info/top_level.txt @@ -0,0 +1 @@ +idna diff --git a/myenv/lib/python3.9/site-packages/idna/__init__.py b/myenv/lib/python3.9/site-packages/idna/__init__.py new file mode 100644 index 0000000..a40eeaf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna/__init__.py @@ -0,0 +1,44 @@ +from .package_data import __version__ +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain + +__all__ = [ + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +] diff --git 
a/myenv/lib/python3.9/site-packages/idna/codec.py b/myenv/lib/python3.9/site-packages/idna/codec.py new file mode 100644 index 0000000..1ca9ba6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna/codec.py @@ -0,0 +1,112 @@ +from .core import encode, decode, alabel, ulabel, IDNAError +import codecs +import re +from typing import Tuple, Optional + +_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]') + +class Codec(codecs.Codec): + + def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]: + if errors != 'strict': + raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) + + if not data: + return b"", 0 + + return encode(data), len(data) + + def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]: + if errors != 'strict': + raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) + + if not data: + return '', 0 + + return decode(data), len(data) + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore + if errors != 'strict': + raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) + + if not data: + return "", 0 + + labels = _unicode_dots_re.split(data) + trailing_dot = '' + if labels: + if not labels[-1]: + trailing_dot = '.' + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = '.' + + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result_str = '.'.join(result) + trailing_dot # type: ignore + size += len(trailing_dot) + return result_str, size + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore + if errors != 'strict': + raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) + + if not data: + return ('', 0) + + labels = _unicode_dots_re.split(data) + trailing_dot = '' + if labels: + if not labels[-1]: + trailing_dot = '.' + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = '.' 
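+ # Editorial note: when `final` is False the last, possibly incomplete,
+ # label is held back for the next call; the dot that separated it from
+ # the completed labels is emitted now via `trailing_dot`.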
+ + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result_str = '.'.join(result) + trailing_dot + size += len(trailing_dot) + return (result_str, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +def getregentry() -> codecs.CodecInfo: + # Compatibility as a search_function for codecs.register() + return codecs.CodecInfo( + name='idna', + encode=Codec().encode, # type: ignore + decode=Codec().decode, # type: ignore + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/myenv/lib/python3.9/site-packages/idna/compat.py b/myenv/lib/python3.9/site-packages/idna/compat.py new file mode 100644 index 0000000..786e6bd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna/compat.py @@ -0,0 +1,13 @@ +from .core import * +from .codec import * +from typing import Any, Union + +def ToASCII(label: str) -> bytes: + return encode(label) + +def ToUnicode(label: Union[bytes, bytearray]) -> str: + return decode(label) + +def nameprep(s: Any) -> None: + raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol') + diff --git a/myenv/lib/python3.9/site-packages/idna/core.py b/myenv/lib/python3.9/site-packages/idna/core.py new file mode 100644 index 0000000..55ab967 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna/core.py @@ -0,0 +1,397 @@ +from . import idnadata +import bisect +import unicodedata +import re +from typing import Union, Optional +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b'xn--' +_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]') + +class IDNAError(UnicodeError): + """ Base exception for all IDNA-encoding related problems """ + pass + + +class IDNABidiError(IDNAError): + """ Exception when bidirectional requirements are not satisfied """ + pass + + +class InvalidCodepoint(IDNAError): + """ Exception when a disallowed or unallocated codepoint is used """ + pass + + +class InvalidCodepointContext(IDNAError): + """ Exception when the codepoint is not valid in the context it is used """ + pass + + +def _combining_class(cp: int) -> int: + v = unicodedata.combining(chr(cp)) + if v == 0: + if not unicodedata.name(chr(cp)): + raise ValueError('Unknown character in unicodedata') + return v + +def _is_script(cp: str, script: str) -> bool: + return intranges_contain(ord(cp), idnadata.scripts[script]) + +def _punycode(s: str) -> bytes: + return s.encode('punycode') + +def _unot(s: int) -> str: + return 'U+{:04X}'.format(s) + + +def valid_label_length(label: Union[bytes, str]) -> bool: + if len(label) > 63: + return False + return True + + +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label: str, check_ltr: bool = False) -> bool: + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for (idx, cp) in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == '': + # String likely comes from a newer version of Unicode + raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx)) + if direction in ['R', 'AL', 'AN']: + bidi_label = True + if not bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = 
unicodedata.bidirectional(label[0]) + if direction in ['R', 'AL']: + rtl = True + elif direction == 'L': + rtl = False + else: + raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label))) + + valid_ending = False + number_type = None # type: Optional[str] + for (idx, cp) in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: + raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx)) + # Bidi rule 3 + if direction in ['R', 'AL', 'EN', 'AN']: + valid_ending = True + elif direction != 'NSM': + valid_ending = False + # Bidi rule 4 + if direction in ['AN', 'EN']: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError('Can not mix numeral types in a right-to-left label') + else: + # Bidi rule 5 + if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: + raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx)) + # Bidi rule 6 + if direction in ['L', 'EN']: + valid_ending = True + elif direction != 'NSM': + valid_ending = False + + if not valid_ending: + raise IDNABidiError('Label ends with illegal codepoint directionality') + + return True + + +def check_initial_combiner(label: str) -> bool: + if unicodedata.category(label[0])[0] == 'M': + raise IDNAError('Label begins with an illegal combining character') + return True + + +def check_hyphen_ok(label: str) -> bool: + if label[2:4] == '--': + raise IDNAError('Label has disallowed hyphens in 3rd and 4th position') + if label[0] == '-' or label[-1] == '-': + raise IDNAError('Label must not start or end with a hyphen') + return True + + +def check_nfc(label: str) -> None: + if unicodedata.normalize('NFC', label) != label: + raise IDNAError('Label must be in Normalization Form C') + + +def valid_contextj(label: str, pos: int) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x200c: + + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos-1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord('T'): + continue + if joining_type in [ord('L'), ord('D')]: + ok = True + break + + if not ok: + return False + + ok = False + for i in range(pos+1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord('T'): + continue + if joining_type in [ord('R'), ord('D')]: + ok = True + break + return ok + + if cp_value == 0x200d: + + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + + return False + + +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x00b7: + if 0 < pos < len(label)-1: + if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label)-1 and len(label) > 1: + return _is_script(label[pos + 1], 'Greek') + return False + + elif cp_value == 0x05f3 or cp_value == 0x05f4: + if pos > 0: + return _is_script(label[pos - 1], 'Hebrew') + return False + + elif cp_value == 0x30fb: + for cp in label: + if cp == '\u30fb': + continue + if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'): + return True + return 
False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6f0 <= ord(cp) <= 0x06f9: + return False + return True + + elif 0x6f0 <= cp_value <= 0x6f9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + return False + + +def check_label(label: Union[str, bytes, bytearray]) -> None: + if isinstance(label, (bytes, bytearray)): + label = label.decode('utf-8') + if len(label) == 0: + raise IDNAError('Empty Label') + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for (pos, cp) in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format( + _unot(cp_value), pos+1, repr(label))) + except ValueError: + raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format( + _unot(cp_value), pos+1, repr(label))) + elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']): + if not valid_contexto(label, pos): + raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label))) + else: + raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label))) + + check_bidi(label) + + +def alabel(label: str) -> bytes: + try: + label_bytes = label.encode('ascii') + ulabel(label_bytes) + if not valid_label_length(label_bytes): + raise IDNAError('Label too long') + return label_bytes + except UnicodeEncodeError: + pass + + if not label: + raise IDNAError('No Input') + + label = str(label) + check_label(label) + label_bytes = _punycode(label) + label_bytes = _alabel_prefix + label_bytes + + if not valid_label_length(label_bytes): + raise IDNAError('Label too long') + + return label_bytes + + +def ulabel(label: Union[str, bytes, bytearray]) -> str: + if not isinstance(label, (bytes, bytearray)): + try: + label_bytes = label.encode('ascii') + except UnicodeEncodeError: + check_label(label) + return label + else: + label_bytes = label + + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix):] + if not label_bytes: + raise IDNAError('Malformed A-label, no Punycode eligible content found') + if label_bytes.decode('ascii')[-1] == '-': + raise IDNAError('A-label must not end with a hyphen') + else: + check_label(label_bytes) + return label_bytes.decode('ascii') + + try: + label = label_bytes.decode('punycode') + except UnicodeError: + raise IDNAError('Invalid A-label') + check_label(label) + return label + + +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + output = '' + + for pos, char in enumerate(domain): + code_point = ord(char) + try: + uts46row = uts46data[code_point if code_point < 256 else + bisect.bisect_left(uts46data, (code_point, 'Z')) - 1] + status = uts46row[1] + replacement = None # type: Optional[str] + if len(uts46row) == 3: + replacement = uts46row[2] # type: ignore + if (status == 'V' or + (status == 'D' and not transitional) or + (status == '3' and not std3_rules and replacement is None)): + output += char + elif replacement is not None and (status == 'M' or + (status == '3' and not std3_rules) or + 
(status == 'D' and transitional)): + output += replacement + elif status != 'I': + raise IndexError() + except IndexError: + raise InvalidCodepoint( + 'Codepoint {} not allowed at position {} in {}'.format( + _unot(code_point), pos + 1, repr(domain))) + + return unicodedata.normalize('NFC', output) + + +def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes: + if isinstance(s, (bytes, bytearray)): + s = s.decode('ascii') + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split('.') + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == ['']: + raise IDNAError('Empty domain') + if labels[-1] == '': + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError('Empty label') + if trailing_dot: + result.append(b'') + s = b'.'.join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError('Domain too long') + return s + + +def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str: + try: + if isinstance(s, (bytes, bytearray)): + s = s.decode('ascii') + except UnicodeDecodeError: + raise IDNAError('Invalid ASCII in A-label') + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + labels = s.split('.') + if not labels or labels == ['']: + raise IDNAError('Empty domain') + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError('Empty label') + if trailing_dot: + result.append('') + return '.'.join(result) diff --git a/myenv/lib/python3.9/site-packages/idna/idnadata.py b/myenv/lib/python3.9/site-packages/idna/idnadata.py new file mode 100644 index 0000000..1b5805d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/idna/idnadata.py @@ -0,0 +1,2137 @@ +# This file is automatically generated by tools/idna-data + +__version__ = '14.0.0' +scripts = { + 'Greek': ( + 0x37000000374, + 0x37500000378, + 0x37a0000037e, + 0x37f00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038b, + 0x38c0000038d, + 0x38e000003a2, + 0x3a3000003e2, + 0x3f000000400, + 0x1d2600001d2b, + 0x1d5d00001d62, + 0x1d6600001d6b, + 0x1dbf00001dc0, + 0x1f0000001f16, + 0x1f1800001f1e, + 0x1f2000001f46, + 0x1f4800001f4e, + 0x1f5000001f58, + 0x1f5900001f5a, + 0x1f5b00001f5c, + 0x1f5d00001f5e, + 0x1f5f00001f7e, + 0x1f8000001fb5, + 0x1fb600001fc5, + 0x1fc600001fd4, + 0x1fd600001fdc, + 0x1fdd00001ff0, + 0x1ff200001ff5, + 0x1ff600001fff, + 0x212600002127, + 0xab650000ab66, + 0x101400001018f, + 0x101a0000101a1, + 0x1d2000001d246, + ), + 'Han': ( + 0x2e8000002e9a, + 0x2e9b00002ef4, + 0x2f0000002fd6, + 0x300500003006, + 0x300700003008, + 0x30210000302a, + 0x30380000303c, + 0x340000004dc0, + 0x4e000000a000, + 0xf9000000fa6e, + 0xfa700000fada, + 0x16fe200016fe4, + 0x16ff000016ff2, + 0x200000002a6e0, + 0x2a7000002b739, + 0x2b7400002b81e, + 0x2b8200002cea2, + 0x2ceb00002ebe1, + 0x2f8000002fa1e, + 0x300000003134b, + ), + 'Hebrew': ( + 0x591000005c8, + 0x5d0000005eb, + 0x5ef000005f5, + 0xfb1d0000fb37, + 0xfb380000fb3d, + 0xfb3e0000fb3f, + 0xfb400000fb42, + 0xfb430000fb45, + 0xfb460000fb50, + ), + 'Hiragana': ( + 0x304100003097, + 0x309d000030a0, + 0x1b0010001b120, + 0x1b1500001b153, + 0x1f2000001f201, + ), + 'Katakana': ( + 
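+ # Editorial note: each entry packs a half-open codepoint range as
+ # (start << 32) | end, e.g. 0x30a1000030fb covers U+30A1..U+30FA
+ # (see idna/intranges.py, through which these tables are queried).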
0x30a1000030fb, + 0x30fd00003100, + 0x31f000003200, + 0x32d0000032ff, + 0x330000003358, + 0xff660000ff70, + 0xff710000ff9e, + 0x1aff00001aff4, + 0x1aff50001affc, + 0x1affd0001afff, + 0x1b0000001b001, + 0x1b1200001b123, + 0x1b1640001b168, + ), +} +joining_types = { + 0x600: 85, + 0x601: 85, + 0x602: 85, + 0x603: 85, + 0x604: 85, + 0x605: 85, + 0x608: 85, + 0x60b: 85, + 0x620: 68, + 0x621: 85, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62a: 68, + 0x62b: 68, + 0x62c: 68, + 0x62d: 68, + 0x62e: 68, + 0x62f: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63a: 68, + 0x63b: 68, + 0x63c: 68, + 0x63d: 68, + 0x63e: 68, + 0x63f: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 68, + 0x648: 82, + 0x649: 68, + 0x64a: 68, + 0x66e: 68, + 0x66f: 68, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x674: 85, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67a: 68, + 0x67b: 68, + 0x67c: 68, + 0x67d: 68, + 0x67e: 68, + 0x67f: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68a: 82, + 0x68b: 82, + 0x68c: 82, + 0x68d: 82, + 0x68e: 82, + 0x68f: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69a: 68, + 0x69b: 68, + 0x69c: 68, + 0x69d: 68, + 0x69e: 68, + 0x69f: 68, + 0x6a0: 68, + 0x6a1: 68, + 0x6a2: 68, + 0x6a3: 68, + 0x6a4: 68, + 0x6a5: 68, + 0x6a6: 68, + 0x6a7: 68, + 0x6a8: 68, + 0x6a9: 68, + 0x6aa: 68, + 0x6ab: 68, + 0x6ac: 68, + 0x6ad: 68, + 0x6ae: 68, + 0x6af: 68, + 0x6b0: 68, + 0x6b1: 68, + 0x6b2: 68, + 0x6b3: 68, + 0x6b4: 68, + 0x6b5: 68, + 0x6b6: 68, + 0x6b7: 68, + 0x6b8: 68, + 0x6b9: 68, + 0x6ba: 68, + 0x6bb: 68, + 0x6bc: 68, + 0x6bd: 68, + 0x6be: 68, + 0x6bf: 68, + 0x6c0: 82, + 0x6c1: 68, + 0x6c2: 68, + 0x6c3: 82, + 0x6c4: 82, + 0x6c5: 82, + 0x6c6: 82, + 0x6c7: 82, + 0x6c8: 82, + 0x6c9: 82, + 0x6ca: 82, + 0x6cb: 82, + 0x6cc: 68, + 0x6cd: 82, + 0x6ce: 68, + 0x6cf: 82, + 0x6d0: 68, + 0x6d1: 68, + 0x6d2: 82, + 0x6d3: 82, + 0x6d5: 82, + 0x6dd: 85, + 0x6ee: 82, + 0x6ef: 82, + 0x6fa: 68, + 0x6fb: 68, + 0x6fc: 68, + 0x6ff: 68, + 0x70f: 84, + 0x710: 82, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71a: 68, + 0x71b: 68, + 0x71c: 68, + 0x71d: 68, + 0x71e: 82, + 0x71f: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72a: 82, + 0x72b: 68, + 0x72c: 82, + 0x72d: 68, + 0x72e: 68, + 0x72f: 82, + 0x74d: 82, + 0x74e: 68, + 0x74f: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75a: 82, + 0x75b: 82, + 0x75c: 68, + 0x75d: 68, + 0x75e: 68, + 0x75f: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76a: 68, + 0x76b: 82, + 0x76c: 82, + 0x76d: 68, + 0x76e: 68, + 0x76f: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77a: 68, + 0x77b: 68, + 0x77c: 68, + 0x77d: 68, + 0x77e: 68, + 0x77f: 68, + 0x7ca: 68, + 0x7cb: 68, + 0x7cc: 68, + 0x7cd: 68, + 0x7ce: 68, + 0x7cf: 68, + 0x7d0: 68, + 0x7d1: 68, + 0x7d2: 68, + 0x7d3: 68, 
+ 0x7d4: 68, + 0x7d5: 68, + 0x7d6: 68, + 0x7d7: 68, + 0x7d8: 68, + 0x7d9: 68, + 0x7da: 68, + 0x7db: 68, + 0x7dc: 68, + 0x7dd: 68, + 0x7de: 68, + 0x7df: 68, + 0x7e0: 68, + 0x7e1: 68, + 0x7e2: 68, + 0x7e3: 68, + 0x7e4: 68, + 0x7e5: 68, + 0x7e6: 68, + 0x7e7: 68, + 0x7e8: 68, + 0x7e9: 68, + 0x7ea: 68, + 0x7fa: 67, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84a: 68, + 0x84b: 68, + 0x84c: 68, + 0x84d: 68, + 0x84e: 68, + 0x84f: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 82, + 0x857: 82, + 0x858: 82, + 0x860: 68, + 0x861: 85, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x866: 85, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86a: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87a: 82, + 0x87b: 82, + 0x87c: 82, + 0x87d: 82, + 0x87e: 82, + 0x87f: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x887: 85, + 0x888: 85, + 0x889: 68, + 0x88a: 68, + 0x88b: 68, + 0x88c: 68, + 0x88d: 68, + 0x88e: 82, + 0x890: 85, + 0x891: 85, + 0x8a0: 68, + 0x8a1: 68, + 0x8a2: 68, + 0x8a3: 68, + 0x8a4: 68, + 0x8a5: 68, + 0x8a6: 68, + 0x8a7: 68, + 0x8a8: 68, + 0x8a9: 68, + 0x8aa: 82, + 0x8ab: 82, + 0x8ac: 82, + 0x8ad: 85, + 0x8ae: 82, + 0x8af: 68, + 0x8b0: 68, + 0x8b1: 82, + 0x8b2: 82, + 0x8b3: 68, + 0x8b4: 68, + 0x8b5: 68, + 0x8b6: 68, + 0x8b7: 68, + 0x8b8: 68, + 0x8b9: 82, + 0x8ba: 68, + 0x8bb: 68, + 0x8bc: 68, + 0x8bd: 68, + 0x8be: 68, + 0x8bf: 68, + 0x8c0: 68, + 0x8c1: 68, + 0x8c2: 68, + 0x8c3: 68, + 0x8c4: 68, + 0x8c5: 68, + 0x8c6: 68, + 0x8c7: 68, + 0x8c8: 68, + 0x8e2: 85, + 0x1806: 85, + 0x1807: 68, + 0x180a: 67, + 0x180e: 85, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 0x1829: 68, + 0x182a: 68, + 0x182b: 68, + 0x182c: 68, + 0x182d: 68, + 0x182e: 68, + 0x182f: 68, + 0x1830: 68, + 0x1831: 68, + 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183a: 68, + 0x183b: 68, + 0x183c: 68, + 0x183d: 68, + 0x183e: 68, + 0x183f: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184a: 68, + 0x184b: 68, + 0x184c: 68, + 0x184d: 68, + 0x184e: 68, + 0x184f: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185a: 68, + 0x185b: 68, + 0x185c: 68, + 0x185d: 68, + 0x185e: 68, + 0x185f: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186a: 68, + 0x186b: 68, + 0x186c: 68, + 0x186d: 68, + 0x186e: 68, + 0x186f: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1880: 85, + 0x1881: 85, + 0x1882: 85, + 0x1883: 85, + 0x1884: 85, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188a: 68, + 0x188b: 68, + 0x188c: 68, + 0x188d: 68, + 0x188e: 68, + 0x188f: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189a: 68, + 0x189b: 68, + 0x189c: 68, + 0x189d: 68, + 0x189e: 68, + 0x189f: 68, + 0x18a0: 68, + 0x18a1: 68, + 0x18a2: 68, + 
0x18a3: 68, + 0x18a4: 68, + 0x18a5: 68, + 0x18a6: 68, + 0x18a7: 68, + 0x18a8: 68, + 0x18aa: 68, + 0x200c: 85, + 0x200d: 67, + 0x202f: 85, + 0x2066: 85, + 0x2067: 85, + 0x2068: 85, + 0x2069: 85, + 0xa840: 68, + 0xa841: 68, + 0xa842: 68, + 0xa843: 68, + 0xa844: 68, + 0xa845: 68, + 0xa846: 68, + 0xa847: 68, + 0xa848: 68, + 0xa849: 68, + 0xa84a: 68, + 0xa84b: 68, + 0xa84c: 68, + 0xa84d: 68, + 0xa84e: 68, + 0xa84f: 68, + 0xa850: 68, + 0xa851: 68, + 0xa852: 68, + 0xa853: 68, + 0xa854: 68, + 0xa855: 68, + 0xa856: 68, + 0xa857: 68, + 0xa858: 68, + 0xa859: 68, + 0xa85a: 68, + 0xa85b: 68, + 0xa85c: 68, + 0xa85d: 68, + 0xa85e: 68, + 0xa85f: 68, + 0xa860: 68, + 0xa861: 68, + 0xa862: 68, + 0xa863: 68, + 0xa864: 68, + 0xa865: 68, + 0xa866: 68, + 0xa867: 68, + 0xa868: 68, + 0xa869: 68, + 0xa86a: 68, + 0xa86b: 68, + 0xa86c: 68, + 0xa86d: 68, + 0xa86e: 68, + 0xa86f: 68, + 0xa870: 68, + 0xa871: 68, + 0xa872: 76, + 0xa873: 85, + 0x10ac0: 68, + 0x10ac1: 68, + 0x10ac2: 68, + 0x10ac3: 68, + 0x10ac4: 68, + 0x10ac5: 82, + 0x10ac6: 85, + 0x10ac7: 82, + 0x10ac8: 85, + 0x10ac9: 82, + 0x10aca: 82, + 0x10acb: 85, + 0x10acc: 85, + 0x10acd: 76, + 0x10ace: 82, + 0x10acf: 82, + 0x10ad0: 82, + 0x10ad1: 82, + 0x10ad2: 82, + 0x10ad3: 68, + 0x10ad4: 68, + 0x10ad5: 68, + 0x10ad6: 68, + 0x10ad7: 76, + 0x10ad8: 68, + 0x10ad9: 68, + 0x10ada: 68, + 0x10adb: 68, + 0x10adc: 68, + 0x10add: 82, + 0x10ade: 68, + 0x10adf: 68, + 0x10ae0: 68, + 0x10ae1: 82, + 0x10ae2: 85, + 0x10ae3: 85, + 0x10ae4: 82, + 0x10aeb: 68, + 0x10aec: 68, + 0x10aed: 68, + 0x10aee: 68, + 0x10aef: 82, + 0x10b80: 68, + 0x10b81: 82, + 0x10b82: 68, + 0x10b83: 82, + 0x10b84: 82, + 0x10b85: 82, + 0x10b86: 68, + 0x10b87: 68, + 0x10b88: 68, + 0x10b89: 82, + 0x10b8a: 68, + 0x10b8b: 68, + 0x10b8c: 82, + 0x10b8d: 68, + 0x10b8e: 82, + 0x10b8f: 82, + 0x10b90: 68, + 0x10b91: 82, + 0x10ba9: 82, + 0x10baa: 82, + 0x10bab: 82, + 0x10bac: 82, + 0x10bad: 68, + 0x10bae: 68, + 0x10baf: 85, + 0x10d00: 76, + 0x10d01: 68, + 0x10d02: 68, + 0x10d03: 68, + 0x10d04: 68, + 0x10d05: 68, + 0x10d06: 68, + 0x10d07: 68, + 0x10d08: 68, + 0x10d09: 68, + 0x10d0a: 68, + 0x10d0b: 68, + 0x10d0c: 68, + 0x10d0d: 68, + 0x10d0e: 68, + 0x10d0f: 68, + 0x10d10: 68, + 0x10d11: 68, + 0x10d12: 68, + 0x10d13: 68, + 0x10d14: 68, + 0x10d15: 68, + 0x10d16: 68, + 0x10d17: 68, + 0x10d18: 68, + 0x10d19: 68, + 0x10d1a: 68, + 0x10d1b: 68, + 0x10d1c: 68, + 0x10d1d: 68, + 0x10d1e: 68, + 0x10d1f: 68, + 0x10d20: 68, + 0x10d21: 68, + 0x10d22: 82, + 0x10d23: 68, + 0x10f30: 68, + 0x10f31: 68, + 0x10f32: 68, + 0x10f33: 82, + 0x10f34: 68, + 0x10f35: 68, + 0x10f36: 68, + 0x10f37: 68, + 0x10f38: 68, + 0x10f39: 68, + 0x10f3a: 68, + 0x10f3b: 68, + 0x10f3c: 68, + 0x10f3d: 68, + 0x10f3e: 68, + 0x10f3f: 68, + 0x10f40: 68, + 0x10f41: 68, + 0x10f42: 68, + 0x10f43: 68, + 0x10f44: 68, + 0x10f45: 85, + 0x10f51: 68, + 0x10f52: 68, + 0x10f53: 68, + 0x10f54: 82, + 0x10f70: 68, + 0x10f71: 68, + 0x10f72: 68, + 0x10f73: 68, + 0x10f74: 82, + 0x10f75: 82, + 0x10f76: 68, + 0x10f77: 68, + 0x10f78: 68, + 0x10f79: 68, + 0x10f7a: 68, + 0x10f7b: 68, + 0x10f7c: 68, + 0x10f7d: 68, + 0x10f7e: 68, + 0x10f7f: 68, + 0x10f80: 68, + 0x10f81: 68, + 0x10fb0: 68, + 0x10fb1: 85, + 0x10fb2: 68, + 0x10fb3: 68, + 0x10fb4: 82, + 0x10fb5: 82, + 0x10fb6: 82, + 0x10fb7: 85, + 0x10fb8: 68, + 0x10fb9: 82, + 0x10fba: 82, + 0x10fbb: 68, + 0x10fbc: 68, + 0x10fbd: 82, + 0x10fbe: 68, + 0x10fbf: 68, + 0x10fc0: 85, + 0x10fc1: 68, + 0x10fc2: 82, + 0x10fc3: 82, + 0x10fc4: 68, + 0x10fc5: 85, + 0x10fc6: 85, + 0x10fc7: 85, + 0x10fc8: 85, + 0x10fc9: 82, + 0x10fca: 68, + 0x10fcb: 76, + 
0x110bd: 85, + 0x110cd: 85, + 0x1e900: 68, + 0x1e901: 68, + 0x1e902: 68, + 0x1e903: 68, + 0x1e904: 68, + 0x1e905: 68, + 0x1e906: 68, + 0x1e907: 68, + 0x1e908: 68, + 0x1e909: 68, + 0x1e90a: 68, + 0x1e90b: 68, + 0x1e90c: 68, + 0x1e90d: 68, + 0x1e90e: 68, + 0x1e90f: 68, + 0x1e910: 68, + 0x1e911: 68, + 0x1e912: 68, + 0x1e913: 68, + 0x1e914: 68, + 0x1e915: 68, + 0x1e916: 68, + 0x1e917: 68, + 0x1e918: 68, + 0x1e919: 68, + 0x1e91a: 68, + 0x1e91b: 68, + 0x1e91c: 68, + 0x1e91d: 68, + 0x1e91e: 68, + 0x1e91f: 68, + 0x1e920: 68, + 0x1e921: 68, + 0x1e922: 68, + 0x1e923: 68, + 0x1e924: 68, + 0x1e925: 68, + 0x1e926: 68, + 0x1e927: 68, + 0x1e928: 68, + 0x1e929: 68, + 0x1e92a: 68, + 0x1e92b: 68, + 0x1e92c: 68, + 0x1e92d: 68, + 0x1e92e: 68, + 0x1e92f: 68, + 0x1e930: 68, + 0x1e931: 68, + 0x1e932: 68, + 0x1e933: 68, + 0x1e934: 68, + 0x1e935: 68, + 0x1e936: 68, + 0x1e937: 68, + 0x1e938: 68, + 0x1e939: 68, + 0x1e93a: 68, + 0x1e93b: 68, + 0x1e93c: 68, + 0x1e93d: 68, + 0x1e93e: 68, + 0x1e93f: 68, + 0x1e940: 68, + 0x1e941: 68, + 0x1e942: 68, + 0x1e943: 68, + 0x1e94b: 84, +} +codepoint_classes = { + 'PVALID': ( + 0x2d0000002e, + 0x300000003a, + 0x610000007b, + 0xdf000000f7, + 0xf800000100, + 0x10100000102, + 0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010a, + 0x10b0000010c, + 0x10d0000010e, + 0x10f00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011a, + 0x11b0000011c, + 0x11d0000011e, + 0x11f00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012a, + 0x12b0000012c, + 0x12d0000012e, + 0x12f00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13a0000013b, + 0x13c0000013d, + 0x13e0000013f, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14b0000014c, + 0x14d0000014e, + 0x14f00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 0x1590000015a, + 0x15b0000015c, + 0x15d0000015e, + 0x15f00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016a, + 0x16b0000016c, + 0x16d0000016e, + 0x16f00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17a0000017b, + 0x17c0000017d, + 0x17e0000017f, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18c0000018e, + 0x19200000193, + 0x19500000196, + 0x1990000019c, + 0x19e0000019f, + 0x1a1000001a2, + 0x1a3000001a4, + 0x1a5000001a6, + 0x1a8000001a9, + 0x1aa000001ac, + 0x1ad000001ae, + 0x1b0000001b1, + 0x1b4000001b5, + 0x1b6000001b7, + 0x1b9000001bc, + 0x1bd000001c4, + 0x1ce000001cf, + 0x1d0000001d1, + 0x1d2000001d3, + 0x1d4000001d5, + 0x1d6000001d7, + 0x1d8000001d9, + 0x1da000001db, + 0x1dc000001de, + 0x1df000001e0, + 0x1e1000001e2, + 0x1e3000001e4, + 0x1e5000001e6, + 0x1e7000001e8, + 0x1e9000001ea, + 0x1eb000001ec, + 0x1ed000001ee, + 0x1ef000001f1, + 0x1f5000001f6, + 0x1f9000001fa, + 0x1fb000001fc, + 0x1fd000001fe, + 0x1ff00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020a, + 0x20b0000020c, + 0x20d0000020e, + 0x20f00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021a, + 0x21b0000021c, + 0x21d0000021e, + 0x21f00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022a, + 0x22b0000022c, + 0x22d0000022e, + 0x22f00000230, + 0x23100000232, + 0x2330000023a, + 0x23c0000023d, + 0x23f00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024a, + 0x24b0000024c, + 0x24d0000024e, + 0x24f000002b0, + 0x2b9000002c2, + 0x2c6000002d2, + 0x2ec000002ed, + 
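
A readability note on the generated joining_types table that closes just above: the integer values are the ASCII code points of the Unicode Joining_Type letters, so 68, 82, 85, 67, 76 and 84 stand for 'D' (dual-joining), 'R' (right-joining), 'U' (non-joining), 'C' (join-causing), 'L' (left-joining) and 'T' (transparent), matching how the joiner checks in core.py consume them. A small illustrative sketch (not part of the diff) decoding a few entries:

    from idna import idnadata

    # The table stores ord() of the Joining_Type letter: ord('D') == 68.
    assert chr(idnadata.joining_types[0x628]) == 'D'   # ARABIC LETTER BEH, dual-joining
    assert chr(idnadata.joining_types[0x627]) == 'R'   # ARABIC LETTER ALEF, right-joining
    assert chr(idnadata.joining_types[0x200d]) == 'C'  # ZERO WIDTH JOINER, join-causing
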
0x2ee000002ef, + 0x30000000340, + 0x34200000343, + 0x3460000034f, + 0x35000000370, + 0x37100000372, + 0x37300000374, + 0x37700000378, + 0x37b0000037e, + 0x39000000391, + 0x3ac000003cf, + 0x3d7000003d8, + 0x3d9000003da, + 0x3db000003dc, + 0x3dd000003de, + 0x3df000003e0, + 0x3e1000003e2, + 0x3e3000003e4, + 0x3e5000003e6, + 0x3e7000003e8, + 0x3e9000003ea, + 0x3eb000003ec, + 0x3ed000003ee, + 0x3ef000003f0, + 0x3f3000003f4, + 0x3f8000003f9, + 0x3fb000003fd, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046a, + 0x46b0000046c, + 0x46d0000046e, + 0x46f00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047a, + 0x47b0000047c, + 0x47d0000047e, + 0x47f00000480, + 0x48100000482, + 0x48300000488, + 0x48b0000048c, + 0x48d0000048e, + 0x48f00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049a, + 0x49b0000049c, + 0x49d0000049e, + 0x49f000004a0, + 0x4a1000004a2, + 0x4a3000004a4, + 0x4a5000004a6, + 0x4a7000004a8, + 0x4a9000004aa, + 0x4ab000004ac, + 0x4ad000004ae, + 0x4af000004b0, + 0x4b1000004b2, + 0x4b3000004b4, + 0x4b5000004b6, + 0x4b7000004b8, + 0x4b9000004ba, + 0x4bb000004bc, + 0x4bd000004be, + 0x4bf000004c0, + 0x4c2000004c3, + 0x4c4000004c5, + 0x4c6000004c7, + 0x4c8000004c9, + 0x4ca000004cb, + 0x4cc000004cd, + 0x4ce000004d0, + 0x4d1000004d2, + 0x4d3000004d4, + 0x4d5000004d6, + 0x4d7000004d8, + 0x4d9000004da, + 0x4db000004dc, + 0x4dd000004de, + 0x4df000004e0, + 0x4e1000004e2, + 0x4e3000004e4, + 0x4e5000004e6, + 0x4e7000004e8, + 0x4e9000004ea, + 0x4eb000004ec, + 0x4ed000004ee, + 0x4ef000004f0, + 0x4f1000004f2, + 0x4f3000004f4, + 0x4f5000004f6, + 0x4f7000004f8, + 0x4f9000004fa, + 0x4fb000004fc, + 0x4fd000004fe, + 0x4ff00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050a, + 0x50b0000050c, + 0x50d0000050e, + 0x50f00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051a, + 0x51b0000051c, + 0x51d0000051e, + 0x51f00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052a, + 0x52b0000052c, + 0x52d0000052e, + 0x52f00000530, + 0x5590000055a, + 0x56000000587, + 0x58800000589, + 0x591000005be, + 0x5bf000005c0, + 0x5c1000005c3, + 0x5c4000005c6, + 0x5c7000005c8, + 0x5d0000005eb, + 0x5ef000005f3, + 0x6100000061b, + 0x62000000640, + 0x64100000660, + 0x66e00000675, + 0x679000006d4, + 0x6d5000006dd, + 0x6df000006e9, + 0x6ea000006f0, + 0x6fa00000700, + 0x7100000074b, + 0x74d000007b2, + 0x7c0000007f6, + 0x7fd000007fe, + 0x8000000082e, + 0x8400000085c, + 0x8600000086b, + 0x87000000888, + 0x8890000088f, + 0x898000008e2, + 0x8e300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098d, + 0x98f00000991, + 0x993000009a9, + 0x9aa000009b1, + 0x9b2000009b3, + 0x9b6000009ba, + 0x9bc000009c5, + 0x9c7000009c9, + 0x9cb000009cf, + 0x9d7000009d8, + 0x9e0000009e4, + 0x9e6000009f2, + 0x9fc000009fd, + 0x9fe000009ff, + 0xa0100000a04, + 0xa0500000a0b, + 0xa0f00000a11, + 0xa1300000a29, + 0xa2a00000a31, + 0xa3200000a33, + 0xa3500000a36, + 0xa3800000a3a, + 0xa3c00000a3d, + 0xa3e00000a43, + 0xa4700000a49, + 0xa4b00000a4e, + 0xa5100000a52, + 0xa5c00000a5d, + 0xa6600000a76, + 0xa8100000a84, + 0xa8500000a8e, + 0xa8f00000a92, + 0xa9300000aa9, + 0xaaa00000ab1, + 0xab200000ab4, + 0xab500000aba, + 0xabc00000ac6, + 0xac700000aca, + 0xacb00000ace, + 0xad000000ad1, + 0xae000000ae4, + 0xae600000af0, + 0xaf900000b00, + 0xb0100000b04, + 0xb0500000b0d, + 0xb0f00000b11, + 0xb1300000b29, + 0xb2a00000b31, + 
0xb3200000b34, + 0xb3500000b3a, + 0xb3c00000b45, + 0xb4700000b49, + 0xb4b00000b4e, + 0xb5500000b58, + 0xb5f00000b64, + 0xb6600000b70, + 0xb7100000b72, + 0xb8200000b84, + 0xb8500000b8b, + 0xb8e00000b91, + 0xb9200000b96, + 0xb9900000b9b, + 0xb9c00000b9d, + 0xb9e00000ba0, + 0xba300000ba5, + 0xba800000bab, + 0xbae00000bba, + 0xbbe00000bc3, + 0xbc600000bc9, + 0xbca00000bce, + 0xbd000000bd1, + 0xbd700000bd8, + 0xbe600000bf0, + 0xc0000000c0d, + 0xc0e00000c11, + 0xc1200000c29, + 0xc2a00000c3a, + 0xc3c00000c45, + 0xc4600000c49, + 0xc4a00000c4e, + 0xc5500000c57, + 0xc5800000c5b, + 0xc5d00000c5e, + 0xc6000000c64, + 0xc6600000c70, + 0xc8000000c84, + 0xc8500000c8d, + 0xc8e00000c91, + 0xc9200000ca9, + 0xcaa00000cb4, + 0xcb500000cba, + 0xcbc00000cc5, + 0xcc600000cc9, + 0xcca00000cce, + 0xcd500000cd7, + 0xcdd00000cdf, + 0xce000000ce4, + 0xce600000cf0, + 0xcf100000cf3, + 0xd0000000d0d, + 0xd0e00000d11, + 0xd1200000d45, + 0xd4600000d49, + 0xd4a00000d4f, + 0xd5400000d58, + 0xd5f00000d64, + 0xd6600000d70, + 0xd7a00000d80, + 0xd8100000d84, + 0xd8500000d97, + 0xd9a00000db2, + 0xdb300000dbc, + 0xdbd00000dbe, + 0xdc000000dc7, + 0xdca00000dcb, + 0xdcf00000dd5, + 0xdd600000dd7, + 0xdd800000de0, + 0xde600000df0, + 0xdf200000df4, + 0xe0100000e33, + 0xe3400000e3b, + 0xe4000000e4f, + 0xe5000000e5a, + 0xe8100000e83, + 0xe8400000e85, + 0xe8600000e8b, + 0xe8c00000ea4, + 0xea500000ea6, + 0xea700000eb3, + 0xeb400000ebe, + 0xec000000ec5, + 0xec600000ec7, + 0xec800000ece, + 0xed000000eda, + 0xede00000ee0, + 0xf0000000f01, + 0xf0b00000f0c, + 0xf1800000f1a, + 0xf2000000f2a, + 0xf3500000f36, + 0xf3700000f38, + 0xf3900000f3a, + 0xf3e00000f43, + 0xf4400000f48, + 0xf4900000f4d, + 0xf4e00000f52, + 0xf5300000f57, + 0xf5800000f5c, + 0xf5d00000f69, + 0xf6a00000f6d, + 0xf7100000f73, + 0xf7400000f75, + 0xf7a00000f81, + 0xf8200000f85, + 0xf8600000f93, + 0xf9400000f98, + 0xf9900000f9d, + 0xf9e00000fa2, + 0xfa300000fa7, + 0xfa800000fac, + 0xfad00000fb9, + 0xfba00000fbd, + 0xfc600000fc7, + 0x10000000104a, + 0x10500000109e, + 0x10d0000010fb, + 0x10fd00001100, + 0x120000001249, + 0x124a0000124e, + 0x125000001257, + 0x125800001259, + 0x125a0000125e, + 0x126000001289, + 0x128a0000128e, + 0x1290000012b1, + 0x12b2000012b6, + 0x12b8000012bf, + 0x12c0000012c1, + 0x12c2000012c6, + 0x12c8000012d7, + 0x12d800001311, + 0x131200001316, + 0x13180000135b, + 0x135d00001360, + 0x138000001390, + 0x13a0000013f6, + 0x14010000166d, + 0x166f00001680, + 0x16810000169b, + 0x16a0000016eb, + 0x16f1000016f9, + 0x170000001716, + 0x171f00001735, + 0x174000001754, + 0x17600000176d, + 0x176e00001771, + 0x177200001774, + 0x1780000017b4, + 0x17b6000017d4, + 0x17d7000017d8, + 0x17dc000017de, + 0x17e0000017ea, + 0x18100000181a, + 0x182000001879, + 0x1880000018ab, + 0x18b0000018f6, + 0x19000000191f, + 0x19200000192c, + 0x19300000193c, + 0x19460000196e, + 0x197000001975, + 0x1980000019ac, + 0x19b0000019ca, + 0x19d0000019da, + 0x1a0000001a1c, + 0x1a2000001a5f, + 0x1a6000001a7d, + 0x1a7f00001a8a, + 0x1a9000001a9a, + 0x1aa700001aa8, + 0x1ab000001abe, + 0x1abf00001acf, + 0x1b0000001b4d, + 0x1b5000001b5a, + 0x1b6b00001b74, + 0x1b8000001bf4, + 0x1c0000001c38, + 0x1c4000001c4a, + 0x1c4d00001c7e, + 0x1cd000001cd3, + 0x1cd400001cfb, + 0x1d0000001d2c, + 0x1d2f00001d30, + 0x1d3b00001d3c, + 0x1d4e00001d4f, + 0x1d6b00001d78, + 0x1d7900001d9b, + 0x1dc000001e00, + 0x1e0100001e02, + 0x1e0300001e04, + 0x1e0500001e06, + 0x1e0700001e08, + 0x1e0900001e0a, + 0x1e0b00001e0c, + 0x1e0d00001e0e, + 0x1e0f00001e10, + 0x1e1100001e12, + 0x1e1300001e14, + 0x1e1500001e16, + 0x1e1700001e18, + 0x1e1900001e1a, 
+ 0x1e1b00001e1c, + 0x1e1d00001e1e, + 0x1e1f00001e20, + 0x1e2100001e22, + 0x1e2300001e24, + 0x1e2500001e26, + 0x1e2700001e28, + 0x1e2900001e2a, + 0x1e2b00001e2c, + 0x1e2d00001e2e, + 0x1e2f00001e30, + 0x1e3100001e32, + 0x1e3300001e34, + 0x1e3500001e36, + 0x1e3700001e38, + 0x1e3900001e3a, + 0x1e3b00001e3c, + 0x1e3d00001e3e, + 0x1e3f00001e40, + 0x1e4100001e42, + 0x1e4300001e44, + 0x1e4500001e46, + 0x1e4700001e48, + 0x1e4900001e4a, + 0x1e4b00001e4c, + 0x1e4d00001e4e, + 0x1e4f00001e50, + 0x1e5100001e52, + 0x1e5300001e54, + 0x1e5500001e56, + 0x1e5700001e58, + 0x1e5900001e5a, + 0x1e5b00001e5c, + 0x1e5d00001e5e, + 0x1e5f00001e60, + 0x1e6100001e62, + 0x1e6300001e64, + 0x1e6500001e66, + 0x1e6700001e68, + 0x1e6900001e6a, + 0x1e6b00001e6c, + 0x1e6d00001e6e, + 0x1e6f00001e70, + 0x1e7100001e72, + 0x1e7300001e74, + 0x1e7500001e76, + 0x1e7700001e78, + 0x1e7900001e7a, + 0x1e7b00001e7c, + 0x1e7d00001e7e, + 0x1e7f00001e80, + 0x1e8100001e82, + 0x1e8300001e84, + 0x1e8500001e86, + 0x1e8700001e88, + 0x1e8900001e8a, + 0x1e8b00001e8c, + 0x1e8d00001e8e, + 0x1e8f00001e90, + 0x1e9100001e92, + 0x1e9300001e94, + 0x1e9500001e9a, + 0x1e9c00001e9e, + 0x1e9f00001ea0, + 0x1ea100001ea2, + 0x1ea300001ea4, + 0x1ea500001ea6, + 0x1ea700001ea8, + 0x1ea900001eaa, + 0x1eab00001eac, + 0x1ead00001eae, + 0x1eaf00001eb0, + 0x1eb100001eb2, + 0x1eb300001eb4, + 0x1eb500001eb6, + 0x1eb700001eb8, + 0x1eb900001eba, + 0x1ebb00001ebc, + 0x1ebd00001ebe, + 0x1ebf00001ec0, + 0x1ec100001ec2, + 0x1ec300001ec4, + 0x1ec500001ec6, + 0x1ec700001ec8, + 0x1ec900001eca, + 0x1ecb00001ecc, + 0x1ecd00001ece, + 0x1ecf00001ed0, + 0x1ed100001ed2, + 0x1ed300001ed4, + 0x1ed500001ed6, + 0x1ed700001ed8, + 0x1ed900001eda, + 0x1edb00001edc, + 0x1edd00001ede, + 0x1edf00001ee0, + 0x1ee100001ee2, + 0x1ee300001ee4, + 0x1ee500001ee6, + 0x1ee700001ee8, + 0x1ee900001eea, + 0x1eeb00001eec, + 0x1eed00001eee, + 0x1eef00001ef0, + 0x1ef100001ef2, + 0x1ef300001ef4, + 0x1ef500001ef6, + 0x1ef700001ef8, + 0x1ef900001efa, + 0x1efb00001efc, + 0x1efd00001efe, + 0x1eff00001f08, + 0x1f1000001f16, + 0x1f2000001f28, + 0x1f3000001f38, + 0x1f4000001f46, + 0x1f5000001f58, + 0x1f6000001f68, + 0x1f7000001f71, + 0x1f7200001f73, + 0x1f7400001f75, + 0x1f7600001f77, + 0x1f7800001f79, + 0x1f7a00001f7b, + 0x1f7c00001f7d, + 0x1fb000001fb2, + 0x1fb600001fb7, + 0x1fc600001fc7, + 0x1fd000001fd3, + 0x1fd600001fd8, + 0x1fe000001fe3, + 0x1fe400001fe8, + 0x1ff600001ff7, + 0x214e0000214f, + 0x218400002185, + 0x2c3000002c60, + 0x2c6100002c62, + 0x2c6500002c67, + 0x2c6800002c69, + 0x2c6a00002c6b, + 0x2c6c00002c6d, + 0x2c7100002c72, + 0x2c7300002c75, + 0x2c7600002c7c, + 0x2c8100002c82, + 0x2c8300002c84, + 0x2c8500002c86, + 0x2c8700002c88, + 0x2c8900002c8a, + 0x2c8b00002c8c, + 0x2c8d00002c8e, + 0x2c8f00002c90, + 0x2c9100002c92, + 0x2c9300002c94, + 0x2c9500002c96, + 0x2c9700002c98, + 0x2c9900002c9a, + 0x2c9b00002c9c, + 0x2c9d00002c9e, + 0x2c9f00002ca0, + 0x2ca100002ca2, + 0x2ca300002ca4, + 0x2ca500002ca6, + 0x2ca700002ca8, + 0x2ca900002caa, + 0x2cab00002cac, + 0x2cad00002cae, + 0x2caf00002cb0, + 0x2cb100002cb2, + 0x2cb300002cb4, + 0x2cb500002cb6, + 0x2cb700002cb8, + 0x2cb900002cba, + 0x2cbb00002cbc, + 0x2cbd00002cbe, + 0x2cbf00002cc0, + 0x2cc100002cc2, + 0x2cc300002cc4, + 0x2cc500002cc6, + 0x2cc700002cc8, + 0x2cc900002cca, + 0x2ccb00002ccc, + 0x2ccd00002cce, + 0x2ccf00002cd0, + 0x2cd100002cd2, + 0x2cd300002cd4, + 0x2cd500002cd6, + 0x2cd700002cd8, + 0x2cd900002cda, + 0x2cdb00002cdc, + 0x2cdd00002cde, + 0x2cdf00002ce0, + 0x2ce100002ce2, + 0x2ce300002ce5, + 0x2cec00002ced, + 0x2cee00002cf2, + 0x2cf300002cf4, + 
0x2d0000002d26, + 0x2d2700002d28, + 0x2d2d00002d2e, + 0x2d3000002d68, + 0x2d7f00002d97, + 0x2da000002da7, + 0x2da800002daf, + 0x2db000002db7, + 0x2db800002dbf, + 0x2dc000002dc7, + 0x2dc800002dcf, + 0x2dd000002dd7, + 0x2dd800002ddf, + 0x2de000002e00, + 0x2e2f00002e30, + 0x300500003008, + 0x302a0000302e, + 0x303c0000303d, + 0x304100003097, + 0x30990000309b, + 0x309d0000309f, + 0x30a1000030fb, + 0x30fc000030ff, + 0x310500003130, + 0x31a0000031c0, + 0x31f000003200, + 0x340000004dc0, + 0x4e000000a48d, + 0xa4d00000a4fe, + 0xa5000000a60d, + 0xa6100000a62c, + 0xa6410000a642, + 0xa6430000a644, + 0xa6450000a646, + 0xa6470000a648, + 0xa6490000a64a, + 0xa64b0000a64c, + 0xa64d0000a64e, + 0xa64f0000a650, + 0xa6510000a652, + 0xa6530000a654, + 0xa6550000a656, + 0xa6570000a658, + 0xa6590000a65a, + 0xa65b0000a65c, + 0xa65d0000a65e, + 0xa65f0000a660, + 0xa6610000a662, + 0xa6630000a664, + 0xa6650000a666, + 0xa6670000a668, + 0xa6690000a66a, + 0xa66b0000a66c, + 0xa66d0000a670, + 0xa6740000a67e, + 0xa67f0000a680, + 0xa6810000a682, + 0xa6830000a684, + 0xa6850000a686, + 0xa6870000a688, + 0xa6890000a68a, + 0xa68b0000a68c, + 0xa68d0000a68e, + 0xa68f0000a690, + 0xa6910000a692, + 0xa6930000a694, + 0xa6950000a696, + 0xa6970000a698, + 0xa6990000a69a, + 0xa69b0000a69c, + 0xa69e0000a6e6, + 0xa6f00000a6f2, + 0xa7170000a720, + 0xa7230000a724, + 0xa7250000a726, + 0xa7270000a728, + 0xa7290000a72a, + 0xa72b0000a72c, + 0xa72d0000a72e, + 0xa72f0000a732, + 0xa7330000a734, + 0xa7350000a736, + 0xa7370000a738, + 0xa7390000a73a, + 0xa73b0000a73c, + 0xa73d0000a73e, + 0xa73f0000a740, + 0xa7410000a742, + 0xa7430000a744, + 0xa7450000a746, + 0xa7470000a748, + 0xa7490000a74a, + 0xa74b0000a74c, + 0xa74d0000a74e, + 0xa74f0000a750, + 0xa7510000a752, + 0xa7530000a754, + 0xa7550000a756, + 0xa7570000a758, + 0xa7590000a75a, + 0xa75b0000a75c, + 0xa75d0000a75e, + 0xa75f0000a760, + 0xa7610000a762, + 0xa7630000a764, + 0xa7650000a766, + 0xa7670000a768, + 0xa7690000a76a, + 0xa76b0000a76c, + 0xa76d0000a76e, + 0xa76f0000a770, + 0xa7710000a779, + 0xa77a0000a77b, + 0xa77c0000a77d, + 0xa77f0000a780, + 0xa7810000a782, + 0xa7830000a784, + 0xa7850000a786, + 0xa7870000a789, + 0xa78c0000a78d, + 0xa78e0000a790, + 0xa7910000a792, + 0xa7930000a796, + 0xa7970000a798, + 0xa7990000a79a, + 0xa79b0000a79c, + 0xa79d0000a79e, + 0xa79f0000a7a0, + 0xa7a10000a7a2, + 0xa7a30000a7a4, + 0xa7a50000a7a6, + 0xa7a70000a7a8, + 0xa7a90000a7aa, + 0xa7af0000a7b0, + 0xa7b50000a7b6, + 0xa7b70000a7b8, + 0xa7b90000a7ba, + 0xa7bb0000a7bc, + 0xa7bd0000a7be, + 0xa7bf0000a7c0, + 0xa7c10000a7c2, + 0xa7c30000a7c4, + 0xa7c80000a7c9, + 0xa7ca0000a7cb, + 0xa7d10000a7d2, + 0xa7d30000a7d4, + 0xa7d50000a7d6, + 0xa7d70000a7d8, + 0xa7d90000a7da, + 0xa7f20000a7f5, + 0xa7f60000a7f8, + 0xa7fa0000a828, + 0xa82c0000a82d, + 0xa8400000a874, + 0xa8800000a8c6, + 0xa8d00000a8da, + 0xa8e00000a8f8, + 0xa8fb0000a8fc, + 0xa8fd0000a92e, + 0xa9300000a954, + 0xa9800000a9c1, + 0xa9cf0000a9da, + 0xa9e00000a9ff, + 0xaa000000aa37, + 0xaa400000aa4e, + 0xaa500000aa5a, + 0xaa600000aa77, + 0xaa7a0000aac3, + 0xaadb0000aade, + 0xaae00000aaf0, + 0xaaf20000aaf7, + 0xab010000ab07, + 0xab090000ab0f, + 0xab110000ab17, + 0xab200000ab27, + 0xab280000ab2f, + 0xab300000ab5b, + 0xab600000ab6a, + 0xabc00000abeb, + 0xabec0000abee, + 0xabf00000abfa, + 0xac000000d7a4, + 0xfa0e0000fa10, + 0xfa110000fa12, + 0xfa130000fa15, + 0xfa1f0000fa20, + 0xfa210000fa22, + 0xfa230000fa25, + 0xfa270000fa2a, + 0xfb1e0000fb1f, + 0xfe200000fe30, + 0xfe730000fe74, + 0x100000001000c, + 0x1000d00010027, + 0x100280001003b, + 0x1003c0001003e, + 0x1003f0001004e, + 
0x100500001005e, + 0x10080000100fb, + 0x101fd000101fe, + 0x102800001029d, + 0x102a0000102d1, + 0x102e0000102e1, + 0x1030000010320, + 0x1032d00010341, + 0x103420001034a, + 0x103500001037b, + 0x103800001039e, + 0x103a0000103c4, + 0x103c8000103d0, + 0x104280001049e, + 0x104a0000104aa, + 0x104d8000104fc, + 0x1050000010528, + 0x1053000010564, + 0x10597000105a2, + 0x105a3000105b2, + 0x105b3000105ba, + 0x105bb000105bd, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1078000010786, + 0x10787000107b1, + 0x107b2000107bb, + 0x1080000010806, + 0x1080800010809, + 0x1080a00010836, + 0x1083700010839, + 0x1083c0001083d, + 0x1083f00010856, + 0x1086000010877, + 0x108800001089f, + 0x108e0000108f3, + 0x108f4000108f6, + 0x1090000010916, + 0x109200001093a, + 0x10980000109b8, + 0x109be000109c0, + 0x10a0000010a04, + 0x10a0500010a07, + 0x10a0c00010a14, + 0x10a1500010a18, + 0x10a1900010a36, + 0x10a3800010a3b, + 0x10a3f00010a40, + 0x10a6000010a7d, + 0x10a8000010a9d, + 0x10ac000010ac8, + 0x10ac900010ae7, + 0x10b0000010b36, + 0x10b4000010b56, + 0x10b6000010b73, + 0x10b8000010b92, + 0x10c0000010c49, + 0x10cc000010cf3, + 0x10d0000010d28, + 0x10d3000010d3a, + 0x10e8000010eaa, + 0x10eab00010ead, + 0x10eb000010eb2, + 0x10f0000010f1d, + 0x10f2700010f28, + 0x10f3000010f51, + 0x10f7000010f86, + 0x10fb000010fc5, + 0x10fe000010ff7, + 0x1100000011047, + 0x1106600011076, + 0x1107f000110bb, + 0x110c2000110c3, + 0x110d0000110e9, + 0x110f0000110fa, + 0x1110000011135, + 0x1113600011140, + 0x1114400011148, + 0x1115000011174, + 0x1117600011177, + 0x11180000111c5, + 0x111c9000111cd, + 0x111ce000111db, + 0x111dc000111dd, + 0x1120000011212, + 0x1121300011238, + 0x1123e0001123f, + 0x1128000011287, + 0x1128800011289, + 0x1128a0001128e, + 0x1128f0001129e, + 0x1129f000112a9, + 0x112b0000112eb, + 0x112f0000112fa, + 0x1130000011304, + 0x113050001130d, + 0x1130f00011311, + 0x1131300011329, + 0x1132a00011331, + 0x1133200011334, + 0x113350001133a, + 0x1133b00011345, + 0x1134700011349, + 0x1134b0001134e, + 0x1135000011351, + 0x1135700011358, + 0x1135d00011364, + 0x113660001136d, + 0x1137000011375, + 0x114000001144b, + 0x114500001145a, + 0x1145e00011462, + 0x11480000114c6, + 0x114c7000114c8, + 0x114d0000114da, + 0x11580000115b6, + 0x115b8000115c1, + 0x115d8000115de, + 0x1160000011641, + 0x1164400011645, + 0x116500001165a, + 0x11680000116b9, + 0x116c0000116ca, + 0x117000001171b, + 0x1171d0001172c, + 0x117300001173a, + 0x1174000011747, + 0x118000001183b, + 0x118c0000118ea, + 0x118ff00011907, + 0x119090001190a, + 0x1190c00011914, + 0x1191500011917, + 0x1191800011936, + 0x1193700011939, + 0x1193b00011944, + 0x119500001195a, + 0x119a0000119a8, + 0x119aa000119d8, + 0x119da000119e2, + 0x119e3000119e5, + 0x11a0000011a3f, + 0x11a4700011a48, + 0x11a5000011a9a, + 0x11a9d00011a9e, + 0x11ab000011af9, + 0x11c0000011c09, + 0x11c0a00011c37, + 0x11c3800011c41, + 0x11c5000011c5a, + 0x11c7200011c90, + 0x11c9200011ca8, + 0x11ca900011cb7, + 0x11d0000011d07, + 0x11d0800011d0a, + 0x11d0b00011d37, + 0x11d3a00011d3b, + 0x11d3c00011d3e, + 0x11d3f00011d48, + 0x11d5000011d5a, + 0x11d6000011d66, + 0x11d6700011d69, + 0x11d6a00011d8f, + 0x11d9000011d92, + 0x11d9300011d99, + 0x11da000011daa, + 0x11ee000011ef7, + 0x11fb000011fb1, + 0x120000001239a, + 0x1248000012544, + 0x12f9000012ff1, + 0x130000001342f, + 0x1440000014647, + 0x1680000016a39, + 0x16a4000016a5f, + 0x16a6000016a6a, + 0x16a7000016abf, + 0x16ac000016aca, + 0x16ad000016aee, + 0x16af000016af5, + 0x16b0000016b37, + 0x16b4000016b44, + 0x16b5000016b5a, + 0x16b6300016b78, + 0x16b7d00016b90, + 0x16e6000016e80, + 
0x16f0000016f4b,
+        0x16f4f00016f88,
+        0x16f8f00016fa0,
+        0x16fe000016fe2,
+        0x16fe300016fe5,
+        0x16ff000016ff2,
+        0x17000000187f8,
+        0x1880000018cd6,
+        0x18d0000018d09,
+        0x1aff00001aff4,
+        0x1aff50001affc,
+        0x1affd0001afff,
+        0x1b0000001b123,
+        0x1b1500001b153,
+        0x1b1640001b168,
+        0x1b1700001b2fc,
+        0x1bc000001bc6b,
+        0x1bc700001bc7d,
+        0x1bc800001bc89,
+        0x1bc900001bc9a,
+        0x1bc9d0001bc9f,
+        0x1cf000001cf2e,
+        0x1cf300001cf47,
+        0x1da000001da37,
+        0x1da3b0001da6d,
+        0x1da750001da76,
+        0x1da840001da85,
+        0x1da9b0001daa0,
+        0x1daa10001dab0,
+        0x1df000001df1f,
+        0x1e0000001e007,
+        0x1e0080001e019,
+        0x1e01b0001e022,
+        0x1e0230001e025,
+        0x1e0260001e02b,
+        0x1e1000001e12d,
+        0x1e1300001e13e,
+        0x1e1400001e14a,
+        0x1e14e0001e14f,
+        0x1e2900001e2af,
+        0x1e2c00001e2fa,
+        0x1e7e00001e7e7,
+        0x1e7e80001e7ec,
+        0x1e7ed0001e7ef,
+        0x1e7f00001e7ff,
+        0x1e8000001e8c5,
+        0x1e8d00001e8d7,
+        0x1e9220001e94c,
+        0x1e9500001e95a,
+        0x1fbf00001fbfa,
+        0x200000002a6e0,
+        0x2a7000002b739,
+        0x2b7400002b81e,
+        0x2b8200002cea2,
+        0x2ceb00002ebe1,
+        0x300000003134b,
+    ),
+    'CONTEXTJ': (
+        0x200c0000200e,
+    ),
+    'CONTEXTO': (
+        0xb7000000b8,
+        0x37500000376,
+        0x5f3000005f5,
+        0x6600000066a,
+        0x6f0000006fa,
+        0x30fb000030fc,
+    ),
+}
diff --git a/myenv/lib/python3.9/site-packages/idna/intranges.py b/myenv/lib/python3.9/site-packages/idna/intranges.py
new file mode 100644
index 0000000..6a43b04
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/idna/intranges.py
@@ -0,0 +1,54 @@
+"""
+Given a list of integers, made up of (hopefully) a small number of long runs
+of consecutive integers, compute a representation of the form
+((start1, end1), (start2, end2) ...). Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+from typing import List, Tuple
+
+def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
+    """Represent a list of integers as a sequence of ranges:
+    ((start_0, end_0), (start_1, end_1), ...), such that the original
+    integers are exactly those x such that start_i <= x < end_i for some i.
+
+    Ranges are encoded as single integers (start << 32 | end), not as tuples.
+    """
+
+    sorted_list = sorted(list_)
+    ranges = []
+    last_write = -1
+    for i in range(len(sorted_list)):
+        if i+1 < len(sorted_list):
+            if sorted_list[i] == sorted_list[i+1]-1:
+                continue
+        current_range = sorted_list[last_write+1:i+1]
+        ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
+        last_write = i
+
+    return tuple(ranges)
+
+def _encode_range(start: int, end: int) -> int:
+    return (start << 32) | end
+
+def _decode_range(r: int) -> Tuple[int, int]:
+    return (r >> 32), (r & ((1 << 32) - 1))
+
+
+def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
+    """Determine if `int_` falls into one of the ranges in `ranges`."""
+    tuple_ = _encode_range(int_, 0)
+    pos = bisect.bisect_left(ranges, tuple_)
+    # we could be immediately ahead of a tuple (start, end)
+    # with start < int_ <= end
+    if pos > 0:
+        left, right = _decode_range(ranges[pos-1])
+        if left <= int_ < right:
+            return True
+    # or we could be immediately behind a tuple (int_, end)
+    if pos < len(ranges):
+        left, _ = _decode_range(ranges[pos])
+        if left == int_:
+            return True
+    return False
diff --git a/myenv/lib/python3.9/site-packages/idna/package_data.py b/myenv/lib/python3.9/site-packages/idna/package_data.py
new file mode 100644
index 0000000..f5ea87c
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/idna/package_data.py
@@ -0,0 +1,2 @@
+__version__ = '3.3'
+
diff --git a/myenv/lib/python3.9/site-packages/idna/py.typed b/myenv/lib/python3.9/site-packages/idna/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/myenv/lib/python3.9/site-packages/idna/uts46data.py b/myenv/lib/python3.9/site-packages/idna/uts46data.py
new file mode 100644
index 0000000..8f65705
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/idna/uts46data.py
@@ -0,0 +1,8512 @@
+# This file is automatically generated by tools/idna-data
+# vim: set fileencoding=utf-8 :
+
+from typing import List, Tuple, Union
+
+
+"""IDNA Mapping Table from UTS46."""
+
+
+__version__ = '14.0.0'
+def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
+    (0x0, '3'),
+    (0x1, '3'),
+    (0x2, '3'),
+    (0x3, '3'),
+    (0x4, '3'),
+    (0x5, '3'),
+    (0x6, '3'),
+    (0x7, '3'),
+    (0x8, '3'),
+    (0x9, '3'),
+    (0xA, '3'),
+    (0xB, '3'),
+    (0xC, '3'),
+    (0xD, '3'),
+    (0xE, '3'),
+    (0xF, '3'),
+    (0x10, '3'),
+    (0x11, '3'),
+    (0x12, '3'),
+    (0x13, '3'),
+    (0x14, '3'),
+    (0x15, '3'),
+    (0x16, '3'),
+    (0x17, '3'),
+    (0x18, '3'),
+    (0x19, '3'),
+    (0x1A, '3'),
+    (0x1B, '3'),
+    (0x1C, '3'),
+    (0x1D, '3'),
+    (0x1E, '3'),
+    (0x1F, '3'),
+    (0x20, '3'),
+    (0x21, '3'),
+    (0x22, '3'),
+    (0x23, '3'),
+    (0x24, '3'),
+    (0x25, '3'),
+    (0x26, '3'),
+    (0x27, '3'),
+    (0x28, '3'),
+    (0x29, '3'),
+    (0x2A, '3'),
+    (0x2B, '3'),
+    (0x2C, '3'),
+    (0x2D, 'V'),
+    (0x2E, 'V'),
+    (0x2F, '3'),
+    (0x30, 'V'),
+    (0x31, 'V'),
+    (0x32, 'V'),
+    (0x33, 'V'),
+    (0x34, 'V'),
+    (0x35, 'V'),
+    (0x36, 'V'),
+    (0x37, 'V'),
+    (0x38, 'V'),
+    (0x39, 'V'),
+    (0x3A, '3'),
+    (0x3B, '3'),
+    (0x3C, '3'),
+    (0x3D, '3'),
+    (0x3E, '3'),
+    (0x3F, '3'),
+    (0x40, '3'),
+    (0x41, 'M', 'a'),
+    (0x42, 'M', 'b'),
+    (0x43, 'M', 'c'),
+    (0x44, 'M', 'd'),
+    (0x45, 'M', 'e'),
+    (0x46, 'M', 'f'),
+    (0x47, 'M', 'g'),
+    (0x48, 'M', 'h'),
+    (0x49, 'M', 'i'),
+    (0x4A, 'M', 'j'),
+    (0x4B, 'M', 'k'),
+    (0x4C, 'M', 'l'),
+    (0x4D, 'M', 'm'),
+    (0x4E, 'M', 'n'),
+    (0x4F, 'M', 'o'),
+    (0x50, 'M', 'p'),
+    (0x51, 'M', 'q'),
+    (0x52, 'M', 'r'),
+    (0x53, 'M', 's'),
+    (0x54, 'M', 't'),
+    (0x55, 'M', 'u'),
+    (0x56, 'M', 'v'),
+    (0x57, 'M', 'w'),
+    (0x58, 'M', 'x'),
+    (0x59, 'M', 'y'),
+    (0x5A, 'M', 'z'),
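
The intranges module above is the machinery that makes the huge code point tables in idnadata.py usable: intranges_from_list() collapses each run of consecutive integers into a half-open range packed into a single integer as (start << 32) | end, and intranges_contain() answers membership with one bisect over the sorted tuple, giving the O(log(# runs)) lookup the module docstring promises. A standalone sketch of the encoding (illustrative only, not part of the diff; _encode_range and _decode_range are the private helpers defined above):

    from idna.intranges import intranges_from_list, intranges_contain, _decode_range

    # Two runs, 1..3 and 7, become the half-open ranges [1, 4) and [7, 8),
    # each packed into a single integer.
    ranges = intranges_from_list([1, 2, 3, 7])
    assert [_decode_range(r) for r in ranges] == [(1, 4), (7, 8)]

    assert intranges_contain(2, ranges)        # inside the first run
    assert not intranges_contain(5, ranges)    # falls into the gap

    # The PVALID/CONTEXTO tuples in idnadata.py use the same packing:
    # 0x2d0000002e decodes to [0x2d, 0x2e), i.e. just the hyphen-minus.
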
(0x5B, '3'), + (0x5C, '3'), + (0x5D, '3'), + (0x5E, '3'), + (0x5F, '3'), + (0x60, '3'), + (0x61, 'V'), + (0x62, 'V'), + (0x63, 'V'), + ] + +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x64, 'V'), + (0x65, 'V'), + (0x66, 'V'), + (0x67, 'V'), + (0x68, 'V'), + (0x69, 'V'), + (0x6A, 'V'), + (0x6B, 'V'), + (0x6C, 'V'), + (0x6D, 'V'), + (0x6E, 'V'), + (0x6F, 'V'), + (0x70, 'V'), + (0x71, 'V'), + (0x72, 'V'), + (0x73, 'V'), + (0x74, 'V'), + (0x75, 'V'), + (0x76, 'V'), + (0x77, 'V'), + (0x78, 'V'), + (0x79, 'V'), + (0x7A, 'V'), + (0x7B, '3'), + (0x7C, '3'), + (0x7D, '3'), + (0x7E, '3'), + (0x7F, '3'), + (0x80, 'X'), + (0x81, 'X'), + (0x82, 'X'), + (0x83, 'X'), + (0x84, 'X'), + (0x85, 'X'), + (0x86, 'X'), + (0x87, 'X'), + (0x88, 'X'), + (0x89, 'X'), + (0x8A, 'X'), + (0x8B, 'X'), + (0x8C, 'X'), + (0x8D, 'X'), + (0x8E, 'X'), + (0x8F, 'X'), + (0x90, 'X'), + (0x91, 'X'), + (0x92, 'X'), + (0x93, 'X'), + (0x94, 'X'), + (0x95, 'X'), + (0x96, 'X'), + (0x97, 'X'), + (0x98, 'X'), + (0x99, 'X'), + (0x9A, 'X'), + (0x9B, 'X'), + (0x9C, 'X'), + (0x9D, 'X'), + (0x9E, 'X'), + (0x9F, 'X'), + (0xA0, '3', ' '), + (0xA1, 'V'), + (0xA2, 'V'), + (0xA3, 'V'), + (0xA4, 'V'), + (0xA5, 'V'), + (0xA6, 'V'), + (0xA7, 'V'), + (0xA8, '3', ' ̈'), + (0xA9, 'V'), + (0xAA, 'M', 'a'), + (0xAB, 'V'), + (0xAC, 'V'), + (0xAD, 'I'), + (0xAE, 'V'), + (0xAF, '3', ' ̄'), + (0xB0, 'V'), + (0xB1, 'V'), + (0xB2, 'M', '2'), + (0xB3, 'M', '3'), + (0xB4, '3', ' ́'), + (0xB5, 'M', 'μ'), + (0xB6, 'V'), + (0xB7, 'V'), + (0xB8, '3', ' ̧'), + (0xB9, 'M', '1'), + (0xBA, 'M', 'o'), + (0xBB, 'V'), + (0xBC, 'M', '1⁄4'), + (0xBD, 'M', '1⁄2'), + (0xBE, 'M', '3⁄4'), + (0xBF, 'V'), + (0xC0, 'M', 'à'), + (0xC1, 'M', 'á'), + (0xC2, 'M', 'â'), + (0xC3, 'M', 'ã'), + (0xC4, 'M', 'ä'), + (0xC5, 'M', 'å'), + (0xC6, 'M', 'æ'), + (0xC7, 'M', 'ç'), + ] + +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC8, 'M', 'è'), + (0xC9, 'M', 'é'), + (0xCA, 'M', 'ê'), + (0xCB, 'M', 'ë'), + (0xCC, 'M', 'ì'), + (0xCD, 'M', 'í'), + (0xCE, 'M', 'î'), + (0xCF, 'M', 'ï'), + (0xD0, 'M', 'ð'), + (0xD1, 'M', 'ñ'), + (0xD2, 'M', 'ò'), + (0xD3, 'M', 'ó'), + (0xD4, 'M', 'ô'), + (0xD5, 'M', 'õ'), + (0xD6, 'M', 'ö'), + (0xD7, 'V'), + (0xD8, 'M', 'ø'), + (0xD9, 'M', 'ù'), + (0xDA, 'M', 'ú'), + (0xDB, 'M', 'û'), + (0xDC, 'M', 'ü'), + (0xDD, 'M', 'ý'), + (0xDE, 'M', 'þ'), + (0xDF, 'D', 'ss'), + (0xE0, 'V'), + (0xE1, 'V'), + (0xE2, 'V'), + (0xE3, 'V'), + (0xE4, 'V'), + (0xE5, 'V'), + (0xE6, 'V'), + (0xE7, 'V'), + (0xE8, 'V'), + (0xE9, 'V'), + (0xEA, 'V'), + (0xEB, 'V'), + (0xEC, 'V'), + (0xED, 'V'), + (0xEE, 'V'), + (0xEF, 'V'), + (0xF0, 'V'), + (0xF1, 'V'), + (0xF2, 'V'), + (0xF3, 'V'), + (0xF4, 'V'), + (0xF5, 'V'), + (0xF6, 'V'), + (0xF7, 'V'), + (0xF8, 'V'), + (0xF9, 'V'), + (0xFA, 'V'), + (0xFB, 'V'), + (0xFC, 'V'), + (0xFD, 'V'), + (0xFE, 'V'), + (0xFF, 'V'), + (0x100, 'M', 'ā'), + (0x101, 'V'), + (0x102, 'M', 'ă'), + (0x103, 'V'), + (0x104, 'M', 'ą'), + (0x105, 'V'), + (0x106, 'M', 'ć'), + (0x107, 'V'), + (0x108, 'M', 'ĉ'), + (0x109, 'V'), + (0x10A, 'M', 'ċ'), + (0x10B, 'V'), + (0x10C, 'M', 'č'), + (0x10D, 'V'), + (0x10E, 'M', 'ď'), + (0x10F, 'V'), + (0x110, 'M', 'đ'), + (0x111, 'V'), + (0x112, 'M', 'ē'), + (0x113, 'V'), + (0x114, 'M', 'ĕ'), + (0x115, 'V'), + (0x116, 'M', 'ė'), + (0x117, 'V'), + (0x118, 'M', 'ę'), + (0x119, 'V'), + (0x11A, 'M', 'ě'), + (0x11B, 'V'), + (0x11C, 'M', 'ĝ'), + (0x11D, 'V'), + (0x11E, 'M', 'ğ'), + (0x11F, 'V'), + (0x120, 'M', 'ġ'), + (0x121, 'V'), + (0x122, 'M', 'ģ'), + (0x123, 'V'), + 
(0x124, 'M', 'ĥ'), + (0x125, 'V'), + (0x126, 'M', 'ħ'), + (0x127, 'V'), + (0x128, 'M', 'ĩ'), + (0x129, 'V'), + (0x12A, 'M', 'ī'), + (0x12B, 'V'), + ] + +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x12C, 'M', 'ĭ'), + (0x12D, 'V'), + (0x12E, 'M', 'į'), + (0x12F, 'V'), + (0x130, 'M', 'i̇'), + (0x131, 'V'), + (0x132, 'M', 'ij'), + (0x134, 'M', 'ĵ'), + (0x135, 'V'), + (0x136, 'M', 'ķ'), + (0x137, 'V'), + (0x139, 'M', 'ĺ'), + (0x13A, 'V'), + (0x13B, 'M', 'ļ'), + (0x13C, 'V'), + (0x13D, 'M', 'ľ'), + (0x13E, 'V'), + (0x13F, 'M', 'l·'), + (0x141, 'M', 'ł'), + (0x142, 'V'), + (0x143, 'M', 'ń'), + (0x144, 'V'), + (0x145, 'M', 'ņ'), + (0x146, 'V'), + (0x147, 'M', 'ň'), + (0x148, 'V'), + (0x149, 'M', 'ʼn'), + (0x14A, 'M', 'ŋ'), + (0x14B, 'V'), + (0x14C, 'M', 'ō'), + (0x14D, 'V'), + (0x14E, 'M', 'ŏ'), + (0x14F, 'V'), + (0x150, 'M', 'ő'), + (0x151, 'V'), + (0x152, 'M', 'œ'), + (0x153, 'V'), + (0x154, 'M', 'ŕ'), + (0x155, 'V'), + (0x156, 'M', 'ŗ'), + (0x157, 'V'), + (0x158, 'M', 'ř'), + (0x159, 'V'), + (0x15A, 'M', 'ś'), + (0x15B, 'V'), + (0x15C, 'M', 'ŝ'), + (0x15D, 'V'), + (0x15E, 'M', 'ş'), + (0x15F, 'V'), + (0x160, 'M', 'š'), + (0x161, 'V'), + (0x162, 'M', 'ţ'), + (0x163, 'V'), + (0x164, 'M', 'ť'), + (0x165, 'V'), + (0x166, 'M', 'ŧ'), + (0x167, 'V'), + (0x168, 'M', 'ũ'), + (0x169, 'V'), + (0x16A, 'M', 'ū'), + (0x16B, 'V'), + (0x16C, 'M', 'ŭ'), + (0x16D, 'V'), + (0x16E, 'M', 'ů'), + (0x16F, 'V'), + (0x170, 'M', 'ű'), + (0x171, 'V'), + (0x172, 'M', 'ų'), + (0x173, 'V'), + (0x174, 'M', 'ŵ'), + (0x175, 'V'), + (0x176, 'M', 'ŷ'), + (0x177, 'V'), + (0x178, 'M', 'ÿ'), + (0x179, 'M', 'ź'), + (0x17A, 'V'), + (0x17B, 'M', 'ż'), + (0x17C, 'V'), + (0x17D, 'M', 'ž'), + (0x17E, 'V'), + (0x17F, 'M', 's'), + (0x180, 'V'), + (0x181, 'M', 'ɓ'), + (0x182, 'M', 'ƃ'), + (0x183, 'V'), + (0x184, 'M', 'ƅ'), + (0x185, 'V'), + (0x186, 'M', 'ɔ'), + (0x187, 'M', 'ƈ'), + (0x188, 'V'), + (0x189, 'M', 'ɖ'), + (0x18A, 'M', 'ɗ'), + (0x18B, 'M', 'ƌ'), + (0x18C, 'V'), + (0x18E, 'M', 'ǝ'), + (0x18F, 'M', 'ə'), + (0x190, 'M', 'ɛ'), + (0x191, 'M', 'ƒ'), + (0x192, 'V'), + (0x193, 'M', 'ɠ'), + ] + +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x194, 'M', 'ɣ'), + (0x195, 'V'), + (0x196, 'M', 'ɩ'), + (0x197, 'M', 'ɨ'), + (0x198, 'M', 'ƙ'), + (0x199, 'V'), + (0x19C, 'M', 'ɯ'), + (0x19D, 'M', 'ɲ'), + (0x19E, 'V'), + (0x19F, 'M', 'ɵ'), + (0x1A0, 'M', 'ơ'), + (0x1A1, 'V'), + (0x1A2, 'M', 'ƣ'), + (0x1A3, 'V'), + (0x1A4, 'M', 'ƥ'), + (0x1A5, 'V'), + (0x1A6, 'M', 'ʀ'), + (0x1A7, 'M', 'ƨ'), + (0x1A8, 'V'), + (0x1A9, 'M', 'ʃ'), + (0x1AA, 'V'), + (0x1AC, 'M', 'ƭ'), + (0x1AD, 'V'), + (0x1AE, 'M', 'ʈ'), + (0x1AF, 'M', 'ư'), + (0x1B0, 'V'), + (0x1B1, 'M', 'ʊ'), + (0x1B2, 'M', 'ʋ'), + (0x1B3, 'M', 'ƴ'), + (0x1B4, 'V'), + (0x1B5, 'M', 'ƶ'), + (0x1B6, 'V'), + (0x1B7, 'M', 'ʒ'), + (0x1B8, 'M', 'ƹ'), + (0x1B9, 'V'), + (0x1BC, 'M', 'ƽ'), + (0x1BD, 'V'), + (0x1C4, 'M', 'dž'), + (0x1C7, 'M', 'lj'), + (0x1CA, 'M', 'nj'), + (0x1CD, 'M', 'ǎ'), + (0x1CE, 'V'), + (0x1CF, 'M', 'ǐ'), + (0x1D0, 'V'), + (0x1D1, 'M', 'ǒ'), + (0x1D2, 'V'), + (0x1D3, 'M', 'ǔ'), + (0x1D4, 'V'), + (0x1D5, 'M', 'ǖ'), + (0x1D6, 'V'), + (0x1D7, 'M', 'ǘ'), + (0x1D8, 'V'), + (0x1D9, 'M', 'ǚ'), + (0x1DA, 'V'), + (0x1DB, 'M', 'ǜ'), + (0x1DC, 'V'), + (0x1DE, 'M', 'ǟ'), + (0x1DF, 'V'), + (0x1E0, 'M', 'ǡ'), + (0x1E1, 'V'), + (0x1E2, 'M', 'ǣ'), + (0x1E3, 'V'), + (0x1E4, 'M', 'ǥ'), + (0x1E5, 'V'), + (0x1E6, 'M', 'ǧ'), + (0x1E7, 'V'), + (0x1E8, 'M', 'ǩ'), + (0x1E9, 'V'), + (0x1EA, 'M', 'ǫ'), + (0x1EB, 'V'), + (0x1EC, 'M', 'ǭ'), + 
(0x1ED, 'V'), + (0x1EE, 'M', 'ǯ'), + (0x1EF, 'V'), + (0x1F1, 'M', 'dz'), + (0x1F4, 'M', 'ǵ'), + (0x1F5, 'V'), + (0x1F6, 'M', 'ƕ'), + (0x1F7, 'M', 'ƿ'), + (0x1F8, 'M', 'ǹ'), + (0x1F9, 'V'), + (0x1FA, 'M', 'ǻ'), + (0x1FB, 'V'), + (0x1FC, 'M', 'ǽ'), + (0x1FD, 'V'), + (0x1FE, 'M', 'ǿ'), + (0x1FF, 'V'), + (0x200, 'M', 'ȁ'), + (0x201, 'V'), + (0x202, 'M', 'ȃ'), + (0x203, 'V'), + (0x204, 'M', 'ȅ'), + (0x205, 'V'), + (0x206, 'M', 'ȇ'), + (0x207, 'V'), + (0x208, 'M', 'ȉ'), + (0x209, 'V'), + (0x20A, 'M', 'ȋ'), + (0x20B, 'V'), + (0x20C, 'M', 'ȍ'), + ] + +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x20D, 'V'), + (0x20E, 'M', 'ȏ'), + (0x20F, 'V'), + (0x210, 'M', 'ȑ'), + (0x211, 'V'), + (0x212, 'M', 'ȓ'), + (0x213, 'V'), + (0x214, 'M', 'ȕ'), + (0x215, 'V'), + (0x216, 'M', 'ȗ'), + (0x217, 'V'), + (0x218, 'M', 'ș'), + (0x219, 'V'), + (0x21A, 'M', 'ț'), + (0x21B, 'V'), + (0x21C, 'M', 'ȝ'), + (0x21D, 'V'), + (0x21E, 'M', 'ȟ'), + (0x21F, 'V'), + (0x220, 'M', 'ƞ'), + (0x221, 'V'), + (0x222, 'M', 'ȣ'), + (0x223, 'V'), + (0x224, 'M', 'ȥ'), + (0x225, 'V'), + (0x226, 'M', 'ȧ'), + (0x227, 'V'), + (0x228, 'M', 'ȩ'), + (0x229, 'V'), + (0x22A, 'M', 'ȫ'), + (0x22B, 'V'), + (0x22C, 'M', 'ȭ'), + (0x22D, 'V'), + (0x22E, 'M', 'ȯ'), + (0x22F, 'V'), + (0x230, 'M', 'ȱ'), + (0x231, 'V'), + (0x232, 'M', 'ȳ'), + (0x233, 'V'), + (0x23A, 'M', 'ⱥ'), + (0x23B, 'M', 'ȼ'), + (0x23C, 'V'), + (0x23D, 'M', 'ƚ'), + (0x23E, 'M', 'ⱦ'), + (0x23F, 'V'), + (0x241, 'M', 'ɂ'), + (0x242, 'V'), + (0x243, 'M', 'ƀ'), + (0x244, 'M', 'ʉ'), + (0x245, 'M', 'ʌ'), + (0x246, 'M', 'ɇ'), + (0x247, 'V'), + (0x248, 'M', 'ɉ'), + (0x249, 'V'), + (0x24A, 'M', 'ɋ'), + (0x24B, 'V'), + (0x24C, 'M', 'ɍ'), + (0x24D, 'V'), + (0x24E, 'M', 'ɏ'), + (0x24F, 'V'), + (0x2B0, 'M', 'h'), + (0x2B1, 'M', 'ɦ'), + (0x2B2, 'M', 'j'), + (0x2B3, 'M', 'r'), + (0x2B4, 'M', 'ɹ'), + (0x2B5, 'M', 'ɻ'), + (0x2B6, 'M', 'ʁ'), + (0x2B7, 'M', 'w'), + (0x2B8, 'M', 'y'), + (0x2B9, 'V'), + (0x2D8, '3', ' ̆'), + (0x2D9, '3', ' ̇'), + (0x2DA, '3', ' ̊'), + (0x2DB, '3', ' ̨'), + (0x2DC, '3', ' ̃'), + (0x2DD, '3', ' ̋'), + (0x2DE, 'V'), + (0x2E0, 'M', 'ɣ'), + (0x2E1, 'M', 'l'), + (0x2E2, 'M', 's'), + (0x2E3, 'M', 'x'), + (0x2E4, 'M', 'ʕ'), + (0x2E5, 'V'), + (0x340, 'M', '̀'), + (0x341, 'M', '́'), + (0x342, 'V'), + (0x343, 'M', '̓'), + (0x344, 'M', '̈́'), + (0x345, 'M', 'ι'), + (0x346, 'V'), + (0x34F, 'I'), + (0x350, 'V'), + (0x370, 'M', 'ͱ'), + (0x371, 'V'), + (0x372, 'M', 'ͳ'), + (0x373, 'V'), + (0x374, 'M', 'ʹ'), + (0x375, 'V'), + (0x376, 'M', 'ͷ'), + (0x377, 'V'), + ] + +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x378, 'X'), + (0x37A, '3', ' ι'), + (0x37B, 'V'), + (0x37E, '3', ';'), + (0x37F, 'M', 'ϳ'), + (0x380, 'X'), + (0x384, '3', ' ́'), + (0x385, '3', ' ̈́'), + (0x386, 'M', 'ά'), + (0x387, 'M', '·'), + (0x388, 'M', 'έ'), + (0x389, 'M', 'ή'), + (0x38A, 'M', 'ί'), + (0x38B, 'X'), + (0x38C, 'M', 'ό'), + (0x38D, 'X'), + (0x38E, 'M', 'ύ'), + (0x38F, 'M', 'ώ'), + (0x390, 'V'), + (0x391, 'M', 'α'), + (0x392, 'M', 'β'), + (0x393, 'M', 'γ'), + (0x394, 'M', 'δ'), + (0x395, 'M', 'ε'), + (0x396, 'M', 'ζ'), + (0x397, 'M', 'η'), + (0x398, 'M', 'θ'), + (0x399, 'M', 'ι'), + (0x39A, 'M', 'κ'), + (0x39B, 'M', 'λ'), + (0x39C, 'M', 'μ'), + (0x39D, 'M', 'ν'), + (0x39E, 'M', 'ξ'), + (0x39F, 'M', 'ο'), + (0x3A0, 'M', 'π'), + (0x3A1, 'M', 'ρ'), + (0x3A2, 'X'), + (0x3A3, 'M', 'σ'), + (0x3A4, 'M', 'τ'), + (0x3A5, 'M', 'υ'), + (0x3A6, 'M', 'φ'), + (0x3A7, 'M', 'χ'), + (0x3A8, 'M', 'ψ'), + (0x3A9, 'M', 'ω'), + (0x3AA, 'M', 'ϊ'), + (0x3AB, 
'M', 'ϋ'), + (0x3AC, 'V'), + (0x3C2, 'D', 'σ'), + (0x3C3, 'V'), + (0x3CF, 'M', 'ϗ'), + (0x3D0, 'M', 'β'), + (0x3D1, 'M', 'θ'), + (0x3D2, 'M', 'υ'), + (0x3D3, 'M', 'ύ'), + (0x3D4, 'M', 'ϋ'), + (0x3D5, 'M', 'φ'), + (0x3D6, 'M', 'π'), + (0x3D7, 'V'), + (0x3D8, 'M', 'ϙ'), + (0x3D9, 'V'), + (0x3DA, 'M', 'ϛ'), + (0x3DB, 'V'), + (0x3DC, 'M', 'ϝ'), + (0x3DD, 'V'), + (0x3DE, 'M', 'ϟ'), + (0x3DF, 'V'), + (0x3E0, 'M', 'ϡ'), + (0x3E1, 'V'), + (0x3E2, 'M', 'ϣ'), + (0x3E3, 'V'), + (0x3E4, 'M', 'ϥ'), + (0x3E5, 'V'), + (0x3E6, 'M', 'ϧ'), + (0x3E7, 'V'), + (0x3E8, 'M', 'ϩ'), + (0x3E9, 'V'), + (0x3EA, 'M', 'ϫ'), + (0x3EB, 'V'), + (0x3EC, 'M', 'ϭ'), + (0x3ED, 'V'), + (0x3EE, 'M', 'ϯ'), + (0x3EF, 'V'), + (0x3F0, 'M', 'κ'), + (0x3F1, 'M', 'ρ'), + (0x3F2, 'M', 'σ'), + (0x3F3, 'V'), + (0x3F4, 'M', 'θ'), + (0x3F5, 'M', 'ε'), + (0x3F6, 'V'), + (0x3F7, 'M', 'ϸ'), + (0x3F8, 'V'), + (0x3F9, 'M', 'σ'), + (0x3FA, 'M', 'ϻ'), + (0x3FB, 'V'), + (0x3FD, 'M', 'ͻ'), + (0x3FE, 'M', 'ͼ'), + (0x3FF, 'M', 'ͽ'), + (0x400, 'M', 'ѐ'), + (0x401, 'M', 'ё'), + (0x402, 'M', 'ђ'), + ] + +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x403, 'M', 'ѓ'), + (0x404, 'M', 'є'), + (0x405, 'M', 'ѕ'), + (0x406, 'M', 'і'), + (0x407, 'M', 'ї'), + (0x408, 'M', 'ј'), + (0x409, 'M', 'љ'), + (0x40A, 'M', 'њ'), + (0x40B, 'M', 'ћ'), + (0x40C, 'M', 'ќ'), + (0x40D, 'M', 'ѝ'), + (0x40E, 'M', 'ў'), + (0x40F, 'M', 'џ'), + (0x410, 'M', 'а'), + (0x411, 'M', 'б'), + (0x412, 'M', 'в'), + (0x413, 'M', 'г'), + (0x414, 'M', 'д'), + (0x415, 'M', 'е'), + (0x416, 'M', 'ж'), + (0x417, 'M', 'з'), + (0x418, 'M', 'и'), + (0x419, 'M', 'й'), + (0x41A, 'M', 'к'), + (0x41B, 'M', 'л'), + (0x41C, 'M', 'м'), + (0x41D, 'M', 'н'), + (0x41E, 'M', 'о'), + (0x41F, 'M', 'п'), + (0x420, 'M', 'р'), + (0x421, 'M', 'с'), + (0x422, 'M', 'т'), + (0x423, 'M', 'у'), + (0x424, 'M', 'ф'), + (0x425, 'M', 'х'), + (0x426, 'M', 'ц'), + (0x427, 'M', 'ч'), + (0x428, 'M', 'ш'), + (0x429, 'M', 'щ'), + (0x42A, 'M', 'ъ'), + (0x42B, 'M', 'ы'), + (0x42C, 'M', 'ь'), + (0x42D, 'M', 'э'), + (0x42E, 'M', 'ю'), + (0x42F, 'M', 'я'), + (0x430, 'V'), + (0x460, 'M', 'ѡ'), + (0x461, 'V'), + (0x462, 'M', 'ѣ'), + (0x463, 'V'), + (0x464, 'M', 'ѥ'), + (0x465, 'V'), + (0x466, 'M', 'ѧ'), + (0x467, 'V'), + (0x468, 'M', 'ѩ'), + (0x469, 'V'), + (0x46A, 'M', 'ѫ'), + (0x46B, 'V'), + (0x46C, 'M', 'ѭ'), + (0x46D, 'V'), + (0x46E, 'M', 'ѯ'), + (0x46F, 'V'), + (0x470, 'M', 'ѱ'), + (0x471, 'V'), + (0x472, 'M', 'ѳ'), + (0x473, 'V'), + (0x474, 'M', 'ѵ'), + (0x475, 'V'), + (0x476, 'M', 'ѷ'), + (0x477, 'V'), + (0x478, 'M', 'ѹ'), + (0x479, 'V'), + (0x47A, 'M', 'ѻ'), + (0x47B, 'V'), + (0x47C, 'M', 'ѽ'), + (0x47D, 'V'), + (0x47E, 'M', 'ѿ'), + (0x47F, 'V'), + (0x480, 'M', 'ҁ'), + (0x481, 'V'), + (0x48A, 'M', 'ҋ'), + (0x48B, 'V'), + (0x48C, 'M', 'ҍ'), + (0x48D, 'V'), + (0x48E, 'M', 'ҏ'), + (0x48F, 'V'), + (0x490, 'M', 'ґ'), + (0x491, 'V'), + (0x492, 'M', 'ғ'), + (0x493, 'V'), + (0x494, 'M', 'ҕ'), + (0x495, 'V'), + (0x496, 'M', 'җ'), + (0x497, 'V'), + (0x498, 'M', 'ҙ'), + (0x499, 'V'), + (0x49A, 'M', 'қ'), + (0x49B, 'V'), + (0x49C, 'M', 'ҝ'), + (0x49D, 'V'), + ] + +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x49E, 'M', 'ҟ'), + (0x49F, 'V'), + (0x4A0, 'M', 'ҡ'), + (0x4A1, 'V'), + (0x4A2, 'M', 'ң'), + (0x4A3, 'V'), + (0x4A4, 'M', 'ҥ'), + (0x4A5, 'V'), + (0x4A6, 'M', 'ҧ'), + (0x4A7, 'V'), + (0x4A8, 'M', 'ҩ'), + (0x4A9, 'V'), + (0x4AA, 'M', 'ҫ'), + (0x4AB, 'V'), + (0x4AC, 'M', 'ҭ'), + (0x4AD, 'V'), + (0x4AE, 'M', 'ү'), + (0x4AF, 'V'), + (0x4B0, 'M', 'ұ'), + (0x4B1, 'V'), + 
(0x4B2, 'M', 'ҳ'), + (0x4B3, 'V'), + (0x4B4, 'M', 'ҵ'), + (0x4B5, 'V'), + (0x4B6, 'M', 'ҷ'), + (0x4B7, 'V'), + (0x4B8, 'M', 'ҹ'), + (0x4B9, 'V'), + (0x4BA, 'M', 'һ'), + (0x4BB, 'V'), + (0x4BC, 'M', 'ҽ'), + (0x4BD, 'V'), + (0x4BE, 'M', 'ҿ'), + (0x4BF, 'V'), + (0x4C0, 'X'), + (0x4C1, 'M', 'ӂ'), + (0x4C2, 'V'), + (0x4C3, 'M', 'ӄ'), + (0x4C4, 'V'), + (0x4C5, 'M', 'ӆ'), + (0x4C6, 'V'), + (0x4C7, 'M', 'ӈ'), + (0x4C8, 'V'), + (0x4C9, 'M', 'ӊ'), + (0x4CA, 'V'), + (0x4CB, 'M', 'ӌ'), + (0x4CC, 'V'), + (0x4CD, 'M', 'ӎ'), + (0x4CE, 'V'), + (0x4D0, 'M', 'ӑ'), + (0x4D1, 'V'), + (0x4D2, 'M', 'ӓ'), + (0x4D3, 'V'), + (0x4D4, 'M', 'ӕ'), + (0x4D5, 'V'), + (0x4D6, 'M', 'ӗ'), + (0x4D7, 'V'), + (0x4D8, 'M', 'ә'), + (0x4D9, 'V'), + (0x4DA, 'M', 'ӛ'), + (0x4DB, 'V'), + (0x4DC, 'M', 'ӝ'), + (0x4DD, 'V'), + (0x4DE, 'M', 'ӟ'), + (0x4DF, 'V'), + (0x4E0, 'M', 'ӡ'), + (0x4E1, 'V'), + (0x4E2, 'M', 'ӣ'), + (0x4E3, 'V'), + (0x4E4, 'M', 'ӥ'), + (0x4E5, 'V'), + (0x4E6, 'M', 'ӧ'), + (0x4E7, 'V'), + (0x4E8, 'M', 'ө'), + (0x4E9, 'V'), + (0x4EA, 'M', 'ӫ'), + (0x4EB, 'V'), + (0x4EC, 'M', 'ӭ'), + (0x4ED, 'V'), + (0x4EE, 'M', 'ӯ'), + (0x4EF, 'V'), + (0x4F0, 'M', 'ӱ'), + (0x4F1, 'V'), + (0x4F2, 'M', 'ӳ'), + (0x4F3, 'V'), + (0x4F4, 'M', 'ӵ'), + (0x4F5, 'V'), + (0x4F6, 'M', 'ӷ'), + (0x4F7, 'V'), + (0x4F8, 'M', 'ӹ'), + (0x4F9, 'V'), + (0x4FA, 'M', 'ӻ'), + (0x4FB, 'V'), + (0x4FC, 'M', 'ӽ'), + (0x4FD, 'V'), + (0x4FE, 'M', 'ӿ'), + (0x4FF, 'V'), + (0x500, 'M', 'ԁ'), + (0x501, 'V'), + (0x502, 'M', 'ԃ'), + ] + +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x503, 'V'), + (0x504, 'M', 'ԅ'), + (0x505, 'V'), + (0x506, 'M', 'ԇ'), + (0x507, 'V'), + (0x508, 'M', 'ԉ'), + (0x509, 'V'), + (0x50A, 'M', 'ԋ'), + (0x50B, 'V'), + (0x50C, 'M', 'ԍ'), + (0x50D, 'V'), + (0x50E, 'M', 'ԏ'), + (0x50F, 'V'), + (0x510, 'M', 'ԑ'), + (0x511, 'V'), + (0x512, 'M', 'ԓ'), + (0x513, 'V'), + (0x514, 'M', 'ԕ'), + (0x515, 'V'), + (0x516, 'M', 'ԗ'), + (0x517, 'V'), + (0x518, 'M', 'ԙ'), + (0x519, 'V'), + (0x51A, 'M', 'ԛ'), + (0x51B, 'V'), + (0x51C, 'M', 'ԝ'), + (0x51D, 'V'), + (0x51E, 'M', 'ԟ'), + (0x51F, 'V'), + (0x520, 'M', 'ԡ'), + (0x521, 'V'), + (0x522, 'M', 'ԣ'), + (0x523, 'V'), + (0x524, 'M', 'ԥ'), + (0x525, 'V'), + (0x526, 'M', 'ԧ'), + (0x527, 'V'), + (0x528, 'M', 'ԩ'), + (0x529, 'V'), + (0x52A, 'M', 'ԫ'), + (0x52B, 'V'), + (0x52C, 'M', 'ԭ'), + (0x52D, 'V'), + (0x52E, 'M', 'ԯ'), + (0x52F, 'V'), + (0x530, 'X'), + (0x531, 'M', 'ա'), + (0x532, 'M', 'բ'), + (0x533, 'M', 'գ'), + (0x534, 'M', 'դ'), + (0x535, 'M', 'ե'), + (0x536, 'M', 'զ'), + (0x537, 'M', 'է'), + (0x538, 'M', 'ը'), + (0x539, 'M', 'թ'), + (0x53A, 'M', 'ժ'), + (0x53B, 'M', 'ի'), + (0x53C, 'M', 'լ'), + (0x53D, 'M', 'խ'), + (0x53E, 'M', 'ծ'), + (0x53F, 'M', 'կ'), + (0x540, 'M', 'հ'), + (0x541, 'M', 'ձ'), + (0x542, 'M', 'ղ'), + (0x543, 'M', 'ճ'), + (0x544, 'M', 'մ'), + (0x545, 'M', 'յ'), + (0x546, 'M', 'ն'), + (0x547, 'M', 'շ'), + (0x548, 'M', 'ո'), + (0x549, 'M', 'չ'), + (0x54A, 'M', 'պ'), + (0x54B, 'M', 'ջ'), + (0x54C, 'M', 'ռ'), + (0x54D, 'M', 'ս'), + (0x54E, 'M', 'վ'), + (0x54F, 'M', 'տ'), + (0x550, 'M', 'ր'), + (0x551, 'M', 'ց'), + (0x552, 'M', 'ւ'), + (0x553, 'M', 'փ'), + (0x554, 'M', 'ք'), + (0x555, 'M', 'օ'), + (0x556, 'M', 'ֆ'), + (0x557, 'X'), + (0x559, 'V'), + (0x587, 'M', 'եւ'), + (0x588, 'V'), + (0x58B, 'X'), + (0x58D, 'V'), + (0x590, 'X'), + (0x591, 'V'), + (0x5C8, 'X'), + (0x5D0, 'V'), + (0x5EB, 'X'), + (0x5EF, 'V'), + (0x5F5, 'X'), + (0x606, 'V'), + (0x61C, 'X'), + (0x61D, 'V'), + ] + +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return 
[ + (0x675, 'M', 'اٴ'), + (0x676, 'M', 'وٴ'), + (0x677, 'M', 'ۇٴ'), + (0x678, 'M', 'يٴ'), + (0x679, 'V'), + (0x6DD, 'X'), + (0x6DE, 'V'), + (0x70E, 'X'), + (0x710, 'V'), + (0x74B, 'X'), + (0x74D, 'V'), + (0x7B2, 'X'), + (0x7C0, 'V'), + (0x7FB, 'X'), + (0x7FD, 'V'), + (0x82E, 'X'), + (0x830, 'V'), + (0x83F, 'X'), + (0x840, 'V'), + (0x85C, 'X'), + (0x85E, 'V'), + (0x85F, 'X'), + (0x860, 'V'), + (0x86B, 'X'), + (0x870, 'V'), + (0x88F, 'X'), + (0x898, 'V'), + (0x8E2, 'X'), + (0x8E3, 'V'), + (0x958, 'M', 'क़'), + (0x959, 'M', 'ख़'), + (0x95A, 'M', 'ग़'), + (0x95B, 'M', 'ज़'), + (0x95C, 'M', 'ड़'), + (0x95D, 'M', 'ढ़'), + (0x95E, 'M', 'फ़'), + (0x95F, 'M', 'य़'), + (0x960, 'V'), + (0x984, 'X'), + (0x985, 'V'), + (0x98D, 'X'), + (0x98F, 'V'), + (0x991, 'X'), + (0x993, 'V'), + (0x9A9, 'X'), + (0x9AA, 'V'), + (0x9B1, 'X'), + (0x9B2, 'V'), + (0x9B3, 'X'), + (0x9B6, 'V'), + (0x9BA, 'X'), + (0x9BC, 'V'), + (0x9C5, 'X'), + (0x9C7, 'V'), + (0x9C9, 'X'), + (0x9CB, 'V'), + (0x9CF, 'X'), + (0x9D7, 'V'), + (0x9D8, 'X'), + (0x9DC, 'M', 'ড়'), + (0x9DD, 'M', 'ঢ়'), + (0x9DE, 'X'), + (0x9DF, 'M', 'য়'), + (0x9E0, 'V'), + (0x9E4, 'X'), + (0x9E6, 'V'), + (0x9FF, 'X'), + (0xA01, 'V'), + (0xA04, 'X'), + (0xA05, 'V'), + (0xA0B, 'X'), + (0xA0F, 'V'), + (0xA11, 'X'), + (0xA13, 'V'), + (0xA29, 'X'), + (0xA2A, 'V'), + (0xA31, 'X'), + (0xA32, 'V'), + (0xA33, 'M', 'ਲ਼'), + (0xA34, 'X'), + (0xA35, 'V'), + (0xA36, 'M', 'ਸ਼'), + (0xA37, 'X'), + (0xA38, 'V'), + (0xA3A, 'X'), + (0xA3C, 'V'), + (0xA3D, 'X'), + (0xA3E, 'V'), + (0xA43, 'X'), + (0xA47, 'V'), + (0xA49, 'X'), + (0xA4B, 'V'), + (0xA4E, 'X'), + (0xA51, 'V'), + (0xA52, 'X'), + (0xA59, 'M', 'ਖ਼'), + (0xA5A, 'M', 'ਗ਼'), + (0xA5B, 'M', 'ਜ਼'), + (0xA5C, 'V'), + (0xA5D, 'X'), + ] + +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA5E, 'M', 'ਫ਼'), + (0xA5F, 'X'), + (0xA66, 'V'), + (0xA77, 'X'), + (0xA81, 'V'), + (0xA84, 'X'), + (0xA85, 'V'), + (0xA8E, 'X'), + (0xA8F, 'V'), + (0xA92, 'X'), + (0xA93, 'V'), + (0xAA9, 'X'), + (0xAAA, 'V'), + (0xAB1, 'X'), + (0xAB2, 'V'), + (0xAB4, 'X'), + (0xAB5, 'V'), + (0xABA, 'X'), + (0xABC, 'V'), + (0xAC6, 'X'), + (0xAC7, 'V'), + (0xACA, 'X'), + (0xACB, 'V'), + (0xACE, 'X'), + (0xAD0, 'V'), + (0xAD1, 'X'), + (0xAE0, 'V'), + (0xAE4, 'X'), + (0xAE6, 'V'), + (0xAF2, 'X'), + (0xAF9, 'V'), + (0xB00, 'X'), + (0xB01, 'V'), + (0xB04, 'X'), + (0xB05, 'V'), + (0xB0D, 'X'), + (0xB0F, 'V'), + (0xB11, 'X'), + (0xB13, 'V'), + (0xB29, 'X'), + (0xB2A, 'V'), + (0xB31, 'X'), + (0xB32, 'V'), + (0xB34, 'X'), + (0xB35, 'V'), + (0xB3A, 'X'), + (0xB3C, 'V'), + (0xB45, 'X'), + (0xB47, 'V'), + (0xB49, 'X'), + (0xB4B, 'V'), + (0xB4E, 'X'), + (0xB55, 'V'), + (0xB58, 'X'), + (0xB5C, 'M', 'ଡ଼'), + (0xB5D, 'M', 'ଢ଼'), + (0xB5E, 'X'), + (0xB5F, 'V'), + (0xB64, 'X'), + (0xB66, 'V'), + (0xB78, 'X'), + (0xB82, 'V'), + (0xB84, 'X'), + (0xB85, 'V'), + (0xB8B, 'X'), + (0xB8E, 'V'), + (0xB91, 'X'), + (0xB92, 'V'), + (0xB96, 'X'), + (0xB99, 'V'), + (0xB9B, 'X'), + (0xB9C, 'V'), + (0xB9D, 'X'), + (0xB9E, 'V'), + (0xBA0, 'X'), + (0xBA3, 'V'), + (0xBA5, 'X'), + (0xBA8, 'V'), + (0xBAB, 'X'), + (0xBAE, 'V'), + (0xBBA, 'X'), + (0xBBE, 'V'), + (0xBC3, 'X'), + (0xBC6, 'V'), + (0xBC9, 'X'), + (0xBCA, 'V'), + (0xBCE, 'X'), + (0xBD0, 'V'), + (0xBD1, 'X'), + (0xBD7, 'V'), + (0xBD8, 'X'), + (0xBE6, 'V'), + (0xBFB, 'X'), + (0xC00, 'V'), + (0xC0D, 'X'), + (0xC0E, 'V'), + (0xC11, 'X'), + (0xC12, 'V'), + (0xC29, 'X'), + (0xC2A, 'V'), + ] + +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC3A, 'X'), + (0xC3C, 'V'), + 
(0xC45, 'X'), + (0xC46, 'V'), + (0xC49, 'X'), + (0xC4A, 'V'), + (0xC4E, 'X'), + (0xC55, 'V'), + (0xC57, 'X'), + (0xC58, 'V'), + (0xC5B, 'X'), + (0xC5D, 'V'), + (0xC5E, 'X'), + (0xC60, 'V'), + (0xC64, 'X'), + (0xC66, 'V'), + (0xC70, 'X'), + (0xC77, 'V'), + (0xC8D, 'X'), + (0xC8E, 'V'), + (0xC91, 'X'), + (0xC92, 'V'), + (0xCA9, 'X'), + (0xCAA, 'V'), + (0xCB4, 'X'), + (0xCB5, 'V'), + (0xCBA, 'X'), + (0xCBC, 'V'), + (0xCC5, 'X'), + (0xCC6, 'V'), + (0xCC9, 'X'), + (0xCCA, 'V'), + (0xCCE, 'X'), + (0xCD5, 'V'), + (0xCD7, 'X'), + (0xCDD, 'V'), + (0xCDF, 'X'), + (0xCE0, 'V'), + (0xCE4, 'X'), + (0xCE6, 'V'), + (0xCF0, 'X'), + (0xCF1, 'V'), + (0xCF3, 'X'), + (0xD00, 'V'), + (0xD0D, 'X'), + (0xD0E, 'V'), + (0xD11, 'X'), + (0xD12, 'V'), + (0xD45, 'X'), + (0xD46, 'V'), + (0xD49, 'X'), + (0xD4A, 'V'), + (0xD50, 'X'), + (0xD54, 'V'), + (0xD64, 'X'), + (0xD66, 'V'), + (0xD80, 'X'), + (0xD81, 'V'), + (0xD84, 'X'), + (0xD85, 'V'), + (0xD97, 'X'), + (0xD9A, 'V'), + (0xDB2, 'X'), + (0xDB3, 'V'), + (0xDBC, 'X'), + (0xDBD, 'V'), + (0xDBE, 'X'), + (0xDC0, 'V'), + (0xDC7, 'X'), + (0xDCA, 'V'), + (0xDCB, 'X'), + (0xDCF, 'V'), + (0xDD5, 'X'), + (0xDD6, 'V'), + (0xDD7, 'X'), + (0xDD8, 'V'), + (0xDE0, 'X'), + (0xDE6, 'V'), + (0xDF0, 'X'), + (0xDF2, 'V'), + (0xDF5, 'X'), + (0xE01, 'V'), + (0xE33, 'M', 'ํา'), + (0xE34, 'V'), + (0xE3B, 'X'), + (0xE3F, 'V'), + (0xE5C, 'X'), + (0xE81, 'V'), + (0xE83, 'X'), + (0xE84, 'V'), + (0xE85, 'X'), + (0xE86, 'V'), + (0xE8B, 'X'), + (0xE8C, 'V'), + (0xEA4, 'X'), + (0xEA5, 'V'), + (0xEA6, 'X'), + (0xEA7, 'V'), + (0xEB3, 'M', 'ໍາ'), + (0xEB4, 'V'), + ] + +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xEBE, 'X'), + (0xEC0, 'V'), + (0xEC5, 'X'), + (0xEC6, 'V'), + (0xEC7, 'X'), + (0xEC8, 'V'), + (0xECE, 'X'), + (0xED0, 'V'), + (0xEDA, 'X'), + (0xEDC, 'M', 'ຫນ'), + (0xEDD, 'M', 'ຫມ'), + (0xEDE, 'V'), + (0xEE0, 'X'), + (0xF00, 'V'), + (0xF0C, 'M', '་'), + (0xF0D, 'V'), + (0xF43, 'M', 'གྷ'), + (0xF44, 'V'), + (0xF48, 'X'), + (0xF49, 'V'), + (0xF4D, 'M', 'ཌྷ'), + (0xF4E, 'V'), + (0xF52, 'M', 'དྷ'), + (0xF53, 'V'), + (0xF57, 'M', 'བྷ'), + (0xF58, 'V'), + (0xF5C, 'M', 'ཛྷ'), + (0xF5D, 'V'), + (0xF69, 'M', 'ཀྵ'), + (0xF6A, 'V'), + (0xF6D, 'X'), + (0xF71, 'V'), + (0xF73, 'M', 'ཱི'), + (0xF74, 'V'), + (0xF75, 'M', 'ཱུ'), + (0xF76, 'M', 'ྲྀ'), + (0xF77, 'M', 'ྲཱྀ'), + (0xF78, 'M', 'ླྀ'), + (0xF79, 'M', 'ླཱྀ'), + (0xF7A, 'V'), + (0xF81, 'M', 'ཱྀ'), + (0xF82, 'V'), + (0xF93, 'M', 'ྒྷ'), + (0xF94, 'V'), + (0xF98, 'X'), + (0xF99, 'V'), + (0xF9D, 'M', 'ྜྷ'), + (0xF9E, 'V'), + (0xFA2, 'M', 'ྡྷ'), + (0xFA3, 'V'), + (0xFA7, 'M', 'ྦྷ'), + (0xFA8, 'V'), + (0xFAC, 'M', 'ྫྷ'), + (0xFAD, 'V'), + (0xFB9, 'M', 'ྐྵ'), + (0xFBA, 'V'), + (0xFBD, 'X'), + (0xFBE, 'V'), + (0xFCD, 'X'), + (0xFCE, 'V'), + (0xFDB, 'X'), + (0x1000, 'V'), + (0x10A0, 'X'), + (0x10C7, 'M', 'ⴧ'), + (0x10C8, 'X'), + (0x10CD, 'M', 'ⴭ'), + (0x10CE, 'X'), + (0x10D0, 'V'), + (0x10FC, 'M', 'ნ'), + (0x10FD, 'V'), + (0x115F, 'X'), + (0x1161, 'V'), + (0x1249, 'X'), + (0x124A, 'V'), + (0x124E, 'X'), + (0x1250, 'V'), + (0x1257, 'X'), + (0x1258, 'V'), + (0x1259, 'X'), + (0x125A, 'V'), + (0x125E, 'X'), + (0x1260, 'V'), + (0x1289, 'X'), + (0x128A, 'V'), + (0x128E, 'X'), + (0x1290, 'V'), + (0x12B1, 'X'), + (0x12B2, 'V'), + (0x12B6, 'X'), + (0x12B8, 'V'), + (0x12BF, 'X'), + (0x12C0, 'V'), + (0x12C1, 'X'), + (0x12C2, 'V'), + (0x12C6, 'X'), + (0x12C8, 'V'), + (0x12D7, 'X'), + (0x12D8, 'V'), + (0x1311, 'X'), + (0x1312, 'V'), + ] + +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1316, 
'X'), + (0x1318, 'V'), + (0x135B, 'X'), + (0x135D, 'V'), + (0x137D, 'X'), + (0x1380, 'V'), + (0x139A, 'X'), + (0x13A0, 'V'), + (0x13F6, 'X'), + (0x13F8, 'M', 'Ᏸ'), + (0x13F9, 'M', 'Ᏹ'), + (0x13FA, 'M', 'Ᏺ'), + (0x13FB, 'M', 'Ᏻ'), + (0x13FC, 'M', 'Ᏼ'), + (0x13FD, 'M', 'Ᏽ'), + (0x13FE, 'X'), + (0x1400, 'V'), + (0x1680, 'X'), + (0x1681, 'V'), + (0x169D, 'X'), + (0x16A0, 'V'), + (0x16F9, 'X'), + (0x1700, 'V'), + (0x1716, 'X'), + (0x171F, 'V'), + (0x1737, 'X'), + (0x1740, 'V'), + (0x1754, 'X'), + (0x1760, 'V'), + (0x176D, 'X'), + (0x176E, 'V'), + (0x1771, 'X'), + (0x1772, 'V'), + (0x1774, 'X'), + (0x1780, 'V'), + (0x17B4, 'X'), + (0x17B6, 'V'), + (0x17DE, 'X'), + (0x17E0, 'V'), + (0x17EA, 'X'), + (0x17F0, 'V'), + (0x17FA, 'X'), + (0x1800, 'V'), + (0x1806, 'X'), + (0x1807, 'V'), + (0x180B, 'I'), + (0x180E, 'X'), + (0x180F, 'I'), + (0x1810, 'V'), + (0x181A, 'X'), + (0x1820, 'V'), + (0x1879, 'X'), + (0x1880, 'V'), + (0x18AB, 'X'), + (0x18B0, 'V'), + (0x18F6, 'X'), + (0x1900, 'V'), + (0x191F, 'X'), + (0x1920, 'V'), + (0x192C, 'X'), + (0x1930, 'V'), + (0x193C, 'X'), + (0x1940, 'V'), + (0x1941, 'X'), + (0x1944, 'V'), + (0x196E, 'X'), + (0x1970, 'V'), + (0x1975, 'X'), + (0x1980, 'V'), + (0x19AC, 'X'), + (0x19B0, 'V'), + (0x19CA, 'X'), + (0x19D0, 'V'), + (0x19DB, 'X'), + (0x19DE, 'V'), + (0x1A1C, 'X'), + (0x1A1E, 'V'), + (0x1A5F, 'X'), + (0x1A60, 'V'), + (0x1A7D, 'X'), + (0x1A7F, 'V'), + (0x1A8A, 'X'), + (0x1A90, 'V'), + (0x1A9A, 'X'), + (0x1AA0, 'V'), + (0x1AAE, 'X'), + (0x1AB0, 'V'), + (0x1ACF, 'X'), + (0x1B00, 'V'), + (0x1B4D, 'X'), + (0x1B50, 'V'), + (0x1B7F, 'X'), + (0x1B80, 'V'), + (0x1BF4, 'X'), + (0x1BFC, 'V'), + (0x1C38, 'X'), + (0x1C3B, 'V'), + (0x1C4A, 'X'), + (0x1C4D, 'V'), + (0x1C80, 'M', 'в'), + ] + +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1C81, 'M', 'д'), + (0x1C82, 'M', 'о'), + (0x1C83, 'M', 'с'), + (0x1C84, 'M', 'т'), + (0x1C86, 'M', 'ъ'), + (0x1C87, 'M', 'ѣ'), + (0x1C88, 'M', 'ꙋ'), + (0x1C89, 'X'), + (0x1C90, 'M', 'ა'), + (0x1C91, 'M', 'ბ'), + (0x1C92, 'M', 'გ'), + (0x1C93, 'M', 'დ'), + (0x1C94, 'M', 'ე'), + (0x1C95, 'M', 'ვ'), + (0x1C96, 'M', 'ზ'), + (0x1C97, 'M', 'თ'), + (0x1C98, 'M', 'ი'), + (0x1C99, 'M', 'კ'), + (0x1C9A, 'M', 'ლ'), + (0x1C9B, 'M', 'მ'), + (0x1C9C, 'M', 'ნ'), + (0x1C9D, 'M', 'ო'), + (0x1C9E, 'M', 'პ'), + (0x1C9F, 'M', 'ჟ'), + (0x1CA0, 'M', 'რ'), + (0x1CA1, 'M', 'ს'), + (0x1CA2, 'M', 'ტ'), + (0x1CA3, 'M', 'უ'), + (0x1CA4, 'M', 'ფ'), + (0x1CA5, 'M', 'ქ'), + (0x1CA6, 'M', 'ღ'), + (0x1CA7, 'M', 'ყ'), + (0x1CA8, 'M', 'შ'), + (0x1CA9, 'M', 'ჩ'), + (0x1CAA, 'M', 'ც'), + (0x1CAB, 'M', 'ძ'), + (0x1CAC, 'M', 'წ'), + (0x1CAD, 'M', 'ჭ'), + (0x1CAE, 'M', 'ხ'), + (0x1CAF, 'M', 'ჯ'), + (0x1CB0, 'M', 'ჰ'), + (0x1CB1, 'M', 'ჱ'), + (0x1CB2, 'M', 'ჲ'), + (0x1CB3, 'M', 'ჳ'), + (0x1CB4, 'M', 'ჴ'), + (0x1CB5, 'M', 'ჵ'), + (0x1CB6, 'M', 'ჶ'), + (0x1CB7, 'M', 'ჷ'), + (0x1CB8, 'M', 'ჸ'), + (0x1CB9, 'M', 'ჹ'), + (0x1CBA, 'M', 'ჺ'), + (0x1CBB, 'X'), + (0x1CBD, 'M', 'ჽ'), + (0x1CBE, 'M', 'ჾ'), + (0x1CBF, 'M', 'ჿ'), + (0x1CC0, 'V'), + (0x1CC8, 'X'), + (0x1CD0, 'V'), + (0x1CFB, 'X'), + (0x1D00, 'V'), + (0x1D2C, 'M', 'a'), + (0x1D2D, 'M', 'æ'), + (0x1D2E, 'M', 'b'), + (0x1D2F, 'V'), + (0x1D30, 'M', 'd'), + (0x1D31, 'M', 'e'), + (0x1D32, 'M', 'ǝ'), + (0x1D33, 'M', 'g'), + (0x1D34, 'M', 'h'), + (0x1D35, 'M', 'i'), + (0x1D36, 'M', 'j'), + (0x1D37, 'M', 'k'), + (0x1D38, 'M', 'l'), + (0x1D39, 'M', 'm'), + (0x1D3A, 'M', 'n'), + (0x1D3B, 'V'), + (0x1D3C, 'M', 'o'), + (0x1D3D, 'M', 'ȣ'), + (0x1D3E, 'M', 'p'), + (0x1D3F, 'M', 'r'), + (0x1D40, 'M', 't'), + 
(0x1D41, 'M', 'u'), + (0x1D42, 'M', 'w'), + (0x1D43, 'M', 'a'), + (0x1D44, 'M', 'ɐ'), + (0x1D45, 'M', 'ɑ'), + (0x1D46, 'M', 'ᴂ'), + (0x1D47, 'M', 'b'), + (0x1D48, 'M', 'd'), + (0x1D49, 'M', 'e'), + (0x1D4A, 'M', 'ə'), + (0x1D4B, 'M', 'ɛ'), + (0x1D4C, 'M', 'ɜ'), + (0x1D4D, 'M', 'g'), + (0x1D4E, 'V'), + (0x1D4F, 'M', 'k'), + (0x1D50, 'M', 'm'), + (0x1D51, 'M', 'ŋ'), + (0x1D52, 'M', 'o'), + (0x1D53, 'M', 'ɔ'), + ] + +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D54, 'M', 'ᴖ'), + (0x1D55, 'M', 'ᴗ'), + (0x1D56, 'M', 'p'), + (0x1D57, 'M', 't'), + (0x1D58, 'M', 'u'), + (0x1D59, 'M', 'ᴝ'), + (0x1D5A, 'M', 'ɯ'), + (0x1D5B, 'M', 'v'), + (0x1D5C, 'M', 'ᴥ'), + (0x1D5D, 'M', 'β'), + (0x1D5E, 'M', 'γ'), + (0x1D5F, 'M', 'δ'), + (0x1D60, 'M', 'φ'), + (0x1D61, 'M', 'χ'), + (0x1D62, 'M', 'i'), + (0x1D63, 'M', 'r'), + (0x1D64, 'M', 'u'), + (0x1D65, 'M', 'v'), + (0x1D66, 'M', 'β'), + (0x1D67, 'M', 'γ'), + (0x1D68, 'M', 'ρ'), + (0x1D69, 'M', 'φ'), + (0x1D6A, 'M', 'χ'), + (0x1D6B, 'V'), + (0x1D78, 'M', 'н'), + (0x1D79, 'V'), + (0x1D9B, 'M', 'ɒ'), + (0x1D9C, 'M', 'c'), + (0x1D9D, 'M', 'ɕ'), + (0x1D9E, 'M', 'ð'), + (0x1D9F, 'M', 'ɜ'), + (0x1DA0, 'M', 'f'), + (0x1DA1, 'M', 'ɟ'), + (0x1DA2, 'M', 'ɡ'), + (0x1DA3, 'M', 'ɥ'), + (0x1DA4, 'M', 'ɨ'), + (0x1DA5, 'M', 'ɩ'), + (0x1DA6, 'M', 'ɪ'), + (0x1DA7, 'M', 'ᵻ'), + (0x1DA8, 'M', 'ʝ'), + (0x1DA9, 'M', 'ɭ'), + (0x1DAA, 'M', 'ᶅ'), + (0x1DAB, 'M', 'ʟ'), + (0x1DAC, 'M', 'ɱ'), + (0x1DAD, 'M', 'ɰ'), + (0x1DAE, 'M', 'ɲ'), + (0x1DAF, 'M', 'ɳ'), + (0x1DB0, 'M', 'ɴ'), + (0x1DB1, 'M', 'ɵ'), + (0x1DB2, 'M', 'ɸ'), + (0x1DB3, 'M', 'ʂ'), + (0x1DB4, 'M', 'ʃ'), + (0x1DB5, 'M', 'ƫ'), + (0x1DB6, 'M', 'ʉ'), + (0x1DB7, 'M', 'ʊ'), + (0x1DB8, 'M', 'ᴜ'), + (0x1DB9, 'M', 'ʋ'), + (0x1DBA, 'M', 'ʌ'), + (0x1DBB, 'M', 'z'), + (0x1DBC, 'M', 'ʐ'), + (0x1DBD, 'M', 'ʑ'), + (0x1DBE, 'M', 'ʒ'), + (0x1DBF, 'M', 'θ'), + (0x1DC0, 'V'), + (0x1E00, 'M', 'ḁ'), + (0x1E01, 'V'), + (0x1E02, 'M', 'ḃ'), + (0x1E03, 'V'), + (0x1E04, 'M', 'ḅ'), + (0x1E05, 'V'), + (0x1E06, 'M', 'ḇ'), + (0x1E07, 'V'), + (0x1E08, 'M', 'ḉ'), + (0x1E09, 'V'), + (0x1E0A, 'M', 'ḋ'), + (0x1E0B, 'V'), + (0x1E0C, 'M', 'ḍ'), + (0x1E0D, 'V'), + (0x1E0E, 'M', 'ḏ'), + (0x1E0F, 'V'), + (0x1E10, 'M', 'ḑ'), + (0x1E11, 'V'), + (0x1E12, 'M', 'ḓ'), + (0x1E13, 'V'), + (0x1E14, 'M', 'ḕ'), + (0x1E15, 'V'), + (0x1E16, 'M', 'ḗ'), + (0x1E17, 'V'), + (0x1E18, 'M', 'ḙ'), + (0x1E19, 'V'), + (0x1E1A, 'M', 'ḛ'), + (0x1E1B, 'V'), + (0x1E1C, 'M', 'ḝ'), + (0x1E1D, 'V'), + (0x1E1E, 'M', 'ḟ'), + (0x1E1F, 'V'), + (0x1E20, 'M', 'ḡ'), + (0x1E21, 'V'), + (0x1E22, 'M', 'ḣ'), + (0x1E23, 'V'), + ] + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E24, 'M', 'ḥ'), + (0x1E25, 'V'), + (0x1E26, 'M', 'ḧ'), + (0x1E27, 'V'), + (0x1E28, 'M', 'ḩ'), + (0x1E29, 'V'), + (0x1E2A, 'M', 'ḫ'), + (0x1E2B, 'V'), + (0x1E2C, 'M', 'ḭ'), + (0x1E2D, 'V'), + (0x1E2E, 'M', 'ḯ'), + (0x1E2F, 'V'), + (0x1E30, 'M', 'ḱ'), + (0x1E31, 'V'), + (0x1E32, 'M', 'ḳ'), + (0x1E33, 'V'), + (0x1E34, 'M', 'ḵ'), + (0x1E35, 'V'), + (0x1E36, 'M', 'ḷ'), + (0x1E37, 'V'), + (0x1E38, 'M', 'ḹ'), + (0x1E39, 'V'), + (0x1E3A, 'M', 'ḻ'), + (0x1E3B, 'V'), + (0x1E3C, 'M', 'ḽ'), + (0x1E3D, 'V'), + (0x1E3E, 'M', 'ḿ'), + (0x1E3F, 'V'), + (0x1E40, 'M', 'ṁ'), + (0x1E41, 'V'), + (0x1E42, 'M', 'ṃ'), + (0x1E43, 'V'), + (0x1E44, 'M', 'ṅ'), + (0x1E45, 'V'), + (0x1E46, 'M', 'ṇ'), + (0x1E47, 'V'), + (0x1E48, 'M', 'ṉ'), + (0x1E49, 'V'), + (0x1E4A, 'M', 'ṋ'), + (0x1E4B, 'V'), + (0x1E4C, 'M', 'ṍ'), + (0x1E4D, 'V'), + (0x1E4E, 'M', 'ṏ'), + (0x1E4F, 'V'), + (0x1E50, 'M', 
'ṑ'), + (0x1E51, 'V'), + (0x1E52, 'M', 'ṓ'), + (0x1E53, 'V'), + (0x1E54, 'M', 'ṕ'), + (0x1E55, 'V'), + (0x1E56, 'M', 'ṗ'), + (0x1E57, 'V'), + (0x1E58, 'M', 'ṙ'), + (0x1E59, 'V'), + (0x1E5A, 'M', 'ṛ'), + (0x1E5B, 'V'), + (0x1E5C, 'M', 'ṝ'), + (0x1E5D, 'V'), + (0x1E5E, 'M', 'ṟ'), + (0x1E5F, 'V'), + (0x1E60, 'M', 'ṡ'), + (0x1E61, 'V'), + (0x1E62, 'M', 'ṣ'), + (0x1E63, 'V'), + (0x1E64, 'M', 'ṥ'), + (0x1E65, 'V'), + (0x1E66, 'M', 'ṧ'), + (0x1E67, 'V'), + (0x1E68, 'M', 'ṩ'), + (0x1E69, 'V'), + (0x1E6A, 'M', 'ṫ'), + (0x1E6B, 'V'), + (0x1E6C, 'M', 'ṭ'), + (0x1E6D, 'V'), + (0x1E6E, 'M', 'ṯ'), + (0x1E6F, 'V'), + (0x1E70, 'M', 'ṱ'), + (0x1E71, 'V'), + (0x1E72, 'M', 'ṳ'), + (0x1E73, 'V'), + (0x1E74, 'M', 'ṵ'), + (0x1E75, 'V'), + (0x1E76, 'M', 'ṷ'), + (0x1E77, 'V'), + (0x1E78, 'M', 'ṹ'), + (0x1E79, 'V'), + (0x1E7A, 'M', 'ṻ'), + (0x1E7B, 'V'), + (0x1E7C, 'M', 'ṽ'), + (0x1E7D, 'V'), + (0x1E7E, 'M', 'ṿ'), + (0x1E7F, 'V'), + (0x1E80, 'M', 'ẁ'), + (0x1E81, 'V'), + (0x1E82, 'M', 'ẃ'), + (0x1E83, 'V'), + (0x1E84, 'M', 'ẅ'), + (0x1E85, 'V'), + (0x1E86, 'M', 'ẇ'), + (0x1E87, 'V'), + ] + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E88, 'M', 'ẉ'), + (0x1E89, 'V'), + (0x1E8A, 'M', 'ẋ'), + (0x1E8B, 'V'), + (0x1E8C, 'M', 'ẍ'), + (0x1E8D, 'V'), + (0x1E8E, 'M', 'ẏ'), + (0x1E8F, 'V'), + (0x1E90, 'M', 'ẑ'), + (0x1E91, 'V'), + (0x1E92, 'M', 'ẓ'), + (0x1E93, 'V'), + (0x1E94, 'M', 'ẕ'), + (0x1E95, 'V'), + (0x1E9A, 'M', 'aʾ'), + (0x1E9B, 'M', 'ṡ'), + (0x1E9C, 'V'), + (0x1E9E, 'M', 'ss'), + (0x1E9F, 'V'), + (0x1EA0, 'M', 'ạ'), + (0x1EA1, 'V'), + (0x1EA2, 'M', 'ả'), + (0x1EA3, 'V'), + (0x1EA4, 'M', 'ấ'), + (0x1EA5, 'V'), + (0x1EA6, 'M', 'ầ'), + (0x1EA7, 'V'), + (0x1EA8, 'M', 'ẩ'), + (0x1EA9, 'V'), + (0x1EAA, 'M', 'ẫ'), + (0x1EAB, 'V'), + (0x1EAC, 'M', 'ậ'), + (0x1EAD, 'V'), + (0x1EAE, 'M', 'ắ'), + (0x1EAF, 'V'), + (0x1EB0, 'M', 'ằ'), + (0x1EB1, 'V'), + (0x1EB2, 'M', 'ẳ'), + (0x1EB3, 'V'), + (0x1EB4, 'M', 'ẵ'), + (0x1EB5, 'V'), + (0x1EB6, 'M', 'ặ'), + (0x1EB7, 'V'), + (0x1EB8, 'M', 'ẹ'), + (0x1EB9, 'V'), + (0x1EBA, 'M', 'ẻ'), + (0x1EBB, 'V'), + (0x1EBC, 'M', 'ẽ'), + (0x1EBD, 'V'), + (0x1EBE, 'M', 'ế'), + (0x1EBF, 'V'), + (0x1EC0, 'M', 'ề'), + (0x1EC1, 'V'), + (0x1EC2, 'M', 'ể'), + (0x1EC3, 'V'), + (0x1EC4, 'M', 'ễ'), + (0x1EC5, 'V'), + (0x1EC6, 'M', 'ệ'), + (0x1EC7, 'V'), + (0x1EC8, 'M', 'ỉ'), + (0x1EC9, 'V'), + (0x1ECA, 'M', 'ị'), + (0x1ECB, 'V'), + (0x1ECC, 'M', 'ọ'), + (0x1ECD, 'V'), + (0x1ECE, 'M', 'ỏ'), + (0x1ECF, 'V'), + (0x1ED0, 'M', 'ố'), + (0x1ED1, 'V'), + (0x1ED2, 'M', 'ồ'), + (0x1ED3, 'V'), + (0x1ED4, 'M', 'ổ'), + (0x1ED5, 'V'), + (0x1ED6, 'M', 'ỗ'), + (0x1ED7, 'V'), + (0x1ED8, 'M', 'ộ'), + (0x1ED9, 'V'), + (0x1EDA, 'M', 'ớ'), + (0x1EDB, 'V'), + (0x1EDC, 'M', 'ờ'), + (0x1EDD, 'V'), + (0x1EDE, 'M', 'ở'), + (0x1EDF, 'V'), + (0x1EE0, 'M', 'ỡ'), + (0x1EE1, 'V'), + (0x1EE2, 'M', 'ợ'), + (0x1EE3, 'V'), + (0x1EE4, 'M', 'ụ'), + (0x1EE5, 'V'), + (0x1EE6, 'M', 'ủ'), + (0x1EE7, 'V'), + (0x1EE8, 'M', 'ứ'), + (0x1EE9, 'V'), + (0x1EEA, 'M', 'ừ'), + (0x1EEB, 'V'), + (0x1EEC, 'M', 'ử'), + (0x1EED, 'V'), + (0x1EEE, 'M', 'ữ'), + (0x1EEF, 'V'), + (0x1EF0, 'M', 'ự'), + ] + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EF1, 'V'), + (0x1EF2, 'M', 'ỳ'), + (0x1EF3, 'V'), + (0x1EF4, 'M', 'ỵ'), + (0x1EF5, 'V'), + (0x1EF6, 'M', 'ỷ'), + (0x1EF7, 'V'), + (0x1EF8, 'M', 'ỹ'), + (0x1EF9, 'V'), + (0x1EFA, 'M', 'ỻ'), + (0x1EFB, 'V'), + (0x1EFC, 'M', 'ỽ'), + (0x1EFD, 'V'), + (0x1EFE, 'M', 'ỿ'), + (0x1EFF, 'V'), + (0x1F08, 'M', 'ἀ'), + (0x1F09, 'M', 'ἁ'), + (0x1F0A, 
'M', 'ἂ'), + (0x1F0B, 'M', 'ἃ'), + (0x1F0C, 'M', 'ἄ'), + (0x1F0D, 'M', 'ἅ'), + (0x1F0E, 'M', 'ἆ'), + (0x1F0F, 'M', 'ἇ'), + (0x1F10, 'V'), + (0x1F16, 'X'), + (0x1F18, 'M', 'ἐ'), + (0x1F19, 'M', 'ἑ'), + (0x1F1A, 'M', 'ἒ'), + (0x1F1B, 'M', 'ἓ'), + (0x1F1C, 'M', 'ἔ'), + (0x1F1D, 'M', 'ἕ'), + (0x1F1E, 'X'), + (0x1F20, 'V'), + (0x1F28, 'M', 'ἠ'), + (0x1F29, 'M', 'ἡ'), + (0x1F2A, 'M', 'ἢ'), + (0x1F2B, 'M', 'ἣ'), + (0x1F2C, 'M', 'ἤ'), + (0x1F2D, 'M', 'ἥ'), + (0x1F2E, 'M', 'ἦ'), + (0x1F2F, 'M', 'ἧ'), + (0x1F30, 'V'), + (0x1F38, 'M', 'ἰ'), + (0x1F39, 'M', 'ἱ'), + (0x1F3A, 'M', 'ἲ'), + (0x1F3B, 'M', 'ἳ'), + (0x1F3C, 'M', 'ἴ'), + (0x1F3D, 'M', 'ἵ'), + (0x1F3E, 'M', 'ἶ'), + (0x1F3F, 'M', 'ἷ'), + (0x1F40, 'V'), + (0x1F46, 'X'), + (0x1F48, 'M', 'ὀ'), + (0x1F49, 'M', 'ὁ'), + (0x1F4A, 'M', 'ὂ'), + (0x1F4B, 'M', 'ὃ'), + (0x1F4C, 'M', 'ὄ'), + (0x1F4D, 'M', 'ὅ'), + (0x1F4E, 'X'), + (0x1F50, 'V'), + (0x1F58, 'X'), + (0x1F59, 'M', 'ὑ'), + (0x1F5A, 'X'), + (0x1F5B, 'M', 'ὓ'), + (0x1F5C, 'X'), + (0x1F5D, 'M', 'ὕ'), + (0x1F5E, 'X'), + (0x1F5F, 'M', 'ὗ'), + (0x1F60, 'V'), + (0x1F68, 'M', 'ὠ'), + (0x1F69, 'M', 'ὡ'), + (0x1F6A, 'M', 'ὢ'), + (0x1F6B, 'M', 'ὣ'), + (0x1F6C, 'M', 'ὤ'), + (0x1F6D, 'M', 'ὥ'), + (0x1F6E, 'M', 'ὦ'), + (0x1F6F, 'M', 'ὧ'), + (0x1F70, 'V'), + (0x1F71, 'M', 'ά'), + (0x1F72, 'V'), + (0x1F73, 'M', 'έ'), + (0x1F74, 'V'), + (0x1F75, 'M', 'ή'), + (0x1F76, 'V'), + (0x1F77, 'M', 'ί'), + (0x1F78, 'V'), + (0x1F79, 'M', 'ό'), + (0x1F7A, 'V'), + (0x1F7B, 'M', 'ύ'), + (0x1F7C, 'V'), + (0x1F7D, 'M', 'ώ'), + (0x1F7E, 'X'), + (0x1F80, 'M', 'ἀι'), + (0x1F81, 'M', 'ἁι'), + (0x1F82, 'M', 'ἂι'), + (0x1F83, 'M', 'ἃι'), + (0x1F84, 'M', 'ἄι'), + (0x1F85, 'M', 'ἅι'), + (0x1F86, 'M', 'ἆι'), + (0x1F87, 'M', 'ἇι'), + ] + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F88, 'M', 'ἀι'), + (0x1F89, 'M', 'ἁι'), + (0x1F8A, 'M', 'ἂι'), + (0x1F8B, 'M', 'ἃι'), + (0x1F8C, 'M', 'ἄι'), + (0x1F8D, 'M', 'ἅι'), + (0x1F8E, 'M', 'ἆι'), + (0x1F8F, 'M', 'ἇι'), + (0x1F90, 'M', 'ἠι'), + (0x1F91, 'M', 'ἡι'), + (0x1F92, 'M', 'ἢι'), + (0x1F93, 'M', 'ἣι'), + (0x1F94, 'M', 'ἤι'), + (0x1F95, 'M', 'ἥι'), + (0x1F96, 'M', 'ἦι'), + (0x1F97, 'M', 'ἧι'), + (0x1F98, 'M', 'ἠι'), + (0x1F99, 'M', 'ἡι'), + (0x1F9A, 'M', 'ἢι'), + (0x1F9B, 'M', 'ἣι'), + (0x1F9C, 'M', 'ἤι'), + (0x1F9D, 'M', 'ἥι'), + (0x1F9E, 'M', 'ἦι'), + (0x1F9F, 'M', 'ἧι'), + (0x1FA0, 'M', 'ὠι'), + (0x1FA1, 'M', 'ὡι'), + (0x1FA2, 'M', 'ὢι'), + (0x1FA3, 'M', 'ὣι'), + (0x1FA4, 'M', 'ὤι'), + (0x1FA5, 'M', 'ὥι'), + (0x1FA6, 'M', 'ὦι'), + (0x1FA7, 'M', 'ὧι'), + (0x1FA8, 'M', 'ὠι'), + (0x1FA9, 'M', 'ὡι'), + (0x1FAA, 'M', 'ὢι'), + (0x1FAB, 'M', 'ὣι'), + (0x1FAC, 'M', 'ὤι'), + (0x1FAD, 'M', 'ὥι'), + (0x1FAE, 'M', 'ὦι'), + (0x1FAF, 'M', 'ὧι'), + (0x1FB0, 'V'), + (0x1FB2, 'M', 'ὰι'), + (0x1FB3, 'M', 'αι'), + (0x1FB4, 'M', 'άι'), + (0x1FB5, 'X'), + (0x1FB6, 'V'), + (0x1FB7, 'M', 'ᾶι'), + (0x1FB8, 'M', 'ᾰ'), + (0x1FB9, 'M', 'ᾱ'), + (0x1FBA, 'M', 'ὰ'), + (0x1FBB, 'M', 'ά'), + (0x1FBC, 'M', 'αι'), + (0x1FBD, '3', ' ̓'), + (0x1FBE, 'M', 'ι'), + (0x1FBF, '3', ' ̓'), + (0x1FC0, '3', ' ͂'), + (0x1FC1, '3', ' ̈͂'), + (0x1FC2, 'M', 'ὴι'), + (0x1FC3, 'M', 'ηι'), + (0x1FC4, 'M', 'ήι'), + (0x1FC5, 'X'), + (0x1FC6, 'V'), + (0x1FC7, 'M', 'ῆι'), + (0x1FC8, 'M', 'ὲ'), + (0x1FC9, 'M', 'έ'), + (0x1FCA, 'M', 'ὴ'), + (0x1FCB, 'M', 'ή'), + (0x1FCC, 'M', 'ηι'), + (0x1FCD, '3', ' ̓̀'), + (0x1FCE, '3', ' ̓́'), + (0x1FCF, '3', ' ̓͂'), + (0x1FD0, 'V'), + (0x1FD3, 'M', 'ΐ'), + (0x1FD4, 'X'), + (0x1FD6, 'V'), + (0x1FD8, 'M', 'ῐ'), + (0x1FD9, 'M', 'ῑ'), + (0x1FDA, 'M', 'ὶ'), + (0x1FDB, 'M', 
'ί'), + (0x1FDC, 'X'), + (0x1FDD, '3', ' ̔̀'), + (0x1FDE, '3', ' ̔́'), + (0x1FDF, '3', ' ̔͂'), + (0x1FE0, 'V'), + (0x1FE3, 'M', 'ΰ'), + (0x1FE4, 'V'), + (0x1FE8, 'M', 'ῠ'), + (0x1FE9, 'M', 'ῡ'), + (0x1FEA, 'M', 'ὺ'), + (0x1FEB, 'M', 'ύ'), + (0x1FEC, 'M', 'ῥ'), + (0x1FED, '3', ' ̈̀'), + (0x1FEE, '3', ' ̈́'), + (0x1FEF, '3', '`'), + (0x1FF0, 'X'), + (0x1FF2, 'M', 'ὼι'), + (0x1FF3, 'M', 'ωι'), + (0x1FF4, 'M', 'ώι'), + (0x1FF5, 'X'), + (0x1FF6, 'V'), + ] + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FF7, 'M', 'ῶι'), + (0x1FF8, 'M', 'ὸ'), + (0x1FF9, 'M', 'ό'), + (0x1FFA, 'M', 'ὼ'), + (0x1FFB, 'M', 'ώ'), + (0x1FFC, 'M', 'ωι'), + (0x1FFD, '3', ' ́'), + (0x1FFE, '3', ' ̔'), + (0x1FFF, 'X'), + (0x2000, '3', ' '), + (0x200B, 'I'), + (0x200C, 'D', ''), + (0x200E, 'X'), + (0x2010, 'V'), + (0x2011, 'M', '‐'), + (0x2012, 'V'), + (0x2017, '3', ' ̳'), + (0x2018, 'V'), + (0x2024, 'X'), + (0x2027, 'V'), + (0x2028, 'X'), + (0x202F, '3', ' '), + (0x2030, 'V'), + (0x2033, 'M', '′′'), + (0x2034, 'M', '′′′'), + (0x2035, 'V'), + (0x2036, 'M', '‵‵'), + (0x2037, 'M', '‵‵‵'), + (0x2038, 'V'), + (0x203C, '3', '!!'), + (0x203D, 'V'), + (0x203E, '3', ' ̅'), + (0x203F, 'V'), + (0x2047, '3', '??'), + (0x2048, '3', '?!'), + (0x2049, '3', '!?'), + (0x204A, 'V'), + (0x2057, 'M', '′′′′'), + (0x2058, 'V'), + (0x205F, '3', ' '), + (0x2060, 'I'), + (0x2061, 'X'), + (0x2064, 'I'), + (0x2065, 'X'), + (0x2070, 'M', '0'), + (0x2071, 'M', 'i'), + (0x2072, 'X'), + (0x2074, 'M', '4'), + (0x2075, 'M', '5'), + (0x2076, 'M', '6'), + (0x2077, 'M', '7'), + (0x2078, 'M', '8'), + (0x2079, 'M', '9'), + (0x207A, '3', '+'), + (0x207B, 'M', '−'), + (0x207C, '3', '='), + (0x207D, '3', '('), + (0x207E, '3', ')'), + (0x207F, 'M', 'n'), + (0x2080, 'M', '0'), + (0x2081, 'M', '1'), + (0x2082, 'M', '2'), + (0x2083, 'M', '3'), + (0x2084, 'M', '4'), + (0x2085, 'M', '5'), + (0x2086, 'M', '6'), + (0x2087, 'M', '7'), + (0x2088, 'M', '8'), + (0x2089, 'M', '9'), + (0x208A, '3', '+'), + (0x208B, 'M', '−'), + (0x208C, '3', '='), + (0x208D, '3', '('), + (0x208E, '3', ')'), + (0x208F, 'X'), + (0x2090, 'M', 'a'), + (0x2091, 'M', 'e'), + (0x2092, 'M', 'o'), + (0x2093, 'M', 'x'), + (0x2094, 'M', 'ə'), + (0x2095, 'M', 'h'), + (0x2096, 'M', 'k'), + (0x2097, 'M', 'l'), + (0x2098, 'M', 'm'), + (0x2099, 'M', 'n'), + (0x209A, 'M', 'p'), + (0x209B, 'M', 's'), + (0x209C, 'M', 't'), + (0x209D, 'X'), + (0x20A0, 'V'), + (0x20A8, 'M', 'rs'), + (0x20A9, 'V'), + (0x20C1, 'X'), + (0x20D0, 'V'), + (0x20F1, 'X'), + (0x2100, '3', 'a/c'), + (0x2101, '3', 'a/s'), + (0x2102, 'M', 'c'), + (0x2103, 'M', '°c'), + (0x2104, 'V'), + ] + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2105, '3', 'c/o'), + (0x2106, '3', 'c/u'), + (0x2107, 'M', 'ɛ'), + (0x2108, 'V'), + (0x2109, 'M', '°f'), + (0x210A, 'M', 'g'), + (0x210B, 'M', 'h'), + (0x210F, 'M', 'ħ'), + (0x2110, 'M', 'i'), + (0x2112, 'M', 'l'), + (0x2114, 'V'), + (0x2115, 'M', 'n'), + (0x2116, 'M', 'no'), + (0x2117, 'V'), + (0x2119, 'M', 'p'), + (0x211A, 'M', 'q'), + (0x211B, 'M', 'r'), + (0x211E, 'V'), + (0x2120, 'M', 'sm'), + (0x2121, 'M', 'tel'), + (0x2122, 'M', 'tm'), + (0x2123, 'V'), + (0x2124, 'M', 'z'), + (0x2125, 'V'), + (0x2126, 'M', 'ω'), + (0x2127, 'V'), + (0x2128, 'M', 'z'), + (0x2129, 'V'), + (0x212A, 'M', 'k'), + (0x212B, 'M', 'å'), + (0x212C, 'M', 'b'), + (0x212D, 'M', 'c'), + (0x212E, 'V'), + (0x212F, 'M', 'e'), + (0x2131, 'M', 'f'), + (0x2132, 'X'), + (0x2133, 'M', 'm'), + (0x2134, 'M', 'o'), + (0x2135, 'M', 'א'), + (0x2136, 'M', 'ב'), + (0x2137, 
'M', 'ג'), + (0x2138, 'M', 'ד'), + (0x2139, 'M', 'i'), + (0x213A, 'V'), + (0x213B, 'M', 'fax'), + (0x213C, 'M', 'π'), + (0x213D, 'M', 'γ'), + (0x213F, 'M', 'π'), + (0x2140, 'M', '∑'), + (0x2141, 'V'), + (0x2145, 'M', 'd'), + (0x2147, 'M', 'e'), + (0x2148, 'M', 'i'), + (0x2149, 'M', 'j'), + (0x214A, 'V'), + (0x2150, 'M', '1⁄7'), + (0x2151, 'M', '1⁄9'), + (0x2152, 'M', '1⁄10'), + (0x2153, 'M', '1⁄3'), + (0x2154, 'M', '2⁄3'), + (0x2155, 'M', '1⁄5'), + (0x2156, 'M', '2⁄5'), + (0x2157, 'M', '3⁄5'), + (0x2158, 'M', '4⁄5'), + (0x2159, 'M', '1⁄6'), + (0x215A, 'M', '5⁄6'), + (0x215B, 'M', '1⁄8'), + (0x215C, 'M', '3⁄8'), + (0x215D, 'M', '5⁄8'), + (0x215E, 'M', '7⁄8'), + (0x215F, 'M', '1⁄'), + (0x2160, 'M', 'i'), + (0x2161, 'M', 'ii'), + (0x2162, 'M', 'iii'), + (0x2163, 'M', 'iv'), + (0x2164, 'M', 'v'), + (0x2165, 'M', 'vi'), + (0x2166, 'M', 'vii'), + (0x2167, 'M', 'viii'), + (0x2168, 'M', 'ix'), + (0x2169, 'M', 'x'), + (0x216A, 'M', 'xi'), + (0x216B, 'M', 'xii'), + (0x216C, 'M', 'l'), + (0x216D, 'M', 'c'), + (0x216E, 'M', 'd'), + (0x216F, 'M', 'm'), + (0x2170, 'M', 'i'), + (0x2171, 'M', 'ii'), + (0x2172, 'M', 'iii'), + (0x2173, 'M', 'iv'), + (0x2174, 'M', 'v'), + (0x2175, 'M', 'vi'), + (0x2176, 'M', 'vii'), + (0x2177, 'M', 'viii'), + (0x2178, 'M', 'ix'), + (0x2179, 'M', 'x'), + (0x217A, 'M', 'xi'), + (0x217B, 'M', 'xii'), + (0x217C, 'M', 'l'), + ] + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x217D, 'M', 'c'), + (0x217E, 'M', 'd'), + (0x217F, 'M', 'm'), + (0x2180, 'V'), + (0x2183, 'X'), + (0x2184, 'V'), + (0x2189, 'M', '0⁄3'), + (0x218A, 'V'), + (0x218C, 'X'), + (0x2190, 'V'), + (0x222C, 'M', '∫∫'), + (0x222D, 'M', '∫∫∫'), + (0x222E, 'V'), + (0x222F, 'M', '∮∮'), + (0x2230, 'M', '∮∮∮'), + (0x2231, 'V'), + (0x2260, '3'), + (0x2261, 'V'), + (0x226E, '3'), + (0x2270, 'V'), + (0x2329, 'M', '〈'), + (0x232A, 'M', '〉'), + (0x232B, 'V'), + (0x2427, 'X'), + (0x2440, 'V'), + (0x244B, 'X'), + (0x2460, 'M', '1'), + (0x2461, 'M', '2'), + (0x2462, 'M', '3'), + (0x2463, 'M', '4'), + (0x2464, 'M', '5'), + (0x2465, 'M', '6'), + (0x2466, 'M', '7'), + (0x2467, 'M', '8'), + (0x2468, 'M', '9'), + (0x2469, 'M', '10'), + (0x246A, 'M', '11'), + (0x246B, 'M', '12'), + (0x246C, 'M', '13'), + (0x246D, 'M', '14'), + (0x246E, 'M', '15'), + (0x246F, 'M', '16'), + (0x2470, 'M', '17'), + (0x2471, 'M', '18'), + (0x2472, 'M', '19'), + (0x2473, 'M', '20'), + (0x2474, '3', '(1)'), + (0x2475, '3', '(2)'), + (0x2476, '3', '(3)'), + (0x2477, '3', '(4)'), + (0x2478, '3', '(5)'), + (0x2479, '3', '(6)'), + (0x247A, '3', '(7)'), + (0x247B, '3', '(8)'), + (0x247C, '3', '(9)'), + (0x247D, '3', '(10)'), + (0x247E, '3', '(11)'), + (0x247F, '3', '(12)'), + (0x2480, '3', '(13)'), + (0x2481, '3', '(14)'), + (0x2482, '3', '(15)'), + (0x2483, '3', '(16)'), + (0x2484, '3', '(17)'), + (0x2485, '3', '(18)'), + (0x2486, '3', '(19)'), + (0x2487, '3', '(20)'), + (0x2488, 'X'), + (0x249C, '3', '(a)'), + (0x249D, '3', '(b)'), + (0x249E, '3', '(c)'), + (0x249F, '3', '(d)'), + (0x24A0, '3', '(e)'), + (0x24A1, '3', '(f)'), + (0x24A2, '3', '(g)'), + (0x24A3, '3', '(h)'), + (0x24A4, '3', '(i)'), + (0x24A5, '3', '(j)'), + (0x24A6, '3', '(k)'), + (0x24A7, '3', '(l)'), + (0x24A8, '3', '(m)'), + (0x24A9, '3', '(n)'), + (0x24AA, '3', '(o)'), + (0x24AB, '3', '(p)'), + (0x24AC, '3', '(q)'), + (0x24AD, '3', '(r)'), + (0x24AE, '3', '(s)'), + (0x24AF, '3', '(t)'), + (0x24B0, '3', '(u)'), + (0x24B1, '3', '(v)'), + (0x24B2, '3', '(w)'), + (0x24B3, '3', '(x)'), + (0x24B4, '3', '(y)'), + (0x24B5, '3', '(z)'), + (0x24B6, 'M', 'a'), + 
(0x24B7, 'M', 'b'), + (0x24B8, 'M', 'c'), + (0x24B9, 'M', 'd'), + (0x24BA, 'M', 'e'), + (0x24BB, 'M', 'f'), + (0x24BC, 'M', 'g'), + ] + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x24BD, 'M', 'h'), + (0x24BE, 'M', 'i'), + (0x24BF, 'M', 'j'), + (0x24C0, 'M', 'k'), + (0x24C1, 'M', 'l'), + (0x24C2, 'M', 'm'), + (0x24C3, 'M', 'n'), + (0x24C4, 'M', 'o'), + (0x24C5, 'M', 'p'), + (0x24C6, 'M', 'q'), + (0x24C7, 'M', 'r'), + (0x24C8, 'M', 's'), + (0x24C9, 'M', 't'), + (0x24CA, 'M', 'u'), + (0x24CB, 'M', 'v'), + (0x24CC, 'M', 'w'), + (0x24CD, 'M', 'x'), + (0x24CE, 'M', 'y'), + (0x24CF, 'M', 'z'), + (0x24D0, 'M', 'a'), + (0x24D1, 'M', 'b'), + (0x24D2, 'M', 'c'), + (0x24D3, 'M', 'd'), + (0x24D4, 'M', 'e'), + (0x24D5, 'M', 'f'), + (0x24D6, 'M', 'g'), + (0x24D7, 'M', 'h'), + (0x24D8, 'M', 'i'), + (0x24D9, 'M', 'j'), + (0x24DA, 'M', 'k'), + (0x24DB, 'M', 'l'), + (0x24DC, 'M', 'm'), + (0x24DD, 'M', 'n'), + (0x24DE, 'M', 'o'), + (0x24DF, 'M', 'p'), + (0x24E0, 'M', 'q'), + (0x24E1, 'M', 'r'), + (0x24E2, 'M', 's'), + (0x24E3, 'M', 't'), + (0x24E4, 'M', 'u'), + (0x24E5, 'M', 'v'), + (0x24E6, 'M', 'w'), + (0x24E7, 'M', 'x'), + (0x24E8, 'M', 'y'), + (0x24E9, 'M', 'z'), + (0x24EA, 'M', '0'), + (0x24EB, 'V'), + (0x2A0C, 'M', '∫∫∫∫'), + (0x2A0D, 'V'), + (0x2A74, '3', '::='), + (0x2A75, '3', '=='), + (0x2A76, '3', '==='), + (0x2A77, 'V'), + (0x2ADC, 'M', '⫝̸'), + (0x2ADD, 'V'), + (0x2B74, 'X'), + (0x2B76, 'V'), + (0x2B96, 'X'), + (0x2B97, 'V'), + (0x2C00, 'M', 'ⰰ'), + (0x2C01, 'M', 'ⰱ'), + (0x2C02, 'M', 'ⰲ'), + (0x2C03, 'M', 'ⰳ'), + (0x2C04, 'M', 'ⰴ'), + (0x2C05, 'M', 'ⰵ'), + (0x2C06, 'M', 'ⰶ'), + (0x2C07, 'M', 'ⰷ'), + (0x2C08, 'M', 'ⰸ'), + (0x2C09, 'M', 'ⰹ'), + (0x2C0A, 'M', 'ⰺ'), + (0x2C0B, 'M', 'ⰻ'), + (0x2C0C, 'M', 'ⰼ'), + (0x2C0D, 'M', 'ⰽ'), + (0x2C0E, 'M', 'ⰾ'), + (0x2C0F, 'M', 'ⰿ'), + (0x2C10, 'M', 'ⱀ'), + (0x2C11, 'M', 'ⱁ'), + (0x2C12, 'M', 'ⱂ'), + (0x2C13, 'M', 'ⱃ'), + (0x2C14, 'M', 'ⱄ'), + (0x2C15, 'M', 'ⱅ'), + (0x2C16, 'M', 'ⱆ'), + (0x2C17, 'M', 'ⱇ'), + (0x2C18, 'M', 'ⱈ'), + (0x2C19, 'M', 'ⱉ'), + (0x2C1A, 'M', 'ⱊ'), + (0x2C1B, 'M', 'ⱋ'), + (0x2C1C, 'M', 'ⱌ'), + (0x2C1D, 'M', 'ⱍ'), + (0x2C1E, 'M', 'ⱎ'), + (0x2C1F, 'M', 'ⱏ'), + (0x2C20, 'M', 'ⱐ'), + (0x2C21, 'M', 'ⱑ'), + (0x2C22, 'M', 'ⱒ'), + (0x2C23, 'M', 'ⱓ'), + (0x2C24, 'M', 'ⱔ'), + (0x2C25, 'M', 'ⱕ'), + (0x2C26, 'M', 'ⱖ'), + (0x2C27, 'M', 'ⱗ'), + (0x2C28, 'M', 'ⱘ'), + ] + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2C29, 'M', 'ⱙ'), + (0x2C2A, 'M', 'ⱚ'), + (0x2C2B, 'M', 'ⱛ'), + (0x2C2C, 'M', 'ⱜ'), + (0x2C2D, 'M', 'ⱝ'), + (0x2C2E, 'M', 'ⱞ'), + (0x2C2F, 'M', 'ⱟ'), + (0x2C30, 'V'), + (0x2C60, 'M', 'ⱡ'), + (0x2C61, 'V'), + (0x2C62, 'M', 'ɫ'), + (0x2C63, 'M', 'ᵽ'), + (0x2C64, 'M', 'ɽ'), + (0x2C65, 'V'), + (0x2C67, 'M', 'ⱨ'), + (0x2C68, 'V'), + (0x2C69, 'M', 'ⱪ'), + (0x2C6A, 'V'), + (0x2C6B, 'M', 'ⱬ'), + (0x2C6C, 'V'), + (0x2C6D, 'M', 'ɑ'), + (0x2C6E, 'M', 'ɱ'), + (0x2C6F, 'M', 'ɐ'), + (0x2C70, 'M', 'ɒ'), + (0x2C71, 'V'), + (0x2C72, 'M', 'ⱳ'), + (0x2C73, 'V'), + (0x2C75, 'M', 'ⱶ'), + (0x2C76, 'V'), + (0x2C7C, 'M', 'j'), + (0x2C7D, 'M', 'v'), + (0x2C7E, 'M', 'ȿ'), + (0x2C7F, 'M', 'ɀ'), + (0x2C80, 'M', 'ⲁ'), + (0x2C81, 'V'), + (0x2C82, 'M', 'ⲃ'), + (0x2C83, 'V'), + (0x2C84, 'M', 'ⲅ'), + (0x2C85, 'V'), + (0x2C86, 'M', 'ⲇ'), + (0x2C87, 'V'), + (0x2C88, 'M', 'ⲉ'), + (0x2C89, 'V'), + (0x2C8A, 'M', 'ⲋ'), + (0x2C8B, 'V'), + (0x2C8C, 'M', 'ⲍ'), + (0x2C8D, 'V'), + (0x2C8E, 'M', 'ⲏ'), + (0x2C8F, 'V'), + (0x2C90, 'M', 'ⲑ'), + (0x2C91, 'V'), + (0x2C92, 'M', 'ⲓ'), + (0x2C93, 'V'), + (0x2C94, 
'M', 'ⲕ'), + (0x2C95, 'V'), + (0x2C96, 'M', 'ⲗ'), + (0x2C97, 'V'), + (0x2C98, 'M', 'ⲙ'), + (0x2C99, 'V'), + (0x2C9A, 'M', 'ⲛ'), + (0x2C9B, 'V'), + (0x2C9C, 'M', 'ⲝ'), + (0x2C9D, 'V'), + (0x2C9E, 'M', 'ⲟ'), + (0x2C9F, 'V'), + (0x2CA0, 'M', 'ⲡ'), + (0x2CA1, 'V'), + (0x2CA2, 'M', 'ⲣ'), + (0x2CA3, 'V'), + (0x2CA4, 'M', 'ⲥ'), + (0x2CA5, 'V'), + (0x2CA6, 'M', 'ⲧ'), + (0x2CA7, 'V'), + (0x2CA8, 'M', 'ⲩ'), + (0x2CA9, 'V'), + (0x2CAA, 'M', 'ⲫ'), + (0x2CAB, 'V'), + (0x2CAC, 'M', 'ⲭ'), + (0x2CAD, 'V'), + (0x2CAE, 'M', 'ⲯ'), + (0x2CAF, 'V'), + (0x2CB0, 'M', 'ⲱ'), + (0x2CB1, 'V'), + (0x2CB2, 'M', 'ⲳ'), + (0x2CB3, 'V'), + (0x2CB4, 'M', 'ⲵ'), + (0x2CB5, 'V'), + (0x2CB6, 'M', 'ⲷ'), + (0x2CB7, 'V'), + (0x2CB8, 'M', 'ⲹ'), + (0x2CB9, 'V'), + (0x2CBA, 'M', 'ⲻ'), + (0x2CBB, 'V'), + (0x2CBC, 'M', 'ⲽ'), + (0x2CBD, 'V'), + (0x2CBE, 'M', 'ⲿ'), + (0x2CBF, 'V'), + (0x2CC0, 'M', 'ⳁ'), + (0x2CC1, 'V'), + (0x2CC2, 'M', 'ⳃ'), + ] + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2CC3, 'V'), + (0x2CC4, 'M', 'ⳅ'), + (0x2CC5, 'V'), + (0x2CC6, 'M', 'ⳇ'), + (0x2CC7, 'V'), + (0x2CC8, 'M', 'ⳉ'), + (0x2CC9, 'V'), + (0x2CCA, 'M', 'ⳋ'), + (0x2CCB, 'V'), + (0x2CCC, 'M', 'ⳍ'), + (0x2CCD, 'V'), + (0x2CCE, 'M', 'ⳏ'), + (0x2CCF, 'V'), + (0x2CD0, 'M', 'ⳑ'), + (0x2CD1, 'V'), + (0x2CD2, 'M', 'ⳓ'), + (0x2CD3, 'V'), + (0x2CD4, 'M', 'ⳕ'), + (0x2CD5, 'V'), + (0x2CD6, 'M', 'ⳗ'), + (0x2CD7, 'V'), + (0x2CD8, 'M', 'ⳙ'), + (0x2CD9, 'V'), + (0x2CDA, 'M', 'ⳛ'), + (0x2CDB, 'V'), + (0x2CDC, 'M', 'ⳝ'), + (0x2CDD, 'V'), + (0x2CDE, 'M', 'ⳟ'), + (0x2CDF, 'V'), + (0x2CE0, 'M', 'ⳡ'), + (0x2CE1, 'V'), + (0x2CE2, 'M', 'ⳣ'), + (0x2CE3, 'V'), + (0x2CEB, 'M', 'ⳬ'), + (0x2CEC, 'V'), + (0x2CED, 'M', 'ⳮ'), + (0x2CEE, 'V'), + (0x2CF2, 'M', 'ⳳ'), + (0x2CF3, 'V'), + (0x2CF4, 'X'), + (0x2CF9, 'V'), + (0x2D26, 'X'), + (0x2D27, 'V'), + (0x2D28, 'X'), + (0x2D2D, 'V'), + (0x2D2E, 'X'), + (0x2D30, 'V'), + (0x2D68, 'X'), + (0x2D6F, 'M', 'ⵡ'), + (0x2D70, 'V'), + (0x2D71, 'X'), + (0x2D7F, 'V'), + (0x2D97, 'X'), + (0x2DA0, 'V'), + (0x2DA7, 'X'), + (0x2DA8, 'V'), + (0x2DAF, 'X'), + (0x2DB0, 'V'), + (0x2DB7, 'X'), + (0x2DB8, 'V'), + (0x2DBF, 'X'), + (0x2DC0, 'V'), + (0x2DC7, 'X'), + (0x2DC8, 'V'), + (0x2DCF, 'X'), + (0x2DD0, 'V'), + (0x2DD7, 'X'), + (0x2DD8, 'V'), + (0x2DDF, 'X'), + (0x2DE0, 'V'), + (0x2E5E, 'X'), + (0x2E80, 'V'), + (0x2E9A, 'X'), + (0x2E9B, 'V'), + (0x2E9F, 'M', '母'), + (0x2EA0, 'V'), + (0x2EF3, 'M', '龟'), + (0x2EF4, 'X'), + (0x2F00, 'M', '一'), + (0x2F01, 'M', '丨'), + (0x2F02, 'M', '丶'), + (0x2F03, 'M', '丿'), + (0x2F04, 'M', '乙'), + (0x2F05, 'M', '亅'), + (0x2F06, 'M', '二'), + (0x2F07, 'M', '亠'), + (0x2F08, 'M', '人'), + (0x2F09, 'M', '儿'), + (0x2F0A, 'M', '入'), + (0x2F0B, 'M', '八'), + (0x2F0C, 'M', '冂'), + (0x2F0D, 'M', '冖'), + (0x2F0E, 'M', '冫'), + (0x2F0F, 'M', '几'), + (0x2F10, 'M', '凵'), + (0x2F11, 'M', '刀'), + (0x2F12, 'M', '力'), + (0x2F13, 'M', '勹'), + (0x2F14, 'M', '匕'), + (0x2F15, 'M', '匚'), + ] + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F16, 'M', '匸'), + (0x2F17, 'M', '十'), + (0x2F18, 'M', '卜'), + (0x2F19, 'M', '卩'), + (0x2F1A, 'M', '厂'), + (0x2F1B, 'M', '厶'), + (0x2F1C, 'M', '又'), + (0x2F1D, 'M', '口'), + (0x2F1E, 'M', '囗'), + (0x2F1F, 'M', '土'), + (0x2F20, 'M', '士'), + (0x2F21, 'M', '夂'), + (0x2F22, 'M', '夊'), + (0x2F23, 'M', '夕'), + (0x2F24, 'M', '大'), + (0x2F25, 'M', '女'), + (0x2F26, 'M', '子'), + (0x2F27, 'M', '宀'), + (0x2F28, 'M', '寸'), + (0x2F29, 'M', '小'), + (0x2F2A, 'M', '尢'), + (0x2F2B, 'M', '尸'), + (0x2F2C, 'M', '屮'), + (0x2F2D, 'M', '山'), + (0x2F2E, 'M', '巛'), + 
(0x2F2F, 'M', '工'), + (0x2F30, 'M', '己'), + (0x2F31, 'M', '巾'), + (0x2F32, 'M', '干'), + (0x2F33, 'M', '幺'), + (0x2F34, 'M', '广'), + (0x2F35, 'M', '廴'), + (0x2F36, 'M', '廾'), + (0x2F37, 'M', '弋'), + (0x2F38, 'M', '弓'), + (0x2F39, 'M', '彐'), + (0x2F3A, 'M', '彡'), + (0x2F3B, 'M', '彳'), + (0x2F3C, 'M', '心'), + (0x2F3D, 'M', '戈'), + (0x2F3E, 'M', '戶'), + (0x2F3F, 'M', '手'), + (0x2F40, 'M', '支'), + (0x2F41, 'M', '攴'), + (0x2F42, 'M', '文'), + (0x2F43, 'M', '斗'), + (0x2F44, 'M', '斤'), + (0x2F45, 'M', '方'), + (0x2F46, 'M', '无'), + (0x2F47, 'M', '日'), + (0x2F48, 'M', '曰'), + (0x2F49, 'M', '月'), + (0x2F4A, 'M', '木'), + (0x2F4B, 'M', '欠'), + (0x2F4C, 'M', '止'), + (0x2F4D, 'M', '歹'), + (0x2F4E, 'M', '殳'), + (0x2F4F, 'M', '毋'), + (0x2F50, 'M', '比'), + (0x2F51, 'M', '毛'), + (0x2F52, 'M', '氏'), + (0x2F53, 'M', '气'), + (0x2F54, 'M', '水'), + (0x2F55, 'M', '火'), + (0x2F56, 'M', '爪'), + (0x2F57, 'M', '父'), + (0x2F58, 'M', '爻'), + (0x2F59, 'M', '爿'), + (0x2F5A, 'M', '片'), + (0x2F5B, 'M', '牙'), + (0x2F5C, 'M', '牛'), + (0x2F5D, 'M', '犬'), + (0x2F5E, 'M', '玄'), + (0x2F5F, 'M', '玉'), + (0x2F60, 'M', '瓜'), + (0x2F61, 'M', '瓦'), + (0x2F62, 'M', '甘'), + (0x2F63, 'M', '生'), + (0x2F64, 'M', '用'), + (0x2F65, 'M', '田'), + (0x2F66, 'M', '疋'), + (0x2F67, 'M', '疒'), + (0x2F68, 'M', '癶'), + (0x2F69, 'M', '白'), + (0x2F6A, 'M', '皮'), + (0x2F6B, 'M', '皿'), + (0x2F6C, 'M', '目'), + (0x2F6D, 'M', '矛'), + (0x2F6E, 'M', '矢'), + (0x2F6F, 'M', '石'), + (0x2F70, 'M', '示'), + (0x2F71, 'M', '禸'), + (0x2F72, 'M', '禾'), + (0x2F73, 'M', '穴'), + (0x2F74, 'M', '立'), + (0x2F75, 'M', '竹'), + (0x2F76, 'M', '米'), + (0x2F77, 'M', '糸'), + (0x2F78, 'M', '缶'), + (0x2F79, 'M', '网'), + ] + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F7A, 'M', '羊'), + (0x2F7B, 'M', '羽'), + (0x2F7C, 'M', '老'), + (0x2F7D, 'M', '而'), + (0x2F7E, 'M', '耒'), + (0x2F7F, 'M', '耳'), + (0x2F80, 'M', '聿'), + (0x2F81, 'M', '肉'), + (0x2F82, 'M', '臣'), + (0x2F83, 'M', '自'), + (0x2F84, 'M', '至'), + (0x2F85, 'M', '臼'), + (0x2F86, 'M', '舌'), + (0x2F87, 'M', '舛'), + (0x2F88, 'M', '舟'), + (0x2F89, 'M', '艮'), + (0x2F8A, 'M', '色'), + (0x2F8B, 'M', '艸'), + (0x2F8C, 'M', '虍'), + (0x2F8D, 'M', '虫'), + (0x2F8E, 'M', '血'), + (0x2F8F, 'M', '行'), + (0x2F90, 'M', '衣'), + (0x2F91, 'M', '襾'), + (0x2F92, 'M', '見'), + (0x2F93, 'M', '角'), + (0x2F94, 'M', '言'), + (0x2F95, 'M', '谷'), + (0x2F96, 'M', '豆'), + (0x2F97, 'M', '豕'), + (0x2F98, 'M', '豸'), + (0x2F99, 'M', '貝'), + (0x2F9A, 'M', '赤'), + (0x2F9B, 'M', '走'), + (0x2F9C, 'M', '足'), + (0x2F9D, 'M', '身'), + (0x2F9E, 'M', '車'), + (0x2F9F, 'M', '辛'), + (0x2FA0, 'M', '辰'), + (0x2FA1, 'M', '辵'), + (0x2FA2, 'M', '邑'), + (0x2FA3, 'M', '酉'), + (0x2FA4, 'M', '釆'), + (0x2FA5, 'M', '里'), + (0x2FA6, 'M', '金'), + (0x2FA7, 'M', '長'), + (0x2FA8, 'M', '門'), + (0x2FA9, 'M', '阜'), + (0x2FAA, 'M', '隶'), + (0x2FAB, 'M', '隹'), + (0x2FAC, 'M', '雨'), + (0x2FAD, 'M', '靑'), + (0x2FAE, 'M', '非'), + (0x2FAF, 'M', '面'), + (0x2FB0, 'M', '革'), + (0x2FB1, 'M', '韋'), + (0x2FB2, 'M', '韭'), + (0x2FB3, 'M', '音'), + (0x2FB4, 'M', '頁'), + (0x2FB5, 'M', '風'), + (0x2FB6, 'M', '飛'), + (0x2FB7, 'M', '食'), + (0x2FB8, 'M', '首'), + (0x2FB9, 'M', '香'), + (0x2FBA, 'M', '馬'), + (0x2FBB, 'M', '骨'), + (0x2FBC, 'M', '高'), + (0x2FBD, 'M', '髟'), + (0x2FBE, 'M', '鬥'), + (0x2FBF, 'M', '鬯'), + (0x2FC0, 'M', '鬲'), + (0x2FC1, 'M', '鬼'), + (0x2FC2, 'M', '魚'), + (0x2FC3, 'M', '鳥'), + (0x2FC4, 'M', '鹵'), + (0x2FC5, 'M', '鹿'), + (0x2FC6, 'M', '麥'), + (0x2FC7, 'M', '麻'), + (0x2FC8, 'M', '黃'), + (0x2FC9, 'M', '黍'), + (0x2FCA, 'M', '黑'), + (0x2FCB, 'M', '黹'), + (0x2FCC, 'M', 
'黽'), + (0x2FCD, 'M', '鼎'), + (0x2FCE, 'M', '鼓'), + (0x2FCF, 'M', '鼠'), + (0x2FD0, 'M', '鼻'), + (0x2FD1, 'M', '齊'), + (0x2FD2, 'M', '齒'), + (0x2FD3, 'M', '龍'), + (0x2FD4, 'M', '龜'), + (0x2FD5, 'M', '龠'), + (0x2FD6, 'X'), + (0x3000, '3', ' '), + (0x3001, 'V'), + (0x3002, 'M', '.'), + (0x3003, 'V'), + (0x3036, 'M', '〒'), + (0x3037, 'V'), + (0x3038, 'M', '十'), + ] + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3039, 'M', '卄'), + (0x303A, 'M', '卅'), + (0x303B, 'V'), + (0x3040, 'X'), + (0x3041, 'V'), + (0x3097, 'X'), + (0x3099, 'V'), + (0x309B, '3', ' ゙'), + (0x309C, '3', ' ゚'), + (0x309D, 'V'), + (0x309F, 'M', 'より'), + (0x30A0, 'V'), + (0x30FF, 'M', 'コト'), + (0x3100, 'X'), + (0x3105, 'V'), + (0x3130, 'X'), + (0x3131, 'M', 'ᄀ'), + (0x3132, 'M', 'ᄁ'), + (0x3133, 'M', 'ᆪ'), + (0x3134, 'M', 'ᄂ'), + (0x3135, 'M', 'ᆬ'), + (0x3136, 'M', 'ᆭ'), + (0x3137, 'M', 'ᄃ'), + (0x3138, 'M', 'ᄄ'), + (0x3139, 'M', 'ᄅ'), + (0x313A, 'M', 'ᆰ'), + (0x313B, 'M', 'ᆱ'), + (0x313C, 'M', 'ᆲ'), + (0x313D, 'M', 'ᆳ'), + (0x313E, 'M', 'ᆴ'), + (0x313F, 'M', 'ᆵ'), + (0x3140, 'M', 'ᄚ'), + (0x3141, 'M', 'ᄆ'), + (0x3142, 'M', 'ᄇ'), + (0x3143, 'M', 'ᄈ'), + (0x3144, 'M', 'ᄡ'), + (0x3145, 'M', 'ᄉ'), + (0x3146, 'M', 'ᄊ'), + (0x3147, 'M', 'ᄋ'), + (0x3148, 'M', 'ᄌ'), + (0x3149, 'M', 'ᄍ'), + (0x314A, 'M', 'ᄎ'), + (0x314B, 'M', 'ᄏ'), + (0x314C, 'M', 'ᄐ'), + (0x314D, 'M', 'ᄑ'), + (0x314E, 'M', 'ᄒ'), + (0x314F, 'M', 'ᅡ'), + (0x3150, 'M', 'ᅢ'), + (0x3151, 'M', 'ᅣ'), + (0x3152, 'M', 'ᅤ'), + (0x3153, 'M', 'ᅥ'), + (0x3154, 'M', 'ᅦ'), + (0x3155, 'M', 'ᅧ'), + (0x3156, 'M', 'ᅨ'), + (0x3157, 'M', 'ᅩ'), + (0x3158, 'M', 'ᅪ'), + (0x3159, 'M', 'ᅫ'), + (0x315A, 'M', 'ᅬ'), + (0x315B, 'M', 'ᅭ'), + (0x315C, 'M', 'ᅮ'), + (0x315D, 'M', 'ᅯ'), + (0x315E, 'M', 'ᅰ'), + (0x315F, 'M', 'ᅱ'), + (0x3160, 'M', 'ᅲ'), + (0x3161, 'M', 'ᅳ'), + (0x3162, 'M', 'ᅴ'), + (0x3163, 'M', 'ᅵ'), + (0x3164, 'X'), + (0x3165, 'M', 'ᄔ'), + (0x3166, 'M', 'ᄕ'), + (0x3167, 'M', 'ᇇ'), + (0x3168, 'M', 'ᇈ'), + (0x3169, 'M', 'ᇌ'), + (0x316A, 'M', 'ᇎ'), + (0x316B, 'M', 'ᇓ'), + (0x316C, 'M', 'ᇗ'), + (0x316D, 'M', 'ᇙ'), + (0x316E, 'M', 'ᄜ'), + (0x316F, 'M', 'ᇝ'), + (0x3170, 'M', 'ᇟ'), + (0x3171, 'M', 'ᄝ'), + (0x3172, 'M', 'ᄞ'), + (0x3173, 'M', 'ᄠ'), + (0x3174, 'M', 'ᄢ'), + (0x3175, 'M', 'ᄣ'), + (0x3176, 'M', 'ᄧ'), + (0x3177, 'M', 'ᄩ'), + (0x3178, 'M', 'ᄫ'), + (0x3179, 'M', 'ᄬ'), + (0x317A, 'M', 'ᄭ'), + (0x317B, 'M', 'ᄮ'), + (0x317C, 'M', 'ᄯ'), + (0x317D, 'M', 'ᄲ'), + (0x317E, 'M', 'ᄶ'), + (0x317F, 'M', 'ᅀ'), + (0x3180, 'M', 'ᅇ'), + (0x3181, 'M', 'ᅌ'), + (0x3182, 'M', 'ᇱ'), + (0x3183, 'M', 'ᇲ'), + (0x3184, 'M', 'ᅗ'), + ] + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3185, 'M', 'ᅘ'), + (0x3186, 'M', 'ᅙ'), + (0x3187, 'M', 'ᆄ'), + (0x3188, 'M', 'ᆅ'), + (0x3189, 'M', 'ᆈ'), + (0x318A, 'M', 'ᆑ'), + (0x318B, 'M', 'ᆒ'), + (0x318C, 'M', 'ᆔ'), + (0x318D, 'M', 'ᆞ'), + (0x318E, 'M', 'ᆡ'), + (0x318F, 'X'), + (0x3190, 'V'), + (0x3192, 'M', '一'), + (0x3193, 'M', '二'), + (0x3194, 'M', '三'), + (0x3195, 'M', '四'), + (0x3196, 'M', '上'), + (0x3197, 'M', '中'), + (0x3198, 'M', '下'), + (0x3199, 'M', '甲'), + (0x319A, 'M', '乙'), + (0x319B, 'M', '丙'), + (0x319C, 'M', '丁'), + (0x319D, 'M', '天'), + (0x319E, 'M', '地'), + (0x319F, 'M', '人'), + (0x31A0, 'V'), + (0x31E4, 'X'), + (0x31F0, 'V'), + (0x3200, '3', '(ᄀ)'), + (0x3201, '3', '(ᄂ)'), + (0x3202, '3', '(ᄃ)'), + (0x3203, '3', '(ᄅ)'), + (0x3204, '3', '(ᄆ)'), + (0x3205, '3', '(ᄇ)'), + (0x3206, '3', '(ᄉ)'), + (0x3207, '3', '(ᄋ)'), + (0x3208, '3', '(ᄌ)'), + (0x3209, '3', '(ᄎ)'), + (0x320A, '3', 
'(ᄏ)'), + (0x320B, '3', '(ᄐ)'), + (0x320C, '3', '(ᄑ)'), + (0x320D, '3', '(ᄒ)'), + (0x320E, '3', '(가)'), + (0x320F, '3', '(나)'), + (0x3210, '3', '(다)'), + (0x3211, '3', '(라)'), + (0x3212, '3', '(마)'), + (0x3213, '3', '(바)'), + (0x3214, '3', '(사)'), + (0x3215, '3', '(아)'), + (0x3216, '3', '(자)'), + (0x3217, '3', '(차)'), + (0x3218, '3', '(카)'), + (0x3219, '3', '(타)'), + (0x321A, '3', '(파)'), + (0x321B, '3', '(하)'), + (0x321C, '3', '(주)'), + (0x321D, '3', '(오전)'), + (0x321E, '3', '(오후)'), + (0x321F, 'X'), + (0x3220, '3', '(一)'), + (0x3221, '3', '(二)'), + (0x3222, '3', '(三)'), + (0x3223, '3', '(四)'), + (0x3224, '3', '(五)'), + (0x3225, '3', '(六)'), + (0x3226, '3', '(七)'), + (0x3227, '3', '(八)'), + (0x3228, '3', '(九)'), + (0x3229, '3', '(十)'), + (0x322A, '3', '(月)'), + (0x322B, '3', '(火)'), + (0x322C, '3', '(水)'), + (0x322D, '3', '(木)'), + (0x322E, '3', '(金)'), + (0x322F, '3', '(土)'), + (0x3230, '3', '(日)'), + (0x3231, '3', '(株)'), + (0x3232, '3', '(有)'), + (0x3233, '3', '(社)'), + (0x3234, '3', '(名)'), + (0x3235, '3', '(特)'), + (0x3236, '3', '(財)'), + (0x3237, '3', '(祝)'), + (0x3238, '3', '(労)'), + (0x3239, '3', '(代)'), + (0x323A, '3', '(呼)'), + (0x323B, '3', '(学)'), + (0x323C, '3', '(監)'), + (0x323D, '3', '(企)'), + (0x323E, '3', '(資)'), + (0x323F, '3', '(協)'), + (0x3240, '3', '(祭)'), + (0x3241, '3', '(休)'), + (0x3242, '3', '(自)'), + (0x3243, '3', '(至)'), + (0x3244, 'M', '問'), + (0x3245, 'M', '幼'), + (0x3246, 'M', '文'), + ] + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3247, 'M', '箏'), + (0x3248, 'V'), + (0x3250, 'M', 'pte'), + (0x3251, 'M', '21'), + (0x3252, 'M', '22'), + (0x3253, 'M', '23'), + (0x3254, 'M', '24'), + (0x3255, 'M', '25'), + (0x3256, 'M', '26'), + (0x3257, 'M', '27'), + (0x3258, 'M', '28'), + (0x3259, 'M', '29'), + (0x325A, 'M', '30'), + (0x325B, 'M', '31'), + (0x325C, 'M', '32'), + (0x325D, 'M', '33'), + (0x325E, 'M', '34'), + (0x325F, 'M', '35'), + (0x3260, 'M', 'ᄀ'), + (0x3261, 'M', 'ᄂ'), + (0x3262, 'M', 'ᄃ'), + (0x3263, 'M', 'ᄅ'), + (0x3264, 'M', 'ᄆ'), + (0x3265, 'M', 'ᄇ'), + (0x3266, 'M', 'ᄉ'), + (0x3267, 'M', 'ᄋ'), + (0x3268, 'M', 'ᄌ'), + (0x3269, 'M', 'ᄎ'), + (0x326A, 'M', 'ᄏ'), + (0x326B, 'M', 'ᄐ'), + (0x326C, 'M', 'ᄑ'), + (0x326D, 'M', 'ᄒ'), + (0x326E, 'M', '가'), + (0x326F, 'M', '나'), + (0x3270, 'M', '다'), + (0x3271, 'M', '라'), + (0x3272, 'M', '마'), + (0x3273, 'M', '바'), + (0x3274, 'M', '사'), + (0x3275, 'M', '아'), + (0x3276, 'M', '자'), + (0x3277, 'M', '차'), + (0x3278, 'M', '카'), + (0x3279, 'M', '타'), + (0x327A, 'M', '파'), + (0x327B, 'M', '하'), + (0x327C, 'M', '참고'), + (0x327D, 'M', '주의'), + (0x327E, 'M', '우'), + (0x327F, 'V'), + (0x3280, 'M', '一'), + (0x3281, 'M', '二'), + (0x3282, 'M', '三'), + (0x3283, 'M', '四'), + (0x3284, 'M', '五'), + (0x3285, 'M', '六'), + (0x3286, 'M', '七'), + (0x3287, 'M', '八'), + (0x3288, 'M', '九'), + (0x3289, 'M', '十'), + (0x328A, 'M', '月'), + (0x328B, 'M', '火'), + (0x328C, 'M', '水'), + (0x328D, 'M', '木'), + (0x328E, 'M', '金'), + (0x328F, 'M', '土'), + (0x3290, 'M', '日'), + (0x3291, 'M', '株'), + (0x3292, 'M', '有'), + (0x3293, 'M', '社'), + (0x3294, 'M', '名'), + (0x3295, 'M', '特'), + (0x3296, 'M', '財'), + (0x3297, 'M', '祝'), + (0x3298, 'M', '労'), + (0x3299, 'M', '秘'), + (0x329A, 'M', '男'), + (0x329B, 'M', '女'), + (0x329C, 'M', '適'), + (0x329D, 'M', '優'), + (0x329E, 'M', '印'), + (0x329F, 'M', '注'), + (0x32A0, 'M', '項'), + (0x32A1, 'M', '休'), + (0x32A2, 'M', '写'), + (0x32A3, 'M', '正'), + (0x32A4, 'M', '上'), + (0x32A5, 'M', '中'), + (0x32A6, 'M', '下'), + (0x32A7, 'M', '左'), + (0x32A8, 'M', '右'), + (0x32A9, 'M', 
'医'), + (0x32AA, 'M', '宗'), + (0x32AB, 'M', '学'), + (0x32AC, 'M', '監'), + (0x32AD, 'M', '企'), + (0x32AE, 'M', '資'), + (0x32AF, 'M', '協'), + (0x32B0, 'M', '夜'), + (0x32B1, 'M', '36'), + ] + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x32B2, 'M', '37'), + (0x32B3, 'M', '38'), + (0x32B4, 'M', '39'), + (0x32B5, 'M', '40'), + (0x32B6, 'M', '41'), + (0x32B7, 'M', '42'), + (0x32B8, 'M', '43'), + (0x32B9, 'M', '44'), + (0x32BA, 'M', '45'), + (0x32BB, 'M', '46'), + (0x32BC, 'M', '47'), + (0x32BD, 'M', '48'), + (0x32BE, 'M', '49'), + (0x32BF, 'M', '50'), + (0x32C0, 'M', '1月'), + (0x32C1, 'M', '2月'), + (0x32C2, 'M', '3月'), + (0x32C3, 'M', '4月'), + (0x32C4, 'M', '5月'), + (0x32C5, 'M', '6月'), + (0x32C6, 'M', '7月'), + (0x32C7, 'M', '8月'), + (0x32C8, 'M', '9月'), + (0x32C9, 'M', '10月'), + (0x32CA, 'M', '11月'), + (0x32CB, 'M', '12月'), + (0x32CC, 'M', 'hg'), + (0x32CD, 'M', 'erg'), + (0x32CE, 'M', 'ev'), + (0x32CF, 'M', 'ltd'), + (0x32D0, 'M', 'ア'), + (0x32D1, 'M', 'イ'), + (0x32D2, 'M', 'ウ'), + (0x32D3, 'M', 'エ'), + (0x32D4, 'M', 'オ'), + (0x32D5, 'M', 'カ'), + (0x32D6, 'M', 'キ'), + (0x32D7, 'M', 'ク'), + (0x32D8, 'M', 'ケ'), + (0x32D9, 'M', 'コ'), + (0x32DA, 'M', 'サ'), + (0x32DB, 'M', 'シ'), + (0x32DC, 'M', 'ス'), + (0x32DD, 'M', 'セ'), + (0x32DE, 'M', 'ソ'), + (0x32DF, 'M', 'タ'), + (0x32E0, 'M', 'チ'), + (0x32E1, 'M', 'ツ'), + (0x32E2, 'M', 'テ'), + (0x32E3, 'M', 'ト'), + (0x32E4, 'M', 'ナ'), + (0x32E5, 'M', 'ニ'), + (0x32E6, 'M', 'ヌ'), + (0x32E7, 'M', 'ネ'), + (0x32E8, 'M', 'ノ'), + (0x32E9, 'M', 'ハ'), + (0x32EA, 'M', 'ヒ'), + (0x32EB, 'M', 'フ'), + (0x32EC, 'M', 'ヘ'), + (0x32ED, 'M', 'ホ'), + (0x32EE, 'M', 'マ'), + (0x32EF, 'M', 'ミ'), + (0x32F0, 'M', 'ム'), + (0x32F1, 'M', 'メ'), + (0x32F2, 'M', 'モ'), + (0x32F3, 'M', 'ヤ'), + (0x32F4, 'M', 'ユ'), + (0x32F5, 'M', 'ヨ'), + (0x32F6, 'M', 'ラ'), + (0x32F7, 'M', 'リ'), + (0x32F8, 'M', 'ル'), + (0x32F9, 'M', 'レ'), + (0x32FA, 'M', 'ロ'), + (0x32FB, 'M', 'ワ'), + (0x32FC, 'M', 'ヰ'), + (0x32FD, 'M', 'ヱ'), + (0x32FE, 'M', 'ヲ'), + (0x32FF, 'M', '令和'), + (0x3300, 'M', 'アパート'), + (0x3301, 'M', 'アルファ'), + (0x3302, 'M', 'アンペア'), + (0x3303, 'M', 'アール'), + (0x3304, 'M', 'イニング'), + (0x3305, 'M', 'インチ'), + (0x3306, 'M', 'ウォン'), + (0x3307, 'M', 'エスクード'), + (0x3308, 'M', 'エーカー'), + (0x3309, 'M', 'オンス'), + (0x330A, 'M', 'オーム'), + (0x330B, 'M', 'カイリ'), + (0x330C, 'M', 'カラット'), + (0x330D, 'M', 'カロリー'), + (0x330E, 'M', 'ガロン'), + (0x330F, 'M', 'ガンマ'), + (0x3310, 'M', 'ギガ'), + (0x3311, 'M', 'ギニー'), + (0x3312, 'M', 'キュリー'), + (0x3313, 'M', 'ギルダー'), + (0x3314, 'M', 'キロ'), + (0x3315, 'M', 'キログラム'), + ] + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3316, 'M', 'キロメートル'), + (0x3317, 'M', 'キロワット'), + (0x3318, 'M', 'グラム'), + (0x3319, 'M', 'グラムトン'), + (0x331A, 'M', 'クルゼイロ'), + (0x331B, 'M', 'クローネ'), + (0x331C, 'M', 'ケース'), + (0x331D, 'M', 'コルナ'), + (0x331E, 'M', 'コーポ'), + (0x331F, 'M', 'サイクル'), + (0x3320, 'M', 'サンチーム'), + (0x3321, 'M', 'シリング'), + (0x3322, 'M', 'センチ'), + (0x3323, 'M', 'セント'), + (0x3324, 'M', 'ダース'), + (0x3325, 'M', 'デシ'), + (0x3326, 'M', 'ドル'), + (0x3327, 'M', 'トン'), + (0x3328, 'M', 'ナノ'), + (0x3329, 'M', 'ノット'), + (0x332A, 'M', 'ハイツ'), + (0x332B, 'M', 'パーセント'), + (0x332C, 'M', 'パーツ'), + (0x332D, 'M', 'バーレル'), + (0x332E, 'M', 'ピアストル'), + (0x332F, 'M', 'ピクル'), + (0x3330, 'M', 'ピコ'), + (0x3331, 'M', 'ビル'), + (0x3332, 'M', 'ファラッド'), + (0x3333, 'M', 'フィート'), + (0x3334, 'M', 'ブッシェル'), + (0x3335, 'M', 'フラン'), + (0x3336, 'M', 'ヘクタール'), + (0x3337, 'M', 'ペソ'), + (0x3338, 'M', 'ペニヒ'), + (0x3339, 'M', 'ヘルツ'), + (0x333A, 'M', 'ペンス'), 
+ (0x333B, 'M', 'ページ'), + (0x333C, 'M', 'ベータ'), + (0x333D, 'M', 'ポイント'), + (0x333E, 'M', 'ボルト'), + (0x333F, 'M', 'ホン'), + (0x3340, 'M', 'ポンド'), + (0x3341, 'M', 'ホール'), + (0x3342, 'M', 'ホーン'), + (0x3343, 'M', 'マイクロ'), + (0x3344, 'M', 'マイル'), + (0x3345, 'M', 'マッハ'), + (0x3346, 'M', 'マルク'), + (0x3347, 'M', 'マンション'), + (0x3348, 'M', 'ミクロン'), + (0x3349, 'M', 'ミリ'), + (0x334A, 'M', 'ミリバール'), + (0x334B, 'M', 'メガ'), + (0x334C, 'M', 'メガトン'), + (0x334D, 'M', 'メートル'), + (0x334E, 'M', 'ヤード'), + (0x334F, 'M', 'ヤール'), + (0x3350, 'M', 'ユアン'), + (0x3351, 'M', 'リットル'), + (0x3352, 'M', 'リラ'), + (0x3353, 'M', 'ルピー'), + (0x3354, 'M', 'ルーブル'), + (0x3355, 'M', 'レム'), + (0x3356, 'M', 'レントゲン'), + (0x3357, 'M', 'ワット'), + (0x3358, 'M', '0点'), + (0x3359, 'M', '1点'), + (0x335A, 'M', '2点'), + (0x335B, 'M', '3点'), + (0x335C, 'M', '4点'), + (0x335D, 'M', '5点'), + (0x335E, 'M', '6点'), + (0x335F, 'M', '7点'), + (0x3360, 'M', '8点'), + (0x3361, 'M', '9点'), + (0x3362, 'M', '10点'), + (0x3363, 'M', '11点'), + (0x3364, 'M', '12点'), + (0x3365, 'M', '13点'), + (0x3366, 'M', '14点'), + (0x3367, 'M', '15点'), + (0x3368, 'M', '16点'), + (0x3369, 'M', '17点'), + (0x336A, 'M', '18点'), + (0x336B, 'M', '19点'), + (0x336C, 'M', '20点'), + (0x336D, 'M', '21点'), + (0x336E, 'M', '22点'), + (0x336F, 'M', '23点'), + (0x3370, 'M', '24点'), + (0x3371, 'M', 'hpa'), + (0x3372, 'M', 'da'), + (0x3373, 'M', 'au'), + (0x3374, 'M', 'bar'), + (0x3375, 'M', 'ov'), + (0x3376, 'M', 'pc'), + (0x3377, 'M', 'dm'), + (0x3378, 'M', 'dm2'), + (0x3379, 'M', 'dm3'), + ] + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x337A, 'M', 'iu'), + (0x337B, 'M', '平成'), + (0x337C, 'M', '昭和'), + (0x337D, 'M', '大正'), + (0x337E, 'M', '明治'), + (0x337F, 'M', '株式会社'), + (0x3380, 'M', 'pa'), + (0x3381, 'M', 'na'), + (0x3382, 'M', 'μa'), + (0x3383, 'M', 'ma'), + (0x3384, 'M', 'ka'), + (0x3385, 'M', 'kb'), + (0x3386, 'M', 'mb'), + (0x3387, 'M', 'gb'), + (0x3388, 'M', 'cal'), + (0x3389, 'M', 'kcal'), + (0x338A, 'M', 'pf'), + (0x338B, 'M', 'nf'), + (0x338C, 'M', 'μf'), + (0x338D, 'M', 'μg'), + (0x338E, 'M', 'mg'), + (0x338F, 'M', 'kg'), + (0x3390, 'M', 'hz'), + (0x3391, 'M', 'khz'), + (0x3392, 'M', 'mhz'), + (0x3393, 'M', 'ghz'), + (0x3394, 'M', 'thz'), + (0x3395, 'M', 'μl'), + (0x3396, 'M', 'ml'), + (0x3397, 'M', 'dl'), + (0x3398, 'M', 'kl'), + (0x3399, 'M', 'fm'), + (0x339A, 'M', 'nm'), + (0x339B, 'M', 'μm'), + (0x339C, 'M', 'mm'), + (0x339D, 'M', 'cm'), + (0x339E, 'M', 'km'), + (0x339F, 'M', 'mm2'), + (0x33A0, 'M', 'cm2'), + (0x33A1, 'M', 'm2'), + (0x33A2, 'M', 'km2'), + (0x33A3, 'M', 'mm3'), + (0x33A4, 'M', 'cm3'), + (0x33A5, 'M', 'm3'), + (0x33A6, 'M', 'km3'), + (0x33A7, 'M', 'm∕s'), + (0x33A8, 'M', 'm∕s2'), + (0x33A9, 'M', 'pa'), + (0x33AA, 'M', 'kpa'), + (0x33AB, 'M', 'mpa'), + (0x33AC, 'M', 'gpa'), + (0x33AD, 'M', 'rad'), + (0x33AE, 'M', 'rad∕s'), + (0x33AF, 'M', 'rad∕s2'), + (0x33B0, 'M', 'ps'), + (0x33B1, 'M', 'ns'), + (0x33B2, 'M', 'μs'), + (0x33B3, 'M', 'ms'), + (0x33B4, 'M', 'pv'), + (0x33B5, 'M', 'nv'), + (0x33B6, 'M', 'μv'), + (0x33B7, 'M', 'mv'), + (0x33B8, 'M', 'kv'), + (0x33B9, 'M', 'mv'), + (0x33BA, 'M', 'pw'), + (0x33BB, 'M', 'nw'), + (0x33BC, 'M', 'μw'), + (0x33BD, 'M', 'mw'), + (0x33BE, 'M', 'kw'), + (0x33BF, 'M', 'mw'), + (0x33C0, 'M', 'kω'), + (0x33C1, 'M', 'mω'), + (0x33C2, 'X'), + (0x33C3, 'M', 'bq'), + (0x33C4, 'M', 'cc'), + (0x33C5, 'M', 'cd'), + (0x33C6, 'M', 'c∕kg'), + (0x33C7, 'X'), + (0x33C8, 'M', 'db'), + (0x33C9, 'M', 'gy'), + (0x33CA, 'M', 'ha'), + (0x33CB, 'M', 'hp'), + (0x33CC, 'M', 'in'), + (0x33CD, 'M', 'kk'), + (0x33CE, 
'M', 'km'), + (0x33CF, 'M', 'kt'), + (0x33D0, 'M', 'lm'), + (0x33D1, 'M', 'ln'), + (0x33D2, 'M', 'log'), + (0x33D3, 'M', 'lx'), + (0x33D4, 'M', 'mb'), + (0x33D5, 'M', 'mil'), + (0x33D6, 'M', 'mol'), + (0x33D7, 'M', 'ph'), + (0x33D8, 'X'), + (0x33D9, 'M', 'ppm'), + (0x33DA, 'M', 'pr'), + (0x33DB, 'M', 'sr'), + (0x33DC, 'M', 'sv'), + (0x33DD, 'M', 'wb'), + ] + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x33DE, 'M', 'v∕m'), + (0x33DF, 'M', 'a∕m'), + (0x33E0, 'M', '1日'), + (0x33E1, 'M', '2日'), + (0x33E2, 'M', '3日'), + (0x33E3, 'M', '4日'), + (0x33E4, 'M', '5日'), + (0x33E5, 'M', '6日'), + (0x33E6, 'M', '7日'), + (0x33E7, 'M', '8日'), + (0x33E8, 'M', '9日'), + (0x33E9, 'M', '10日'), + (0x33EA, 'M', '11日'), + (0x33EB, 'M', '12日'), + (0x33EC, 'M', '13日'), + (0x33ED, 'M', '14日'), + (0x33EE, 'M', '15日'), + (0x33EF, 'M', '16日'), + (0x33F0, 'M', '17日'), + (0x33F1, 'M', '18日'), + (0x33F2, 'M', '19日'), + (0x33F3, 'M', '20日'), + (0x33F4, 'M', '21日'), + (0x33F5, 'M', '22日'), + (0x33F6, 'M', '23日'), + (0x33F7, 'M', '24日'), + (0x33F8, 'M', '25日'), + (0x33F9, 'M', '26日'), + (0x33FA, 'M', '27日'), + (0x33FB, 'M', '28日'), + (0x33FC, 'M', '29日'), + (0x33FD, 'M', '30日'), + (0x33FE, 'M', '31日'), + (0x33FF, 'M', 'gal'), + (0x3400, 'V'), + (0xA48D, 'X'), + (0xA490, 'V'), + (0xA4C7, 'X'), + (0xA4D0, 'V'), + (0xA62C, 'X'), + (0xA640, 'M', 'ꙁ'), + (0xA641, 'V'), + (0xA642, 'M', 'ꙃ'), + (0xA643, 'V'), + (0xA644, 'M', 'ꙅ'), + (0xA645, 'V'), + (0xA646, 'M', 'ꙇ'), + (0xA647, 'V'), + (0xA648, 'M', 'ꙉ'), + (0xA649, 'V'), + (0xA64A, 'M', 'ꙋ'), + (0xA64B, 'V'), + (0xA64C, 'M', 'ꙍ'), + (0xA64D, 'V'), + (0xA64E, 'M', 'ꙏ'), + (0xA64F, 'V'), + (0xA650, 'M', 'ꙑ'), + (0xA651, 'V'), + (0xA652, 'M', 'ꙓ'), + (0xA653, 'V'), + (0xA654, 'M', 'ꙕ'), + (0xA655, 'V'), + (0xA656, 'M', 'ꙗ'), + (0xA657, 'V'), + (0xA658, 'M', 'ꙙ'), + (0xA659, 'V'), + (0xA65A, 'M', 'ꙛ'), + (0xA65B, 'V'), + (0xA65C, 'M', 'ꙝ'), + (0xA65D, 'V'), + (0xA65E, 'M', 'ꙟ'), + (0xA65F, 'V'), + (0xA660, 'M', 'ꙡ'), + (0xA661, 'V'), + (0xA662, 'M', 'ꙣ'), + (0xA663, 'V'), + (0xA664, 'M', 'ꙥ'), + (0xA665, 'V'), + (0xA666, 'M', 'ꙧ'), + (0xA667, 'V'), + (0xA668, 'M', 'ꙩ'), + (0xA669, 'V'), + (0xA66A, 'M', 'ꙫ'), + (0xA66B, 'V'), + (0xA66C, 'M', 'ꙭ'), + (0xA66D, 'V'), + (0xA680, 'M', 'ꚁ'), + (0xA681, 'V'), + (0xA682, 'M', 'ꚃ'), + (0xA683, 'V'), + (0xA684, 'M', 'ꚅ'), + (0xA685, 'V'), + (0xA686, 'M', 'ꚇ'), + (0xA687, 'V'), + (0xA688, 'M', 'ꚉ'), + (0xA689, 'V'), + (0xA68A, 'M', 'ꚋ'), + (0xA68B, 'V'), + (0xA68C, 'M', 'ꚍ'), + (0xA68D, 'V'), + ] + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA68E, 'M', 'ꚏ'), + (0xA68F, 'V'), + (0xA690, 'M', 'ꚑ'), + (0xA691, 'V'), + (0xA692, 'M', 'ꚓ'), + (0xA693, 'V'), + (0xA694, 'M', 'ꚕ'), + (0xA695, 'V'), + (0xA696, 'M', 'ꚗ'), + (0xA697, 'V'), + (0xA698, 'M', 'ꚙ'), + (0xA699, 'V'), + (0xA69A, 'M', 'ꚛ'), + (0xA69B, 'V'), + (0xA69C, 'M', 'ъ'), + (0xA69D, 'M', 'ь'), + (0xA69E, 'V'), + (0xA6F8, 'X'), + (0xA700, 'V'), + (0xA722, 'M', 'ꜣ'), + (0xA723, 'V'), + (0xA724, 'M', 'ꜥ'), + (0xA725, 'V'), + (0xA726, 'M', 'ꜧ'), + (0xA727, 'V'), + (0xA728, 'M', 'ꜩ'), + (0xA729, 'V'), + (0xA72A, 'M', 'ꜫ'), + (0xA72B, 'V'), + (0xA72C, 'M', 'ꜭ'), + (0xA72D, 'V'), + (0xA72E, 'M', 'ꜯ'), + (0xA72F, 'V'), + (0xA732, 'M', 'ꜳ'), + (0xA733, 'V'), + (0xA734, 'M', 'ꜵ'), + (0xA735, 'V'), + (0xA736, 'M', 'ꜷ'), + (0xA737, 'V'), + (0xA738, 'M', 'ꜹ'), + (0xA739, 'V'), + (0xA73A, 'M', 'ꜻ'), + (0xA73B, 'V'), + (0xA73C, 'M', 'ꜽ'), + (0xA73D, 'V'), + (0xA73E, 'M', 'ꜿ'), + (0xA73F, 'V'), + (0xA740, 'M', 'ꝁ'), + (0xA741, 
'V'), + (0xA742, 'M', 'ꝃ'), + (0xA743, 'V'), + (0xA744, 'M', 'ꝅ'), + (0xA745, 'V'), + (0xA746, 'M', 'ꝇ'), + (0xA747, 'V'), + (0xA748, 'M', 'ꝉ'), + (0xA749, 'V'), + (0xA74A, 'M', 'ꝋ'), + (0xA74B, 'V'), + (0xA74C, 'M', 'ꝍ'), + (0xA74D, 'V'), + (0xA74E, 'M', 'ꝏ'), + (0xA74F, 'V'), + (0xA750, 'M', 'ꝑ'), + (0xA751, 'V'), + (0xA752, 'M', 'ꝓ'), + (0xA753, 'V'), + (0xA754, 'M', 'ꝕ'), + (0xA755, 'V'), + (0xA756, 'M', 'ꝗ'), + (0xA757, 'V'), + (0xA758, 'M', 'ꝙ'), + (0xA759, 'V'), + (0xA75A, 'M', 'ꝛ'), + (0xA75B, 'V'), + (0xA75C, 'M', 'ꝝ'), + (0xA75D, 'V'), + (0xA75E, 'M', 'ꝟ'), + (0xA75F, 'V'), + (0xA760, 'M', 'ꝡ'), + (0xA761, 'V'), + (0xA762, 'M', 'ꝣ'), + (0xA763, 'V'), + (0xA764, 'M', 'ꝥ'), + (0xA765, 'V'), + (0xA766, 'M', 'ꝧ'), + (0xA767, 'V'), + (0xA768, 'M', 'ꝩ'), + (0xA769, 'V'), + (0xA76A, 'M', 'ꝫ'), + (0xA76B, 'V'), + (0xA76C, 'M', 'ꝭ'), + (0xA76D, 'V'), + (0xA76E, 'M', 'ꝯ'), + (0xA76F, 'V'), + (0xA770, 'M', 'ꝯ'), + (0xA771, 'V'), + (0xA779, 'M', 'ꝺ'), + (0xA77A, 'V'), + (0xA77B, 'M', 'ꝼ'), + ] + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA77C, 'V'), + (0xA77D, 'M', 'ᵹ'), + (0xA77E, 'M', 'ꝿ'), + (0xA77F, 'V'), + (0xA780, 'M', 'ꞁ'), + (0xA781, 'V'), + (0xA782, 'M', 'ꞃ'), + (0xA783, 'V'), + (0xA784, 'M', 'ꞅ'), + (0xA785, 'V'), + (0xA786, 'M', 'ꞇ'), + (0xA787, 'V'), + (0xA78B, 'M', 'ꞌ'), + (0xA78C, 'V'), + (0xA78D, 'M', 'ɥ'), + (0xA78E, 'V'), + (0xA790, 'M', 'ꞑ'), + (0xA791, 'V'), + (0xA792, 'M', 'ꞓ'), + (0xA793, 'V'), + (0xA796, 'M', 'ꞗ'), + (0xA797, 'V'), + (0xA798, 'M', 'ꞙ'), + (0xA799, 'V'), + (0xA79A, 'M', 'ꞛ'), + (0xA79B, 'V'), + (0xA79C, 'M', 'ꞝ'), + (0xA79D, 'V'), + (0xA79E, 'M', 'ꞟ'), + (0xA79F, 'V'), + (0xA7A0, 'M', 'ꞡ'), + (0xA7A1, 'V'), + (0xA7A2, 'M', 'ꞣ'), + (0xA7A3, 'V'), + (0xA7A4, 'M', 'ꞥ'), + (0xA7A5, 'V'), + (0xA7A6, 'M', 'ꞧ'), + (0xA7A7, 'V'), + (0xA7A8, 'M', 'ꞩ'), + (0xA7A9, 'V'), + (0xA7AA, 'M', 'ɦ'), + (0xA7AB, 'M', 'ɜ'), + (0xA7AC, 'M', 'ɡ'), + (0xA7AD, 'M', 'ɬ'), + (0xA7AE, 'M', 'ɪ'), + (0xA7AF, 'V'), + (0xA7B0, 'M', 'ʞ'), + (0xA7B1, 'M', 'ʇ'), + (0xA7B2, 'M', 'ʝ'), + (0xA7B3, 'M', 'ꭓ'), + (0xA7B4, 'M', 'ꞵ'), + (0xA7B5, 'V'), + (0xA7B6, 'M', 'ꞷ'), + (0xA7B7, 'V'), + (0xA7B8, 'M', 'ꞹ'), + (0xA7B9, 'V'), + (0xA7BA, 'M', 'ꞻ'), + (0xA7BB, 'V'), + (0xA7BC, 'M', 'ꞽ'), + (0xA7BD, 'V'), + (0xA7BE, 'M', 'ꞿ'), + (0xA7BF, 'V'), + (0xA7C0, 'M', 'ꟁ'), + (0xA7C1, 'V'), + (0xA7C2, 'M', 'ꟃ'), + (0xA7C3, 'V'), + (0xA7C4, 'M', 'ꞔ'), + (0xA7C5, 'M', 'ʂ'), + (0xA7C6, 'M', 'ᶎ'), + (0xA7C7, 'M', 'ꟈ'), + (0xA7C8, 'V'), + (0xA7C9, 'M', 'ꟊ'), + (0xA7CA, 'V'), + (0xA7CB, 'X'), + (0xA7D0, 'M', 'ꟑ'), + (0xA7D1, 'V'), + (0xA7D2, 'X'), + (0xA7D3, 'V'), + (0xA7D4, 'X'), + (0xA7D5, 'V'), + (0xA7D6, 'M', 'ꟗ'), + (0xA7D7, 'V'), + (0xA7D8, 'M', 'ꟙ'), + (0xA7D9, 'V'), + (0xA7DA, 'X'), + (0xA7F2, 'M', 'c'), + (0xA7F3, 'M', 'f'), + (0xA7F4, 'M', 'q'), + (0xA7F5, 'M', 'ꟶ'), + (0xA7F6, 'V'), + (0xA7F8, 'M', 'ħ'), + (0xA7F9, 'M', 'œ'), + (0xA7FA, 'V'), + (0xA82D, 'X'), + (0xA830, 'V'), + (0xA83A, 'X'), + (0xA840, 'V'), + (0xA878, 'X'), + (0xA880, 'V'), + (0xA8C6, 'X'), + ] + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA8CE, 'V'), + (0xA8DA, 'X'), + (0xA8E0, 'V'), + (0xA954, 'X'), + (0xA95F, 'V'), + (0xA97D, 'X'), + (0xA980, 'V'), + (0xA9CE, 'X'), + (0xA9CF, 'V'), + (0xA9DA, 'X'), + (0xA9DE, 'V'), + (0xA9FF, 'X'), + (0xAA00, 'V'), + (0xAA37, 'X'), + (0xAA40, 'V'), + (0xAA4E, 'X'), + (0xAA50, 'V'), + (0xAA5A, 'X'), + (0xAA5C, 'V'), + (0xAAC3, 'X'), + (0xAADB, 'V'), + (0xAAF7, 'X'), + (0xAB01, 'V'), + (0xAB07, 'X'), + (0xAB09, 
'V'), ... ]

[Elided: the remainder of this hunk is the idna package's machine-generated
uts46data.py, brought into the diff via the committed myenv/ virtual
environment. Segments _seg_39() through _seg_67() each return a sorted list of
(codepoint, status[, mapping]) rows from the Unicode IDNA / UTS #46 mapping
table. The status codes are 'V' (valid, kept as-is), 'M' (mapped to the third
field), 'X' (disallowed), 'I' (ignored, i.e. dropped), and '3' (allowed or
mapped only when strict STD3 rules are disabled). None of this data is
hand-written; it carries no review-relevant changes.]
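For orientation, tables of this shape are consumed by lookup, not read
linearly: the segments are concatenated into one list sorted by starting code
point, each row governs the range up to the next row's start, and a
character's governing row is found by bisecting on its code point. The sketch
below is a minimal illustration of that lookup under stated assumptions — the
two tiny segments (_seg_a, _seg_b) and the remap_char helper are hypothetical
names of mine, not the idna package's actual API, and the flag-dependent '3'
and 'D' statuses of the real table are deliberately left out.

from bisect import bisect_right
from typing import List, Tuple, Union

Row = Union[Tuple[int, str], Tuple[int, str, str]]

def _seg_a() -> List[Row]:
    # Hypothetical rows in the same shape as the vendored table: each row
    # starts a range that runs until the next row's starting code point.
    return [(0x41, 'M', 'a'), (0x61, 'V'), (0x7F, 'X')]

def _seg_b() -> List[Row]:
    return [(0xAD, 'I'), (0xB5, 'M', 'μ')]

# Concatenate the segments into one table sorted by starting code point.
table: List[Row] = _seg_a() + _seg_b()

def remap_char(ch: str) -> str:
    """Apply the row governing ch: keep it, map it, drop it, or reject it."""
    code = ord(ch)
    # 'Z' sorts after every status letter, so bisect_right lands just past
    # the last row whose starting code point is <= code.
    idx = bisect_right(table, (code, 'Z')) - 1
    if idx < 0:
        raise ValueError(f"no row covers U+{code:04X}")
    row = table[idx]
    status = row[1]
    if status == 'V':
        return ch        # valid: keep unchanged
    if status == 'M':
        return row[2]    # mapped: substitute the replacement string
    if status == 'I':
        return ''        # ignored: drop the character entirely
    # 'X' (and, in this simplified sketch, any other status) is rejected.
    raise ValueError(f"disallowed code point U+{code:04X}")

print(remap_char('A'))           # 'a'  (mapped)
print(remap_char('b'))           # 'b'  (valid, kept)
print(repr(remap_char('\xad')))  # ''   (soft hyphen ignored)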
(0x1D65D, 'M', 'h'), + (0x1D65E, 'M', 'i'), + (0x1D65F, 'M', 'j'), + (0x1D660, 'M', 'k'), + (0x1D661, 'M', 'l'), + (0x1D662, 'M', 'm'), + (0x1D663, 'M', 'n'), + (0x1D664, 'M', 'o'), + (0x1D665, 'M', 'p'), + (0x1D666, 'M', 'q'), + (0x1D667, 'M', 'r'), + (0x1D668, 'M', 's'), + (0x1D669, 'M', 't'), + (0x1D66A, 'M', 'u'), + (0x1D66B, 'M', 'v'), + (0x1D66C, 'M', 'w'), + (0x1D66D, 'M', 'x'), + (0x1D66E, 'M', 'y'), + (0x1D66F, 'M', 'z'), + (0x1D670, 'M', 'a'), + (0x1D671, 'M', 'b'), + (0x1D672, 'M', 'c'), + (0x1D673, 'M', 'd'), + (0x1D674, 'M', 'e'), + (0x1D675, 'M', 'f'), + (0x1D676, 'M', 'g'), + (0x1D677, 'M', 'h'), + (0x1D678, 'M', 'i'), + (0x1D679, 'M', 'j'), + (0x1D67A, 'M', 'k'), + (0x1D67B, 'M', 'l'), + (0x1D67C, 'M', 'm'), + (0x1D67D, 'M', 'n'), + (0x1D67E, 'M', 'o'), + (0x1D67F, 'M', 'p'), + (0x1D680, 'M', 'q'), + (0x1D681, 'M', 'r'), + (0x1D682, 'M', 's'), + (0x1D683, 'M', 't'), + (0x1D684, 'M', 'u'), + (0x1D685, 'M', 'v'), + (0x1D686, 'M', 'w'), + (0x1D687, 'M', 'x'), + (0x1D688, 'M', 'y'), + (0x1D689, 'M', 'z'), + (0x1D68A, 'M', 'a'), + (0x1D68B, 'M', 'b'), + (0x1D68C, 'M', 'c'), + (0x1D68D, 'M', 'd'), + (0x1D68E, 'M', 'e'), + ] + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D68F, 'M', 'f'), + (0x1D690, 'M', 'g'), + (0x1D691, 'M', 'h'), + (0x1D692, 'M', 'i'), + (0x1D693, 'M', 'j'), + (0x1D694, 'M', 'k'), + (0x1D695, 'M', 'l'), + (0x1D696, 'M', 'm'), + (0x1D697, 'M', 'n'), + (0x1D698, 'M', 'o'), + (0x1D699, 'M', 'p'), + (0x1D69A, 'M', 'q'), + (0x1D69B, 'M', 'r'), + (0x1D69C, 'M', 's'), + (0x1D69D, 'M', 't'), + (0x1D69E, 'M', 'u'), + (0x1D69F, 'M', 'v'), + (0x1D6A0, 'M', 'w'), + (0x1D6A1, 'M', 'x'), + (0x1D6A2, 'M', 'y'), + (0x1D6A3, 'M', 'z'), + (0x1D6A4, 'M', 'ı'), + (0x1D6A5, 'M', 'ȷ'), + (0x1D6A6, 'X'), + (0x1D6A8, 'M', 'α'), + (0x1D6A9, 'M', 'β'), + (0x1D6AA, 'M', 'γ'), + (0x1D6AB, 'M', 'δ'), + (0x1D6AC, 'M', 'ε'), + (0x1D6AD, 'M', 'ζ'), + (0x1D6AE, 'M', 'η'), + (0x1D6AF, 'M', 'θ'), + (0x1D6B0, 'M', 'ι'), + (0x1D6B1, 'M', 'κ'), + (0x1D6B2, 'M', 'λ'), + (0x1D6B3, 'M', 'μ'), + (0x1D6B4, 'M', 'ν'), + (0x1D6B5, 'M', 'ξ'), + (0x1D6B6, 'M', 'ο'), + (0x1D6B7, 'M', 'π'), + (0x1D6B8, 'M', 'ρ'), + (0x1D6B9, 'M', 'θ'), + (0x1D6BA, 'M', 'σ'), + (0x1D6BB, 'M', 'τ'), + (0x1D6BC, 'M', 'υ'), + (0x1D6BD, 'M', 'φ'), + (0x1D6BE, 'M', 'χ'), + (0x1D6BF, 'M', 'ψ'), + (0x1D6C0, 'M', 'ω'), + (0x1D6C1, 'M', '∇'), + (0x1D6C2, 'M', 'α'), + (0x1D6C3, 'M', 'β'), + (0x1D6C4, 'M', 'γ'), + (0x1D6C5, 'M', 'δ'), + (0x1D6C6, 'M', 'ε'), + (0x1D6C7, 'M', 'ζ'), + (0x1D6C8, 'M', 'η'), + (0x1D6C9, 'M', 'θ'), + (0x1D6CA, 'M', 'ι'), + (0x1D6CB, 'M', 'κ'), + (0x1D6CC, 'M', 'λ'), + (0x1D6CD, 'M', 'μ'), + (0x1D6CE, 'M', 'ν'), + (0x1D6CF, 'M', 'ξ'), + (0x1D6D0, 'M', 'ο'), + (0x1D6D1, 'M', 'π'), + (0x1D6D2, 'M', 'ρ'), + (0x1D6D3, 'M', 'σ'), + (0x1D6D5, 'M', 'τ'), + (0x1D6D6, 'M', 'υ'), + (0x1D6D7, 'M', 'φ'), + (0x1D6D8, 'M', 'χ'), + (0x1D6D9, 'M', 'ψ'), + (0x1D6DA, 'M', 'ω'), + (0x1D6DB, 'M', '∂'), + (0x1D6DC, 'M', 'ε'), + (0x1D6DD, 'M', 'θ'), + (0x1D6DE, 'M', 'κ'), + (0x1D6DF, 'M', 'φ'), + (0x1D6E0, 'M', 'ρ'), + (0x1D6E1, 'M', 'π'), + (0x1D6E2, 'M', 'α'), + (0x1D6E3, 'M', 'β'), + (0x1D6E4, 'M', 'γ'), + (0x1D6E5, 'M', 'δ'), + (0x1D6E6, 'M', 'ε'), + (0x1D6E7, 'M', 'ζ'), + (0x1D6E8, 'M', 'η'), + (0x1D6E9, 'M', 'θ'), + (0x1D6EA, 'M', 'ι'), + (0x1D6EB, 'M', 'κ'), + (0x1D6EC, 'M', 'λ'), + (0x1D6ED, 'M', 'μ'), + (0x1D6EE, 'M', 'ν'), + (0x1D6EF, 'M', 'ξ'), + (0x1D6F0, 'M', 'ο'), + (0x1D6F1, 'M', 'π'), + (0x1D6F2, 'M', 'ρ'), + (0x1D6F3, 'M', 'θ'), + (0x1D6F4, 'M', 'σ'), + ] + +def _seg_68() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D6F5, 'M', 'τ'), + (0x1D6F6, 'M', 'υ'), + (0x1D6F7, 'M', 'φ'), + (0x1D6F8, 'M', 'χ'), + (0x1D6F9, 'M', 'ψ'), + (0x1D6FA, 'M', 'ω'), + (0x1D6FB, 'M', '∇'), + (0x1D6FC, 'M', 'α'), + (0x1D6FD, 'M', 'β'), + (0x1D6FE, 'M', 'γ'), + (0x1D6FF, 'M', 'δ'), + (0x1D700, 'M', 'ε'), + (0x1D701, 'M', 'ζ'), + (0x1D702, 'M', 'η'), + (0x1D703, 'M', 'θ'), + (0x1D704, 'M', 'ι'), + (0x1D705, 'M', 'κ'), + (0x1D706, 'M', 'λ'), + (0x1D707, 'M', 'μ'), + (0x1D708, 'M', 'ν'), + (0x1D709, 'M', 'ξ'), + (0x1D70A, 'M', 'ο'), + (0x1D70B, 'M', 'π'), + (0x1D70C, 'M', 'ρ'), + (0x1D70D, 'M', 'σ'), + (0x1D70F, 'M', 'τ'), + (0x1D710, 'M', 'υ'), + (0x1D711, 'M', 'φ'), + (0x1D712, 'M', 'χ'), + (0x1D713, 'M', 'ψ'), + (0x1D714, 'M', 'ω'), + (0x1D715, 'M', '∂'), + (0x1D716, 'M', 'ε'), + (0x1D717, 'M', 'θ'), + (0x1D718, 'M', 'κ'), + (0x1D719, 'M', 'φ'), + (0x1D71A, 'M', 'ρ'), + (0x1D71B, 'M', 'π'), + (0x1D71C, 'M', 'α'), + (0x1D71D, 'M', 'β'), + (0x1D71E, 'M', 'γ'), + (0x1D71F, 'M', 'δ'), + (0x1D720, 'M', 'ε'), + (0x1D721, 'M', 'ζ'), + (0x1D722, 'M', 'η'), + (0x1D723, 'M', 'θ'), + (0x1D724, 'M', 'ι'), + (0x1D725, 'M', 'κ'), + (0x1D726, 'M', 'λ'), + (0x1D727, 'M', 'μ'), + (0x1D728, 'M', 'ν'), + (0x1D729, 'M', 'ξ'), + (0x1D72A, 'M', 'ο'), + (0x1D72B, 'M', 'π'), + (0x1D72C, 'M', 'ρ'), + (0x1D72D, 'M', 'θ'), + (0x1D72E, 'M', 'σ'), + (0x1D72F, 'M', 'τ'), + (0x1D730, 'M', 'υ'), + (0x1D731, 'M', 'φ'), + (0x1D732, 'M', 'χ'), + (0x1D733, 'M', 'ψ'), + (0x1D734, 'M', 'ω'), + (0x1D735, 'M', '∇'), + (0x1D736, 'M', 'α'), + (0x1D737, 'M', 'β'), + (0x1D738, 'M', 'γ'), + (0x1D739, 'M', 'δ'), + (0x1D73A, 'M', 'ε'), + (0x1D73B, 'M', 'ζ'), + (0x1D73C, 'M', 'η'), + (0x1D73D, 'M', 'θ'), + (0x1D73E, 'M', 'ι'), + (0x1D73F, 'M', 'κ'), + (0x1D740, 'M', 'λ'), + (0x1D741, 'M', 'μ'), + (0x1D742, 'M', 'ν'), + (0x1D743, 'M', 'ξ'), + (0x1D744, 'M', 'ο'), + (0x1D745, 'M', 'π'), + (0x1D746, 'M', 'ρ'), + (0x1D747, 'M', 'σ'), + (0x1D749, 'M', 'τ'), + (0x1D74A, 'M', 'υ'), + (0x1D74B, 'M', 'φ'), + (0x1D74C, 'M', 'χ'), + (0x1D74D, 'M', 'ψ'), + (0x1D74E, 'M', 'ω'), + (0x1D74F, 'M', '∂'), + (0x1D750, 'M', 'ε'), + (0x1D751, 'M', 'θ'), + (0x1D752, 'M', 'κ'), + (0x1D753, 'M', 'φ'), + (0x1D754, 'M', 'ρ'), + (0x1D755, 'M', 'π'), + (0x1D756, 'M', 'α'), + (0x1D757, 'M', 'β'), + (0x1D758, 'M', 'γ'), + (0x1D759, 'M', 'δ'), + (0x1D75A, 'M', 'ε'), + ] + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D75B, 'M', 'ζ'), + (0x1D75C, 'M', 'η'), + (0x1D75D, 'M', 'θ'), + (0x1D75E, 'M', 'ι'), + (0x1D75F, 'M', 'κ'), + (0x1D760, 'M', 'λ'), + (0x1D761, 'M', 'μ'), + (0x1D762, 'M', 'ν'), + (0x1D763, 'M', 'ξ'), + (0x1D764, 'M', 'ο'), + (0x1D765, 'M', 'π'), + (0x1D766, 'M', 'ρ'), + (0x1D767, 'M', 'θ'), + (0x1D768, 'M', 'σ'), + (0x1D769, 'M', 'τ'), + (0x1D76A, 'M', 'υ'), + (0x1D76B, 'M', 'φ'), + (0x1D76C, 'M', 'χ'), + (0x1D76D, 'M', 'ψ'), + (0x1D76E, 'M', 'ω'), + (0x1D76F, 'M', '∇'), + (0x1D770, 'M', 'α'), + (0x1D771, 'M', 'β'), + (0x1D772, 'M', 'γ'), + (0x1D773, 'M', 'δ'), + (0x1D774, 'M', 'ε'), + (0x1D775, 'M', 'ζ'), + (0x1D776, 'M', 'η'), + (0x1D777, 'M', 'θ'), + (0x1D778, 'M', 'ι'), + (0x1D779, 'M', 'κ'), + (0x1D77A, 'M', 'λ'), + (0x1D77B, 'M', 'μ'), + (0x1D77C, 'M', 'ν'), + (0x1D77D, 'M', 'ξ'), + (0x1D77E, 'M', 'ο'), + (0x1D77F, 'M', 'π'), + (0x1D780, 'M', 'ρ'), + (0x1D781, 'M', 'σ'), + (0x1D783, 'M', 'τ'), + (0x1D784, 'M', 'υ'), + (0x1D785, 'M', 'φ'), + (0x1D786, 'M', 'χ'), + (0x1D787, 'M', 'ψ'), + (0x1D788, 'M', 'ω'), + (0x1D789, 'M', '∂'), + (0x1D78A, 'M', 'ε'), + (0x1D78B, 'M', 'θ'), 
+ (0x1D78C, 'M', 'κ'), + (0x1D78D, 'M', 'φ'), + (0x1D78E, 'M', 'ρ'), + (0x1D78F, 'M', 'π'), + (0x1D790, 'M', 'α'), + (0x1D791, 'M', 'β'), + (0x1D792, 'M', 'γ'), + (0x1D793, 'M', 'δ'), + (0x1D794, 'M', 'ε'), + (0x1D795, 'M', 'ζ'), + (0x1D796, 'M', 'η'), + (0x1D797, 'M', 'θ'), + (0x1D798, 'M', 'ι'), + (0x1D799, 'M', 'κ'), + (0x1D79A, 'M', 'λ'), + (0x1D79B, 'M', 'μ'), + (0x1D79C, 'M', 'ν'), + (0x1D79D, 'M', 'ξ'), + (0x1D79E, 'M', 'ο'), + (0x1D79F, 'M', 'π'), + (0x1D7A0, 'M', 'ρ'), + (0x1D7A1, 'M', 'θ'), + (0x1D7A2, 'M', 'σ'), + (0x1D7A3, 'M', 'τ'), + (0x1D7A4, 'M', 'υ'), + (0x1D7A5, 'M', 'φ'), + (0x1D7A6, 'M', 'χ'), + (0x1D7A7, 'M', 'ψ'), + (0x1D7A8, 'M', 'ω'), + (0x1D7A9, 'M', '∇'), + (0x1D7AA, 'M', 'α'), + (0x1D7AB, 'M', 'β'), + (0x1D7AC, 'M', 'γ'), + (0x1D7AD, 'M', 'δ'), + (0x1D7AE, 'M', 'ε'), + (0x1D7AF, 'M', 'ζ'), + (0x1D7B0, 'M', 'η'), + (0x1D7B1, 'M', 'θ'), + (0x1D7B2, 'M', 'ι'), + (0x1D7B3, 'M', 'κ'), + (0x1D7B4, 'M', 'λ'), + (0x1D7B5, 'M', 'μ'), + (0x1D7B6, 'M', 'ν'), + (0x1D7B7, 'M', 'ξ'), + (0x1D7B8, 'M', 'ο'), + (0x1D7B9, 'M', 'π'), + (0x1D7BA, 'M', 'ρ'), + (0x1D7BB, 'M', 'σ'), + (0x1D7BD, 'M', 'τ'), + (0x1D7BE, 'M', 'υ'), + (0x1D7BF, 'M', 'φ'), + (0x1D7C0, 'M', 'χ'), + ] + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D7C1, 'M', 'ψ'), + (0x1D7C2, 'M', 'ω'), + (0x1D7C3, 'M', '∂'), + (0x1D7C4, 'M', 'ε'), + (0x1D7C5, 'M', 'θ'), + (0x1D7C6, 'M', 'κ'), + (0x1D7C7, 'M', 'φ'), + (0x1D7C8, 'M', 'ρ'), + (0x1D7C9, 'M', 'π'), + (0x1D7CA, 'M', 'ϝ'), + (0x1D7CC, 'X'), + (0x1D7CE, 'M', '0'), + (0x1D7CF, 'M', '1'), + (0x1D7D0, 'M', '2'), + (0x1D7D1, 'M', '3'), + (0x1D7D2, 'M', '4'), + (0x1D7D3, 'M', '5'), + (0x1D7D4, 'M', '6'), + (0x1D7D5, 'M', '7'), + (0x1D7D6, 'M', '8'), + (0x1D7D7, 'M', '9'), + (0x1D7D8, 'M', '0'), + (0x1D7D9, 'M', '1'), + (0x1D7DA, 'M', '2'), + (0x1D7DB, 'M', '3'), + (0x1D7DC, 'M', '4'), + (0x1D7DD, 'M', '5'), + (0x1D7DE, 'M', '6'), + (0x1D7DF, 'M', '7'), + (0x1D7E0, 'M', '8'), + (0x1D7E1, 'M', '9'), + (0x1D7E2, 'M', '0'), + (0x1D7E3, 'M', '1'), + (0x1D7E4, 'M', '2'), + (0x1D7E5, 'M', '3'), + (0x1D7E6, 'M', '4'), + (0x1D7E7, 'M', '5'), + (0x1D7E8, 'M', '6'), + (0x1D7E9, 'M', '7'), + (0x1D7EA, 'M', '8'), + (0x1D7EB, 'M', '9'), + (0x1D7EC, 'M', '0'), + (0x1D7ED, 'M', '1'), + (0x1D7EE, 'M', '2'), + (0x1D7EF, 'M', '3'), + (0x1D7F0, 'M', '4'), + (0x1D7F1, 'M', '5'), + (0x1D7F2, 'M', '6'), + (0x1D7F3, 'M', '7'), + (0x1D7F4, 'M', '8'), + (0x1D7F5, 'M', '9'), + (0x1D7F6, 'M', '0'), + (0x1D7F7, 'M', '1'), + (0x1D7F8, 'M', '2'), + (0x1D7F9, 'M', '3'), + (0x1D7FA, 'M', '4'), + (0x1D7FB, 'M', '5'), + (0x1D7FC, 'M', '6'), + (0x1D7FD, 'M', '7'), + (0x1D7FE, 'M', '8'), + (0x1D7FF, 'M', '9'), + (0x1D800, 'V'), + (0x1DA8C, 'X'), + (0x1DA9B, 'V'), + (0x1DAA0, 'X'), + (0x1DAA1, 'V'), + (0x1DAB0, 'X'), + (0x1DF00, 'V'), + (0x1DF1F, 'X'), + (0x1E000, 'V'), + (0x1E007, 'X'), + (0x1E008, 'V'), + (0x1E019, 'X'), + (0x1E01B, 'V'), + (0x1E022, 'X'), + (0x1E023, 'V'), + (0x1E025, 'X'), + (0x1E026, 'V'), + (0x1E02B, 'X'), + (0x1E100, 'V'), + (0x1E12D, 'X'), + (0x1E130, 'V'), + (0x1E13E, 'X'), + (0x1E140, 'V'), + (0x1E14A, 'X'), + (0x1E14E, 'V'), + (0x1E150, 'X'), + (0x1E290, 'V'), + (0x1E2AF, 'X'), + (0x1E2C0, 'V'), + (0x1E2FA, 'X'), + (0x1E2FF, 'V'), + (0x1E300, 'X'), + (0x1E7E0, 'V'), + (0x1E7E7, 'X'), + (0x1E7E8, 'V'), + (0x1E7EC, 'X'), + (0x1E7ED, 'V'), + (0x1E7EF, 'X'), + (0x1E7F0, 'V'), + ] + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E7FF, 'X'), + (0x1E800, 'V'), + (0x1E8C5, 'X'), + (0x1E8C7, 'V'), + (0x1E8D7, 
'X'), + (0x1E900, 'M', '𞤢'), + (0x1E901, 'M', '𞤣'), + (0x1E902, 'M', '𞤤'), + (0x1E903, 'M', '𞤥'), + (0x1E904, 'M', '𞤦'), + (0x1E905, 'M', '𞤧'), + (0x1E906, 'M', '𞤨'), + (0x1E907, 'M', '𞤩'), + (0x1E908, 'M', '𞤪'), + (0x1E909, 'M', '𞤫'), + (0x1E90A, 'M', '𞤬'), + (0x1E90B, 'M', '𞤭'), + (0x1E90C, 'M', '𞤮'), + (0x1E90D, 'M', '𞤯'), + (0x1E90E, 'M', '𞤰'), + (0x1E90F, 'M', '𞤱'), + (0x1E910, 'M', '𞤲'), + (0x1E911, 'M', '𞤳'), + (0x1E912, 'M', '𞤴'), + (0x1E913, 'M', '𞤵'), + (0x1E914, 'M', '𞤶'), + (0x1E915, 'M', '𞤷'), + (0x1E916, 'M', '𞤸'), + (0x1E917, 'M', '𞤹'), + (0x1E918, 'M', '𞤺'), + (0x1E919, 'M', '𞤻'), + (0x1E91A, 'M', '𞤼'), + (0x1E91B, 'M', '𞤽'), + (0x1E91C, 'M', '𞤾'), + (0x1E91D, 'M', '𞤿'), + (0x1E91E, 'M', '𞥀'), + (0x1E91F, 'M', '𞥁'), + (0x1E920, 'M', '𞥂'), + (0x1E921, 'M', '𞥃'), + (0x1E922, 'V'), + (0x1E94C, 'X'), + (0x1E950, 'V'), + (0x1E95A, 'X'), + (0x1E95E, 'V'), + (0x1E960, 'X'), + (0x1EC71, 'V'), + (0x1ECB5, 'X'), + (0x1ED01, 'V'), + (0x1ED3E, 'X'), + (0x1EE00, 'M', 'ا'), + (0x1EE01, 'M', 'ب'), + (0x1EE02, 'M', 'ج'), + (0x1EE03, 'M', 'د'), + (0x1EE04, 'X'), + (0x1EE05, 'M', 'و'), + (0x1EE06, 'M', 'ز'), + (0x1EE07, 'M', 'ح'), + (0x1EE08, 'M', 'ط'), + (0x1EE09, 'M', 'ي'), + (0x1EE0A, 'M', 'ك'), + (0x1EE0B, 'M', 'ل'), + (0x1EE0C, 'M', 'م'), + (0x1EE0D, 'M', 'ن'), + (0x1EE0E, 'M', 'س'), + (0x1EE0F, 'M', 'ع'), + (0x1EE10, 'M', 'ف'), + (0x1EE11, 'M', 'ص'), + (0x1EE12, 'M', 'ق'), + (0x1EE13, 'M', 'ر'), + (0x1EE14, 'M', 'ش'), + (0x1EE15, 'M', 'ت'), + (0x1EE16, 'M', 'ث'), + (0x1EE17, 'M', 'خ'), + (0x1EE18, 'M', 'ذ'), + (0x1EE19, 'M', 'ض'), + (0x1EE1A, 'M', 'ظ'), + (0x1EE1B, 'M', 'غ'), + (0x1EE1C, 'M', 'ٮ'), + (0x1EE1D, 'M', 'ں'), + (0x1EE1E, 'M', 'ڡ'), + (0x1EE1F, 'M', 'ٯ'), + (0x1EE20, 'X'), + (0x1EE21, 'M', 'ب'), + (0x1EE22, 'M', 'ج'), + (0x1EE23, 'X'), + (0x1EE24, 'M', 'ه'), + (0x1EE25, 'X'), + (0x1EE27, 'M', 'ح'), + (0x1EE28, 'X'), + (0x1EE29, 'M', 'ي'), + (0x1EE2A, 'M', 'ك'), + (0x1EE2B, 'M', 'ل'), + (0x1EE2C, 'M', 'م'), + (0x1EE2D, 'M', 'ن'), + (0x1EE2E, 'M', 'س'), + (0x1EE2F, 'M', 'ع'), + (0x1EE30, 'M', 'ف'), + (0x1EE31, 'M', 'ص'), + (0x1EE32, 'M', 'ق'), + (0x1EE33, 'X'), + ] + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE34, 'M', 'ش'), + (0x1EE35, 'M', 'ت'), + (0x1EE36, 'M', 'ث'), + (0x1EE37, 'M', 'خ'), + (0x1EE38, 'X'), + (0x1EE39, 'M', 'ض'), + (0x1EE3A, 'X'), + (0x1EE3B, 'M', 'غ'), + (0x1EE3C, 'X'), + (0x1EE42, 'M', 'ج'), + (0x1EE43, 'X'), + (0x1EE47, 'M', 'ح'), + (0x1EE48, 'X'), + (0x1EE49, 'M', 'ي'), + (0x1EE4A, 'X'), + (0x1EE4B, 'M', 'ل'), + (0x1EE4C, 'X'), + (0x1EE4D, 'M', 'ن'), + (0x1EE4E, 'M', 'س'), + (0x1EE4F, 'M', 'ع'), + (0x1EE50, 'X'), + (0x1EE51, 'M', 'ص'), + (0x1EE52, 'M', 'ق'), + (0x1EE53, 'X'), + (0x1EE54, 'M', 'ش'), + (0x1EE55, 'X'), + (0x1EE57, 'M', 'خ'), + (0x1EE58, 'X'), + (0x1EE59, 'M', 'ض'), + (0x1EE5A, 'X'), + (0x1EE5B, 'M', 'غ'), + (0x1EE5C, 'X'), + (0x1EE5D, 'M', 'ں'), + (0x1EE5E, 'X'), + (0x1EE5F, 'M', 'ٯ'), + (0x1EE60, 'X'), + (0x1EE61, 'M', 'ب'), + (0x1EE62, 'M', 'ج'), + (0x1EE63, 'X'), + (0x1EE64, 'M', 'ه'), + (0x1EE65, 'X'), + (0x1EE67, 'M', 'ح'), + (0x1EE68, 'M', 'ط'), + (0x1EE69, 'M', 'ي'), + (0x1EE6A, 'M', 'ك'), + (0x1EE6B, 'X'), + (0x1EE6C, 'M', 'م'), + (0x1EE6D, 'M', 'ن'), + (0x1EE6E, 'M', 'س'), + (0x1EE6F, 'M', 'ع'), + (0x1EE70, 'M', 'ف'), + (0x1EE71, 'M', 'ص'), + (0x1EE72, 'M', 'ق'), + (0x1EE73, 'X'), + (0x1EE74, 'M', 'ش'), + (0x1EE75, 'M', 'ت'), + (0x1EE76, 'M', 'ث'), + (0x1EE77, 'M', 'خ'), + (0x1EE78, 'X'), + (0x1EE79, 'M', 'ض'), + (0x1EE7A, 'M', 'ظ'), + (0x1EE7B, 'M', 'غ'), + (0x1EE7C, 'M', 'ٮ'), + 
(0x1EE7D, 'X'), + (0x1EE7E, 'M', 'ڡ'), + (0x1EE7F, 'X'), + (0x1EE80, 'M', 'ا'), + (0x1EE81, 'M', 'ب'), + (0x1EE82, 'M', 'ج'), + (0x1EE83, 'M', 'د'), + (0x1EE84, 'M', 'ه'), + (0x1EE85, 'M', 'و'), + (0x1EE86, 'M', 'ز'), + (0x1EE87, 'M', 'ح'), + (0x1EE88, 'M', 'ط'), + (0x1EE89, 'M', 'ي'), + (0x1EE8A, 'X'), + (0x1EE8B, 'M', 'ل'), + (0x1EE8C, 'M', 'م'), + (0x1EE8D, 'M', 'ن'), + (0x1EE8E, 'M', 'س'), + (0x1EE8F, 'M', 'ع'), + (0x1EE90, 'M', 'ف'), + (0x1EE91, 'M', 'ص'), + (0x1EE92, 'M', 'ق'), + (0x1EE93, 'M', 'ر'), + (0x1EE94, 'M', 'ش'), + (0x1EE95, 'M', 'ت'), + (0x1EE96, 'M', 'ث'), + (0x1EE97, 'M', 'خ'), + (0x1EE98, 'M', 'ذ'), + (0x1EE99, 'M', 'ض'), + (0x1EE9A, 'M', 'ظ'), + (0x1EE9B, 'M', 'غ'), + (0x1EE9C, 'X'), + (0x1EEA1, 'M', 'ب'), + (0x1EEA2, 'M', 'ج'), + (0x1EEA3, 'M', 'د'), + (0x1EEA4, 'X'), + (0x1EEA5, 'M', 'و'), + ] + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EEA6, 'M', 'ز'), + (0x1EEA7, 'M', 'ح'), + (0x1EEA8, 'M', 'ط'), + (0x1EEA9, 'M', 'ي'), + (0x1EEAA, 'X'), + (0x1EEAB, 'M', 'ل'), + (0x1EEAC, 'M', 'م'), + (0x1EEAD, 'M', 'ن'), + (0x1EEAE, 'M', 'س'), + (0x1EEAF, 'M', 'ع'), + (0x1EEB0, 'M', 'ف'), + (0x1EEB1, 'M', 'ص'), + (0x1EEB2, 'M', 'ق'), + (0x1EEB3, 'M', 'ر'), + (0x1EEB4, 'M', 'ش'), + (0x1EEB5, 'M', 'ت'), + (0x1EEB6, 'M', 'ث'), + (0x1EEB7, 'M', 'خ'), + (0x1EEB8, 'M', 'ذ'), + (0x1EEB9, 'M', 'ض'), + (0x1EEBA, 'M', 'ظ'), + (0x1EEBB, 'M', 'غ'), + (0x1EEBC, 'X'), + (0x1EEF0, 'V'), + (0x1EEF2, 'X'), + (0x1F000, 'V'), + (0x1F02C, 'X'), + (0x1F030, 'V'), + (0x1F094, 'X'), + (0x1F0A0, 'V'), + (0x1F0AF, 'X'), + (0x1F0B1, 'V'), + (0x1F0C0, 'X'), + (0x1F0C1, 'V'), + (0x1F0D0, 'X'), + (0x1F0D1, 'V'), + (0x1F0F6, 'X'), + (0x1F101, '3', '0,'), + (0x1F102, '3', '1,'), + (0x1F103, '3', '2,'), + (0x1F104, '3', '3,'), + (0x1F105, '3', '4,'), + (0x1F106, '3', '5,'), + (0x1F107, '3', '6,'), + (0x1F108, '3', '7,'), + (0x1F109, '3', '8,'), + (0x1F10A, '3', '9,'), + (0x1F10B, 'V'), + (0x1F110, '3', '(a)'), + (0x1F111, '3', '(b)'), + (0x1F112, '3', '(c)'), + (0x1F113, '3', '(d)'), + (0x1F114, '3', '(e)'), + (0x1F115, '3', '(f)'), + (0x1F116, '3', '(g)'), + (0x1F117, '3', '(h)'), + (0x1F118, '3', '(i)'), + (0x1F119, '3', '(j)'), + (0x1F11A, '3', '(k)'), + (0x1F11B, '3', '(l)'), + (0x1F11C, '3', '(m)'), + (0x1F11D, '3', '(n)'), + (0x1F11E, '3', '(o)'), + (0x1F11F, '3', '(p)'), + (0x1F120, '3', '(q)'), + (0x1F121, '3', '(r)'), + (0x1F122, '3', '(s)'), + (0x1F123, '3', '(t)'), + (0x1F124, '3', '(u)'), + (0x1F125, '3', '(v)'), + (0x1F126, '3', '(w)'), + (0x1F127, '3', '(x)'), + (0x1F128, '3', '(y)'), + (0x1F129, '3', '(z)'), + (0x1F12A, 'M', '〔s〕'), + (0x1F12B, 'M', 'c'), + (0x1F12C, 'M', 'r'), + (0x1F12D, 'M', 'cd'), + (0x1F12E, 'M', 'wz'), + (0x1F12F, 'V'), + (0x1F130, 'M', 'a'), + (0x1F131, 'M', 'b'), + (0x1F132, 'M', 'c'), + (0x1F133, 'M', 'd'), + (0x1F134, 'M', 'e'), + (0x1F135, 'M', 'f'), + (0x1F136, 'M', 'g'), + (0x1F137, 'M', 'h'), + (0x1F138, 'M', 'i'), + (0x1F139, 'M', 'j'), + (0x1F13A, 'M', 'k'), + (0x1F13B, 'M', 'l'), + (0x1F13C, 'M', 'm'), + (0x1F13D, 'M', 'n'), + (0x1F13E, 'M', 'o'), + (0x1F13F, 'M', 'p'), + (0x1F140, 'M', 'q'), + (0x1F141, 'M', 'r'), + (0x1F142, 'M', 's'), + (0x1F143, 'M', 't'), + ] + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F144, 'M', 'u'), + (0x1F145, 'M', 'v'), + (0x1F146, 'M', 'w'), + (0x1F147, 'M', 'x'), + (0x1F148, 'M', 'y'), + (0x1F149, 'M', 'z'), + (0x1F14A, 'M', 'hv'), + (0x1F14B, 'M', 'mv'), + (0x1F14C, 'M', 'sd'), + (0x1F14D, 'M', 'ss'), + (0x1F14E, 'M', 'ppv'), + (0x1F14F, 'M', 
'wc'), + (0x1F150, 'V'), + (0x1F16A, 'M', 'mc'), + (0x1F16B, 'M', 'md'), + (0x1F16C, 'M', 'mr'), + (0x1F16D, 'V'), + (0x1F190, 'M', 'dj'), + (0x1F191, 'V'), + (0x1F1AE, 'X'), + (0x1F1E6, 'V'), + (0x1F200, 'M', 'ほか'), + (0x1F201, 'M', 'ココ'), + (0x1F202, 'M', 'サ'), + (0x1F203, 'X'), + (0x1F210, 'M', '手'), + (0x1F211, 'M', '字'), + (0x1F212, 'M', '双'), + (0x1F213, 'M', 'デ'), + (0x1F214, 'M', '二'), + (0x1F215, 'M', '多'), + (0x1F216, 'M', '解'), + (0x1F217, 'M', '天'), + (0x1F218, 'M', '交'), + (0x1F219, 'M', '映'), + (0x1F21A, 'M', '無'), + (0x1F21B, 'M', '料'), + (0x1F21C, 'M', '前'), + (0x1F21D, 'M', '後'), + (0x1F21E, 'M', '再'), + (0x1F21F, 'M', '新'), + (0x1F220, 'M', '初'), + (0x1F221, 'M', '終'), + (0x1F222, 'M', '生'), + (0x1F223, 'M', '販'), + (0x1F224, 'M', '声'), + (0x1F225, 'M', '吹'), + (0x1F226, 'M', '演'), + (0x1F227, 'M', '投'), + (0x1F228, 'M', '捕'), + (0x1F229, 'M', '一'), + (0x1F22A, 'M', '三'), + (0x1F22B, 'M', '遊'), + (0x1F22C, 'M', '左'), + (0x1F22D, 'M', '中'), + (0x1F22E, 'M', '右'), + (0x1F22F, 'M', '指'), + (0x1F230, 'M', '走'), + (0x1F231, 'M', '打'), + (0x1F232, 'M', '禁'), + (0x1F233, 'M', '空'), + (0x1F234, 'M', '合'), + (0x1F235, 'M', '満'), + (0x1F236, 'M', '有'), + (0x1F237, 'M', '月'), + (0x1F238, 'M', '申'), + (0x1F239, 'M', '割'), + (0x1F23A, 'M', '営'), + (0x1F23B, 'M', '配'), + (0x1F23C, 'X'), + (0x1F240, 'M', '〔本〕'), + (0x1F241, 'M', '〔三〕'), + (0x1F242, 'M', '〔二〕'), + (0x1F243, 'M', '〔安〕'), + (0x1F244, 'M', '〔点〕'), + (0x1F245, 'M', '〔打〕'), + (0x1F246, 'M', '〔盗〕'), + (0x1F247, 'M', '〔勝〕'), + (0x1F248, 'M', '〔敗〕'), + (0x1F249, 'X'), + (0x1F250, 'M', '得'), + (0x1F251, 'M', '可'), + (0x1F252, 'X'), + (0x1F260, 'V'), + (0x1F266, 'X'), + (0x1F300, 'V'), + (0x1F6D8, 'X'), + (0x1F6DD, 'V'), + (0x1F6ED, 'X'), + (0x1F6F0, 'V'), + (0x1F6FD, 'X'), + (0x1F700, 'V'), + (0x1F774, 'X'), + (0x1F780, 'V'), + (0x1F7D9, 'X'), + (0x1F7E0, 'V'), + (0x1F7EC, 'X'), + (0x1F7F0, 'V'), + (0x1F7F1, 'X'), + (0x1F800, 'V'), + ] + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F80C, 'X'), + (0x1F810, 'V'), + (0x1F848, 'X'), + (0x1F850, 'V'), + (0x1F85A, 'X'), + (0x1F860, 'V'), + (0x1F888, 'X'), + (0x1F890, 'V'), + (0x1F8AE, 'X'), + (0x1F8B0, 'V'), + (0x1F8B2, 'X'), + (0x1F900, 'V'), + (0x1FA54, 'X'), + (0x1FA60, 'V'), + (0x1FA6E, 'X'), + (0x1FA70, 'V'), + (0x1FA75, 'X'), + (0x1FA78, 'V'), + (0x1FA7D, 'X'), + (0x1FA80, 'V'), + (0x1FA87, 'X'), + (0x1FA90, 'V'), + (0x1FAAD, 'X'), + (0x1FAB0, 'V'), + (0x1FABB, 'X'), + (0x1FAC0, 'V'), + (0x1FAC6, 'X'), + (0x1FAD0, 'V'), + (0x1FADA, 'X'), + (0x1FAE0, 'V'), + (0x1FAE8, 'X'), + (0x1FAF0, 'V'), + (0x1FAF7, 'X'), + (0x1FB00, 'V'), + (0x1FB93, 'X'), + (0x1FB94, 'V'), + (0x1FBCB, 'X'), + (0x1FBF0, 'M', '0'), + (0x1FBF1, 'M', '1'), + (0x1FBF2, 'M', '2'), + (0x1FBF3, 'M', '3'), + (0x1FBF4, 'M', '4'), + (0x1FBF5, 'M', '5'), + (0x1FBF6, 'M', '6'), + (0x1FBF7, 'M', '7'), + (0x1FBF8, 'M', '8'), + (0x1FBF9, 'M', '9'), + (0x1FBFA, 'X'), + (0x20000, 'V'), + (0x2A6E0, 'X'), + (0x2A700, 'V'), + (0x2B739, 'X'), + (0x2B740, 'V'), + (0x2B81E, 'X'), + (0x2B820, 'V'), + (0x2CEA2, 'X'), + (0x2CEB0, 'V'), + (0x2EBE1, 'X'), + (0x2F800, 'M', '丽'), + (0x2F801, 'M', '丸'), + (0x2F802, 'M', '乁'), + (0x2F803, 'M', '𠄢'), + (0x2F804, 'M', '你'), + (0x2F805, 'M', '侮'), + (0x2F806, 'M', '侻'), + (0x2F807, 'M', '倂'), + (0x2F808, 'M', '偺'), + (0x2F809, 'M', '備'), + (0x2F80A, 'M', '僧'), + (0x2F80B, 'M', '像'), + (0x2F80C, 'M', '㒞'), + (0x2F80D, 'M', '𠘺'), + (0x2F80E, 'M', '免'), + (0x2F80F, 'M', '兔'), + (0x2F810, 'M', '兤'), + (0x2F811, 'M', '具'), + (0x2F812, 'M', '𠔜'), + (0x2F813, 
'M', '㒹'), + (0x2F814, 'M', '內'), + (0x2F815, 'M', '再'), + (0x2F816, 'M', '𠕋'), + (0x2F817, 'M', '冗'), + (0x2F818, 'M', '冤'), + (0x2F819, 'M', '仌'), + (0x2F81A, 'M', '冬'), + (0x2F81B, 'M', '况'), + (0x2F81C, 'M', '𩇟'), + (0x2F81D, 'M', '凵'), + (0x2F81E, 'M', '刃'), + (0x2F81F, 'M', '㓟'), + (0x2F820, 'M', '刻'), + (0x2F821, 'M', '剆'), + (0x2F822, 'M', '割'), + (0x2F823, 'M', '剷'), + (0x2F824, 'M', '㔕'), + (0x2F825, 'M', '勇'), + (0x2F826, 'M', '勉'), + (0x2F827, 'M', '勤'), + (0x2F828, 'M', '勺'), + (0x2F829, 'M', '包'), + ] + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F82A, 'M', '匆'), + (0x2F82B, 'M', '北'), + (0x2F82C, 'M', '卉'), + (0x2F82D, 'M', '卑'), + (0x2F82E, 'M', '博'), + (0x2F82F, 'M', '即'), + (0x2F830, 'M', '卽'), + (0x2F831, 'M', '卿'), + (0x2F834, 'M', '𠨬'), + (0x2F835, 'M', '灰'), + (0x2F836, 'M', '及'), + (0x2F837, 'M', '叟'), + (0x2F838, 'M', '𠭣'), + (0x2F839, 'M', '叫'), + (0x2F83A, 'M', '叱'), + (0x2F83B, 'M', '吆'), + (0x2F83C, 'M', '咞'), + (0x2F83D, 'M', '吸'), + (0x2F83E, 'M', '呈'), + (0x2F83F, 'M', '周'), + (0x2F840, 'M', '咢'), + (0x2F841, 'M', '哶'), + (0x2F842, 'M', '唐'), + (0x2F843, 'M', '啓'), + (0x2F844, 'M', '啣'), + (0x2F845, 'M', '善'), + (0x2F847, 'M', '喙'), + (0x2F848, 'M', '喫'), + (0x2F849, 'M', '喳'), + (0x2F84A, 'M', '嗂'), + (0x2F84B, 'M', '圖'), + (0x2F84C, 'M', '嘆'), + (0x2F84D, 'M', '圗'), + (0x2F84E, 'M', '噑'), + (0x2F84F, 'M', '噴'), + (0x2F850, 'M', '切'), + (0x2F851, 'M', '壮'), + (0x2F852, 'M', '城'), + (0x2F853, 'M', '埴'), + (0x2F854, 'M', '堍'), + (0x2F855, 'M', '型'), + (0x2F856, 'M', '堲'), + (0x2F857, 'M', '報'), + (0x2F858, 'M', '墬'), + (0x2F859, 'M', '𡓤'), + (0x2F85A, 'M', '売'), + (0x2F85B, 'M', '壷'), + (0x2F85C, 'M', '夆'), + (0x2F85D, 'M', '多'), + (0x2F85E, 'M', '夢'), + (0x2F85F, 'M', '奢'), + (0x2F860, 'M', '𡚨'), + (0x2F861, 'M', '𡛪'), + (0x2F862, 'M', '姬'), + (0x2F863, 'M', '娛'), + (0x2F864, 'M', '娧'), + (0x2F865, 'M', '姘'), + (0x2F866, 'M', '婦'), + (0x2F867, 'M', '㛮'), + (0x2F868, 'X'), + (0x2F869, 'M', '嬈'), + (0x2F86A, 'M', '嬾'), + (0x2F86C, 'M', '𡧈'), + (0x2F86D, 'M', '寃'), + (0x2F86E, 'M', '寘'), + (0x2F86F, 'M', '寧'), + (0x2F870, 'M', '寳'), + (0x2F871, 'M', '𡬘'), + (0x2F872, 'M', '寿'), + (0x2F873, 'M', '将'), + (0x2F874, 'X'), + (0x2F875, 'M', '尢'), + (0x2F876, 'M', '㞁'), + (0x2F877, 'M', '屠'), + (0x2F878, 'M', '屮'), + (0x2F879, 'M', '峀'), + (0x2F87A, 'M', '岍'), + (0x2F87B, 'M', '𡷤'), + (0x2F87C, 'M', '嵃'), + (0x2F87D, 'M', '𡷦'), + (0x2F87E, 'M', '嵮'), + (0x2F87F, 'M', '嵫'), + (0x2F880, 'M', '嵼'), + (0x2F881, 'M', '巡'), + (0x2F882, 'M', '巢'), + (0x2F883, 'M', '㠯'), + (0x2F884, 'M', '巽'), + (0x2F885, 'M', '帨'), + (0x2F886, 'M', '帽'), + (0x2F887, 'M', '幩'), + (0x2F888, 'M', '㡢'), + (0x2F889, 'M', '𢆃'), + (0x2F88A, 'M', '㡼'), + (0x2F88B, 'M', '庰'), + (0x2F88C, 'M', '庳'), + (0x2F88D, 'M', '庶'), + (0x2F88E, 'M', '廊'), + (0x2F88F, 'M', '𪎒'), + (0x2F890, 'M', '廾'), + (0x2F891, 'M', '𢌱'), + ] + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F893, 'M', '舁'), + (0x2F894, 'M', '弢'), + (0x2F896, 'M', '㣇'), + (0x2F897, 'M', '𣊸'), + (0x2F898, 'M', '𦇚'), + (0x2F899, 'M', '形'), + (0x2F89A, 'M', '彫'), + (0x2F89B, 'M', '㣣'), + (0x2F89C, 'M', '徚'), + (0x2F89D, 'M', '忍'), + (0x2F89E, 'M', '志'), + (0x2F89F, 'M', '忹'), + (0x2F8A0, 'M', '悁'), + (0x2F8A1, 'M', '㤺'), + (0x2F8A2, 'M', '㤜'), + (0x2F8A3, 'M', '悔'), + (0x2F8A4, 'M', '𢛔'), + (0x2F8A5, 'M', '惇'), + (0x2F8A6, 'M', '慈'), + (0x2F8A7, 'M', '慌'), + (0x2F8A8, 'M', '慎'), + (0x2F8A9, 'M', '慌'), + (0x2F8AA, 'M', '慺'), + (0x2F8AB, 'M', '憎'), + (0x2F8AC, 'M', 
'憲'), + (0x2F8AD, 'M', '憤'), + (0x2F8AE, 'M', '憯'), + (0x2F8AF, 'M', '懞'), + (0x2F8B0, 'M', '懲'), + (0x2F8B1, 'M', '懶'), + (0x2F8B2, 'M', '成'), + (0x2F8B3, 'M', '戛'), + (0x2F8B4, 'M', '扝'), + (0x2F8B5, 'M', '抱'), + (0x2F8B6, 'M', '拔'), + (0x2F8B7, 'M', '捐'), + (0x2F8B8, 'M', '𢬌'), + (0x2F8B9, 'M', '挽'), + (0x2F8BA, 'M', '拼'), + (0x2F8BB, 'M', '捨'), + (0x2F8BC, 'M', '掃'), + (0x2F8BD, 'M', '揤'), + (0x2F8BE, 'M', '𢯱'), + (0x2F8BF, 'M', '搢'), + (0x2F8C0, 'M', '揅'), + (0x2F8C1, 'M', '掩'), + (0x2F8C2, 'M', '㨮'), + (0x2F8C3, 'M', '摩'), + (0x2F8C4, 'M', '摾'), + (0x2F8C5, 'M', '撝'), + (0x2F8C6, 'M', '摷'), + (0x2F8C7, 'M', '㩬'), + (0x2F8C8, 'M', '敏'), + (0x2F8C9, 'M', '敬'), + (0x2F8CA, 'M', '𣀊'), + (0x2F8CB, 'M', '旣'), + (0x2F8CC, 'M', '書'), + (0x2F8CD, 'M', '晉'), + (0x2F8CE, 'M', '㬙'), + (0x2F8CF, 'M', '暑'), + (0x2F8D0, 'M', '㬈'), + (0x2F8D1, 'M', '㫤'), + (0x2F8D2, 'M', '冒'), + (0x2F8D3, 'M', '冕'), + (0x2F8D4, 'M', '最'), + (0x2F8D5, 'M', '暜'), + (0x2F8D6, 'M', '肭'), + (0x2F8D7, 'M', '䏙'), + (0x2F8D8, 'M', '朗'), + (0x2F8D9, 'M', '望'), + (0x2F8DA, 'M', '朡'), + (0x2F8DB, 'M', '杞'), + (0x2F8DC, 'M', '杓'), + (0x2F8DD, 'M', '𣏃'), + (0x2F8DE, 'M', '㭉'), + (0x2F8DF, 'M', '柺'), + (0x2F8E0, 'M', '枅'), + (0x2F8E1, 'M', '桒'), + (0x2F8E2, 'M', '梅'), + (0x2F8E3, 'M', '𣑭'), + (0x2F8E4, 'M', '梎'), + (0x2F8E5, 'M', '栟'), + (0x2F8E6, 'M', '椔'), + (0x2F8E7, 'M', '㮝'), + (0x2F8E8, 'M', '楂'), + (0x2F8E9, 'M', '榣'), + (0x2F8EA, 'M', '槪'), + (0x2F8EB, 'M', '檨'), + (0x2F8EC, 'M', '𣚣'), + (0x2F8ED, 'M', '櫛'), + (0x2F8EE, 'M', '㰘'), + (0x2F8EF, 'M', '次'), + (0x2F8F0, 'M', '𣢧'), + (0x2F8F1, 'M', '歔'), + (0x2F8F2, 'M', '㱎'), + (0x2F8F3, 'M', '歲'), + (0x2F8F4, 'M', '殟'), + (0x2F8F5, 'M', '殺'), + (0x2F8F6, 'M', '殻'), + (0x2F8F7, 'M', '𣪍'), + ] + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F8F8, 'M', '𡴋'), + (0x2F8F9, 'M', '𣫺'), + (0x2F8FA, 'M', '汎'), + (0x2F8FB, 'M', '𣲼'), + (0x2F8FC, 'M', '沿'), + (0x2F8FD, 'M', '泍'), + (0x2F8FE, 'M', '汧'), + (0x2F8FF, 'M', '洖'), + (0x2F900, 'M', '派'), + (0x2F901, 'M', '海'), + (0x2F902, 'M', '流'), + (0x2F903, 'M', '浩'), + (0x2F904, 'M', '浸'), + (0x2F905, 'M', '涅'), + (0x2F906, 'M', '𣴞'), + (0x2F907, 'M', '洴'), + (0x2F908, 'M', '港'), + (0x2F909, 'M', '湮'), + (0x2F90A, 'M', '㴳'), + (0x2F90B, 'M', '滋'), + (0x2F90C, 'M', '滇'), + (0x2F90D, 'M', '𣻑'), + (0x2F90E, 'M', '淹'), + (0x2F90F, 'M', '潮'), + (0x2F910, 'M', '𣽞'), + (0x2F911, 'M', '𣾎'), + (0x2F912, 'M', '濆'), + (0x2F913, 'M', '瀹'), + (0x2F914, 'M', '瀞'), + (0x2F915, 'M', '瀛'), + (0x2F916, 'M', '㶖'), + (0x2F917, 'M', '灊'), + (0x2F918, 'M', '災'), + (0x2F919, 'M', '灷'), + (0x2F91A, 'M', '炭'), + (0x2F91B, 'M', '𠔥'), + (0x2F91C, 'M', '煅'), + (0x2F91D, 'M', '𤉣'), + (0x2F91E, 'M', '熜'), + (0x2F91F, 'X'), + (0x2F920, 'M', '爨'), + (0x2F921, 'M', '爵'), + (0x2F922, 'M', '牐'), + (0x2F923, 'M', '𤘈'), + (0x2F924, 'M', '犀'), + (0x2F925, 'M', '犕'), + (0x2F926, 'M', '𤜵'), + (0x2F927, 'M', '𤠔'), + (0x2F928, 'M', '獺'), + (0x2F929, 'M', '王'), + (0x2F92A, 'M', '㺬'), + (0x2F92B, 'M', '玥'), + (0x2F92C, 'M', '㺸'), + (0x2F92E, 'M', '瑇'), + (0x2F92F, 'M', '瑜'), + (0x2F930, 'M', '瑱'), + (0x2F931, 'M', '璅'), + (0x2F932, 'M', '瓊'), + (0x2F933, 'M', '㼛'), + (0x2F934, 'M', '甤'), + (0x2F935, 'M', '𤰶'), + (0x2F936, 'M', '甾'), + (0x2F937, 'M', '𤲒'), + (0x2F938, 'M', '異'), + (0x2F939, 'M', '𢆟'), + (0x2F93A, 'M', '瘐'), + (0x2F93B, 'M', '𤾡'), + (0x2F93C, 'M', '𤾸'), + (0x2F93D, 'M', '𥁄'), + (0x2F93E, 'M', '㿼'), + (0x2F93F, 'M', '䀈'), + (0x2F940, 'M', '直'), + (0x2F941, 'M', '𥃳'), + (0x2F942, 'M', '𥃲'), + (0x2F943, 'M', '𥄙'), + (0x2F944, 'M', 
'𥄳'), + (0x2F945, 'M', '眞'), + (0x2F946, 'M', '真'), + (0x2F948, 'M', '睊'), + (0x2F949, 'M', '䀹'), + (0x2F94A, 'M', '瞋'), + (0x2F94B, 'M', '䁆'), + (0x2F94C, 'M', '䂖'), + (0x2F94D, 'M', '𥐝'), + (0x2F94E, 'M', '硎'), + (0x2F94F, 'M', '碌'), + (0x2F950, 'M', '磌'), + (0x2F951, 'M', '䃣'), + (0x2F952, 'M', '𥘦'), + (0x2F953, 'M', '祖'), + (0x2F954, 'M', '𥚚'), + (0x2F955, 'M', '𥛅'), + (0x2F956, 'M', '福'), + (0x2F957, 'M', '秫'), + (0x2F958, 'M', '䄯'), + (0x2F959, 'M', '穀'), + (0x2F95A, 'M', '穊'), + (0x2F95B, 'M', '穏'), + (0x2F95C, 'M', '𥥼'), + (0x2F95D, 'M', '𥪧'), + ] + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F95F, 'X'), + (0x2F960, 'M', '䈂'), + (0x2F961, 'M', '𥮫'), + (0x2F962, 'M', '篆'), + (0x2F963, 'M', '築'), + (0x2F964, 'M', '䈧'), + (0x2F965, 'M', '𥲀'), + (0x2F966, 'M', '糒'), + (0x2F967, 'M', '䊠'), + (0x2F968, 'M', '糨'), + (0x2F969, 'M', '糣'), + (0x2F96A, 'M', '紀'), + (0x2F96B, 'M', '𥾆'), + (0x2F96C, 'M', '絣'), + (0x2F96D, 'M', '䌁'), + (0x2F96E, 'M', '緇'), + (0x2F96F, 'M', '縂'), + (0x2F970, 'M', '繅'), + (0x2F971, 'M', '䌴'), + (0x2F972, 'M', '𦈨'), + (0x2F973, 'M', '𦉇'), + (0x2F974, 'M', '䍙'), + (0x2F975, 'M', '𦋙'), + (0x2F976, 'M', '罺'), + (0x2F977, 'M', '𦌾'), + (0x2F978, 'M', '羕'), + (0x2F979, 'M', '翺'), + (0x2F97A, 'M', '者'), + (0x2F97B, 'M', '𦓚'), + (0x2F97C, 'M', '𦔣'), + (0x2F97D, 'M', '聠'), + (0x2F97E, 'M', '𦖨'), + (0x2F97F, 'M', '聰'), + (0x2F980, 'M', '𣍟'), + (0x2F981, 'M', '䏕'), + (0x2F982, 'M', '育'), + (0x2F983, 'M', '脃'), + (0x2F984, 'M', '䐋'), + (0x2F985, 'M', '脾'), + (0x2F986, 'M', '媵'), + (0x2F987, 'M', '𦞧'), + (0x2F988, 'M', '𦞵'), + (0x2F989, 'M', '𣎓'), + (0x2F98A, 'M', '𣎜'), + (0x2F98B, 'M', '舁'), + (0x2F98C, 'M', '舄'), + (0x2F98D, 'M', '辞'), + (0x2F98E, 'M', '䑫'), + (0x2F98F, 'M', '芑'), + (0x2F990, 'M', '芋'), + (0x2F991, 'M', '芝'), + (0x2F992, 'M', '劳'), + (0x2F993, 'M', '花'), + (0x2F994, 'M', '芳'), + (0x2F995, 'M', '芽'), + (0x2F996, 'M', '苦'), + (0x2F997, 'M', '𦬼'), + (0x2F998, 'M', '若'), + (0x2F999, 'M', '茝'), + (0x2F99A, 'M', '荣'), + (0x2F99B, 'M', '莭'), + (0x2F99C, 'M', '茣'), + (0x2F99D, 'M', '莽'), + (0x2F99E, 'M', '菧'), + (0x2F99F, 'M', '著'), + (0x2F9A0, 'M', '荓'), + (0x2F9A1, 'M', '菊'), + (0x2F9A2, 'M', '菌'), + (0x2F9A3, 'M', '菜'), + (0x2F9A4, 'M', '𦰶'), + (0x2F9A5, 'M', '𦵫'), + (0x2F9A6, 'M', '𦳕'), + (0x2F9A7, 'M', '䔫'), + (0x2F9A8, 'M', '蓱'), + (0x2F9A9, 'M', '蓳'), + (0x2F9AA, 'M', '蔖'), + (0x2F9AB, 'M', '𧏊'), + (0x2F9AC, 'M', '蕤'), + (0x2F9AD, 'M', '𦼬'), + (0x2F9AE, 'M', '䕝'), + (0x2F9AF, 'M', '䕡'), + (0x2F9B0, 'M', '𦾱'), + (0x2F9B1, 'M', '𧃒'), + (0x2F9B2, 'M', '䕫'), + (0x2F9B3, 'M', '虐'), + (0x2F9B4, 'M', '虜'), + (0x2F9B5, 'M', '虧'), + (0x2F9B6, 'M', '虩'), + (0x2F9B7, 'M', '蚩'), + (0x2F9B8, 'M', '蚈'), + (0x2F9B9, 'M', '蜎'), + (0x2F9BA, 'M', '蛢'), + (0x2F9BB, 'M', '蝹'), + (0x2F9BC, 'M', '蜨'), + (0x2F9BD, 'M', '蝫'), + (0x2F9BE, 'M', '螆'), + (0x2F9BF, 'X'), + (0x2F9C0, 'M', '蟡'), + (0x2F9C1, 'M', '蠁'), + (0x2F9C2, 'M', '䗹'), + ] + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F9C3, 'M', '衠'), + (0x2F9C4, 'M', '衣'), + (0x2F9C5, 'M', '𧙧'), + (0x2F9C6, 'M', '裗'), + (0x2F9C7, 'M', '裞'), + (0x2F9C8, 'M', '䘵'), + (0x2F9C9, 'M', '裺'), + (0x2F9CA, 'M', '㒻'), + (0x2F9CB, 'M', '𧢮'), + (0x2F9CC, 'M', '𧥦'), + (0x2F9CD, 'M', '䚾'), + (0x2F9CE, 'M', '䛇'), + (0x2F9CF, 'M', '誠'), + (0x2F9D0, 'M', '諭'), + (0x2F9D1, 'M', '變'), + (0x2F9D2, 'M', '豕'), + (0x2F9D3, 'M', '𧲨'), + (0x2F9D4, 'M', '貫'), + (0x2F9D5, 'M', '賁'), + (0x2F9D6, 'M', '贛'), + (0x2F9D7, 'M', '起'), + (0x2F9D8, 'M', '𧼯'), + (0x2F9D9, 'M', '𠠄'), + 
(0x2F9DA, 'M', '跋'), + (0x2F9DB, 'M', '趼'), + (0x2F9DC, 'M', '跰'), + (0x2F9DD, 'M', '𠣞'), + (0x2F9DE, 'M', '軔'), + (0x2F9DF, 'M', '輸'), + (0x2F9E0, 'M', '𨗒'), + (0x2F9E1, 'M', '𨗭'), + (0x2F9E2, 'M', '邔'), + (0x2F9E3, 'M', '郱'), + (0x2F9E4, 'M', '鄑'), + (0x2F9E5, 'M', '𨜮'), + (0x2F9E6, 'M', '鄛'), + (0x2F9E7, 'M', '鈸'), + (0x2F9E8, 'M', '鋗'), + (0x2F9E9, 'M', '鋘'), + (0x2F9EA, 'M', '鉼'), + (0x2F9EB, 'M', '鏹'), + (0x2F9EC, 'M', '鐕'), + (0x2F9ED, 'M', '𨯺'), + (0x2F9EE, 'M', '開'), + (0x2F9EF, 'M', '䦕'), + (0x2F9F0, 'M', '閷'), + (0x2F9F1, 'M', '𨵷'), + (0x2F9F2, 'M', '䧦'), + (0x2F9F3, 'M', '雃'), + (0x2F9F4, 'M', '嶲'), + (0x2F9F5, 'M', '霣'), + (0x2F9F6, 'M', '𩅅'), + (0x2F9F7, 'M', '𩈚'), + (0x2F9F8, 'M', '䩮'), + (0x2F9F9, 'M', '䩶'), + (0x2F9FA, 'M', '韠'), + (0x2F9FB, 'M', '𩐊'), + (0x2F9FC, 'M', '䪲'), + (0x2F9FD, 'M', '𩒖'), + (0x2F9FE, 'M', '頋'), + (0x2FA00, 'M', '頩'), + (0x2FA01, 'M', '𩖶'), + (0x2FA02, 'M', '飢'), + (0x2FA03, 'M', '䬳'), + (0x2FA04, 'M', '餩'), + (0x2FA05, 'M', '馧'), + (0x2FA06, 'M', '駂'), + (0x2FA07, 'M', '駾'), + (0x2FA08, 'M', '䯎'), + (0x2FA09, 'M', '𩬰'), + (0x2FA0A, 'M', '鬒'), + (0x2FA0B, 'M', '鱀'), + (0x2FA0C, 'M', '鳽'), + (0x2FA0D, 'M', '䳎'), + (0x2FA0E, 'M', '䳭'), + (0x2FA0F, 'M', '鵧'), + (0x2FA10, 'M', '𪃎'), + (0x2FA11, 'M', '䳸'), + (0x2FA12, 'M', '𪄅'), + (0x2FA13, 'M', '𪈎'), + (0x2FA14, 'M', '𪊑'), + (0x2FA15, 'M', '麻'), + (0x2FA16, 'M', '䵖'), + (0x2FA17, 'M', '黹'), + (0x2FA18, 'M', '黾'), + (0x2FA19, 'M', '鼅'), + (0x2FA1A, 'M', '鼏'), + (0x2FA1B, 'M', '鼖'), + (0x2FA1C, 'M', '鼻'), + (0x2FA1D, 'M', '𪘀'), + (0x2FA1E, 'X'), + (0x30000, 'V'), + (0x3134B, 'X'), + (0xE0100, 'I'), + (0xE01F0, 'X'), + ] + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() + + _seg_79() + + _seg_80() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] 
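The table assembled above is built for binary search: each entry is a `(start_codepoint, status)` or `(start_codepoint, status, mapping)` tuple, sorted by start code point, and a lookup finds the last entry whose start is at or below the code point in question. A minimal sketch of such a lookup follows; the helper name `uts46_status` is hypothetical, and the import path assumes this vendored module is idna's generated `uts46data.py`:

```python
import bisect

from idna.uts46data import uts46data  # the tuple assembled above (assumed path)


def uts46_status(code_point):
    """Return (status, mapping or None) for a code point.

    'Z' sorts after every status letter used in the table ('3', 'D',
    'I', 'M', 'V', 'X'), so bisect_left lands just past the entry
    covering code_point, and stepping back one selects it.
    """
    row = uts46data[bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
    return row[1], (row[2] if len(row) == 3 else None)


# (0x1D7D8, 'M', '0') appears in the table above: the mathematical
# double-struck digit zero is Mapped to the plain ASCII digit '0'.
assert uts46_status(0x1D7D8) == ("M", "0")
```

The two-or-three-tuple shape is also why every `_seg_NN` function above is annotated as returning `List[Union[Tuple[int, str], Tuple[int, str, str]]]`.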
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000..2f9ab90
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+Poetry 1.6.1
\ No newline at end of file
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/LICENSE
new file mode 100644
index 0000000..31ecdfb
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/LICENSE
@@ -0,0 +1,19 @@
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/METADATA
new file mode 100644
index 0000000..c078a75
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/METADATA
@@ -0,0 +1,78 @@
+Metadata-Version: 2.1
+Name: iniconfig
+Version: 1.1.1
+Summary: iniconfig: brain-dead simple config-ini parsing
+Home-page: http://github.com/RonnyPfannschmidt/iniconfig
+Author: Ronny Pfannschmidt, Holger Krekel
+Author-email: opensource@ronnypfannschmidt.de, holger.krekel@gmail.com
+License: MIT License
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: cygwin
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+
+iniconfig: brain-dead simple parsing of ini files
+=======================================================
+
+iniconfig is a small, simple INI-file parser module
+with a unique set of features:
+
+* tested from Python 2.4 through Python 3.2, plus Jython and PyPy
+* maintains order of sections and entries
+* supports multi-line values with or without line-continuations
+* supports "#" comments everywhere
+* raises errors with proper line numbers (see the short sketch after this list)
+* no bells and whistles like automatic substitutions
+* raises an error if two sections have the same name
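The error-reporting bullets are easiest to see in action. A minimal sketch, checked against the `iniconfig/__init__.py` source added later in this diff; the `bad` string and the `example.ini` file name are illustrative only:

```python
import iniconfig

# Two sections with the same name raise ParseError; the exception stores
# a 0-based line number and renders it 1-based in its message.
bad = "[a]\nx = 1\n[a]\n"
try:
    iniconfig.IniConfig("example.ini", data=bad)
except iniconfig.ParseError as err:
    print(err)  # example.ini:3: duplicate section 'a'
```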
+
+If you encounter issues or have feature requests, please report them to:
+
+    http://github.com/RonnyPfannschmidt/iniconfig/issues
+
+Basic Example
+===================================
+
+If you have an ini file like this::
+
+    # content of example.ini
+    [section1] # comment
+    name1=value1 # comment
+    name1b=value1,value2 # comment
+
+    [section2]
+    name2=
+        line1
+        line2
+
+then you can do::
+
+    >>> import iniconfig
+    >>> ini = iniconfig.IniConfig("example.ini")
+    >>> ini['section1']['name1']  # raises KeyError if it doesn't exist
+    'value1'
+    >>> ini.get('section1', 'name1b', [], lambda x: x.split(","))
+    ['value1', 'value2']
+    >>> ini.get('section1', 'notexist', [], lambda x: x.split(","))
+    []
+    >>> [x.name for x in list(ini)]
+    ['section1', 'section2']
+    >>> list(list(ini)[0].items())
+    [('name1', 'value1'), ('name1b', 'value1,value2')]
+    >>> 'section1' in ini
+    True
+    >>> 'nonexistentsection' in ini
+    False
+
+
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/RECORD
new file mode 100644
index 0000000..a666252
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/RECORD
@@ -0,0 +1,9 @@
+iniconfig/__init__.py,sha256=-pBe5AF_6aAwo1CxJQ8i_zJq6ejc6IxHta7qk2tNJhY,5208
+iniconfig/__init__.pyi,sha256=-4KOctzq28ohRmTZsqlH6aylyFqsNKxYqtk1dteypi4,1205
+iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+iniconfig-1.1.1.dist-info/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061
+iniconfig-1.1.1.dist-info/METADATA,sha256=_4-oFKpRXuZv5rzepScpXRwhq6DzqsgbnA5ZpgMUMcs,2405
+iniconfig-1.1.1.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
+iniconfig-1.1.1.dist-info/top_level.txt,sha256=7KfM0fugdlToj9UW7enKXk2HYALQD8qHiyKtjhSzgN8,10
+iniconfig-1.1.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12
+iniconfig-1.1.1.dist-info/RECORD,,
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/WHEEL
new file mode 100644
index 0000000..6d38aa0
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/top_level.txt
new file mode 100644
index 0000000..9dda536
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig-1.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+iniconfig
diff --git a/myenv/lib/python3.9/site-packages/iniconfig/__init__.py b/myenv/lib/python3.9/site-packages/iniconfig/__init__.py
new file mode 100644
index 0000000..6ad9eaf
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/iniconfig/__init__.py
@@ -0,0 +1,165 @@
+""" brain-dead simple parser for ini-style files.
+(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +__all__ = ['IniConfig', 'ParseError'] + +COMMENTCHARS = "#;" + + +class ParseError(Exception): + def __init__(self, path, lineno, msg): + Exception.__init__(self, path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self): + return "%s:%s: %s" % (self.path, self.lineno+1, self.msg) + + +class SectionWrapper(object): + def __init__(self, config, name): + self.config = config + self.name = name + + def lineof(self, name): + return self.config.lineof(self.name, name) + + def get(self, key, default=None, convert=str): + return self.config.get(self.name, key, + convert=convert, default=default) + + def __getitem__(self, key): + return self.config.sections[self.name][key] + + def __iter__(self): + section = self.config.sections.get(self.name, []) + + def lineof(key): + return self.config.lineof(self.name, key) + for name in sorted(section, key=lineof): + yield name + + def items(self): + for name in self: + yield name, self[name] + + +class IniConfig(object): + def __init__(self, path, data=None): + self.path = str(path) # convenience + if data is None: + f = open(self.path) + try: + tokens = self._parse(iter(f)) + finally: + f.close() + else: + tokens = self._parse(data.splitlines(True)) + + self._sources = {} + self.sections = {} + + for lineno, section, name, value in tokens: + if section is None: + self._raise(lineno, 'no section header defined') + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + self._raise(lineno, 'duplicate section %r' % (section, )) + self.sections[section] = {} + else: + if name in self.sections[section]: + self._raise(lineno, 'duplicate name %r' % (name, )) + self.sections[section][name] = value + + def _raise(self, lineno, msg): + raise ParseError(self.path, lineno, msg) + + def _parse(self, line_iter): + result = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = self._parseline(line, lineno) + # new value + if name is not None and data is not None: + result.append((lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + self._raise(lineno, 'empty section name') + section = name + result.append((lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + self._raise(lineno, 'unexpected value continuation') + last = result.pop() + last_name, last_data = last[-2:] + if last_name is None: + self._raise(lineno, 'unexpected value continuation') + + if last_data: + data = '%s\n%s' % (last_data, data) + result.append(last[:-1] + (data,)) + return result + + def _parseline(self, line, lineno): + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == '[': + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split('=', 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + self._raise(lineno, 'unexpected line: %r' % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + def lineof(self, section, name=None): + lineno = self._sources.get((section, name)) + if lineno is not None: + return lineno + 1 + + def get(self, section, name, 
default=None, convert=str): + try: + return convert(self.sections[section][name]) + except KeyError: + return default + + def __getitem__(self, name): + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self): + for name in sorted(self.sections, key=self.lineof): + yield SectionWrapper(self, name) + + def __contains__(self, arg): + return arg in self.sections + + +def iscommentline(line): + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/myenv/lib/python3.9/site-packages/iniconfig/__init__.pyi b/myenv/lib/python3.9/site-packages/iniconfig/__init__.pyi new file mode 100644 index 0000000..b6284be --- /dev/null +++ b/myenv/lib/python3.9/site-packages/iniconfig/__init__.pyi @@ -0,0 +1,31 @@ +from typing import Callable, Iterator, Mapping, Optional, Tuple, TypeVar, Union +from typing_extensions import Final + +_D = TypeVar('_D') +_T = TypeVar('_T') + +class ParseError(Exception): + # Private __init__. + path: Final[str] + lineno: Final[int] + msg: Final[str] + +class SectionWrapper: + # Private __init__. + config: Final[IniConfig] + name: Final[str] + def __getitem__(self, key: str) -> str: ... + def __iter__(self) -> Iterator[str]: ... + def get(self, key: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def items(self) -> Iterator[Tuple[str, str]]: ... + def lineof(self, name: str) -> Optional[int]: ... + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + def __init__(self, path: str, data: Optional[str] = None): ... + def __contains__(self, arg: str) -> bool: ... + def __getitem__(self, name: str) -> SectionWrapper: ... + def __iter__(self) -> Iterator[SectionWrapper]: ... + def get(self, section: str, name: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def lineof(self, section: str, name: Optional[str] = ...) -> Optional[int]: ... diff --git a/myenv/lib/python3.9/site-packages/iniconfig/py.typed b/myenv/lib/python3.9/site-packages/iniconfig/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/LICENSE new file mode 100644 index 0000000..b5083a5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Timothy Edmund Crosley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/METADATA new file mode 100644 index 0000000..f0e1eaf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/METADATA @@ -0,0 +1,397 @@ +Metadata-Version: 2.1 +Name: isort +Version: 5.10.1 +Summary: A Python utility / library to sort Python imports. +Home-page: https://pycqa.github.io/isort/ +License: MIT +Keywords: Refactor,Lint,Imports,Sort,Clean +Author: Timothy Crosley +Author-email: timothy.crosley@gmail.com +Requires-Python: >=3.6.1,<4.0 +Classifier: Development Status :: 6 - Mature +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Provides-Extra: colors +Provides-Extra: pipfile_deprecated_finder +Provides-Extra: plugins +Provides-Extra: requirements_deprecated_finder +Requires-Dist: colorama (>=0.4.3,<0.5.0); extra == "colors" +Requires-Dist: pip-api; extra == "requirements_deprecated_finder" +Requires-Dist: pipreqs; extra == "pipfile_deprecated_finder" or extra == "requirements_deprecated_finder" +Requires-Dist: requirementslib; extra == "pipfile_deprecated_finder" +Requires-Dist: setuptools; extra == "plugins" +Project-URL: Changelog, https://github.com/pycqa/isort/blob/main/CHANGELOG.md +Project-URL: Documentation, https://pycqa.github.io/isort/ +Project-URL: Repository, https://github.com/pycqa/isort +Description-Content-Type: text/markdown + +[![isort - isort your imports, so you don't have to.](https://raw.githubusercontent.com/pycqa/isort/main/art/logo_large.png)](https://pycqa.github.io/isort/) + +------------------------------------------------------------------------ + +[![PyPI version](https://badge.fury.io/py/isort.svg)](https://badge.fury.io/py/isort) +[![Test Status](https://github.com/pycqa/isort/workflows/Test/badge.svg?branch=develop)](https://github.com/pycqa/isort/actions?query=workflow%3ATest) +[![Lint Status](https://github.com/pycqa/isort/workflows/Lint/badge.svg?branch=develop)](https://github.com/pycqa/isort/actions?query=workflow%3ALint) +[![Code coverage Status](https://codecov.io/gh/pycqa/isort/branch/main/graph/badge.svg)](https://codecov.io/gh/pycqa/isort) +[![License](https://img.shields.io/github/license/mashape/apistatus.svg)](https://pypi.org/project/isort/) +[![Join the chat at 
https://gitter.im/timothycrosley/isort](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/timothycrosley/isort?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Downloads](https://pepy.tech/badge/isort)](https://pepy.tech/project/isort)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
+[![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gh/pycqa/isort/?ref=repository-badge)
+_________________
+
+[Read Latest Documentation](https://pycqa.github.io/isort/) - [Browse GitHub Code Repository](https://github.com/pycqa/isort/)
+_________________
+
+isort your imports, so you don't have to.
+
+isort is a Python utility / library to sort imports alphabetically and
+automatically separate them into sections and by type. It provides a
+command line utility, a Python library, and [plugins for various
+editors](https://github.com/pycqa/isort/wiki/isort-Plugins) to
+quickly sort all your imports. It requires Python 3.6+ to run but
+supports formatting Python 2 code too.
+
+- [Try isort now from your browser!](https://pycqa.github.io/isort/docs/quick_start/0.-try.html)
+- [Using black? See the isort and black compatibility guide.](https://pycqa.github.io/isort/docs/configuration/black_compatibility.html)
+- [isort has official support for pre-commit!](https://pycqa.github.io/isort/docs/configuration/pre-commit.html)
+
+![Example Usage](https://raw.github.com/pycqa/isort/main/example.gif)
+
+Before isort:
+
+```python
+from my_lib import Object
+
+import os
+
+from my_lib import Object3
+
+from my_lib import Object2
+
+import sys
+
+from third_party import lib15, lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, lib10, lib11, lib12, lib13, lib14
+
+import sys
+
+from __future__ import absolute_import
+
+from third_party import lib3
+
+print("Hey")
+print("yo")
+```
+
+After isort:
+
+```python
+from __future__ import absolute_import
+
+import os
+import sys
+
+from third_party import (lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8,
+                         lib9, lib10, lib11, lib12, lib13, lib14, lib15)
+
+from my_lib import Object, Object2, Object3
+
+print("Hey")
+print("yo")
+```
+
+## Installing isort
+
+Installing isort is as simple as:
+
+```bash
+pip install isort
+```
+
+Install isort with requirements.txt support:
+
+```bash
+pip install isort[requirements_deprecated_finder]
+```
+
+Install isort with Pipfile support:
+
+```bash
+pip install isort[pipfile_deprecated_finder]
+```
+
+Install isort with support for both formats:
+
+```bash
+pip install isort[requirements_deprecated_finder,pipfile_deprecated_finder]
+```
+
+## Using isort
+
+**From the command line**:
+
+To run on specific files:
+
+```bash
+isort mypythonfile.py mypythonfile2.py
+```
+
+To apply recursively:
+
+```bash
+isort .
+```
+
+If [globstar](https://www.gnu.org/software/bash/manual/html_node/The-Shopt-Builtin.html)
+is enabled, `isort .` is equivalent to:
+
+```bash
+isort **/*.py
+```
+
+To view proposed changes without applying them:
+
+```bash
+isort mypythonfile.py --diff
+```
+
+Finally, to atomically run isort against a project, only applying
+changes if they don't introduce syntax errors:
+
+```bash
+isort --atomic .
+```
+
+(Note: this is disabled by default, as it prevents isort from
+running against code written using a different version of Python.)
+
+**From within Python**:
+
+```python
+import isort
+
+isort.file("pythonfile.py")
+```
+
+or:
+
+```python
+import isort
+
+sorted_code = isort.code("import b\nimport a\n")
+```
+
+## Installing isort for your preferred text editor
+
+Several plugins have been written that let you use isort from within a
+variety of text editors. You can find a full list of them [on the isort
+wiki](https://github.com/pycqa/isort/wiki/isort-Plugins).
+Additionally, I will enthusiastically accept pull requests that include
+plugins for other text editors and add documentation for them as I am
+notified.
+
+## Multi line output modes
+
+The `multi_line_output` setting defines how `from` imports wrap when
+they extend past the `line_length` limit; it has [12 possible settings](https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html).
+
+## Indentation
+
+To change how continuation indents appear, simply change the `indent`
+property to one of the following accepted formats:
+
+- The number of spaces you would like. For example, 4 gives standard
+  4-space indentation.
+- Tab
+- A verbatim string with quotes around it.
+
+For example:
+
+```python
+" "
+```
+
+is equivalent to 4.
+
+For the import styles that use parentheses, you can control whether or
+not to include a trailing comma after the last import with the
+`include_trailing_comma` option (defaults to `False`).
+
+## Intelligently Balanced Multi-line Imports
+
+As of isort 3.1.0, support for balanced multi-line imports has been
+added. With this enabled, isort will dynamically change the import length
+to the one that produces the most balanced grid, while staying below the
+maximum import length defined.
+
+Example:
+
+```python
+from __future__ import (absolute_import, division,
+                        print_function, unicode_literals)
+```
+
+Will be produced instead of:
+
+```python
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+```
+
+To enable this, set `balanced_wrapping` to `True` in your config or pass
+the `-e` option to the command line utility.
+
+## Custom Sections and Ordering
+
+isort provides configuration options to change almost every aspect of how
+imports are organized, ordered, or grouped together in sections.
+
+[Click here](https://pycqa.github.io/isort/docs/configuration/custom_sections_and_ordering.html) for an overview of all these options.
+
+## Skip processing of imports (outside of configuration)
+
+To make isort ignore a single import, simply add a comment at the end of
+the import line containing the text `isort:skip`:
+
+```python
+import module # isort:skip
+```
+
+or:
+
+```python
+from xyz import (abc, # isort:skip
+                 yo,
+                 hey)
+```
+
+To make isort skip an entire file, simply add `isort:skip_file` to the
+module's docstring:
+
+```python
+""" my_module.py
+    Best module ever
+
+    isort:skip_file
+"""
+
+import b
+import a
+```
+
+## Adding or removing an import from multiple files
+
+isort can be run or configured to add / remove imports automatically.
+
+[See a complete guide here.](https://pycqa.github.io/isort/docs/configuration/add_or_remove_imports.html)
+
+## Using isort to verify code
+
+### The `--check-only` option
+
+isort can also be used to verify that code is correctly formatted
+by running it with `-c`. Any files that contain incorrectly sorted
+and/or formatted imports will be output to `stderr`.
+ +```bash +isort **/*.py -c -v + +SUCCESS: /home/timothy/Projects/Open_Source/isort/isort_kate_plugin.py Everything Looks Good! +ERROR: /home/timothy/Projects/Open_Source/isort/isort/isort.py Imports are incorrectly sorted. +``` + +One great place this can be used is with a pre-commit git hook, such as +this one by \@acdha: + + + +This can help to ensure a certain level of code quality throughout a +project. + +## Git hook + +isort provides a hook function that can be integrated into your Git +pre-commit script to check Python code before committing. + +[More info here.](https://pycqa.github.io/isort/docs/configuration/git_hook.html) + +## Setuptools integration + +Upon installation, isort enables a `setuptools` command that checks +Python files declared by your project. + +[More info here.](https://pycqa.github.io/isort/docs/configuration/setuptools_integration.html) + +## Spread the word + +[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) + +Place this badge at the top of your repository to let others know your project uses isort. + +For README.md: + +```markdown +[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) +``` + +Or README.rst: + +```rst +.. image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 + :target: https://pycqa.github.io/isort/ +``` + +## Security contact information + +To report a security vulnerability, please use the [Tidelift security +contact](https://tidelift.com/security). Tidelift will coordinate the +fix and disclosure. + +## Why isort? + +isort simply stands for import sort. It was originally called +"sortImports" however I got tired of typing the extra characters and +came to the realization camelCase is not pythonic. + +I wrote isort because in an organization I used to work in the manager +came in one day and decided all code must have alphabetically sorted +imports. The code base was huge - and he meant for us to do it by hand. +However, being a programmer - I\'m too lazy to spend 8 hours mindlessly +performing a function, but not too lazy to spend 16 hours automating it. +I was given permission to open source sortImports and here we are :) + +------------------------------------------------------------------------ + +[Get professionally supported isort with the Tidelift +Subscription](https://tidelift.com/subscription/pkg/pypi-isort?utm_source=pypi-isort&utm_medium=referral&utm_campaign=readme) + +Professional support for isort is available as part of the [Tidelift +Subscription](https://tidelift.com/subscription/pkg/pypi-isort?utm_source=pypi-isort&utm_medium=referral&utm_campaign=readme). +Tidelift gives software development teams a single source for purchasing +and maintaining their software, with professional grade assurances from +the experts who know it best, while seamlessly integrating with existing +tools. + +------------------------------------------------------------------------ + +Thanks and I hope you find isort useful! 
+ +~Timothy Crosley + diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/RECORD new file mode 100644 index 0000000..c79fd95 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/RECORD @@ -0,0 +1,56 @@ +../../../bin/isort,sha256=KfvFH3JNYk3-tOrA4GG5mzM73j9nYhy6_4F6kpJA-XI,250 +../../../bin/isort-identify-imports,sha256=KSx7OLQsR8I593zsKTwH4dggYk6CoT8Bn0ZOxMx71TQ,284 +isort/__init__.py,sha256=5S6lmnFHXlZbzl7ni97ZANzkXePqKPkRUaMJyul3dIo,871 +isort/__main__.py,sha256=iK0trzN9CCXpQX-XPZDZ9JVkm2Lc0q0oiAgsa6FkJb4,36 +isort/_future/__init__.py,sha256=3Ftum0fE5cZdBaVuRCgjt8LDwZXa8jWV6g7r3_-Uzdk,310 +isort/_future/_dataclasses.py,sha256=-jj1nXvPe02i6emtxyb1xyYmbp1_2-_8xYk64Z7lV3Q,44114 +isort/_vendored/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +isort/_vendored/tomli/__init__.py,sha256=Y3N65pvphV_EF4k2qKiq_vYcohIUHhT05GzdRc0TOy8,213 +isort/_vendored/tomli/_parser.py,sha256=fhOEEYZATanBBAn-hyy0Au_aZbdqXfdKB8mGTvI1W3k,21397 +isort/_vendored/tomli/_re.py,sha256=0nrY6tQHUyJWcIf1IJwgqn0Zfhoi_dvSrn9NOY6fg5M,2833 +isort/_vendored/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +isort/_version.py,sha256=hJ77ITgc8GY6imxoql15eKMxOxScd8uLLH4ZpFYieGQ,23 +isort/api.py,sha256=oMtq_RH63175YWKUYAO_2E2feHn5l-8VXqDpTe_DbLo,26162 +isort/comments.py,sha256=6tLt0QRuSQvo-tpgTTM4oJKk-oqaE8MOTA95l89LtQQ,933 +isort/core.py,sha256=CE6a_7DVoAHXudHxsbXZ-z9bWoTAnzv57QBI7rHO_ig,20844 +isort/deprecated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +isort/deprecated/finders.py,sha256=nMAZwPKS7RoBOaFznKFm1IpyMzfdD6-tx3PRDmpY_dg,14812 +isort/exceptions.py,sha256=xmUyF5uS1K_rtlMDWoaAmtD_PA0B82u6rqgxgEjUjcw,7060 +isort/files.py,sha256=3wRqIAAquCCTF5aPzpzoDsWBvrTy49vqG11hAFseJD8,1589 +isort/format.py,sha256=Uv57ipCbGkQCXoTSxz3dWswMleTYjpNoEg-9Kek9_LY,5446 +isort/hooks.py,sha256=RAMAxwWNoXEka98HoDLkSqEkOprMkhb3wTIgd72f9xE,3131 +isort/identify.py,sha256=Q58HB4BRrOgdbiWuk6y_olcPnLaGIYazJyGvVheG7Y0,8373 +isort/io.py,sha256=HbNX5hwNOmO-QRV0u6weRShzvxpQVXiUuteFPN8XMsI,2275 +isort/literal.py,sha256=a5J_dWzd5hH6S-dan4BJdT0afTcKKHmKIZDceibVHWw,3685 +isort/logo.py,sha256=cL3al79O7O0G2viqRMRfBPp0qtRZmJw2nHSCZw8XWdQ,388 +isort/main.py,sha256=LFFm6Nd4KVGQ6KFaRDHPupfCCO4NANzqgFwjflZMg8Q,46322 +isort/output.py,sha256=_npuUogd9jSZx1-fuAuKIc8Lsc-EqqdGX3zPgZoj7jc,27007 +isort/parse.py,sha256=F7Xee3sHoMV8nlCGKsxH5ItH6hpxO9UpnSGfXk9dwPE,25094 +isort/place.py,sha256=ET3U_bUEDEM11TTKOn8Xwv9qG46uR7jZHqNV61dIz14,5173 +isort/profiles.py,sha256=BY68PIpf3s7305A2_JNkmFhMf6vXfGOat9KjESE_TI4,2119 +isort/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +isort/pylama_isort.py,sha256=vNP7jAxZy7ryZR4hotynA4JCzAxLtbasT9AYpZiiClk,1308 +isort/sections.py,sha256=xG5bwU4tOIKUmeBBhZ45EIfjP8HgDOx796bPvD5zWCw,297 +isort/settings.py,sha256=JxYUFLsb_GlYUeE_q3h91SwG9NI22f854DUKQWY6J7s,35418 +isort/setuptools_commands.py,sha256=YKDnILlXUYCayYxDh0boVQnk1C0c-NodhCnnGn6Arrw,2299 +isort/sorting.py,sha256=uZDt7sdG2HWmzAlekigvsEfj-e45ng8Kr1wwU7iKJig,4515 +isort/stdlibs/__init__.py,sha256=K0xJY1m9azaV2m_QwZ84mGe0hpe-6HhXUzvgSLLB_Xo,92 +isort/stdlibs/all.py,sha256=n8Es1WK6UlupYyVvf1PDjGbionqix-afC3LkY8nzTcw,57 +isort/stdlibs/py2.py,sha256=dTgWTa7ggz1cwN8fuI9eIs9-5nTmkRxG_uO61CGwfXI,41 +isort/stdlibs/py27.py,sha256=QriKfttNSHsjaRtDfR5WXytjzf7Xi7p9lxiOOcmA2JM,4504 +isort/stdlibs/py3.py,sha256=yWfPnYFgxosQqoRQAIoXn4quZKLE8w7KTm3aar7g7PQ,143 
+isort/stdlibs/py310.py,sha256=oVYf6cuEEKXlEdEDdowa3D5rsVOMR-MnSkq6xZa_ntM,3263 +isort/stdlibs/py35.py,sha256=dA_eWId8Q5yOj76zuzT1mEKDeB4FmekKLBNBL7EHV_k,3295 +isort/stdlibs/py36.py,sha256=iuXIDLcFrSviMMSOP4PoKWCG5BveMnZbFravpduSUss,3310 +isort/stdlibs/py37.py,sha256=dLxxRerCvb4O9vrifTg5KWgO0L3a6AQB13haK_tSBRw,3334 +isort/stdlibs/py38.py,sha256=xsUSUZD5XUYX0PIuf9A3bSMD4ZcbfPyzqJqCudSfhaU,3319 +isort/stdlibs/py39.py,sha256=tlVRkhoDpoNJxJd803NdTt_hgzzuukJMkUQMgpE-vlA,3307 +isort/utils.py,sha256=5EEZUfZyyWcJLk2qnNF8ObDib_qPU4zwEQvWjpKRgb0,2413 +isort/wrap.py,sha256=o7_kcpPXUKpzh-Cna3XY__S50TdZI39R3j4hl6iSqkA,6068 +isort/wrap_modes.py,sha256=3N-izkXBrd-QU2CyEbqxuKVHuuhB-ulqzDYF7DV6wNk,13569 +isort-5.10.1.dist-info/entry_points.txt,sha256=stP-G7UtFo06wllIxS1jKbEJpc4u_3WPiLh_r13BGcc,213 +isort-5.10.1.dist-info/LICENSE,sha256=BjKUABw9Uj26y6ud1UrCKZgnVsyvWSylMkCysM3YIGU,1089 +isort-5.10.1.dist-info/WHEEL,sha256=SrtnPGVTMeYWttls9xnWA01eUhCZ3ufFdJUYb1J3r-U,83 +isort-5.10.1.dist-info/METADATA,sha256=Rgh0fARMrFFqafPC_HunYdNYGxjFQYZuW0B9-g-5mKs,12702 +isort-5.10.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +isort-5.10.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/WHEEL new file mode 100644 index 0000000..caca394 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry 1.0.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/entry_points.txt new file mode 100644 index 0000000..21bc432 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort-5.10.1.dist-info/entry_points.txt @@ -0,0 +1,10 @@ +[console_scripts] +isort=isort.main:main +isort-identify-imports=isort.main:identify_imports_main + +[distutils.commands] +isort=isort.setuptools_commands:ISortCommand + +[pylama.linter] +isort=isort.pylama_isort:Linter + diff --git a/myenv/lib/python3.9/site-packages/isort/__init__.py b/myenv/lib/python3.9/site-packages/isort/__init__.py new file mode 100644 index 0000000..e0754da --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/__init__.py @@ -0,0 +1,38 @@ +"""Defines the public isort interface""" +__all__ = ( + "Config", + "ImportKey", + "__version__", + "check_code", + "check_file", + "check_stream", + "code", + "file", + "find_imports_in_code", + "find_imports_in_file", + "find_imports_in_paths", + "find_imports_in_stream", + "place_module", + "place_module_with_reason", + "settings", + "stream", +) + +from . 
import settings +from ._version import __version__ +from .api import ImportKey +from .api import check_code_string as check_code +from .api import ( + check_file, + check_stream, + find_imports_in_code, + find_imports_in_file, + find_imports_in_paths, + find_imports_in_stream, + place_module, + place_module_with_reason, +) +from .api import sort_code_string as code +from .api import sort_file as file +from .api import sort_stream as stream +from .settings import Config diff --git a/myenv/lib/python3.9/site-packages/isort/__main__.py b/myenv/lib/python3.9/site-packages/isort/__main__.py new file mode 100644 index 0000000..94b1d05 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/__main__.py @@ -0,0 +1,3 @@ +from isort.main import main + +main() diff --git a/myenv/lib/python3.9/site-packages/isort/_future/__init__.py b/myenv/lib/python3.9/site-packages/isort/_future/__init__.py new file mode 100644 index 0000000..4802853 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_future/__init__.py @@ -0,0 +1,12 @@ +import sys + +if sys.version_info.major <= 3 and sys.version_info.minor <= 6: + from . import _dataclasses as dataclasses + +else: + import dataclasses # type: ignore + +dataclass = dataclasses.dataclass # type: ignore +field = dataclasses.field # type: ignore + +__all__ = ["dataclasses", "dataclass", "field"] diff --git a/myenv/lib/python3.9/site-packages/isort/_future/_dataclasses.py b/myenv/lib/python3.9/site-packages/isort/_future/_dataclasses.py new file mode 100644 index 0000000..71962e2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_future/_dataclasses.py @@ -0,0 +1,1209 @@ +# type: ignore +# flake8: noqa +# flake8: noqa +"""Backport of Python3.7 dataclasses Library + +Taken directly from here: https://github.com/ericvsmith/dataclasses +Licensed under the Apache License: https://github.com/ericvsmith/dataclasses/blob/master/LICENSE.txt + +Needed due to isorts strict no non-optional requirements stance. + +TODO: Remove once isort only supports 3.7+ +""" +import copy +import inspect +import keyword +import re +import sys +import types + +__all__ = [ + "dataclass", + "field", + "Field", + "FrozenInstanceError", + "InitVar", + "MISSING", + # Helper functions. + "fields", + "asdict", + "astuple", + "make_dataclass", + "replace", + "is_dataclass", +] + +# Conditions for adding methods. The boxes indicate what action the +# dataclass decorator takes. For all of these tables, when I talk +# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm +# referring to the arguments to the @dataclass decorator. When +# checking if a dunder method already exists, I mean check for an +# entry in the class's __dict__. I never check to see if an attribute +# is defined in a base class. + +# Key: +# +=========+=========================================+ +# + Value | Meaning | +# +=========+=========================================+ +# | | No action: no method is added. | +# +---------+-----------------------------------------+ +# | add | Generated method is added. | +# +---------+-----------------------------------------+ +# | raise | TypeError is raised. | +# +---------+-----------------------------------------+ +# | None | Attribute is set to None. | +# +=========+=========================================+ + +# __init__ +# +# +--- init= parameter +# | +# v | | | +# | no | yes | <--- class has __init__ in __dict__? 
+# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + +# __repr__ +# +# +--- repr= parameter +# | +# v | | | +# | no | yes | <--- class has __repr__ in __dict__? +# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + + +# __setattr__ +# __delattr__ +# +# +--- frozen= parameter +# | +# v | | | +# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__? +# +=======+=======+=======+ +# | False | | | <- the default +# +-------+-------+-------+ +# | True | add | raise | +# +=======+=======+=======+ +# Raise because not adding these methods would break the "frozen-ness" +# of the class. + +# __eq__ +# +# +--- eq= parameter +# | +# v | | | +# | no | yes | <--- class has __eq__ in __dict__? +# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + +# __lt__ +# __le__ +# __gt__ +# __ge__ +# +# +--- order= parameter +# | +# v | | | +# | no | yes | <--- class has any comparison method in __dict__? +# +=======+=======+=======+ +# | False | | | <- the default +# +-------+-------+-------+ +# | True | add | raise | +# +=======+=======+=======+ +# Raise because to allow this case would interfere with using +# functools.total_ordering. + +# __hash__ + +# +------------------- unsafe_hash= parameter +# | +----------- eq= parameter +# | | +--- frozen= parameter +# | | | +# v v v | | | +# | no | yes | <--- class has explicitly defined __hash__ +# +=======+=======+=======+========+========+ +# | False | False | False | | | No __eq__, use the base class __hash__ +# +-------+-------+-------+--------+--------+ +# | False | False | True | | | No __eq__, use the base class __hash__ +# +-------+-------+-------+--------+--------+ +# | False | True | False | None | | <-- the default, not hashable +# +-------+-------+-------+--------+--------+ +# | False | True | True | add | | Frozen, so hashable, allows override +# +-------+-------+-------+--------+--------+ +# | True | False | False | add | raise | Has no __eq__, but hashable +# +-------+-------+-------+--------+--------+ +# | True | False | True | add | raise | Has no __eq__, but hashable +# +-------+-------+-------+--------+--------+ +# | True | True | False | add | raise | Not frozen, but hashable +# +-------+-------+-------+--------+--------+ +# | True | True | True | add | raise | Frozen, so hashable +# +=======+=======+=======+========+========+ +# For boxes that are blank, __hash__ is untouched and therefore +# inherited from the base class. If the base is object, then +# id-based hashing is used. +# +# Note that a class may already have __hash__=None if it specified an +# __eq__ method in the class body (not one that was created by +# @dataclass). +# +# See _hash_action (below) for a coded version of this table. + + +# Raised when an attempt is made to modify a frozen class. +class FrozenInstanceError(AttributeError): + pass + + +# A sentinel object for default values to signal that a default +# factory will be used. This is given a nice repr() which will appear +# in the function signature of dataclasses' constructors. +class _HAS_DEFAULT_FACTORY_CLASS: + def __repr__(self): + return "" + + +_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS() + +# A sentinel object to detect if a parameter is supplied or not. Use +# a class to give it a better repr. 
+class _MISSING_TYPE: + pass + + +MISSING = _MISSING_TYPE() + +# Since most per-field metadata will be unused, create an empty +# read-only proxy that can be shared among all fields. +_EMPTY_METADATA = types.MappingProxyType({}) + +# Markers for the various kinds of fields and pseudo-fields. +class _FIELD_BASE: + def __init__(self, name): + self.name = name + + def __repr__(self): + return self.name + + +_FIELD = _FIELD_BASE("_FIELD") +_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR") +_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR") + +# The name of an attribute on the class where we store the Field +# objects. Also used to check if a class is a Data Class. +_FIELDS = "__dataclass_fields__" + +# The name of an attribute on the class that stores the parameters to +# @dataclass. +_PARAMS = "__dataclass_params__" + +# The name of the function, that if it exists, is called at the end of +# __init__. +_POST_INIT_NAME = "__post_init__" + +# String regex that string annotations for ClassVar or InitVar must match. +# Allows "identifier.identifier[" or "identifier[". +# https://bugs.python.org/issue33453 for details. +_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)") + + +class _InitVarMeta(type): + def __getitem__(self, params): + return self + + +class InitVar(metaclass=_InitVarMeta): + pass + + +# Instances of Field are only ever created from within this module, +# and only from the field() function, although Field instances are +# exposed externally as (conceptually) read-only objects. +# +# name and type are filled in after the fact, not in __init__. +# They're not known at the time this class is instantiated, but it's +# convenient if they're available later. +# +# When cls._FIELDS is filled in with a list of Field objects, the name +# and type fields will have been populated. +class Field: + __slots__ = ( + "name", + "type", + "default", + "default_factory", + "repr", + "hash", + "init", + "compare", + "metadata", + "_field_type", # Private: not to be used by user code. + ) + + def __init__(self, default, default_factory, init, repr, hash, compare, metadata): + self.name = None + self.type = None + self.default = default + self.default_factory = default_factory + self.init = init + self.repr = repr + self.hash = hash + self.compare = compare + self.metadata = ( + _EMPTY_METADATA + if metadata is None or len(metadata) == 0 + else types.MappingProxyType(metadata) + ) + self._field_type = None + + def __repr__(self): + return ( + "Field(" + f"name={self.name!r}," + f"type={self.type!r}," + f"default={self.default!r}," + f"default_factory={self.default_factory!r}," + f"init={self.init!r}," + f"repr={self.repr!r}," + f"hash={self.hash!r}," + f"compare={self.compare!r}," + f"metadata={self.metadata!r}," + f"_field_type={self._field_type}" + ")" + ) + + # This is used to support the PEP 487 __set_name__ protocol in the + # case where we're using a field that contains a descriptor as a + # default value. For details on __set_name__, see + # https://www.python.org/dev/peps/pep-0487/#implementation-details. + # + # Note that in _process_class, this Field object is overwritten + # with the default value, so the end result is a descriptor that + # had __set_name__ called on it at the right time. + def __set_name__(self, owner, name): + func = getattr(type(self.default), "__set_name__", None) + if func: + # There is a __set_name__ method on the descriptor, call + # it. 
+ func(self.default, owner, name) + + +class _DataclassParams: + __slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen") + + def __init__(self, init, repr, eq, order, unsafe_hash, frozen): + self.init = init + self.repr = repr + self.eq = eq + self.order = order + self.unsafe_hash = unsafe_hash + self.frozen = frozen + + def __repr__(self): + return ( + "_DataclassParams(" + f"init={self.init!r}," + f"repr={self.repr!r}," + f"eq={self.eq!r}," + f"order={self.order!r}," + f"unsafe_hash={self.unsafe_hash!r}," + f"frozen={self.frozen!r}" + ")" + ) + + +# This function is used instead of exposing Field creation directly, +# so that a type checker can be told (via overloads) that this is a +# function whose type depends on its parameters. +def field( + *, + default=MISSING, + default_factory=MISSING, + init=True, + repr=True, + hash=None, + compare=True, + metadata=None, +): + """Return an object to identify dataclass fields. + default is the default value of the field. default_factory is a + 0-argument function called to initialize a field's value. If init + is True, the field will be a parameter to the class's __init__() + function. If repr is True, the field will be included in the + object's repr(). If hash is True, the field will be included in + the object's hash(). If compare is True, the field will be used + in comparison functions. metadata, if specified, must be a + mapping which is stored but not otherwise examined by dataclass. + It is an error to specify both default and default_factory. + """ + + if default is not MISSING and default_factory is not MISSING: + raise ValueError("cannot specify both default and default_factory") + return Field(default, default_factory, init, repr, hash, compare, metadata) + + +def _tuple_str(obj_name, fields): + # Return a string representing each field of obj_name as a tuple + # member. So, if fields is ['x', 'y'] and obj_name is "self", + # return "(self.x,self.y)". + + # Special case for the 0-tuple. + if not fields: + return "()" + # Note the trailing comma, needed if this turns out to be a 1-tuple. + return f'({",".join(f"{obj_name}.{f.name}" for f in fields)},)' + + +def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING): + # Note that we mutate locals when exec() is called. Caller + # beware! The only callers are internal to this module, so no + # worries about external callers. + if locals is None: + locals = {} + return_annotation = "" + if return_type is not MISSING: + locals["_return_type"] = return_type + return_annotation = "->_return_type" + args = ",".join(args) + body = "\n".join(f" {b}" for b in body) + + # Compute the text of the entire function. + txt = f"def {name}({args}){return_annotation}:\n{body}" + + exec(txt, globals, locals) # nosec + return locals[name] + + +def _field_assign(frozen, name, value, self_name): + # If we're a frozen class, then assign to our fields in __init__ + # via object.__setattr__. Otherwise, just use a simple + # assignment. + # + # self_name is what "self" is called in this function: don't + # hard-code "self", since that might be a field name. + if frozen: + return f"object.__setattr__({self_name},{name!r},{value})" + return f"{self_name}.{name}={value}" + + +def _field_init(f, frozen, globals, self_name): + # Return the text of the line in the body of __init__ that will + # initialize this field. + + default_name = f"_dflt_{f.name}" + if f.default_factory is not MISSING: + if f.init: + # This field has a default factory. If a parameter is + # given, use it. 
If not, call the factory. + globals[default_name] = f.default_factory + value = f"{default_name}() " f"if {f.name} is _HAS_DEFAULT_FACTORY " f"else {f.name}" + else: + # This is a field that's not in the __init__ params, but + # has a default factory function. It needs to be + # initialized here by calling the factory function, + # because there's no other way to initialize it. + + # For a field initialized with a default=defaultvalue, the + # class dict just has the default value + # (cls.fieldname=defaultvalue). But that won't work for a + # default factory, the factory must be called in __init__ + # and we must assign that to self.fieldname. We can't + # fall back to the class dict's value, both because it's + # not set, and because it might be different per-class + # (which, after all, is why we have a factory function!). + + globals[default_name] = f.default_factory + value = f"{default_name}()" + else: + # No default factory. + if f.init: + if f.default is MISSING: + # There's no default, just do an assignment. + value = f.name + elif f.default is not MISSING: + globals[default_name] = f.default + value = f.name + else: + # This field does not need initialization. Signify that + # to the caller by returning None. + return None + + # Only test this now, so that we can create variables for the + # default. However, return None to signify that we're not going + # to actually do the assignment statement for InitVars. + if f._field_type == _FIELD_INITVAR: + return None + + # Now, actually generate the field assignment. + return _field_assign(frozen, f.name, value, self_name) + + +def _init_param(f): + # Return the __init__ parameter string for this field. For + # example, the equivalent of 'x:int=3' (except instead of 'int', + # reference a variable set to int, and instead of '3', reference a + # variable set to 3). + if f.default is MISSING and f.default_factory is MISSING: + # There's no default, and no default_factory, just output the + # variable name and type. + default = "" + elif f.default is not MISSING: + # There's a default, this will be the name that's used to look + # it up. + default = f"=_dflt_{f.name}" + elif f.default_factory is not MISSING: + # There's a factory function. Set a marker. + default = "=_HAS_DEFAULT_FACTORY" + return f"{f.name}:_type_{f.name}{default}" + + +def _init_fn(fields, frozen, has_post_init, self_name): + # fields contains both real fields and InitVar pseudo-fields. + + # Make sure we don't have fields without defaults following fields + # with defaults. This actually would be caught when exec-ing the + # function source code, but catching it here gives a better error + # message, and future-proofs us in case we build up the function + # using ast. + seen_default = False + for f in fields: + # Only consider fields in the __init__ call. + if f.init: + if not (f.default is MISSING and f.default_factory is MISSING): + seen_default = True + elif seen_default: + raise TypeError(f"non-default argument {f.name!r} " "follows default argument") + + globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY} + + body_lines = [] + for f in fields: + line = _field_init(f, frozen, globals, self_name) + # line is None means that this field doesn't require + # initialization (it's a pseudo-field). Just skip it. + if line: + body_lines.append(line) + + # Does this class have a post-init function? 
+ if has_post_init: + params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR) + body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})") + + # If no body lines, use 'pass'. + if not body_lines: + body_lines = ["pass"] + + locals = {f"_type_{f.name}": f.type for f in fields} + return _create_fn( + "__init__", + [self_name] + [_init_param(f) for f in fields if f.init], + body_lines, + locals=locals, + globals=globals, + return_type=None, + ) + + +def _repr_fn(fields): + return _create_fn( + "__repr__", + ("self",), + [ + 'return self.__class__.__qualname__ + f"(' + + ", ".join(f"{f.name}={{self.{f.name}!r}}" for f in fields) + + ')"' + ], + ) + + +def _frozen_get_del_attr(cls, fields): + # XXX: globals is modified on the first call to _create_fn, then + # the modified version is used in the second call. Is this okay? + globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError} + if fields: + fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)" + else: + # Special case for the zero-length tuple. + fields_str = "()" + return ( + _create_fn( + "__setattr__", + ("self", "name", "value"), + ( + f"if type(self) is cls or name in {fields_str}:", + ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', + f"super(cls, self).__setattr__(name, value)", + ), + globals=globals, + ), + _create_fn( + "__delattr__", + ("self", "name"), + ( + f"if type(self) is cls or name in {fields_str}:", + ' raise FrozenInstanceError(f"cannot delete field {name!r}")', + f"super(cls, self).__delattr__(name)", + ), + globals=globals, + ), + ) + + +def _cmp_fn(name, op, self_tuple, other_tuple): + # Create a comparison function. If the fields in the object are + # named 'x' and 'y', then self_tuple is the string + # '(self.x,self.y)' and other_tuple is the string + # '(other.x,other.y)'. + + return _create_fn( + name, + ("self", "other"), + [ + "if other.__class__ is self.__class__:", + f" return {self_tuple}{op}{other_tuple}", + "return NotImplemented", + ], + ) + + +def _hash_fn(fields): + self_tuple = _tuple_str("self", fields) + return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"]) + + +def _is_classvar(a_type, typing): + # This test uses a typing internal class, but it's the best way to + # test if this is a ClassVar. + return type(a_type) is typing._ClassVar + + +def _is_initvar(a_type, dataclasses): + # The module we're checking against is the module we're + # currently in (dataclasses.py). + return a_type is dataclasses.InitVar + + +def _is_type(annotation, cls, a_module, a_type, is_type_predicate): + # Given a type annotation string, does it refer to a_type in + # a_module? For example, when checking that annotation denotes a + # ClassVar, then a_module is typing, and a_type is + # typing.ClassVar. + + # It's possible to look up a_module given a_type, but it involves + # looking in sys.modules (again!), and seems like a waste since + # the caller already knows a_module. + + # - annotation is a string type annotation + # - cls is the class that this annotation was found in + # - a_module is the module we want to match + # - a_type is the type in that module we want to match + # - is_type_predicate is a function called with (obj, a_module) + # that determines if obj is of the desired type. + + # Since this test does not do a local namespace lookup (and + # instead only a module (global) lookup), there are some things it + # gets wrong. 
+ + # With string annotations, cv0 will be detected as a ClassVar: + # CV = ClassVar + # @dataclass + # class C0: + # cv0: CV + + # But in this example cv1 will not be detected as a ClassVar: + # @dataclass + # class C1: + # CV = ClassVar + # cv1: CV + + # In C1, the code in this function (_is_type) will look up "CV" in + # the module and not find it, so it will not consider cv1 as a + # ClassVar. This is a fairly obscure corner case, and the best + # way to fix it would be to eval() the string "CV" with the + # correct global and local namespaces. However that would involve + # a eval() penalty for every single field of every dataclass + # that's defined. It was judged not worth it. + + match = _MODULE_IDENTIFIER_RE.match(annotation) + if match: + ns = None + module_name = match.group(1) + if not module_name: + # No module name, assume the class's module did + # "from dataclasses import InitVar". + ns = sys.modules.get(cls.__module__).__dict__ + else: + # Look up module_name in the class's module. + module = sys.modules.get(cls.__module__) + if module and module.__dict__.get(module_name) is a_module: + ns = sys.modules.get(a_type.__module__).__dict__ + if ns and is_type_predicate(ns.get(match.group(2)), a_module): + return True + return False + + +def _get_field(cls, a_name, a_type): + # Return a Field object for this field name and type. ClassVars + # and InitVars are also returned, but marked as such (see + # f._field_type). + + # If the default value isn't derived from Field, then it's only a + # normal default value. Convert it to a Field(). + default = getattr(cls, a_name, MISSING) + if isinstance(default, Field): + f = default + else: + if isinstance(default, types.MemberDescriptorType): + # This is a field in __slots__, so it has no default value. + default = MISSING + f = field(default=default) + + # Only at this point do we know the name and the type. Set them. + f.name = a_name + f.type = a_type + + # Assume it's a normal field until proven otherwise. We're next + # going to decide if it's a ClassVar or InitVar, everything else + # is just a normal field. + f._field_type = _FIELD + + # In addition to checking for actual types here, also check for + # string annotations. get_type_hints() won't always work for us + # (see https://github.com/python/typing/issues/508 for example), + # plus it's expensive and would require an eval for every string + # annotation. So, make a best effort to see if this is a ClassVar + # or InitVar using regex's and checking that the thing referenced + # is actually of the correct type. + + # For the complete discussion, see https://bugs.python.org/issue33453 + + # If typing has not been imported, then it's impossible for any + # annotation to be a ClassVar. So, only look for ClassVar if + # typing has been imported by any module (not necessarily cls's + # module). + typing = sys.modules.get("typing") + if typing: + if _is_classvar(a_type, typing) or ( + isinstance(f.type, str) and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar) + ): + f._field_type = _FIELD_CLASSVAR + + # If the type is InitVar, or if it's a matching string annotation, + # then it's an InitVar. + if f._field_type is _FIELD: + # The module we're checking against is the module we're + # currently in (dataclasses.py). + dataclasses = sys.modules[__name__] + if _is_initvar(a_type, dataclasses) or ( + isinstance(f.type, str) + and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar) + ): + f._field_type = _FIELD_INITVAR + + # Validations for individual fields. 
This is delayed until now, + # instead of in the Field() constructor, since only here do we + # know the field name, which allows for better error reporting. + + # Special restrictions for ClassVar and InitVar. + if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR): + if f.default_factory is not MISSING: + raise TypeError(f"field {f.name} cannot have a " "default factory") + # Should I check for other field settings? default_factory + # seems the most serious to check for. Maybe add others. For + # example, how about init=False (or really, + # init=)? It makes no sense for + # ClassVar and InitVar to specify init=. + + # For real fields, disallow mutable defaults for known types. + if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)): + raise ValueError( + f"mutable default {type(f.default)} for field " + f"{f.name} is not allowed: use default_factory" + ) + + return f + + +def _set_new_attribute(cls, name, value): + # Never overwrites an existing attribute. Returns True if the + # attribute already exists. + if name in cls.__dict__: + return True + setattr(cls, name, value) + return False + + +# Decide if/how we're going to create a hash function. Key is +# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to +# take. The common case is to do nothing, so instead of providing a +# function that is a no-op, use None to signify that. + + +def _hash_set_none(cls, fields): + return None + + +def _hash_add(cls, fields): + flds = [f for f in fields if (f.compare if f.hash is None else f.hash)] + return _hash_fn(flds) + + +def _hash_exception(cls, fields): + # Raise an exception. + raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}") + + +# +# +-------------------------------------- unsafe_hash? +# | +------------------------------- eq? +# | | +------------------------ frozen? +# | | | +---------------- has-explicit-hash? +# | | | | +# | | | | +------- action +# | | | | | +# v v v v v +_hash_action = { + (False, False, False, False): None, + (False, False, False, True): None, + (False, False, True, False): None, + (False, False, True, True): None, + (False, True, False, False): _hash_set_none, + (False, True, False, True): None, + (False, True, True, False): _hash_add, + (False, True, True, True): None, + (True, False, False, False): _hash_add, + (True, False, False, True): _hash_exception, + (True, False, True, False): _hash_add, + (True, False, True, True): _hash_exception, + (True, True, False, False): _hash_add, + (True, True, False, True): _hash_exception, + (True, True, True, False): _hash_add, + (True, True, True, True): _hash_exception, +} +# See https://bugs.python.org/issue32929#msg312829 for an if-statement +# version of this table. + + +def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen): + # Now that dicts retain insertion order, there's no reason to use + # an ordered dict. I am leveraging that ordering here, because + # derived class fields overwrite base class fields, but the order + # is defined by the base class, which is found first. + fields = {} + + setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen)) + + # Find our base classes in reverse MRO order, and exclude + # ourselves. In reversed order so that more derived classes + # override earlier field definitions in base classes. As long as + # we're iterating over them, see if any are frozen. 
+ any_frozen_base = False + has_dataclass_bases = False + for b in cls.__mro__[-1:0:-1]: + # Only process classes that have been processed by our + # decorator. That is, they have a _FIELDS attribute. + base_fields = getattr(b, _FIELDS, None) + if base_fields: + has_dataclass_bases = True + for f in base_fields.values(): + fields[f.name] = f + if getattr(b, _PARAMS).frozen: + any_frozen_base = True + + # Annotations that are defined in this class (not in base + # classes). If __annotations__ isn't present, then this class + # adds no new annotations. We use this to compute fields that are + # added by this class. + # + # Fields are found from cls_annotations, which is guaranteed to be + # ordered. Default values are from class attributes, if a field + # has a default. If the default value is a Field(), then it + # contains additional info beyond (and possibly including) the + # actual default value. Pseudo-fields ClassVars and InitVars are + # included, despite the fact that they're not real fields. That's + # dealt with later. + cls_annotations = cls.__dict__.get("__annotations__", {}) + + # Now find fields in our class. While doing so, validate some + # things, and set the default values (as class attributes) where + # we can. + cls_fields = [_get_field(cls, name, type) for name, type in cls_annotations.items()] + for f in cls_fields: + fields[f.name] = f + + # If the class attribute (which is the default value for this + # field) exists and is of type 'Field', replace it with the + # real default. This is so that normal class introspection + # sees a real default value, not a Field. + if isinstance(getattr(cls, f.name, None), Field): + if f.default is MISSING: + # If there's no default, delete the class attribute. + # This happens if we specify field(repr=False), for + # example (that is, we specified a field object, but + # no default value). Also if we're using a default + # factory. The class attribute should not be set at + # all in the post-processed class. + delattr(cls, f.name) + else: + setattr(cls, f.name, f.default) + + # Do we have any Field members that don't also have annotations? + for name, value in cls.__dict__.items(): + if isinstance(value, Field) and not name in cls_annotations: + raise TypeError(f"{name!r} is a field but has no type annotation") + + # Check rules that apply if we are derived from any dataclasses. + if has_dataclass_bases: + # Raise an exception if any of our bases are frozen, but we're not. + if any_frozen_base and not frozen: + raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one") + + # Raise an exception if we're frozen, but none of our bases are. + if not any_frozen_base and frozen: + raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one") + + # Remember all of the fields on our class (including bases). This + # also marks this class as being a dataclass. + setattr(cls, _FIELDS, fields) + + # Was this class defined with an explicit __hash__? Note that if + # __eq__ is defined in this class, then python will automatically + # set __hash__ to None. This is a heuristic, as it's possible + # that such a __hash__ == None was not auto-generated, but it + # close enough. + class_hash = cls.__dict__.get("__hash__", MISSING) + has_explicit_hash = not ( + class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__) + ) + + # If we're generating ordering methods, we must be generating the + # eq methods. 
+ if order and not eq: + raise ValueError("eq must be true if order is true") + + if init: + # Does this class have a post-init function? + has_post_init = hasattr(cls, _POST_INIT_NAME) + + # Include InitVars and regular fields (so, not ClassVars). + flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)] + _set_new_attribute( + cls, + "__init__", + _init_fn( + flds, + frozen, + has_post_init, + # The name to use for the "self" + # param in __init__. Use "self" + # if possible. + "__dataclass_self__" if "self" in fields else "self", + ), + ) + + # Get the fields as a list, and include only real fields. This is + # used in all of the following methods. + field_list = [f for f in fields.values() if f._field_type is _FIELD] + + if repr: + flds = [f for f in field_list if f.repr] + _set_new_attribute(cls, "__repr__", _repr_fn(flds)) + + if eq: + # Create _eq__ method. There's no need for a __ne__ method, + # since python will call __eq__ and negate it. + flds = [f for f in field_list if f.compare] + self_tuple = _tuple_str("self", flds) + other_tuple = _tuple_str("other", flds) + _set_new_attribute(cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple)) + + if order: + # Create and set the ordering methods. + flds = [f for f in field_list if f.compare] + self_tuple = _tuple_str("self", flds) + other_tuple = _tuple_str("other", flds) + for name, op in [("__lt__", "<"), ("__le__", "<="), ("__gt__", ">"), ("__ge__", ">=")]: + if _set_new_attribute(cls, name, _cmp_fn(name, op, self_tuple, other_tuple)): + raise TypeError( + f"Cannot overwrite attribute {name} " + f"in class {cls.__name__}. Consider using " + "functools.total_ordering" + ) + + if frozen: + for fn in _frozen_get_del_attr(cls, field_list): + if _set_new_attribute(cls, fn.__name__, fn): + raise TypeError( + f"Cannot overwrite attribute {fn.__name__} " f"in class {cls.__name__}" + ) + + # Decide if/how we're going to create a hash function. + hash_action = _hash_action[bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash] + if hash_action: + # No need to call _set_new_attribute here, since by the time + # we're here the overwriting is unconditional. + cls.__hash__ = hash_action(cls, field_list) + + if not getattr(cls, "__doc__"): + # Create a class doc-string. + cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "") + + return cls + + +# _cls should never be specified by keyword, so start it with an +# underscore. The presence of _cls is used to detect if this +# decorator is being called with parameters or not. +def dataclass( + _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False +): + """Returns the same class as was passed in, with dunder methods + added based on the fields defined in the class. + Examines PEP 526 __annotations__ to determine fields. + If init is true, an __init__() method is added to the class. If + repr is true, a __repr__() method is added. If order is true, rich + comparison dunder methods are added. If unsafe_hash is true, a + __hash__() method function is added. If frozen is true, fields may + not be assigned to after instance creation. + """ + + def wrap(cls): + return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen) + + # See if we're being called as @dataclass or @dataclass(). + if _cls is None: + # We're called with parens. + return wrap + + # We're called as @dataclass without parens. 
+ return wrap(_cls) + + +def fields(class_or_instance): + """Return a tuple describing the fields of this dataclass. + Accepts a dataclass or an instance of one. Tuple elements are of + type Field. + """ + + # Might it be worth caching this, per class? + try: + fields = getattr(class_or_instance, _FIELDS) + except AttributeError: + raise TypeError("must be called with a dataclass type or instance") + + # Exclude pseudo-fields. Note that fields is sorted by insertion + # order, so the order of the tuple is as the fields were defined. + return tuple(f for f in fields.values() if f._field_type is _FIELD) + + +def _is_dataclass_instance(obj): + """Returns True if obj is an instance of a dataclass.""" + return not isinstance(obj, type) and hasattr(obj, _FIELDS) + + +def is_dataclass(obj): + """Returns True if obj is a dataclass or an instance of a + dataclass.""" + return hasattr(obj, _FIELDS) + + +def asdict(obj, *, dict_factory=dict): + """Return the fields of a dataclass instance as a new dictionary mapping + field names to field values. + Example usage: + @dataclass + class C: + x: int + y: int + c = C(1, 2) + assert asdict(c) == {'x': 1, 'y': 2} + If given, 'dict_factory' will be used instead of built-in dict. + The function applies recursively to field values that are + dataclass instances. This will also look into built-in containers: + tuples, lists, and dicts. + """ + if not _is_dataclass_instance(obj): + raise TypeError("asdict() should be called on dataclass instances") + return _asdict_inner(obj, dict_factory) + + +def _asdict_inner(obj, dict_factory): + if _is_dataclass_instance(obj): + result = [] + for f in fields(obj): + value = _asdict_inner(getattr(obj, f.name), dict_factory) + result.append((f.name, value)) + return dict_factory(result) + elif isinstance(obj, (list, tuple)): + return type(obj)(_asdict_inner(v, dict_factory) for v in obj) + elif isinstance(obj, dict): + return type(obj)( + (_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) for k, v in obj.items() + ) + else: + return copy.deepcopy(obj) + + +def astuple(obj, *, tuple_factory=tuple): + """Return the fields of a dataclass instance as a new tuple of field values. + Example usage:: + @dataclass + class C: + x: int + y: int + c = C(1, 2) + assert astuple(c) == (1, 2) + If given, 'tuple_factory' will be used instead of built-in tuple. + The function applies recursively to field values that are + dataclass instances. This will also look into built-in containers: + tuples, lists, and dicts. + """ + + if not _is_dataclass_instance(obj): + raise TypeError("astuple() should be called on dataclass instances") + return _astuple_inner(obj, tuple_factory) + + +def _astuple_inner(obj, tuple_factory): + if _is_dataclass_instance(obj): + result = [] + for f in fields(obj): + value = _astuple_inner(getattr(obj, f.name), tuple_factory) + result.append(value) + return tuple_factory(result) + elif isinstance(obj, (list, tuple)): + return type(obj)(_astuple_inner(v, tuple_factory) for v in obj) + elif isinstance(obj, dict): + return type(obj)( + (_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory)) + for k, v in obj.items() + ) + else: + return copy.deepcopy(obj) + + +def make_dataclass( + cls_name, + fields, + *, + bases=(), + namespace=None, + init=True, + repr=True, + eq=True, + order=False, + unsafe_hash=False, + frozen=False, +): + """Return a new dynamically created dataclass. + The dataclass name will be 'cls_name'. 
'fields' is an iterable + of either (name), (name, type) or (name, type, Field) objects. If type is + omitted, use the string 'typing.Any'. Field objects are created by + the equivalent of calling 'field(name, type [, Field-info])'. + C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,)) + is equivalent to: + @dataclass + class C(Base): + x: 'typing.Any' + y: int + z: int = field(init=False) + For the bases and namespace parameters, see the builtin type() function. + The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to + dataclass(). + """ + + if namespace is None: + namespace = {} + else: + # Copy namespace since we're going to mutate it. + namespace = namespace.copy() + + # While we're looking through the field names, validate that they + # are identifiers, are not keywords, and not duplicates. + seen = set() + anns = {} + for item in fields: + if isinstance(item, str): + name = item + tp = "typing.Any" + elif len(item) == 2: + ( + name, + tp, + ) = item + elif len(item) == 3: + name, tp, spec = item + namespace[name] = spec + else: + raise TypeError(f"Invalid field: {item!r}") + + if not isinstance(name, str) or not name.isidentifier(): + raise TypeError(f"Field names must be valid identifiers: {name!r}") + if keyword.iskeyword(name): + raise TypeError(f"Field names must not be keywords: {name!r}") + if name in seen: + raise TypeError(f"Field name duplicated: {name!r}") + + seen.add(name) + anns[name] = tp + + namespace["__annotations__"] = anns + # We use `types.new_class()` instead of simply `type()` to allow dynamic creation + # of generic dataclassses. + cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace)) + return dataclass( + cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen + ) + + +def replace(obj, **changes): + """Return a new object replacing specified fields with new values. + This is especially useful for frozen classes. Example usage: + @dataclass(frozen=True) + class C: + x: int + y: int + c = C(1, 2) + c1 = replace(c, x=3) + assert c1.x == 3 and c1.y == 2 + """ + + # We're going to mutate 'changes', but that's okay because it's a + # new dict, even if called with 'replace(obj, **my_changes)'. + + if not _is_dataclass_instance(obj): + raise TypeError("replace() should be called on dataclass instances") + + # It's an error to have init=False fields in 'changes'. + # If a field is not in 'changes', read its value from the provided obj. + + for f in getattr(obj, _FIELDS).values(): + if not f.init: + # Error if this field is specified in changes. + if f.name in changes: + raise ValueError( + f"field {f.name} is declared with " + "init=False, it cannot be specified with " + "replace()" + ) + continue + + if f.name not in changes: + changes[f.name] = getattr(obj, f.name) + + # Create the new object, which calls __init__() and + # __post_init__() (if defined), using all of the init fields we've + # added and/or left in 'changes'. If there are values supplied in + # changes that aren't fields, this will correctly raise a + # TypeError. 
+ return obj.__class__(**changes) diff --git a/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/LICENSE b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/LICENSE new file mode 100644 index 0000000..e859590 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/__init__.py b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/__init__.py new file mode 100644 index 0000000..5b9f247 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/__init__.py @@ -0,0 +1,6 @@ +"""A lil' TOML parser.""" + +__all__ = ("loads", "load", "TOMLDecodeError") +__version__ = "1.2.0" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT + +from ._parser import TOMLDecodeError, load, loads diff --git a/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_parser.py b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_parser.py new file mode 100644 index 0000000..156848f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_parser.py @@ -0,0 +1,650 @@ +import string +import warnings +from types import MappingProxyType +from typing import IO, Any, Callable, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple + +from ._re import ( + RE_DATETIME, + RE_LOCALTIME, + RE_NUMBER, + match_to_datetime, + match_to_localtime, + match_to_number, +) + +ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) + +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. 
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n\r") + +ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ASCII_CTRL - frozenset("\t\n") + +ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS + +TOML_WS = frozenset(" \t") +TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") +BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") +KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS = frozenset(string.hexdigits) + +BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + +# Type annotations +ParseFloat = Callable[[str], Any] +Key = Tuple[str, ...] +Pos = int + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML.""" + + +def load(fp: IO, *, parse_float: ParseFloat = float) -> Dict[str, Any]: + """Parse TOML from a file object.""" + s = fp.read() + if isinstance(s, bytes): + s = s.decode() + else: + warnings.warn( + "Text file object support is deprecated in favor of binary file objects." + ' Use `open("foo.toml", "rb")` to open the file in binary mode.', + DeprecationWarning, + ) + return loads(s, parse_float=parse_float) + + +def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. + src = s.replace("\r\n", "\n") + pos = 0 + out = Output(NestedDict(), Flags()) + header: Key = () + + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = skip_chars(src, pos, TOML_WS) + + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in KEY_INITIAL_CHARS: + pos = key_value_rule(src, pos, out, header, parse_float) + pos = skip_chars(src, pos, TOML_WS) + elif char == "[": + try: + second_char: Optional[str] = src[pos + 1] + except IndexError: + second_char = None + if second_char == "[": + pos, header = create_list_rule(src, pos, out) + else: + pos, header = create_dict_rule(src, pos, out) + pos = skip_chars(src, pos, TOML_WS) + elif char != "#": + raise suffixed_err(src, pos, "Invalid statement") + + # 3. Skip comment + pos = skip_comment(src, pos) + + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise suffixed_err(src, pos, "Expected newline or end of document after a statement") + pos += 1 + + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: Dict[str, dict] = {} + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None: + cont = self._flags + for k in head_key: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + for k in rel_key: + if k in cont: + cont[k]["flags"].add(flag) + else: + cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: Dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + if not isinstance(list_, list): + raise KeyError("An object other than list found behind this key") + list_.append({}) + else: + cont[last_key] = [{}] + + +class Output(NamedTuple): + data: NestedDict + flags: Flags + + +def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: FrozenSet[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise suffixed_err(src, new_pos, f'Expected "{expect!r}"') + + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise suffixed_err(src, pos, f'Found invalid character "{src[pos]!r}"') + return new_pos + + +def skip_comment(src: str, pos: Pos) -> Pos: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char == "#": + return skip_until(src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False) + return pos + + +def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + pos = skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + +def 
create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not declare {key} twice") + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") + + if not src.startswith("]", pos): + raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration') + return pos + 1, key + + +def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") + + if not src.startswith("]]", pos): + raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration') + return pos + 2, key + + +def key_value_rule(src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat) -> Pos: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {abs_key_parent}") + # Containers in the relative path can't be opened with the table syntax after this + out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST) + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") + if key_stem in nest: + raise suffixed_err(src, pos, "Can not overwrite a value") + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def parse_key_value_pair(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Key, Any]: + pos, key = parse_key(src, pos) + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char != "=": + raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair') + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, value = parse_value(src, pos, parse_float) + return pos, key, value + + +def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]: + pos, key_part = parse_key_part(src, pos) + key: Key = (key_part,) + pos = skip_chars(src, pos, TOML_WS) + while True: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, key_part = parse_key_part(src, pos) + key += (key_part,) + pos = skip_chars(src, pos, TOML_WS) + + +def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char in BARE_KEY_CHARS: + start_pos = pos + pos = skip_chars(src, pos, BARE_KEY_CHARS) + return pos, src[start_pos:pos] + if char == "'": + return 
parse_literal_str(src, pos) + if char == '"': + return parse_one_line_basic_str(src, pos) + raise suffixed_err(src, pos, "Invalid initial character for a key part") + + +def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]: + pos += 1 + return parse_basic_str(src, pos, multiline=False) + + +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]: + pos += 1 + array: list = [] + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = parse_value(src, pos, parse_float) + array.append(val) + pos = skip_comments_and_array_ws(src, pos) + + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise suffixed_err(src, pos, "Unclosed array") + pos += 1 + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + + pos = skip_chars(src, pos, TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") + try: + nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") + if key_stem in nest: + raise suffixed_err(src, pos, f'Duplicate inline table key "{key_stem}"') + nest[key_stem] = value + pos = skip_chars(src, pos, TOML_WS) + c = src[pos : pos + 1] + if c == "}": + return pos + 1, nested_dict.dict + if c != ",": + raise suffixed_err(src, pos, "Unclosed inline table") + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + + +def parse_basic_str_escape( # noqa: C901 + src: str, pos: Pos, *, multiline: bool = False +) -> Tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. 
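A quick sketch of the two container rules above, again via the stdlib tomllib, which shares this code:

    import tomllib

    # parse_array permits newlines, comments and a trailing comma between
    # values (via skip_comments_and_array_ws).
    assert tomllib.loads("xs = [\n    1,  # first\n    2,\n]")["xs"] == [1, 2]

    # parse_inline_table builds a plain dict; key_value_rule later marks it
    # FROZEN so following statements cannot extend it.
    assert tomllib.loads("point = { x = 1, y = 2 }")["point"] == {"x": 1, "y": 2}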
+ if escape_id != "\\\n": + pos = skip_chars(src, pos, TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') + pos += 1 + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return parse_hex_char(src, pos, 8) + try: + return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + if len(escape_id) != 2: + raise suffixed_err(src, pos, "Unterminated string") + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') + + +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]: + return parse_basic_str_escape(src, pos, multiline=True) + + +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + raise suffixed_err(src, pos, "Invalid hex value") + pos += hex_len + hex_int = int(hex_str, 16) + if not is_unicode_scalar_value(hex_int): + raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") + return pos, chr(hex_int) + + +def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = skip_until(src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + + if literal: + delim = "'" + end_pos = skip_until( + src, + pos, + "'''", + error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = parse_basic_str(src, pos, multiline=True) + + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
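The escape and delimiter rules above in action (stdlib tomllib again):

    import tomllib

    # \u consumes exactly four hex digits and must name a Unicode scalar
    # value (parse_hex_char); \U consumes eight.
    assert tomllib.loads(r'e = "caf\u00E9"')["e"] == "café"

    # A line-ending backslash swallows the newline and any following
    # whitespace (the multiline escape branch above).
    assert tomllib.loads('s = """one \\\n      two"""')["s"] == "one two"

    # Up to two quotes may sit flush against the closing delimiter, which is
    # what the "at maximum two extra apostrophes/quotes" logic preserves.
    assert tomllib.loads('q = """"x""""')["q"] == '"x"'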
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]: + if multiline: + error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape_multiline + else: + error_on = ILLEGAL_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise suffixed_err(src, pos, "Unterminated string") + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise suffixed_err(src, pos, f'Illegal character "{char!r}"') + pos += 1 + + +def parse_value(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Any]: # noqa: C901 + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return parse_multiline_str(src, pos, literal=False) + return parse_one_line_basic_str(src, pos) + + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return parse_multiline_str(src, pos, literal=True) + return parse_literal_str(src, pos) + + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + + # Dates and times + datetime_match = RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = match_to_datetime(datetime_match) + except ValueError: + raise suffixed_err(src, pos, "Invalid date or datetime") + return datetime_match.end(), datetime_obj + localtime_match = RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), match_to_localtime(localtime_match) + + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. 
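And the parse_value dispatch order: datetimes are matched before numbers, and inf/nan fall through to the special-float branch. A sketch (the datetime conversions come from _re.py, added below in this diff):

    from datetime import date, timedelta, timezone
    from decimal import Decimal

    import tomllib

    data = tomllib.loads(
        "flag = true\nday = 1988-10-27\nwhen = 1988-10-27 01:02:03+02:00"
    )
    assert data["flag"] is True
    assert data["day"] == date(1988, 10, 27)
    assert data["when"].tzinfo == timezone(timedelta(hours=2))  # via cached_tz

    # Numbers are only tried after dates; parse_float swaps the float type
    # while integers stay int.
    more = tomllib.loads("a = 0x10\nb = -inf", parse_float=Decimal)
    assert more["a"] == 16 and more["b"] == Decimal("-Infinity")

    # suffixed_err reports 1-based line/column coordinates.
    try:
        tomllib.loads("x = ?")
    except tomllib.TOMLDecodeError as exc:
        print(exc)  # e.g. Invalid value (at line 1, column 5)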
+    number_match = RE_NUMBER.match(src, pos)
+    if number_match:
+        return number_match.end(), match_to_number(number_match, parse_float)
+
+    # Arrays
+    if char == "[":
+        return parse_array(src, pos, parse_float)
+
+    # Inline tables
+    if char == "{":
+        return parse_inline_table(src, pos, parse_float)
+
+    # Special floats
+    first_three = src[pos : pos + 3]
+    if first_three in {"inf", "nan"}:
+        return pos + 3, parse_float(first_three)
+    first_four = src[pos : pos + 4]
+    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+        return pos + 4, parse_float(first_four)
+
+    raise suffixed_err(src, pos, "Invalid value")
+
+
+def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+    """Return a `TOMLDecodeError` where error message is suffixed with
+    coordinates in source."""
+
+    def coord_repr(src: str, pos: Pos) -> str:
+        if pos >= len(src):
+            return "end of document"
+        line = src.count("\n", 0, pos) + 1
+        if line == 1:
+            column = pos + 1
+        else:
+            column = pos - src.rindex("\n", 0, pos)
+        return f"line {line}, column {column}"
+
+    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
+
+
+def is_unicode_scalar_value(codepoint: int) -> bool:
+    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
diff --git a/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_re.py b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_re.py
new file mode 100644
index 0000000..c8b06f0
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/_re.py
@@ -0,0 +1,100 @@
+import re
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import lru_cache
+from typing import TYPE_CHECKING, Any, Optional, Union
+
+if TYPE_CHECKING:
+    from tomli._parser import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
+
+RE_NUMBER = re.compile(
+    r"""
+0
+(?:
+    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
+    |
+    b[01](?:_?[01])*                 # bin
+    |
+    o[0-7](?:_?[0-7])*               # oct
+)
+|
+[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
+(?P<floatpart>
+    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
+    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
+)
+""",
+    flags=re.VERBOSE,
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+    fr"""
+([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
+(?:
+    [T ]
+    {_TIME_RE_STR}
+    (?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
+)?
+""",
+    flags=re.VERBOSE,
+)
+
+
+def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
+    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+    Raises ValueError if the match does not correspond to a valid date
+    or datetime.
+ """ + ( + year_str, + month_str, + day_str, + hour_str, + minute_str, + sec_str, + micros_str, + zulu_time, + offset_sign_str, + offset_hour_str, + offset_minute_str, + ) = match.groups() + year, month, day = int(year_str), int(month_str), int(day_str) + if hour_str is None: + return date(year, month, day) + hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + if offset_sign_str: + tz: Optional[tzinfo] = cached_tz(offset_hour_str, offset_minute_str, offset_sign_str) + elif zulu_time: + tz = timezone.utc + else: # local date-time + tz = None + return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) + + +@lru_cache(maxsize=None) +def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def match_to_localtime(match: "re.Match") -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) diff --git a/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/py.typed b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/py.typed new file mode 100644 index 0000000..7632ecf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_vendored/tomli/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 diff --git a/myenv/lib/python3.9/site-packages/isort/_version.py b/myenv/lib/python3.9/site-packages/isort/_version.py new file mode 100644 index 0000000..0561ca3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/_version.py @@ -0,0 +1 @@ +__version__ = "5.10.1" diff --git a/myenv/lib/python3.9/site-packages/isort/api.py b/myenv/lib/python3.9/site-packages/isort/api.py new file mode 100644 index 0000000..636f117 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/api.py @@ -0,0 +1,651 @@ +__all__ = ( + "ImportKey", + "check_code_string", + "check_file", + "check_stream", + "find_imports_in_code", + "find_imports_in_file", + "find_imports_in_paths", + "find_imports_in_stream", + "place_module", + "place_module_with_reason", + "sort_code_string", + "sort_file", + "sort_stream", +) + +import contextlib +import shutil +import sys +from enum import Enum +from io import StringIO +from itertools import chain +from pathlib import Path +from typing import Any, Iterator, Optional, Set, TextIO, Union, cast +from warnings import warn + +from isort import core + +from . import files, identify, io +from .exceptions import ( + ExistingSyntaxErrors, + FileSkipComment, + FileSkipSetting, + IntroducedSyntaxErrors, +) +from .format import ask_whether_to_apply_changes_to_file, create_terminal_printer, show_unified_diff +from .io import Empty, File +from .place import module as place_module # noqa: F401 +from .place import module_with_reason as place_module_with_reason # noqa: F401 +from .settings import CYTHON_EXTENSIONS, DEFAULT_CONFIG, Config + + +class ImportKey(Enum): + """Defines how to key an individual import, generally for deduping. 
+ + Import keys are defined from less to more specific: + + from x.y import z as a + ______| | | | + | | | | + PACKAGE | | | + ________| | | + | | | + MODULE | | + _________________| | + | | + ATTRIBUTE | + ______________________| + | + ALIAS + """ + + PACKAGE = 1 + MODULE = 2 + ATTRIBUTE = 3 + ALIAS = 4 + + +def sort_code_string( + code: str, + extension: Optional[str] = None, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + disregard_skip: bool = False, + show_diff: Union[bool, TextIO] = False, + **config_kwargs: Any, +) -> str: + """Sorts any imports within the provided code string, returning a new string with them sorted. + + - **code**: The string of code with imports that need to be sorted. + - **extension**: The file extension that contains imports. Defaults to filename extension or py. + - **config**: The config object to use when sorting imports. + - **file_path**: The disk location where the code string was pulled from. + - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file. + - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a + TextIO stream is provided results will be written to it, otherwise no diff will be computed. + - ****config_kwargs**: Any config modifications. + """ + input_stream = StringIO(code) + output_stream = StringIO() + config = _config(path=file_path, config=config, **config_kwargs) + sort_stream( + input_stream, + output_stream, + extension=extension, + config=config, + file_path=file_path, + disregard_skip=disregard_skip, + show_diff=show_diff, + ) + output_stream.seek(0) + return output_stream.read() + + +def check_code_string( + code: str, + show_diff: Union[bool, TextIO] = False, + extension: Optional[str] = None, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + disregard_skip: bool = False, + **config_kwargs: Any, +) -> bool: + """Checks the order, format, and categorization of imports within the provided code string. + Returns `True` if everything is correct, otherwise `False`. + + - **code**: The string of code with imports that need to be sorted. + - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a + TextIO stream is provided results will be written to it, otherwise no diff will be computed. + - **extension**: The file extension that contains imports. Defaults to filename extension or py. + - **config**: The config object to use when sorting imports. + - **file_path**: The disk location where the code string was pulled from. + - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file. + - ****config_kwargs**: Any config modifications. + """ + config = _config(path=file_path, config=config, **config_kwargs) + return check_stream( + StringIO(code), + show_diff=show_diff, + extension=extension, + config=config, + file_path=file_path, + disregard_skip=disregard_skip, + ) + + +def sort_stream( + input_stream: TextIO, + output_stream: TextIO, + extension: Optional[str] = None, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + disregard_skip: bool = False, + show_diff: Union[bool, TextIO] = False, + raise_on_skip: bool = True, + **config_kwargs: Any, +) -> bool: + """Sorts any imports within the provided code stream, outputs to the provided output stream. + Returns `True` if anything is modified from the original input stream, otherwise `False`. + + - **input_stream**: The stream of code with imports that need to be sorted. 
+    - **output_stream**: The stream where sorted imports should be written to.
+    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
+    - **config**: The config object to use when sorting imports.
+    - **file_path**: The disk location where the code string was pulled from.
+    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
+    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
+      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
+    - ****config_kwargs**: Any config modifications.
+    """
+    extension = extension or (file_path and file_path.suffix.lstrip(".")) or "py"
+    if show_diff:
+        _output_stream = StringIO()
+        _input_stream = StringIO(input_stream.read())
+        changed = sort_stream(
+            input_stream=_input_stream,
+            output_stream=_output_stream,
+            extension=extension,
+            config=config,
+            file_path=file_path,
+            disregard_skip=disregard_skip,
+            raise_on_skip=raise_on_skip,
+            **config_kwargs,
+        )
+        _output_stream.seek(0)
+        _input_stream.seek(0)
+        show_unified_diff(
+            file_input=_input_stream.read(),
+            file_output=_output_stream.read(),
+            file_path=file_path,
+            output=output_stream if show_diff is True else cast(TextIO, show_diff),
+            color_output=config.color_output,
+        )
+        return changed
+
+    config = _config(path=file_path, config=config, **config_kwargs)
+    content_source = str(file_path or "Passed in content")
+    if not disregard_skip and file_path and config.is_skipped(file_path):
+        raise FileSkipSetting(content_source)
+
+    _internal_output = output_stream
+
+    if config.atomic:
+        try:
+            file_content = input_stream.read()
+            compile(file_content, content_source, "exec", 0, 1)
+        except SyntaxError:
+            if extension not in CYTHON_EXTENSIONS:
+                raise ExistingSyntaxErrors(content_source)
+            if config.verbose:
+                warn(
+                    f"{content_source} Python AST errors found but ignored due to Cython extension"
+                )
+        input_stream = StringIO(file_content)
+
+    if not output_stream.readable():
+        _internal_output = StringIO()
+
+    try:
+        changed = core.process(
+            input_stream,
+            _internal_output,
+            extension=extension,
+            config=config,
+            raise_on_skip=raise_on_skip,
+        )
+    except FileSkipComment:
+        raise FileSkipComment(content_source)
+
+    if config.atomic:
+        _internal_output.seek(0)
+        try:
+            compile(_internal_output.read(), content_source, "exec", 0, 1)
+            _internal_output.seek(0)
+        except SyntaxError:  # pragma: no cover
+            if extension not in CYTHON_EXTENSIONS:
+                raise IntroducedSyntaxErrors(content_source)
+            if config.verbose:
+                warn(
+                    f"{content_source} Python AST errors found but ignored due to Cython extension"
+                )
+        if _internal_output != output_stream:
+            output_stream.write(_internal_output.read())
+
+    return changed
+
+
+def check_stream(
+    input_stream: TextIO,
+    show_diff: Union[bool, TextIO] = False,
+    extension: Optional[str] = None,
+    config: Config = DEFAULT_CONFIG,
+    file_path: Optional[Path] = None,
+    disregard_skip: bool = False,
+    **config_kwargs: Any,
+) -> bool:
+    """Checks any imports within the provided code stream, returning `False` if any unsorted or
+    incorrectly imported modules are found or `True` if no problems are identified.
+
+    - **input_stream**: The stream of code with imports that need to be sorted.
+    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
+      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
+    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
+    - **config**: The config object to use when sorting imports.
+    - **file_path**: The disk location where the code string was pulled from.
+    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
+    - ****config_kwargs**: Any config modifications.
+    """
+    config = _config(path=file_path, config=config, **config_kwargs)
+
+    if show_diff:
+        input_stream = StringIO(input_stream.read())
+
+    changed: bool = sort_stream(
+        input_stream=input_stream,
+        output_stream=Empty,
+        extension=extension,
+        config=config,
+        file_path=file_path,
+        disregard_skip=disregard_skip,
+    )
+    printer = create_terminal_printer(
+        color=config.color_output, error=config.format_error, success=config.format_success
+    )
+    if not changed:
+        if config.verbose and not config.only_modified:
+            printer.success(f"{file_path or ''} Everything Looks Good!")
+        return True
+
+    printer.error(f"{file_path or ''} Imports are incorrectly sorted and/or formatted.")
+    if show_diff:
+        output_stream = StringIO()
+        input_stream.seek(0)
+        file_contents = input_stream.read()
+        sort_stream(
+            input_stream=StringIO(file_contents),
+            output_stream=output_stream,
+            extension=extension,
+            config=config,
+            file_path=file_path,
+            disregard_skip=disregard_skip,
+        )
+        output_stream.seek(0)
+
+        show_unified_diff(
+            file_input=file_contents,
+            file_output=output_stream.read(),
+            file_path=file_path,
+            output=None if show_diff is True else cast(TextIO, show_diff),
+            color_output=config.color_output,
+        )
+    return False
+
+
+def check_file(
+    filename: Union[str, Path],
+    show_diff: Union[bool, TextIO] = False,
+    config: Config = DEFAULT_CONFIG,
+    file_path: Optional[Path] = None,
+    disregard_skip: bool = True,
+    extension: Optional[str] = None,
+    **config_kwargs: Any,
+) -> bool:
+    """Checks any imports within the provided file, returning `False` if any unsorted or
+    incorrectly imported modules are found or `True` if no problems are identified.
+
+    - **filename**: The name or Path of the file to check.
+    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
+      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
+    - **config**: The config object to use when sorting imports.
+    - **file_path**: The disk location where the code string was pulled from.
+    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
+    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
+    - ****config_kwargs**: Any config modifications.
+    """
+    file_config: Config = config
+
+    if "config_trie" in config_kwargs:
+        config_trie = config_kwargs.pop("config_trie", None)
+        if config_trie:
+            config_info = config_trie.search(filename)
+            if config.verbose:
+                print(f"{config_info[0]} used for file {filename}")
+
+            file_config = Config(**config_info[1])
+
+    with io.File.read(filename) as source_file:
+        return check_stream(
+            source_file.stream,
+            show_diff=show_diff,
+            extension=extension,
+            config=file_config,
+            file_path=file_path or source_file.path,
+            disregard_skip=disregard_skip,
+            **config_kwargs,
+        )
+
+
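The string, stream and file checkers above all funnel into the same sort_stream pass. A usage sketch, assuming the installed isort package, whose top-level namespace re-exports sort_code_string and check_code_string as isort.code and isort.check_code:

    import isort

    messy = "import sys\nimport os\n"
    clean = isort.code(messy)          # sort_code_string via its public alias
    assert clean == "import os\nimport sys\n"

    assert isort.check_code(clean)     # True when nothing would change
    assert not isort.check_code(messy)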
+def _tmp_file(source_file: File) -> Path:
+    return source_file.path.with_suffix(source_file.path.suffix + ".isorted")
+
+
+@contextlib.contextmanager
+def _in_memory_output_stream_context() -> Iterator[TextIO]:
+    yield StringIO(newline=None)
+
+
+@contextlib.contextmanager
+def _file_output_stream_context(filename: Union[str, Path], source_file: File) -> Iterator[TextIO]:
+    tmp_file = _tmp_file(source_file)
+    with tmp_file.open("w+", encoding=source_file.encoding, newline="") as output_stream:
+        shutil.copymode(filename, tmp_file)
+        yield output_stream
+
+
+def sort_file(
+    filename: Union[str, Path],
+    extension: Optional[str] = None,
+    config: Config = DEFAULT_CONFIG,
+    file_path: Optional[Path] = None,
+    disregard_skip: bool = True,
+    ask_to_apply: bool = False,
+    show_diff: Union[bool, TextIO] = False,
+    write_to_stdout: bool = False,
+    output: Optional[TextIO] = None,
+    **config_kwargs: Any,
+) -> bool:
+    """Sorts and formats any groups of imports within the provided file or Path.
+    Returns `True` if the file has been changed, otherwise `False`.
+
+    - **filename**: The name or Path of the file to format.
+    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
+    - **config**: The config object to use when sorting imports.
+    - **file_path**: The disk location where the code string was pulled from.
+    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
+    - **ask_to_apply**: If `True`, prompt before applying any changes.
+    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
+      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
+    - **write_to_stdout**: If `True`, write to stdout instead of the input file.
+    - **output**: If a TextIO is provided, results will be written there rather than replacing
+      the original file content.
+    - ****config_kwargs**: Any config modifications.
+ """ + file_config: Config = config + + if "config_trie" in config_kwargs: + config_trie = config_kwargs.pop("config_trie", None) + if config_trie: + config_info = config_trie.search(filename) + if config.verbose: + print(f"{config_info[0]} used for file {filename}") + + file_config = Config(**config_info[1]) + + with io.File.read(filename) as source_file: + actual_file_path = file_path or source_file.path + config = _config(path=actual_file_path, config=file_config, **config_kwargs) + changed: bool = False + try: + if write_to_stdout: + changed = sort_stream( + input_stream=source_file.stream, + output_stream=sys.stdout, + config=config, + file_path=actual_file_path, + disregard_skip=disregard_skip, + extension=extension, + ) + else: + if output is None: + try: + if config.overwrite_in_place: + output_stream_context = _in_memory_output_stream_context() + else: + output_stream_context = _file_output_stream_context( + filename, source_file + ) + with output_stream_context as output_stream: + changed = sort_stream( + input_stream=source_file.stream, + output_stream=output_stream, + config=config, + file_path=actual_file_path, + disregard_skip=disregard_skip, + extension=extension, + ) + output_stream.seek(0) + if changed: + if show_diff or ask_to_apply: + source_file.stream.seek(0) + show_unified_diff( + file_input=source_file.stream.read(), + file_output=output_stream.read(), + file_path=actual_file_path, + output=None + if show_diff is True + else cast(TextIO, show_diff), + color_output=config.color_output, + ) + if show_diff or ( + ask_to_apply + and not ask_whether_to_apply_changes_to_file( + str(source_file.path) + ) + ): + return False + source_file.stream.close() + if config.overwrite_in_place: + output_stream.seek(0) + with source_file.path.open("w") as fs: + shutil.copyfileobj(output_stream, fs) + if changed: + if not config.overwrite_in_place: + tmp_file = _tmp_file(source_file) + tmp_file.replace(source_file.path) + if not config.quiet: + print(f"Fixing {source_file.path}") + finally: + try: # Python 3.8+: use `missing_ok=True` instead of try except. + if not config.overwrite_in_place: # pragma: no branch + tmp_file = _tmp_file(source_file) + tmp_file.unlink() + except FileNotFoundError: + pass # pragma: no cover + else: + changed = sort_stream( + input_stream=source_file.stream, + output_stream=output, + config=config, + file_path=actual_file_path, + disregard_skip=disregard_skip, + extension=extension, + ) + if changed and show_diff: + source_file.stream.seek(0) + output.seek(0) + show_unified_diff( + file_input=source_file.stream.read(), + file_output=output.read(), + file_path=actual_file_path, + output=None if show_diff is True else cast(TextIO, show_diff), + color_output=config.color_output, + ) + source_file.stream.close() + + except ExistingSyntaxErrors: + warn(f"{actual_file_path} unable to sort due to existing syntax errors") + except IntroducedSyntaxErrors: # pragma: no cover + warn(f"{actual_file_path} unable to sort as isort introduces new syntax errors") + + return changed + + +def find_imports_in_code( + code: str, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + unique: Union[bool, ImportKey] = False, + top_only: bool = False, + **config_kwargs: Any, +) -> Iterator[identify.Import]: + """Finds and returns all imports within the provided code string. + + - **code**: The string of code with imports that need to be sorted. + - **config**: The config object to use when sorting imports. 
+ - **file_path**: The disk location where the code string was pulled from. + - **unique**: If True, only the first instance of an import is returned. + - **top_only**: If True, only return imports that occur before the first function or class. + - ****config_kwargs**: Any config modifications. + """ + yield from find_imports_in_stream( + input_stream=StringIO(code), + config=config, + file_path=file_path, + unique=unique, + top_only=top_only, + **config_kwargs, + ) + + +def find_imports_in_stream( + input_stream: TextIO, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + unique: Union[bool, ImportKey] = False, + top_only: bool = False, + _seen: Optional[Set[str]] = None, + **config_kwargs: Any, +) -> Iterator[identify.Import]: + """Finds and returns all imports within the provided code stream. + + - **input_stream**: The stream of code with imports that need to be sorted. + - **config**: The config object to use when sorting imports. + - **file_path**: The disk location where the code string was pulled from. + - **unique**: If True, only the first instance of an import is returned. + - **top_only**: If True, only return imports that occur before the first function or class. + - **_seen**: An optional set of imports already seen. Generally meant only for internal use. + - ****config_kwargs**: Any config modifications. + """ + config = _config(config=config, **config_kwargs) + identified_imports = identify.imports( + input_stream, config=config, file_path=file_path, top_only=top_only + ) + if not unique: + yield from identified_imports + + seen: Set[str] = set() if _seen is None else _seen + for identified_import in identified_imports: + if unique in (True, ImportKey.ALIAS): + key = identified_import.statement() + elif unique == ImportKey.ATTRIBUTE: + key = f"{identified_import.module}.{identified_import.attribute}" + elif unique == ImportKey.MODULE: + key = identified_import.module + elif unique == ImportKey.PACKAGE: # pragma: no branch # type checking ensures this + key = identified_import.module.split(".")[0] + + if key and key not in seen: + seen.add(key) + yield identified_import + + +def find_imports_in_file( + filename: Union[str, Path], + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + unique: Union[bool, ImportKey] = False, + top_only: bool = False, + **config_kwargs: Any, +) -> Iterator[identify.Import]: + """Finds and returns all imports within the provided source file. + + - **filename**: The name or Path of the file to look for imports in. + - **extension**: The file extension that contains imports. Defaults to filename extension or py. + - **config**: The config object to use when sorting imports. + - **file_path**: The disk location where the code string was pulled from. + - **unique**: If True, only the first instance of an import is returned. + - **top_only**: If True, only return imports that occur before the first function or class. + - ****config_kwargs**: Any config modifications. 
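For the find_imports_in_* family, the unique parameter accepts the ImportKey levels documented earlier. A sketch, assuming the top-level isort namespace re-exports find_imports_in_code and ImportKey as in isort 5:

    import isort
    from isort import ImportKey

    code = "import os\nfrom os import path\nfrom sys import argv\n"

    # PACKAGE keys on module.split(".")[0], so "import os" and
    # "from os import path" dedupe to a single hit.
    found = list(isort.find_imports_in_code(code, unique=ImportKey.PACKAGE))
    assert [imp.module for imp in found] == ["os", "sys"]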
+ """ + with io.File.read(filename) as source_file: + yield from find_imports_in_stream( + input_stream=source_file.stream, + config=config, + file_path=file_path or source_file.path, + unique=unique, + top_only=top_only, + **config_kwargs, + ) + + +def find_imports_in_paths( + paths: Iterator[Union[str, Path]], + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + unique: Union[bool, ImportKey] = False, + top_only: bool = False, + **config_kwargs: Any, +) -> Iterator[identify.Import]: + """Finds and returns all imports within the provided source paths. + + - **paths**: A collection of paths to recursively look for imports within. + - **extension**: The file extension that contains imports. Defaults to filename extension or py. + - **config**: The config object to use when sorting imports. + - **file_path**: The disk location where the code string was pulled from. + - **unique**: If True, only the first instance of an import is returned. + - **top_only**: If True, only return imports that occur before the first function or class. + - ****config_kwargs**: Any config modifications. + """ + config = _config(config=config, **config_kwargs) + seen: Optional[Set[str]] = set() if unique else None + yield from chain( + *( + find_imports_in_file( + file_name, unique=unique, config=config, top_only=top_only, _seen=seen + ) + for file_name in files.find(map(str, paths), config, [], []) + ) + ) + + +def _config( + path: Optional[Path] = None, config: Config = DEFAULT_CONFIG, **config_kwargs: Any +) -> Config: + if path and ( + config is DEFAULT_CONFIG + and "settings_path" not in config_kwargs + and "settings_file" not in config_kwargs + ): + config_kwargs["settings_path"] = path + + if config_kwargs: + if config is not DEFAULT_CONFIG: + raise ValueError( + "You can either specify custom configuration options using kwargs or " + "passing in a Config object. Not Both!" + ) + + config = Config(**config_kwargs) + + return config diff --git a/myenv/lib/python3.9/site-packages/isort/comments.py b/myenv/lib/python3.9/site-packages/isort/comments.py new file mode 100644 index 0000000..55c3da6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/comments.py @@ -0,0 +1,32 @@ +from typing import List, Optional, Tuple + + +def parse(line: str) -> Tuple[str, str]: + """Parses import lines for comments and returns back the + import statement and the associated comment. + """ + comment_start = line.find("#") + if comment_start != -1: + return (line[:comment_start], line[comment_start + 1 :].strip()) + + return (line, "") + + +def add_to_line( + comments: Optional[List[str]], + original_string: str = "", + removed: bool = False, + comment_prefix: str = "", +) -> str: + """Returns a string with comments added if removed is not set.""" + if removed: + return parse(original_string)[0] + + if not comments: + return original_string + + unique_comments: List[str] = [] + for comment in comments: + if comment not in unique_comments: + unique_comments.append(comment) + return f"{parse(original_string)[0]}{comment_prefix} {'; '.join(unique_comments)}" diff --git a/myenv/lib/python3.9/site-packages/isort/core.py b/myenv/lib/python3.9/site-packages/isort/core.py new file mode 100644 index 0000000..c0bcd36 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/core.py @@ -0,0 +1,476 @@ +import textwrap +from io import StringIO +from itertools import chain +from typing import List, TextIO, Union + +import isort.literal +from isort.settings import DEFAULT_CONFIG, Config + +from . 
import output, parse +from .exceptions import FileSkipComment +from .format import format_natural, remove_whitespace +from .settings import FILE_SKIP_COMMENTS + +CIMPORT_IDENTIFIERS = ("cimport ", "cimport*", "from.cimport") +IMPORT_START_IDENTIFIERS = ("from ", "from.import", "import ", "import*") + CIMPORT_IDENTIFIERS +DOCSTRING_INDICATORS = ('"""', "'''") +COMMENT_INDICATORS = DOCSTRING_INDICATORS + ("'", '"', "#") +CODE_SORT_COMMENTS = ( + "# isort: list", + "# isort: dict", + "# isort: set", + "# isort: unique-list", + "# isort: tuple", + "# isort: unique-tuple", + "# isort: assignments", +) + + +def process( + input_stream: TextIO, + output_stream: TextIO, + extension: str = "py", + raise_on_skip: bool = True, + config: Config = DEFAULT_CONFIG, +) -> bool: + """Parses stream identifying sections of contiguous imports and sorting them + + Code with unsorted imports is read from the provided `input_stream`, sorted and then + outputted to the specified `output_stream`. + + - `input_stream`: Text stream with unsorted import sections. + - `output_stream`: Text stream to output sorted inputs into. + - `config`: Config settings to use when sorting imports. Defaults settings. + - *Default*: `isort.settings.DEFAULT_CONFIG`. + - `extension`: The file extension or file extension rules that should be used. + - *Default*: `"py"`. + - *Choices*: `["py", "pyi", "pyx"]`. + + Returns `True` if there were changes that needed to be made (errors present) from what + was provided in the input_stream, otherwise `False`. + """ + line_separator: str = config.line_ending + add_imports: List[str] = [format_natural(addition) for addition in config.add_imports] + import_section: str = "" + next_import_section: str = "" + next_cimports: bool = False + in_quote: str = "" + was_in_quote: bool = False + first_comment_index_start: int = -1 + first_comment_index_end: int = -1 + contains_imports: bool = False + in_top_comment: bool = False + first_import_section: bool = True + indent: str = "" + isort_off: bool = False + skip_file: bool = False + code_sorting: Union[bool, str] = False + code_sorting_section: str = "" + code_sorting_indent: str = "" + cimports: bool = False + made_changes: bool = False + stripped_line: str = "" + end_of_file: bool = False + verbose_output: List[str] = [] + + if config.float_to_top: + new_input = "" + current = "" + isort_off = False + for line in chain(input_stream, (None,)): + if isort_off and line is not None: + if line == "# isort: on\n": + isort_off = False + new_input += line + elif line in ("# isort: split\n", "# isort: off\n", None) or str(line).endswith( + "# isort: split\n" + ): + if line == "# isort: off\n": + isort_off = True + if current: + if add_imports: + add_line_separator = line_separator or "\n" + current += add_line_separator + add_line_separator.join(add_imports) + add_imports = [] + parsed = parse.file_contents(current, config=config) + verbose_output += parsed.verbose_output + extra_space = "" + while current and current[-1] == "\n": + extra_space += "\n" + current = current[:-1] + extra_space = extra_space.replace("\n", "", 1) + sorted_output = output.sorted_imports( + parsed, config, extension, import_type="import" + ) + made_changes = made_changes or _has_changed( + before=current, + after=sorted_output, + line_separator=parsed.line_separator, + ignore_whitespace=config.ignore_whitespace, + ) + new_input += sorted_output + new_input += extra_space + current = "" + new_input += line or "" + else: + current += line or "" + + input_stream = StringIO(new_input) + + 
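+    # Main loop: consume the stream one line at a time (a trailing None
+    # sentinel marks end of file), accumulate contiguous import lines into
+    # import_section, and flush each finished section through parse/output
+    # before writing the non-import line through unchanged.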
for index, line in enumerate(chain(input_stream, (None,))): + if line is None: + if index == 0 and not config.force_adds: + return False + + not_imports = True + end_of_file = True + line = "" + if not line_separator: + line_separator = "\n" + + if code_sorting and code_sorting_section: + sorted_code = textwrap.indent( + isort.literal.assignment( + code_sorting_section, + str(code_sorting), + extension, + config=_indented_config(config, indent), + ), + code_sorting_indent, + ) + made_changes = made_changes or _has_changed( + before=code_sorting_section, + after=sorted_code, + line_separator=line_separator, + ignore_whitespace=config.ignore_whitespace, + ) + output_stream.write(sorted_code) + else: + stripped_line = line.strip() + if stripped_line and not line_separator: + line_separator = line[len(line.rstrip()) :].replace(" ", "").replace("\t", "") + + for file_skip_comment in FILE_SKIP_COMMENTS: + if file_skip_comment in line: + if raise_on_skip: + raise FileSkipComment("Passed in content") + isort_off = True + skip_file = True + + if not in_quote: + if stripped_line == "# isort: off": + isort_off = True + elif stripped_line.startswith("# isort: dont-add-imports"): + add_imports = [] + elif stripped_line.startswith("# isort: dont-add-import:"): + import_not_to_add = stripped_line.split("# isort: dont-add-import:", 1)[ + 1 + ].strip() + add_imports = [ + import_to_add + for import_to_add in add_imports + if not import_to_add == import_not_to_add + ] + + if ( + (index == 0 or (index in (1, 2) and not contains_imports)) + and stripped_line.startswith("#") + and stripped_line not in config.section_comments + and stripped_line not in CODE_SORT_COMMENTS + ): + in_top_comment = True + elif in_top_comment and ( + not line.startswith("#") + or stripped_line in config.section_comments + or stripped_line in CODE_SORT_COMMENTS + ): + in_top_comment = False + first_comment_index_end = index - 1 + + was_in_quote = bool(in_quote) + if (not stripped_line.startswith("#") or in_quote) and '"' in line or "'" in line: + char_index = 0 + if first_comment_index_start == -1 and ( + line.startswith('"') or line.startswith("'") + ): + first_comment_index_start = index + while char_index < len(line): + if line[char_index] == "\\": + char_index += 1 + elif in_quote: + if line[char_index : char_index + len(in_quote)] == in_quote: + in_quote = "" + if first_comment_index_end < first_comment_index_start: + first_comment_index_end = index + elif line[char_index] in ("'", '"'): + long_quote = line[char_index : char_index + 3] + if long_quote in ('"""', "'''"): + in_quote = long_quote + char_index += 2 + else: + in_quote = line[char_index] + elif line[char_index] == "#": + break + char_index += 1 + + not_imports = bool(in_quote) or was_in_quote or in_top_comment or isort_off + if not (in_quote or was_in_quote or in_top_comment): + if isort_off: + if not skip_file and stripped_line == "# isort: on": + isort_off = False + elif stripped_line.endswith("# isort: split"): + not_imports = True + elif stripped_line in CODE_SORT_COMMENTS: + code_sorting = stripped_line.split("isort: ")[1].strip() + code_sorting_indent = line[: -len(line.lstrip())] + not_imports = True + elif code_sorting: + if not stripped_line: + sorted_code = textwrap.indent( + isort.literal.assignment( + code_sorting_section, + str(code_sorting), + extension, + config=_indented_config(config, indent), + ), + code_sorting_indent, + ) + made_changes = made_changes or _has_changed( + before=code_sorting_section, + after=sorted_code, + 
line_separator=line_separator, + ignore_whitespace=config.ignore_whitespace, + ) + output_stream.write(sorted_code) + not_imports = True + code_sorting = False + code_sorting_section = "" + code_sorting_indent = "" + else: + code_sorting_section += line + line = "" + elif ( + stripped_line in config.section_comments + or stripped_line in config.section_comments_end + ): + if import_section and not contains_imports: + output_stream.write(import_section) + import_section = line + not_imports = False + else: + import_section += line + indent = line[: -len(line.lstrip())] + elif not (stripped_line or contains_imports): + not_imports = True + elif ( + not stripped_line + or stripped_line.startswith("#") + and (not indent or indent + line.lstrip() == line) + and not config.treat_all_comments_as_code + and stripped_line not in config.treat_comments_as_code + ): + import_section += line + elif stripped_line.startswith(IMPORT_START_IDENTIFIERS): + new_indent = line[: -len(line.lstrip())] + import_statement = line + stripped_line = line.strip().split("#")[0] + while stripped_line.endswith("\\") or ( + "(" in stripped_line and ")" not in stripped_line + ): + if stripped_line.endswith("\\"): + while stripped_line and stripped_line.endswith("\\"): + line = input_stream.readline() + stripped_line = line.strip().split("#")[0] + import_statement += line + else: + while ")" not in stripped_line: + line = input_stream.readline() + stripped_line = line.strip().split("#")[0] + import_statement += line + + if ( + import_statement.lstrip().startswith("from") + and "import" not in import_statement + ): + line = import_statement + not_imports = True + else: + did_contain_imports = contains_imports + contains_imports = True + + cimport_statement: bool = False + if ( + import_statement.lstrip().startswith(CIMPORT_IDENTIFIERS) + or " cimport " in import_statement + or " cimport*" in import_statement + or " cimport(" in import_statement + or ".cimport" in import_statement + ): + cimport_statement = True + + if cimport_statement != cimports or ( + new_indent != indent + and import_section + and (not did_contain_imports or len(new_indent) < len(indent)) + ): + indent = new_indent + if import_section: + next_cimports = cimport_statement + next_import_section = import_statement + import_statement = "" + not_imports = True + line = "" + else: + cimports = cimport_statement + else: + if new_indent != indent: + if import_section and did_contain_imports: + import_statement = indent + import_statement.lstrip() + else: + indent = new_indent + import_section += import_statement + else: + not_imports = True + + if not_imports: + raw_import_section: str = import_section + if ( + add_imports + and (stripped_line or end_of_file) + and not config.append_only + and not in_top_comment + and not was_in_quote + and not import_section + and not line.lstrip().startswith(COMMENT_INDICATORS) + and not (line.rstrip().endswith(DOCSTRING_INDICATORS) and "=" not in line) + ): + add_line_separator = line_separator or "\n" + import_section = add_line_separator.join(add_imports) + add_line_separator + if end_of_file and index != 0: + output_stream.write(add_line_separator) + contains_imports = True + add_imports = [] + + if next_import_section and not import_section: # pragma: no cover + raw_import_section = import_section = next_import_section + next_import_section = "" + + if import_section: + if add_imports and (contains_imports or not config.append_only) and not indent: + import_section = ( + line_separator.join(add_imports) + line_separator + 
import_section + ) + contains_imports = True + add_imports = [] + + if not indent: + import_section += line + raw_import_section += line + if not contains_imports: + output_stream.write(import_section) + + else: + leading_whitespace = import_section[: -len(import_section.lstrip())] + trailing_whitespace = import_section[len(import_section.rstrip()) :] + if first_import_section and not import_section.lstrip( + line_separator + ).startswith(COMMENT_INDICATORS): + import_section = import_section.lstrip(line_separator) + raw_import_section = raw_import_section.lstrip(line_separator) + first_import_section = False + + if indent: + import_section = "".join( + line[len(indent) :] for line in import_section.splitlines(keepends=True) + ) + + parsed_content = parse.file_contents(import_section, config=config) + verbose_output += parsed_content.verbose_output + + sorted_import_section = output.sorted_imports( + parsed_content, + _indented_config(config, indent), + extension, + import_type="cimport" if cimports else "import", + ) + if not (import_section.strip() and not sorted_import_section): + if indent: + sorted_import_section = ( + leading_whitespace + + textwrap.indent(sorted_import_section, indent).strip() + + trailing_whitespace + ) + + made_changes = made_changes or _has_changed( + before=raw_import_section, + after=sorted_import_section, + line_separator=line_separator, + ignore_whitespace=config.ignore_whitespace, + ) + output_stream.write(sorted_import_section) + if not line and not indent and next_import_section: + output_stream.write(line_separator) + + if indent: + output_stream.write(line) + if not next_import_section: + indent = "" + + if next_import_section: + cimports = next_cimports + contains_imports = True + else: + contains_imports = False + import_section = next_import_section + next_import_section = "" + else: + output_stream.write(line) + not_imports = False + + if stripped_line and not in_quote and not import_section and not next_import_section: + if stripped_line == "yield": + while not stripped_line or stripped_line == "yield": + new_line = input_stream.readline() + if not new_line: + break + + output_stream.write(new_line) + stripped_line = new_line.strip().split("#")[0] + + if stripped_line.startswith("raise") or stripped_line.startswith("yield"): + while stripped_line.endswith("\\"): + new_line = input_stream.readline() + if not new_line: + break + + output_stream.write(new_line) + stripped_line = new_line.strip().split("#")[0] + + if made_changes and config.only_modified: + for output_str in verbose_output: + print(output_str) + + return made_changes + + +def _indented_config(config: Config, indent: str) -> Config: + if not indent: + return config + + return Config( + config=config, + line_length=max(config.line_length - len(indent), 0), + wrap_length=max(config.wrap_length - len(indent), 0), + lines_after_imports=1, + import_headings=config.import_headings if config.indented_import_headings else {}, + import_footers=config.import_footers if config.indented_import_headings else {}, + ) + + +def _has_changed(before: str, after: str, line_separator: str, ignore_whitespace: bool) -> bool: + if ignore_whitespace: + return ( + remove_whitespace(before, line_separator=line_separator).strip() + != remove_whitespace(after, line_separator=line_separator).strip() + ) + return before.strip() != after.strip() diff --git a/myenv/lib/python3.9/site-packages/isort/deprecated/__init__.py b/myenv/lib/python3.9/site-packages/isort/deprecated/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/isort/deprecated/finders.py b/myenv/lib/python3.9/site-packages/isort/deprecated/finders.py new file mode 100644 index 0000000..52bd7cf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/deprecated/finders.py @@ -0,0 +1,415 @@ +"""Finders try to find right section for passed module name""" +import importlib.machinery +import inspect +import os +import os.path +import re +import sys +import sysconfig +from abc import ABCMeta, abstractmethod +from contextlib import contextmanager +from fnmatch import fnmatch +from functools import lru_cache +from glob import glob +from pathlib import Path +from typing import Dict, Iterable, Iterator, List, Optional, Pattern, Sequence, Tuple, Type + +from isort import sections +from isort.settings import KNOWN_SECTION_MAPPING, Config +from isort.utils import exists_case_sensitive + +try: + from pipreqs import pipreqs # type: ignore + +except ImportError: + pipreqs = None + +try: + from pip_api import parse_requirements # type: ignore + +except ImportError: + parse_requirements = None + +try: + from requirementslib import Pipfile # type: ignore + +except ImportError: + Pipfile = None + + +@contextmanager +def chdir(path: str) -> Iterator[None]: + """Context manager for changing dir and restoring previous workdir after exit.""" + curdir = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curdir) + + +class BaseFinder(metaclass=ABCMeta): + def __init__(self, config: Config) -> None: + self.config = config + + @abstractmethod + def find(self, module_name: str) -> Optional[str]: + raise NotImplementedError + + +class ForcedSeparateFinder(BaseFinder): + def find(self, module_name: str) -> Optional[str]: + for forced_separate in self.config.forced_separate: + # Ensure all forced_separate patterns will match to end of string + path_glob = forced_separate + if not forced_separate.endswith("*"): + path_glob = "%s*" % forced_separate + + if fnmatch(module_name, path_glob) or fnmatch(module_name, "." 
+ path_glob): + return forced_separate + return None + + +class LocalFinder(BaseFinder): + def find(self, module_name: str) -> Optional[str]: + if module_name.startswith("."): + return "LOCALFOLDER" + return None + + +class KnownPatternFinder(BaseFinder): + def __init__(self, config: Config) -> None: + super().__init__(config) + + self.known_patterns: List[Tuple[Pattern[str], str]] = [] + for placement in reversed(config.sections): + known_placement = KNOWN_SECTION_MAPPING.get(placement, placement).lower() + config_key = f"known_{known_placement}" + known_patterns = list( + getattr(self.config, config_key, self.config.known_other.get(known_placement, [])) + ) + known_patterns = [ + pattern + for known_pattern in known_patterns + for pattern in self._parse_known_pattern(known_pattern) + ] + for known_pattern in known_patterns: + regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$" + self.known_patterns.append((re.compile(regexp), placement)) + + def _parse_known_pattern(self, pattern: str) -> List[str]: + """Expand pattern if identified as a directory and return found sub packages""" + if pattern.endswith(os.path.sep): + patterns = [ + filename + for filename in os.listdir(os.path.join(self.config.directory, pattern)) + if os.path.isdir(os.path.join(self.config.directory, pattern, filename)) + ] + else: + patterns = [pattern] + + return patterns + + def find(self, module_name: str) -> Optional[str]: + # Try to find most specific placement instruction match (if any) + parts = module_name.split(".") + module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1)) + for module_name_to_check in module_names_to_check: + for pattern, placement in self.known_patterns: + if pattern.match(module_name_to_check): + return placement + return None + + +class PathFinder(BaseFinder): + def __init__(self, config: Config, path: str = ".") -> None: + super().__init__(config) + + # restore the original import path (i.e. 
not the path to bin/isort) + root_dir = os.path.abspath(path) + src_dir = f"{root_dir}/src" + self.paths = [root_dir, src_dir] + + # virtual env + self.virtual_env = self.config.virtual_env or os.environ.get("VIRTUAL_ENV") + if self.virtual_env: + self.virtual_env = os.path.realpath(self.virtual_env) + self.virtual_env_src = "" + if self.virtual_env: + self.virtual_env_src = f"{self.virtual_env}/src/" + for venv_path in glob(f"{self.virtual_env}/lib/python*/site-packages"): + if venv_path not in self.paths: + self.paths.append(venv_path) + for nested_venv_path in glob(f"{self.virtual_env}/lib/python*/*/site-packages"): + if nested_venv_path not in self.paths: + self.paths.append(nested_venv_path) + for venv_src_path in glob(f"{self.virtual_env}/src/*"): + if os.path.isdir(venv_src_path): + self.paths.append(venv_src_path) + + # conda + self.conda_env = self.config.conda_env or os.environ.get("CONDA_PREFIX") or "" + if self.conda_env: + self.conda_env = os.path.realpath(self.conda_env) + for conda_path in glob(f"{self.conda_env}/lib/python*/site-packages"): + if conda_path not in self.paths: + self.paths.append(conda_path) + for nested_conda_path in glob(f"{self.conda_env}/lib/python*/*/site-packages"): + if nested_conda_path not in self.paths: + self.paths.append(nested_conda_path) + + # handle case-insensitive paths on windows + self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()["stdlib"]) + if self.stdlib_lib_prefix not in self.paths: + self.paths.append(self.stdlib_lib_prefix) + + # add system paths + for system_path in sys.path[1:]: + if system_path not in self.paths: + self.paths.append(system_path) + + def find(self, module_name: str) -> Optional[str]: + for prefix in self.paths: + package_path = "/".join((prefix, module_name.split(".")[0])) + path_obj = Path(package_path).resolve() + is_module = ( + exists_case_sensitive(package_path + ".py") + or any( + exists_case_sensitive(package_path + ext_suffix) + for ext_suffix in importlib.machinery.EXTENSION_SUFFIXES + ) + or exists_case_sensitive(package_path + "/__init__.py") + ) + is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path) + if is_module or is_package: + if ( + "site-packages" in prefix + or "dist-packages" in prefix + or (self.virtual_env and self.virtual_env_src in prefix) + ): + return sections.THIRDPARTY + if os.path.normcase(prefix) == self.stdlib_lib_prefix: + return sections.STDLIB + if self.conda_env and self.conda_env in prefix: + return sections.THIRDPARTY + for src_path in self.config.src_paths: + if src_path in path_obj.parents and not self.config.is_skipped(path_obj): + return sections.FIRSTPARTY + + if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix): + return sections.STDLIB # pragma: no cover - edge case for one OS. Hard to test. 
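These finder classes are the deprecated predecessors of isort.place; the supported entry points behave like this (a sketch, assuming default settings and that fastapi is installed as a third-party package rather than found on a local src path):

    import isort

    # place_module classifies a module name into an import section.
    assert isort.place_module("os") == "STDLIB"
    assert isort.place_module("fastapi") == "THIRDPARTY"

    section, reason = isort.place_module_with_reason("os")
    print(section, "-", reason)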
+ + return self.config.default_section + return None + + +class ReqsBaseFinder(BaseFinder): + enabled = False + + def __init__(self, config: Config, path: str = ".") -> None: + super().__init__(config) + self.path = path + if self.enabled: + self.mapping = self._load_mapping() + self.names = self._load_names() + + @abstractmethod + def _get_names(self, path: str) -> Iterator[str]: + raise NotImplementedError + + @abstractmethod + def _get_files_from_dir(self, path: str) -> Iterator[str]: + raise NotImplementedError + + @staticmethod + def _load_mapping() -> Optional[Dict[str, str]]: + """Return list of mappings `package_name -> module_name` + + Example: + django-haystack -> haystack + """ + if not pipreqs: + return None + path = os.path.dirname(inspect.getfile(pipreqs)) + path = os.path.join(path, "mapping") + with open(path) as f: + mappings: Dict[str, str] = {} # pypi_name: import_name + for line in f: + import_name, _, pypi_name = line.strip().partition(":") + mappings[pypi_name] = import_name + return mappings + # return dict(tuple(line.strip().split(":")[::-1]) for line in f) + + def _load_names(self) -> List[str]: + """Return list of thirdparty modules from requirements""" + names = [] + for path in self._get_files(): + for name in self._get_names(path): + names.append(self._normalize_name(name)) + return names + + @staticmethod + def _get_parents(path: str) -> Iterator[str]: + prev = "" + while path != prev: + prev = path + yield path + path = os.path.dirname(path) + + def _get_files(self) -> Iterator[str]: + """Return paths to all requirements files""" + path = os.path.abspath(self.path) + if os.path.isfile(path): + path = os.path.dirname(path) + + for path in self._get_parents(path): + yield from self._get_files_from_dir(path) + + def _normalize_name(self, name: str) -> str: + """Convert package name to module name + + Examples: + Django -> django + django-haystack -> django_haystack + Flask-RESTFul -> flask_restful + """ + if self.mapping: + name = self.mapping.get(name.replace("-", "_"), name) + return name.lower().replace("-", "_") + + def find(self, module_name: str) -> Optional[str]: + # required lib not installed yet + if not self.enabled: + return None + + module_name, _sep, _submodules = module_name.partition(".") + module_name = module_name.lower() + if not module_name: + return None + + for name in self.names: + if module_name == name: + return sections.THIRDPARTY + return None + + +class RequirementsFinder(ReqsBaseFinder): + exts = (".txt", ".in") + enabled = bool(parse_requirements) + + def _get_files_from_dir(self, path: str) -> Iterator[str]: + """Return paths to requirements files from passed dir.""" + yield from self._get_files_from_dir_cached(path) + + @classmethod + @lru_cache(maxsize=16) + def _get_files_from_dir_cached(cls, path: str) -> List[str]: + results = [] + + for fname in os.listdir(path): + if "requirements" not in fname: + continue + full_path = os.path.join(path, fname) + + # *requirements*/*.{txt,in} + if os.path.isdir(full_path): + for subfile_name in os.listdir(full_path): + for ext in cls.exts: + if subfile_name.endswith(ext): + results.append(os.path.join(full_path, subfile_name)) + continue + + # *requirements*.{txt,in} + if os.path.isfile(full_path): + for ext in cls.exts: + if fname.endswith(ext): + results.append(full_path) + break + + return results + + def _get_names(self, path: str) -> Iterator[str]: + """Load required packages from path to requirements file""" + yield from self._get_names_cached(path) + + @classmethod + 
@lru_cache(maxsize=16) + def _get_names_cached(cls, path: str) -> List[str]: + result = [] + + with chdir(os.path.dirname(path)): + requirements = parse_requirements(path) + for req in requirements.values(): + if req.name: + result.append(req.name) + + return result + + +class PipfileFinder(ReqsBaseFinder): + enabled = bool(Pipfile) + + def _get_names(self, path: str) -> Iterator[str]: + with chdir(path): + project = Pipfile.load(path) + for req in project.packages: + yield req.name + + def _get_files_from_dir(self, path: str) -> Iterator[str]: + if "Pipfile" in os.listdir(path): + yield path + + +class DefaultFinder(BaseFinder): + def find(self, module_name: str) -> Optional[str]: + return self.config.default_section + + +class FindersManager: + _default_finders_classes: Sequence[Type[BaseFinder]] = ( + ForcedSeparateFinder, + LocalFinder, + KnownPatternFinder, + PathFinder, + PipfileFinder, + RequirementsFinder, + DefaultFinder, + ) + + def __init__( + self, config: Config, finder_classes: Optional[Iterable[Type[BaseFinder]]] = None + ) -> None: + self.verbose: bool = config.verbose + + if finder_classes is None: + finder_classes = self._default_finders_classes + finders: List[BaseFinder] = [] + for finder_cls in finder_classes: + try: + finders.append(finder_cls(config)) + except Exception as exception: + # if one finder fails to instantiate isort can continue using the rest + if self.verbose: + print( + ( + f"{finder_cls.__name__} encountered an error ({exception}) during " + "instantiation and cannot be used" + ) + ) + self.finders: Tuple[BaseFinder, ...] = tuple(finders) + + def find(self, module_name: str) -> Optional[str]: + for finder in self.finders: + try: + section = finder.find(module_name) + if section is not None: + return section + except Exception as exception: + # isort has to be able to keep trying to identify the correct + # import section even if one approach fails + if self.verbose: + print( + f"{finder.__class__.__name__} encountered an error ({exception}) while " + f"trying to identify the {module_name} module" + ) + return None diff --git a/myenv/lib/python3.9/site-packages/isort/exceptions.py b/myenv/lib/python3.9/site-packages/isort/exceptions.py new file mode 100644 index 0000000..6be8240 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/exceptions.py @@ -0,0 +1,197 @@ +"""All isort specific exception classes should be defined here""" +from functools import partial +from pathlib import Path +from typing import Any, Dict, List, Type, Union + +from .profiles import profiles + + +class ISortError(Exception): + """Base isort exception object from which all isort sourced exceptions should inherit""" + + def __reduce__(self): # type: ignore + return (partial(type(self), **self.__dict__), ()) + + +class InvalidSettingsPath(ISortError): + """Raised when a settings path is provided that is neither a valid file or directory""" + + def __init__(self, settings_path: str): + super().__init__( + f"isort was told to use the settings_path: {settings_path} as the base directory or " + "file that represents the starting point of config file discovery, but it does not " + "exist." + ) + self.settings_path = settings_path + + +class ExistingSyntaxErrors(ISortError): + """Raised when isort is told to sort imports within code that has existing syntax errors""" + + def __init__(self, file_path: str): + super().__init__( + f"isort was told to sort imports within code that contains syntax errors: " + f"{file_path}." 
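The finder chain above (forced-separate, local, known patterns, path, Pipfile, requirements, default) is what answers "which section does this import belong to?". A minimal sketch of the same classification through isort's public helper, assuming isort 5 is installed; `place_module` is the supported entry point, while `FindersManager` backs the older `--old-finders` code path mentioned in the CLI options further down:

```python
# Illustrative sketch, not part of the committed diff.
import isort

# Classify a module name into an import section, the same decision the
# finder classes above make internally.
print(isort.place_module("os"))       # STDLIB
print(isort.place_module(".melli"))   # LOCALFOLDER (relative imports)
print(isort.place_module("fastapi"))  # usually THIRDPARTY (site-packages or the default section)
```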
+ ) + self.file_path = file_path + + +class IntroducedSyntaxErrors(ISortError): + """Raised when isort has introduced a syntax error in the process of sorting imports""" + + def __init__(self, file_path: str): + super().__init__( + f"isort introduced syntax errors when attempting to sort the imports contained within " + f"{file_path}." + ) + self.file_path = file_path + + +class FileSkipped(ISortError): + """Should be raised when a file is skipped for any reason""" + + def __init__(self, message: str, file_path: str): + super().__init__(message) + self.message = message + self.file_path = file_path + + +class FileSkipComment(FileSkipped): + """Raised when an entire file is skipped due to a isort skip file comment""" + + def __init__(self, file_path: str, **kwargs: str): + super().__init__( + f"{file_path} contains a file skip comment and was skipped.", file_path=file_path + ) + + +class FileSkipSetting(FileSkipped): + """Raised when an entire file is skipped due to provided isort settings""" + + def __init__(self, file_path: str, **kwargs: str): + super().__init__( + f"{file_path} was skipped as it's listed in 'skip' setting" + " or matches a glob in 'skip_glob' setting", + file_path=file_path, + ) + + +class ProfileDoesNotExist(ISortError): + """Raised when a profile is set by the user that doesn't exist""" + + def __init__(self, profile: str): + super().__init__( + f"Specified profile of {profile} does not exist. " + f"Available profiles: {','.join(profiles)}." + ) + self.profile = profile + + +class SortingFunctionDoesNotExist(ISortError): + """Raised when the specified sorting function isn't available""" + + def __init__(self, sort_order: str, available_sort_orders: List[str]): + super().__init__( + f"Specified sort_order of {sort_order} does not exist. " + f"Available sort_orders: {','.join(available_sort_orders)}." + ) + self.sort_order = sort_order + self.available_sort_orders = available_sort_orders + + +class FormattingPluginDoesNotExist(ISortError): + """Raised when a formatting plugin is set by the user that doesn't exist""" + + def __init__(self, formatter: str): + super().__init__(f"Specified formatting plugin of {formatter} does not exist. ") + self.formatter = formatter + + +class LiteralParsingFailure(ISortError): + """Raised when one of isorts literal sorting comments is used but isort can't parse the + the given data structure. + """ + + def __init__(self, code: str, original_error: Union[Exception, Type[Exception]]): + super().__init__( + f"isort failed to parse the given literal {code}. It's important to note " + "that isort literal sorting only supports simple literals parsable by " + f"ast.literal_eval which gave the exception of {original_error}." + ) + self.code = code + self.original_error = original_error + + +class LiteralSortTypeMismatch(ISortError): + """Raised when an isort literal sorting comment is used, with a type that doesn't match the + supplied data structure's type. + """ + + def __init__(self, kind: type, expected_kind: type): + super().__init__( + f"isort was told to sort a literal of type {expected_kind} but was given " + f"a literal of type {kind}." + ) + self.kind = kind + self.expected_kind = expected_kind + + +class AssignmentsFormatMismatch(ISortError): + """Raised when isort is told to sort assignments but the format of the assignment section + doesn't match isort's expectation. 
+ """ + + def __init__(self, code: str): + super().__init__( + "isort was told to sort a section of assignments, however the given code:\n\n" + f"{code}\n\n" + "Does not match isort's strict single line formatting requirement for assignment " + "sorting:\n\n" + "{variable_name} = {value}\n" + "{variable_name2} = {value2}\n" + "...\n\n" + ) + self.code = code + + +class UnsupportedSettings(ISortError): + """Raised when settings are passed into isort (either from config, CLI, or runtime) + that it doesn't support. + """ + + @staticmethod + def _format_option(name: str, value: Any, source: str) -> str: + return f"\t- {name} = {value} (source: '{source}')" + + def __init__(self, unsupported_settings: Dict[str, Dict[str, str]]): + errors = "\n".join( + self._format_option(name, **option) for name, option in unsupported_settings.items() + ) + + super().__init__( + "isort was provided settings that it doesn't support:\n\n" + f"{errors}\n\n" + "For a complete and up-to-date listing of supported settings see: " + "https://pycqa.github.io/isort/docs/configuration/options.\n" + ) + self.unsupported_settings = unsupported_settings + + +class UnsupportedEncoding(ISortError): + """Raised when isort encounters an encoding error while trying to read a file""" + + def __init__(self, filename: Union[str, Path]): + super().__init__(f"Unknown or unsupported encoding in {filename}") + self.filename = filename + + +class MissingSection(ISortError): + """Raised when isort encounters an import that matches a section that is not defined""" + + def __init__(self, import_module: str, section: str): + super().__init__( + f"Found {import_module} import while parsing, but {section} was not included " + "in the `sections` setting of your config. Please add it before continuing\n" + "See https://pycqa.github.io/isort/#custom-sections-and-ordering " + "for more info." 
+ ) diff --git a/myenv/lib/python3.9/site-packages/isort/files.py b/myenv/lib/python3.9/site-packages/isort/files.py new file mode 100644 index 0000000..28a916c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/files.py @@ -0,0 +1,41 @@ +import os +from pathlib import Path +from typing import Iterable, Iterator, List, Set + +from isort.settings import Config + + +def find( + paths: Iterable[str], config: Config, skipped: List[str], broken: List[str] +) -> Iterator[str]: + """Fines and provides an iterator for all Python source files defined in paths.""" + visited_dirs: Set[Path] = set() + + for path in paths: + if os.path.isdir(path): + for dirpath, dirnames, filenames in os.walk( + path, topdown=True, followlinks=config.follow_links + ): + base_path = Path(dirpath) + for dirname in list(dirnames): + full_path = base_path / dirname + resolved_path = full_path.resolve() + if config.is_skipped(full_path): + skipped.append(dirname) + dirnames.remove(dirname) + else: + if resolved_path in visited_dirs: # pragma: no cover + dirnames.remove(dirname) + visited_dirs.add(resolved_path) + + for filename in filenames: + filepath = os.path.join(dirpath, filename) + if config.is_supported_filetype(filepath): + if config.is_skipped(Path(os.path.abspath(filepath))): + skipped.append(filename) + else: + yield filepath + elif not os.path.exists(path): + broken.append(path) + else: + yield path diff --git a/myenv/lib/python3.9/site-packages/isort/format.py b/myenv/lib/python3.9/site-packages/isort/format.py new file mode 100644 index 0000000..3a7c1e0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/format.py @@ -0,0 +1,156 @@ +import re +import sys +from datetime import datetime +from difflib import unified_diff +from pathlib import Path +from typing import Optional, TextIO + +try: + import colorama +except ImportError: + colorama_unavailable = True +else: + colorama_unavailable = False + colorama.init(strip=False) + + +ADDED_LINE_PATTERN = re.compile(r"\+[^+]") +REMOVED_LINE_PATTERN = re.compile(r"-[^-]") + + +def format_simplified(import_line: str) -> str: + import_line = import_line.strip() + if import_line.startswith("from "): + import_line = import_line.replace("from ", "") + import_line = import_line.replace(" import ", ".") + elif import_line.startswith("import "): + import_line = import_line.replace("import ", "") + + return import_line + + +def format_natural(import_line: str) -> str: + import_line = import_line.strip() + if not import_line.startswith("from ") and not import_line.startswith("import "): + if "." not in import_line: + return f"import {import_line}" + parts = import_line.split(".") + end = parts.pop(-1) + return f"from {'.'.join(parts)} import {end}" + + return import_line + + +def show_unified_diff( + *, + file_input: str, + file_output: str, + file_path: Optional[Path], + output: Optional[TextIO] = None, + color_output: bool = False, +) -> None: + """Shows a unified_diff for the provided input and output against the provided file path. + + - **file_input**: A string that represents the contents of a file before changes. + - **file_output**: A string that represents the contents of a file after changes. + - **file_path**: A Path object that represents the file path of the file being changed. + - **output**: A stream to output the diff to. If non is provided uses sys.stdout. + - **color_output**: Use color in output if True. 
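All of the exceptions defined above share the `ISortError` base class, so callers of the programmatic API can catch them uniformly. A minimal sketch, assuming isort 5's top-level `isort.code()` helper; which concrete exception fires (here `ExistingSyntaxErrors` via `atomic=True`) depends on the configuration in use:

```python
# Illustrative sketch, not part of the committed diff.
import isort
from isort.exceptions import ExistingSyntaxErrors, ISortError

broken_source = "import sys\nimport os\ndef oops(:\n    pass\n"
try:
    # atomic=True asks isort to refuse to touch code it cannot parse.
    isort.code(broken_source, atomic=True)
except ExistingSyntaxErrors:
    print("input already has syntax errors; nothing was sorted")
except ISortError as error:  # base class for all isort-specific errors
    print(f"isort reported: {error}")
```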
+ """ + printer = create_terminal_printer(color_output, output) + file_name = "" if file_path is None else str(file_path) + file_mtime = str( + datetime.now() if file_path is None else datetime.fromtimestamp(file_path.stat().st_mtime) + ) + unified_diff_lines = unified_diff( + file_input.splitlines(keepends=True), + file_output.splitlines(keepends=True), + fromfile=file_name + ":before", + tofile=file_name + ":after", + fromfiledate=file_mtime, + tofiledate=str(datetime.now()), + ) + for line in unified_diff_lines: + printer.diff_line(line) + + +def ask_whether_to_apply_changes_to_file(file_path: str) -> bool: + answer = None + while answer not in ("yes", "y", "no", "n", "quit", "q"): + answer = input(f"Apply suggested changes to '{file_path}' [y/n/q]? ") # nosec + answer = answer.lower() + if answer in ("no", "n"): + return False + if answer in ("quit", "q"): + sys.exit(1) + return True + + +def remove_whitespace(content: str, line_separator: str = "\n") -> str: + content = content.replace(line_separator, "").replace(" ", "").replace("\x0c", "") + return content + + +class BasicPrinter: + ERROR = "ERROR" + SUCCESS = "SUCCESS" + + def __init__(self, error: str, success: str, output: Optional[TextIO] = None): + self.output = output or sys.stdout + self.success_message = success + self.error_message = error + + def success(self, message: str) -> None: + print(self.success_message.format(success=self.SUCCESS, message=message), file=self.output) + + def error(self, message: str) -> None: + print(self.error_message.format(error=self.ERROR, message=message), file=sys.stderr) + + def diff_line(self, line: str) -> None: + self.output.write(line) + + +class ColoramaPrinter(BasicPrinter): + def __init__(self, error: str, success: str, output: Optional[TextIO]): + super().__init__(error, success, output=output) + + # Note: this constants are instance variables instead ofs class variables + # because they refer to colorama which might not be installed. + self.ERROR = self.style_text("ERROR", colorama.Fore.RED) + self.SUCCESS = self.style_text("SUCCESS", colorama.Fore.GREEN) + self.ADDED_LINE = colorama.Fore.GREEN + self.REMOVED_LINE = colorama.Fore.RED + + @staticmethod + def style_text(text: str, style: Optional[str] = None) -> str: + if style is None: + return text + return style + text + str(colorama.Style.RESET_ALL) + + def diff_line(self, line: str) -> None: + style = None + if re.match(ADDED_LINE_PATTERN, line): + style = self.ADDED_LINE + elif re.match(REMOVED_LINE_PATTERN, line): + style = self.REMOVED_LINE + self.output.write(self.style_text(line, style)) + + +def create_terminal_printer( + color: bool, output: Optional[TextIO] = None, error: str = "", success: str = "" +) -> BasicPrinter: + if color and colorama_unavailable: + no_colorama_message = ( + "\n" + "Sorry, but to use --color (color_output) the colorama python package is required.\n\n" + "Reference: https://pypi.org/project/colorama/\n\n" + "You can either install it separately on your system or as the colors extra " + "for isort. 
Ex: \n\n" + "$ pip install isort[colors]\n" + ) + print(no_colorama_message, file=sys.stderr) + sys.exit(1) + + return ( + ColoramaPrinter(error, success, output) if color else BasicPrinter(error, success, output) + ) diff --git a/myenv/lib/python3.9/site-packages/isort/hooks.py b/myenv/lib/python3.9/site-packages/isort/hooks.py new file mode 100644 index 0000000..135886f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/hooks.py @@ -0,0 +1,86 @@ +"""Defines a git hook to allow pre-commit warnings and errors about import order. + +usage: + exit_code = git_hook(strict=True|False, modify=True|False) +""" +import os +import subprocess # nosec - Needed for hook +from pathlib import Path +from typing import List + +from isort import Config, api, exceptions + + +def get_output(command: List[str]) -> str: + """Run a command and return raw output + + :param str command: the command to run + :returns: the stdout output of the command + """ + result = subprocess.run(command, stdout=subprocess.PIPE, check=True) # nosec - trusted input + return result.stdout.decode() + + +def get_lines(command: List[str]) -> List[str]: + """Run a command and return lines of output + + :param str command: the command to run + :returns: list of whitespace-stripped lines output by command + """ + stdout = get_output(command) + return [line.strip() for line in stdout.splitlines()] + + +def git_hook( + strict: bool = False, modify: bool = False, lazy: bool = False, settings_file: str = "" +) -> int: + """Git pre-commit hook to check staged files for isort errors + + :param bool strict - if True, return number of errors on exit, + causing the hook to fail. If False, return zero so it will + just act as a warning. + :param bool modify - if True, fix the sources if they are not + sorted properly. If False, only report result without + modifying anything. + :param bool lazy - if True, also check/fix unstaged files. + This is useful if you frequently use ``git commit -a`` for example. + If False, only check/fix the staged files for isort errors. + :param str settings_file - A path to a file to be used as + the configuration file for this run. + When settings_file is the empty string, the configuration file + will be searched starting at the directory containing the first + staged file, if any, and going upward in the directory structure. + + :return number of errors if in strict mode, 0 otherwise. 
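A small sketch of how these formatting helpers can be driven directly, assuming the module is importable as `isort.format`; the `{success}`/`{error}`/`{message}` placeholders follow the `format()` calls in `BasicPrinter` above:

```python
# Illustrative sketch, not part of the committed diff.
import sys
from isort.format import create_terminal_printer, show_unified_diff

before = "import sys\nimport os\n"
after = "import os\nimport sys\n"

# Print a unified diff of a proposed change; file_path=None labels it with the current time.
show_unified_diff(file_input=before, file_output=after, file_path=None, output=sys.stdout)

# Plain (non-color) printer; falls back gracefully when colorama is not installed.
printer = create_terminal_printer(
    color=False,
    error="{error}: {message}",
    success="{success}: {message}",
)
printer.success("imports already sorted")
```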
+ """ + # Get list of files modified and staged + diff_cmd = ["git", "diff-index", "--cached", "--name-only", "--diff-filter=ACMRTUXB", "HEAD"] + if lazy: + diff_cmd.remove("--cached") + + files_modified = get_lines(diff_cmd) + if not files_modified: + return 0 + + errors = 0 + config = Config( + settings_file=settings_file, + settings_path=os.path.dirname(os.path.abspath(files_modified[0])), + ) + for filename in files_modified: + if filename.endswith(".py"): + # Get the staged contents of the file + staged_cmd = ["git", "show", f":{filename}"] + staged_contents = get_output(staged_cmd) + + try: + if not api.check_code_string( + staged_contents, file_path=Path(filename), config=config + ): + errors += 1 + if modify: + api.sort_file(filename, config=config) + except exceptions.FileSkipped: # pragma: no cover + pass + + return errors if strict else 0 diff --git a/myenv/lib/python3.9/site-packages/isort/identify.py b/myenv/lib/python3.9/site-packages/isort/identify.py new file mode 100644 index 0000000..e45fcb3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/identify.py @@ -0,0 +1,206 @@ +"""Fast stream based import identification. +Eventually this will likely replace parse.py +""" +from functools import partial +from pathlib import Path +from typing import Iterator, NamedTuple, Optional, TextIO, Tuple + +from isort.parse import _normalize_line, _strip_syntax, skip_line + +from .comments import parse as parse_comments +from .settings import DEFAULT_CONFIG, Config + +STATEMENT_DECLARATIONS: Tuple[str, ...] = ("def ", "cdef ", "cpdef ", "class ", "@", "async def") + + +class Import(NamedTuple): + line_number: int + indented: bool + module: str + attribute: Optional[str] = None + alias: Optional[str] = None + cimport: bool = False + file_path: Optional[Path] = None + + def statement(self) -> str: + import_cmd = "cimport" if self.cimport else "import" + if self.attribute: + import_string = f"from {self.module} {import_cmd} {self.attribute}" + else: + import_string = f"{import_cmd} {self.module}" + if self.alias: + import_string += f" as {self.alias}" + return import_string + + def __str__(self) -> str: + return ( + f"{self.file_path or ''}:{self.line_number} " + f"{'indented ' if self.indented else ''}{self.statement()}" + ) + + +def imports( + input_stream: TextIO, + config: Config = DEFAULT_CONFIG, + file_path: Optional[Path] = None, + top_only: bool = False, +) -> Iterator[Import]: + """Parses a python file taking out and categorizing imports.""" + in_quote = "" + + indexed_input = enumerate(input_stream) + for index, raw_line in indexed_input: + (skipping_line, in_quote) = skip_line( + raw_line, in_quote=in_quote, index=index, section_comments=config.section_comments + ) + + if top_only and not in_quote and raw_line.startswith(STATEMENT_DECLARATIONS): + break + if skipping_line: + continue + + stripped_line = raw_line.strip().split("#")[0] + if stripped_line.startswith("raise") or stripped_line.startswith("yield"): + if stripped_line == "yield": + while not stripped_line or stripped_line == "yield": + try: + index, next_line = next(indexed_input) + except StopIteration: + break + + stripped_line = next_line.strip().split("#")[0] + while stripped_line.endswith("\\"): + try: + index, next_line = next(indexed_input) + except StopIteration: + break + + stripped_line = next_line.strip().split("#")[0] + continue # pragma: no cover + + line, *end_of_line_comment = raw_line.split("#", 1) + statements = [line.strip() for line in line.split(";")] + if end_of_line_comment: + statements[-1] 
= f"{statements[-1]}#{end_of_line_comment[0]}" + + for statement in statements: + line, _raw_line = _normalize_line(statement) + if line.startswith(("import ", "cimport ")): + type_of_import = "straight" + elif line.startswith("from "): + type_of_import = "from" + else: + continue # pragma: no cover + + import_string, _ = parse_comments(line) + normalized_import_string = ( + import_string.replace("import(", "import (").replace("\\", " ").replace("\n", " ") + ) + cimports: bool = ( + " cimport " in normalized_import_string + or normalized_import_string.startswith("cimport") + ) + identified_import = partial( + Import, + index + 1, # line numbers use 1 based indexing + raw_line.startswith((" ", "\t")), + cimport=cimports, + file_path=file_path, + ) + + if "(" in line.split("#", 1)[0]: + while not line.split("#")[0].strip().endswith(")"): + try: + index, next_line = next(indexed_input) + except StopIteration: + break + + line, _ = parse_comments(next_line) + import_string += "\n" + line + else: + while line.strip().endswith("\\"): + try: + index, next_line = next(indexed_input) + except StopIteration: + break + + line, _ = parse_comments(next_line) + + # Still need to check for parentheses after an escaped line + if "(" in line.split("#")[0] and ")" not in line.split("#")[0]: + import_string += "\n" + line + + while not line.split("#")[0].strip().endswith(")"): + try: + index, next_line = next(indexed_input) + except StopIteration: + break + line, _ = parse_comments(next_line) + import_string += "\n" + line + else: + if import_string.strip().endswith( + (" import", " cimport") + ) or line.strip().startswith(("import ", "cimport ")): + import_string += "\n" + line + else: + import_string = ( + import_string.rstrip().rstrip("\\") + " " + line.lstrip() + ) + + if type_of_import == "from": + import_string = ( + import_string.replace("import(", "import (") + .replace("\\", " ") + .replace("\n", " ") + ) + parts = import_string.split(" cimport " if cimports else " import ") + + from_import = parts[0].split(" ") + import_string = (" cimport " if cimports else " import ").join( + [from_import[0] + " " + "".join(from_import[1:])] + parts[1:] + ) + + just_imports = [ + item.replace("{|", "{ ").replace("|}", " }") + for item in _strip_syntax(import_string).split() + ] + + direct_imports = just_imports[1:] + top_level_module = "" + if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports): + while "as" in just_imports: + attribute = None + as_index = just_imports.index("as") + if type_of_import == "from": + attribute = just_imports[as_index - 1] + top_level_module = just_imports[0] + module = top_level_module + "." 
+ attribute + alias = just_imports[as_index + 1] + direct_imports.remove(attribute) + direct_imports.remove(alias) + direct_imports.remove("as") + just_imports[1:] = direct_imports + if attribute == alias and config.remove_redundant_aliases: + yield identified_import(top_level_module, attribute) + else: + yield identified_import(top_level_module, attribute, alias=alias) + + else: + module = just_imports[as_index - 1] + alias = just_imports[as_index + 1] + just_imports.remove(alias) + just_imports.remove("as") + just_imports.remove(module) + if module == alias and config.remove_redundant_aliases: + yield identified_import(module) + else: + yield identified_import(module, alias=alias) + + if just_imports: + if type_of_import == "from": + module = just_imports.pop(0) + for attribute in just_imports: + yield identified_import(module, attribute) + else: + for module in just_imports: + yield identified_import(module) diff --git a/myenv/lib/python3.9/site-packages/isort/io.py b/myenv/lib/python3.9/site-packages/isort/io.py new file mode 100644 index 0000000..e7a74bf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/io.py @@ -0,0 +1,73 @@ +"""Defines any IO utilities used by isort""" +import re +import tokenize +from contextlib import contextmanager +from io import BytesIO, StringIO, TextIOWrapper +from pathlib import Path +from typing import Any, Callable, Iterator, TextIO, Union + +from isort._future import dataclass +from isort.exceptions import UnsupportedEncoding + +_ENCODING_PATTERN = re.compile(br"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)") + + +@dataclass(frozen=True) +class File: + stream: TextIO + path: Path + encoding: str + + @staticmethod + def detect_encoding(filename: Union[str, Path], readline: Callable[[], bytes]) -> str: + try: + return tokenize.detect_encoding(readline)[0] + except Exception: + raise UnsupportedEncoding(filename) + + @staticmethod + def from_contents(contents: str, filename: str) -> "File": + encoding = File.detect_encoding(filename, BytesIO(contents.encode("utf-8")).readline) + return File( # type: ignore + stream=StringIO(contents), path=Path(filename).resolve(), encoding=encoding + ) + + @property + def extension(self) -> str: + return self.path.suffix.lstrip(".") + + @staticmethod + def _open(filename: Union[str, Path]) -> TextIOWrapper: + """Open a file in read only mode using the encoding detected by + detect_encoding(). 
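Because `imports()` works on any text stream, it can be pointed at an in-memory string just as easily as a file. A minimal sketch using the default config; the `Import` tuples it yields are the ones defined at the top of this module:

```python
# Illustrative sketch, not part of the committed diff.
from io import StringIO

from isort.identify import imports

source = StringIO("import os\nfrom typing import Any\n")
for found in imports(source):
    # Each result is an Import NamedTuple; statement() rebuilds a one-line form.
    print(found.line_number, found.module, found.attribute)
    print(found.statement())
```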
+ """ + buffer = open(filename, "rb") + try: + encoding = File.detect_encoding(filename, buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True, newline="") + text.mode = "r" # type: ignore + return text + except Exception: + buffer.close() + raise + + @staticmethod + @contextmanager + def read(filename: Union[str, Path]) -> Iterator["File"]: + file_path = Path(filename).resolve() + stream = None + try: + stream = File._open(file_path) + yield File(stream=stream, path=file_path, encoding=stream.encoding) # type: ignore + finally: + if stream is not None: + stream.close() + + +class _EmptyIO(StringIO): + def write(self, *args: Any, **kwargs: Any) -> None: # type: ignore # skipcq: PTC-W0049 + pass + + +Empty = _EmptyIO() diff --git a/myenv/lib/python3.9/site-packages/isort/literal.py b/myenv/lib/python3.9/site-packages/isort/literal.py new file mode 100644 index 0000000..62feea1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/literal.py @@ -0,0 +1,113 @@ +import ast +from pprint import PrettyPrinter +from typing import Any, Callable, Dict, List, Set, Tuple + +from isort.exceptions import ( + AssignmentsFormatMismatch, + LiteralParsingFailure, + LiteralSortTypeMismatch, +) +from isort.settings import DEFAULT_CONFIG, Config + + +class ISortPrettyPrinter(PrettyPrinter): + """an isort customized pretty printer for sorted literals""" + + def __init__(self, config: Config): + super().__init__(width=config.line_length, compact=True) + + +type_mapping: Dict[str, Tuple[type, Callable[[Any, ISortPrettyPrinter], str]]] = {} + + +def assignments(code: str) -> str: + values = {} + for line in code.splitlines(keepends=True): + if not line.strip(): + continue + if " = " not in line: + raise AssignmentsFormatMismatch(code) + variable_name, value = line.split(" = ", 1) + values[variable_name] = value + + return "".join( + f"{variable_name} = {values[variable_name]}" for variable_name in sorted(values.keys()) + ) + + +def assignment(code: str, sort_type: str, extension: str, config: Config = DEFAULT_CONFIG) -> str: + """Sorts the literal present within the provided code against the provided sort type, + returning the sorted representation of the source code. + """ + if sort_type == "assignments": + return assignments(code) + if sort_type not in type_mapping: + raise ValueError( + "Trying to sort using an undefined sort_type. " + f"Defined sort types are {', '.join(type_mapping.keys())}." 
+ ) + + variable_name, literal = code.split(" = ") + variable_name = variable_name.lstrip() + try: + value = ast.literal_eval(literal) + except Exception as error: + raise LiteralParsingFailure(code, error) + + expected_type, sort_function = type_mapping[sort_type] + if type(value) != expected_type: + raise LiteralSortTypeMismatch(type(value), expected_type) + + printer = ISortPrettyPrinter(config) + sorted_value_code = f"{variable_name} = {sort_function(value, printer)}" + if config.formatting_function: + sorted_value_code = config.formatting_function( + sorted_value_code, extension, config + ).rstrip() + + sorted_value_code += code[len(code.rstrip()) :] + return sorted_value_code + + +def register_type( + name: str, kind: type +) -> Callable[[Callable[[Any, ISortPrettyPrinter], str]], Callable[[Any, ISortPrettyPrinter], str]]: + """Registers a new literal sort type.""" + + def wrap( + function: Callable[[Any, ISortPrettyPrinter], str] + ) -> Callable[[Any, ISortPrettyPrinter], str]: + type_mapping[name] = (kind, function) + return function + + return wrap + + +@register_type("dict", dict) +def _dict(value: Dict[Any, Any], printer: ISortPrettyPrinter) -> str: + return printer.pformat(dict(sorted(value.items(), key=lambda item: item[1]))) # type: ignore + + +@register_type("list", list) +def _list(value: List[Any], printer: ISortPrettyPrinter) -> str: + return printer.pformat(sorted(value)) + + +@register_type("unique-list", list) +def _unique_list(value: List[Any], printer: ISortPrettyPrinter) -> str: + return printer.pformat(list(sorted(set(value)))) + + +@register_type("set", set) +def _set(value: Set[Any], printer: ISortPrettyPrinter) -> str: + return "{" + printer.pformat(tuple(sorted(value)))[1:-1] + "}" + + +@register_type("tuple", tuple) +def _tuple(value: Tuple[Any, ...], printer: ISortPrettyPrinter) -> str: + return printer.pformat(tuple(sorted(value))) + + +@register_type("unique-tuple", tuple) +def _unique_tuple(value: Tuple[Any, ...], printer: ISortPrettyPrinter) -> str: + return printer.pformat(tuple(sorted(set(value)))) diff --git a/myenv/lib/python3.9/site-packages/isort/logo.py b/myenv/lib/python3.9/site-packages/isort/logo.py new file mode 100644 index 0000000..6377d86 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/logo.py @@ -0,0 +1,19 @@ +from ._version import __version__ + +ASCII_ART = rf""" + _ _ + (_) ___ ___ _ __| |_ + | |/ _/ / _ \/ '__ _/ + | |\__ \/\_\/| | | |_ + |_|\___/\___/\_/ \_/ + + isort your imports, so you don't have to. + + VERSION {__version__} +""" + +__doc__ = f""" +```python +{ASCII_ART} +``` +""" diff --git a/myenv/lib/python3.9/site-packages/isort/main.py b/myenv/lib/python3.9/site-packages/isort/main.py new file mode 100644 index 0000000..3d0c09d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/main.py @@ -0,0 +1,1285 @@ +"""Tool for sorting imports alphabetically, and automatically separated into sections.""" +import argparse +import functools +import json +import os +import sys +from gettext import gettext as _ +from io import TextIOWrapper +from pathlib import Path +from typing import Any, Dict, List, Optional, Sequence, Union +from warnings import warn + +from . 
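A minimal sketch of the sorters registered above, driven through `assignment()`; the variable name and values are made up, and `"py"` is passed as the extension since no formatting plugin is configured by default:

```python
# Illustrative sketch, not part of the committed diff.
from isort.literal import assignment

# Sort the right-hand side of a single assignment with the registered "list" type.
print(assignment("animals = ['zebra', 'ant', 'moose']", "list", "py"))
# animals = ['ant', 'moose', 'zebra']

# "unique-list", "set", "tuple", "unique-tuple", "dict" and "assignments" work the same way.
```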
import __version__, api, files, sections +from .exceptions import FileSkipped, ISortError, UnsupportedEncoding +from .format import create_terminal_printer +from .logo import ASCII_ART +from .profiles import profiles +from .settings import VALID_PY_TARGETS, Config, find_all_configs +from .utils import Trie +from .wrap_modes import WrapModes + +DEPRECATED_SINGLE_DASH_ARGS = { + "-ac", + "-af", + "-ca", + "-cs", + "-df", + "-ds", + "-dt", + "-fas", + "-fass", + "-ff", + "-fgw", + "-fss", + "-lai", + "-lbt", + "-le", + "-ls", + "-nis", + "-nlb", + "-ot", + "-rr", + "-sd", + "-sg", + "-sl", + "-sp", + "-tc", + "-wl", + "-ws", +} +QUICK_GUIDE = f""" +{ASCII_ART} + +Nothing to do: no files or paths have have been passed in! + +Try one of the following: + + `isort .` - sort all Python files, starting from the current directory, recursively. + `isort . --interactive` - Do the same, but ask before making any changes. + `isort . --check --diff` - Check to see if imports are correctly sorted within this project. + `isort --help` - In-depth information about isort's available command-line options. + +Visit https://pycqa.github.io/isort/ for complete information about how to use isort. +""" + + +class SortAttempt: + def __init__(self, incorrectly_sorted: bool, skipped: bool, supported_encoding: bool) -> None: + self.incorrectly_sorted = incorrectly_sorted + self.skipped = skipped + self.supported_encoding = supported_encoding + + +def sort_imports( + file_name: str, + config: Config, + check: bool = False, + ask_to_apply: bool = False, + write_to_stdout: bool = False, + **kwargs: Any, +) -> Optional[SortAttempt]: + incorrectly_sorted: bool = False + skipped: bool = False + try: + if check: + try: + incorrectly_sorted = not api.check_file(file_name, config=config, **kwargs) + except FileSkipped: + skipped = True + return SortAttempt(incorrectly_sorted, skipped, True) + + try: + incorrectly_sorted = not api.sort_file( + file_name, + config=config, + ask_to_apply=ask_to_apply, + write_to_stdout=write_to_stdout, + **kwargs, + ) + except FileSkipped: + skipped = True + return SortAttempt(incorrectly_sorted, skipped, True) + except (OSError, ValueError) as error: + warn(f"Unable to parse file {file_name} due to {error}") + return None + except UnsupportedEncoding: + if config.verbose: + warn(f"Encoding not supported for {file_name}") + return SortAttempt(incorrectly_sorted, skipped, False) + except ISortError as error: + _print_hard_fail(config, message=str(error)) + sys.exit(1) + except Exception: + _print_hard_fail(config, offending_file=file_name) + raise + + +def _print_hard_fail( + config: Config, offending_file: Optional[str] = None, message: Optional[str] = None +) -> None: + """Fail on unrecoverable exception with custom message.""" + message = message or ( + f"Unrecoverable exception thrown when parsing {offending_file or ''}!" + "This should NEVER happen.\n" + "If encountered, please open an issue: https://github.com/PyCQA/isort/issues/new" + ) + printer = create_terminal_printer( + color=config.color_output, error=config.format_error, success=config.format_success + ) + printer.error(message) + + +def _build_arg_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description="Sort Python import definitions alphabetically " + "within logical sections. Run with no arguments to see a quick " + "start guide, otherwise, one or more files/directories/stdin must be provided. " + "Use `-` as the first argument to represent stdin. 
Use --interactive to use the pre 5.0.0 " + "interactive behavior." + " " + "If you've used isort 4 but are new to isort 5, see the upgrading guide: " + "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html", + add_help=False, # prevent help option from appearing in "optional arguments" group + ) + + general_group = parser.add_argument_group("general options") + target_group = parser.add_argument_group("target options") + output_group = parser.add_argument_group("general output options") + inline_args_group = output_group.add_mutually_exclusive_group() + section_group = parser.add_argument_group("section output options") + deprecated_group = parser.add_argument_group("deprecated options") + + general_group.add_argument( + "-h", + "--help", + action="help", + default=argparse.SUPPRESS, + help=_("show this help message and exit"), + ) + general_group.add_argument( + "-V", + "--version", + action="store_true", + dest="show_version", + help="Displays the currently installed version of isort.", + ) + general_group.add_argument( + "--vn", + "--version-number", + action="version", + version=__version__, + help="Returns just the current version number without the logo", + ) + general_group.add_argument( + "-v", + "--verbose", + action="store_true", + dest="verbose", + help="Shows verbose output, such as when files are skipped or when a check is successful.", + ) + general_group.add_argument( + "--only-modified", + "--om", + dest="only_modified", + action="store_true", + help="Suppresses verbose output for non-modified files.", + ) + general_group.add_argument( + "--dedup-headings", + dest="dedup_headings", + action="store_true", + help="Tells isort to only show an identical custom import heading comment once, even if" + " there are multiple sections with the comment set.", + ) + general_group.add_argument( + "-q", + "--quiet", + action="store_true", + dest="quiet", + help="Shows extra quiet output, only errors are outputted.", + ) + general_group.add_argument( + "-d", + "--stdout", + help="Force resulting output to stdout, instead of in-place.", + dest="write_to_stdout", + action="store_true", + ) + general_group.add_argument( + "--overwrite-in-place", + help="Tells isort to overwrite in place using the same file handle. " + "Comes at a performance and memory usage penalty over its standard " + "approach but ensures all file flags and modes stay unchanged.", + dest="overwrite_in_place", + action="store_true", + ) + general_group.add_argument( + "--show-config", + dest="show_config", + action="store_true", + help="See isort's determined config, as well as sources of config options.", + ) + general_group.add_argument( + "--show-files", + dest="show_files", + action="store_true", + help="See the files isort will be run against with the current config options.", + ) + general_group.add_argument( + "--df", + "--diff", + dest="show_diff", + action="store_true", + help="Prints a diff of all the changes isort would make to a file, instead of " + "changing it in place", + ) + general_group.add_argument( + "-c", + "--check-only", + "--check", + action="store_true", + dest="check", + help="Checks the file for unsorted / unformatted imports and prints them to the " + "command line without modifying the file. 
Returns 0 when nothing would change and " + "returns 1 when the file would be reformatted.", + ) + general_group.add_argument( + "--ws", + "--ignore-whitespace", + action="store_true", + dest="ignore_whitespace", + help="Tells isort to ignore whitespace differences when --check-only is being used.", + ) + general_group.add_argument( + "--sp", + "--settings-path", + "--settings-file", + "--settings", + dest="settings_path", + help="Explicitly set the settings path or file instead of auto determining " + "based on file location.", + ) + general_group.add_argument( + "--cr", + "--config-root", + dest="config_root", + help="Explicitly set the config root for resolving all configs. When used " + "with the --resolve-all-configs flag, isort will look at all sub-folders " + "in this config root to resolve config files and sort files based on the " + "closest available config(if any)", + ) + general_group.add_argument( + "--resolve-all-configs", + dest="resolve_all_configs", + action="store_true", + help="Tells isort to resolve the configs for all sub-directories " + "and sort files in terms of its closest config files.", + ) + general_group.add_argument( + "--profile", + dest="profile", + type=str, + help="Base profile type to use for configuration. " + f"Profiles include: {', '.join(profiles.keys())}. As well as any shared profiles.", + ) + general_group.add_argument( + "--old-finders", + "--magic-placement", + dest="old_finders", + action="store_true", + help="Use the old deprecated finder logic that relies on environment introspection magic.", + ) + general_group.add_argument( + "-j", + "--jobs", + help="Number of files to process in parallel.", + dest="jobs", + type=int, + nargs="?", + const=-1, + ) + general_group.add_argument( + "--ac", + "--atomic", + dest="atomic", + action="store_true", + help="Ensures the output doesn't save if the resulting file contains syntax errors.", + ) + general_group.add_argument( + "--interactive", + dest="ask_to_apply", + action="store_true", + help="Tells isort to apply changes interactively.", + ) + general_group.add_argument( + "--format-error", + dest="format_error", + help="Override the format used to print errors.", + ) + general_group.add_argument( + "--format-success", + dest="format_success", + help="Override the format used to print success.", + ) + + target_group.add_argument( + "files", nargs="*", help="One or more Python source files that need their imports sorted." + ) + target_group.add_argument( + "--filter-files", + dest="filter_files", + action="store_true", + help="Tells isort to filter files even when they are explicitly passed in as " + "part of the CLI command.", + ) + target_group.add_argument( + "-s", + "--skip", + help="Files that isort should skip over. If you want to skip multiple " + "files you should specify twice: --skip file1 --skip file2. Values can be " + "file names, directory names or file paths. To skip all files in a nested path " + "use --skip-glob.", + dest="skip", + action="append", + ) + target_group.add_argument( + "--extend-skip", + help="Extends --skip to add additional files that isort should skip over. " + "If you want to skip multiple " + "files you should specify twice: --skip file1 --skip file2. Values can be " + "file names, directory names or file paths. 
To skip all files in a nested path " + "use --skip-glob.", + dest="extend_skip", + action="append", + ) + target_group.add_argument( + "--sg", + "--skip-glob", + help="Files that isort should skip over.", + dest="skip_glob", + action="append", + ) + target_group.add_argument( + "--extend-skip-glob", + help="Additional files that isort should skip over (extending --skip-glob).", + dest="extend_skip_glob", + action="append", + ) + target_group.add_argument( + "--gitignore", + "--skip-gitignore", + action="store_true", + dest="skip_gitignore", + help="Treat project as a git repository and ignore files listed in .gitignore." + "\nNOTE: This requires git to be installed and accessible from the same shell as isort.", + ) + target_group.add_argument( + "--ext", + "--extension", + "--supported-extension", + dest="supported_extensions", + action="append", + help="Specifies what extensions isort can be run against.", + ) + target_group.add_argument( + "--blocked-extension", + dest="blocked_extensions", + action="append", + help="Specifies what extensions isort can never be run against.", + ) + target_group.add_argument( + "--dont-follow-links", + dest="dont_follow_links", + action="store_true", + help="Tells isort not to follow symlinks that are encountered when running recursively.", + ) + target_group.add_argument( + "--filename", + dest="filename", + help="Provide the filename associated with a stream.", + ) + target_group.add_argument( + "--allow-root", + action="store_true", + default=False, + help="Tells isort not to treat / specially, allowing it to be run against the root dir.", + ) + + output_group.add_argument( + "-a", + "--add-import", + dest="add_imports", + action="append", + help="Adds the specified import line to all files, " + "automatically determining correct placement.", + ) + output_group.add_argument( + "--append", + "--append-only", + dest="append_only", + action="store_true", + help="Only adds the imports specified in --add-import if the file" + " contains existing imports.", + ) + output_group.add_argument( + "--af", + "--force-adds", + dest="force_adds", + action="store_true", + help="Forces import adds even if the original file is empty.", + ) + output_group.add_argument( + "--rm", + "--remove-import", + dest="remove_imports", + action="append", + help="Removes the specified import from all files.", + ) + output_group.add_argument( + "--float-to-top", + dest="float_to_top", + action="store_true", + help="Causes all non-indented imports to float to the top of the file having its imports " + "sorted (immediately below the top of file comment).\n" + "This can be an excellent shortcut for collecting imports every once in a while " + "when you place them in the middle of a file to avoid context switching.\n\n" + "*NOTE*: It currently doesn't work with cimports and introduces some extra over-head " + "and a performance penalty.", + ) + output_group.add_argument( + "--dont-float-to-top", + dest="dont_float_to_top", + action="store_true", + help="Forces --float-to-top setting off. 
See --float-to-top for more information.", + ) + output_group.add_argument( + "--ca", + "--combine-as", + dest="combine_as_imports", + action="store_true", + help="Combines as imports on the same line.", + ) + output_group.add_argument( + "--cs", + "--combine-star", + dest="combine_star", + action="store_true", + help="Ensures that if a star import is present, " + "nothing else is imported from that namespace.", + ) + output_group.add_argument( + "-e", + "--balanced", + dest="balanced_wrapping", + action="store_true", + help="Balances wrapping to produce the most consistent line length possible", + ) + output_group.add_argument( + "--ff", + "--from-first", + dest="from_first", + action="store_true", + help="Switches the typical ordering preference, " + "showing from imports first then straight ones.", + ) + output_group.add_argument( + "--fgw", + "--force-grid-wrap", + nargs="?", + const=2, + type=int, + dest="force_grid_wrap", + help="Force number of from imports (defaults to 2 when passed as CLI flag without value) " + "to be grid wrapped regardless of line " + "length. If 0 is passed in (the global default) only line length is considered.", + ) + output_group.add_argument( + "-i", + "--indent", + help='String to place for indents defaults to " " (4 spaces).', + dest="indent", + type=str, + ) + output_group.add_argument( + "--lbi", "--lines-before-imports", dest="lines_before_imports", type=int + ) + output_group.add_argument( + "--lai", "--lines-after-imports", dest="lines_after_imports", type=int + ) + output_group.add_argument( + "--lbt", "--lines-between-types", dest="lines_between_types", type=int + ) + output_group.add_argument( + "--le", + "--line-ending", + dest="line_ending", + help="Forces line endings to the specified value. " + "If not set, values will be guessed per-file.", + ) + output_group.add_argument( + "--ls", + "--length-sort", + help="Sort imports by their string length.", + dest="length_sort", + action="store_true", + ) + output_group.add_argument( + "--lss", + "--length-sort-straight", + help="Sort straight imports by their string length. Similar to `length_sort` " + "but applies only to straight imports and doesn't affect from imports.", + dest="length_sort_straight", + action="store_true", + ) + output_group.add_argument( + "-m", + "--multi-line", + dest="multi_line_output", + choices=list(WrapModes.__members__.keys()) + + [str(mode.value) for mode in WrapModes.__members__.values()], + type=str, + help="Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, " + "5-vert-grid-grouped, 6-deprecated-alias-for-5, 7-noqa, " + "8-vertical-hanging-indent-bracket, 9-vertical-prefix-from-module-import, " + "10-hanging-indent-with-parentheses).", + ) + output_group.add_argument( + "-n", + "--ensure-newline-before-comments", + dest="ensure_newline_before_comments", + action="store_true", + help="Inserts a blank line before a comment following an import.", + ) + inline_args_group.add_argument( + "--nis", + "--no-inline-sort", + dest="no_inline_sort", + action="store_true", + help="Leaves `from` imports with multiple imports 'as-is' " + "(e.g. `from foo import a, c ,b`).", + ) + output_group.add_argument( + "--ot", + "--order-by-type", + dest="order_by_type", + action="store_true", + help="Order imports by type, which is determined by case, in addition to alphabetically.\n" + "\n**NOTE**: type here refers to the implied type from the import name capitalization.\n" + ' isort does not do type introspection for the imports. 
These "types" are simply: ' + "CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8" + " or a related coding standard and has many imports this is a good default, otherwise you " + "likely will want to turn it off. From the CLI the `--dont-order-by-type` option will turn " + "this off.", + ) + output_group.add_argument( + "--dt", + "--dont-order-by-type", + dest="dont_order_by_type", + action="store_true", + help="Don't order imports by type, which is determined by case, in addition to " + "alphabetically.\n\n" + "**NOTE**: type here refers to the implied type from the import name capitalization.\n" + ' isort does not do type introspection for the imports. These "types" are simply: ' + "CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8" + " or a related coding standard and has many imports this is a good default. You can turn " + "this on from the CLI using `--order-by-type`.", + ) + output_group.add_argument( + "--rr", + "--reverse-relative", + dest="reverse_relative", + action="store_true", + help="Reverse order of relative imports.", + ) + output_group.add_argument( + "--reverse-sort", + dest="reverse_sort", + action="store_true", + help="Reverses the ordering of imports.", + ) + output_group.add_argument( + "--sort-order", + dest="sort_order", + help="Specify sorting function. Can be built in (natural[default] = force numbers " + "to be sequential, native = Python's built-in sorted function) or an installable plugin.", + ) + inline_args_group.add_argument( + "--sl", + "--force-single-line-imports", + dest="force_single_line", + action="store_true", + help="Forces all from imports to appear on their own line", + ) + output_group.add_argument( + "--nsl", + "--single-line-exclusions", + help="One or more modules to exclude from the single line rule.", + dest="single_line_exclusions", + action="append", + ) + output_group.add_argument( + "--tc", + "--trailing-comma", + dest="include_trailing_comma", + action="store_true", + help="Includes a trailing comma on multi line imports that include parentheses.", + ) + output_group.add_argument( + "--up", + "--use-parentheses", + dest="use_parentheses", + action="store_true", + help="Use parentheses for line continuation on length limit instead of slashes." + " **NOTE**: This is separate from wrap modes, and only affects how individual lines that " + " are too long get continued, not sections of multiple imports.", + ) + output_group.add_argument( + "-l", + "-w", + "--line-length", + "--line-width", + help="The max length of an import line (used for wrapping long imports).", + dest="line_length", + type=int, + ) + output_group.add_argument( + "--wl", + "--wrap-length", + dest="wrap_length", + type=int, + help="Specifies how long lines that are wrapped should be, if not set line_length is used." + "\nNOTE: wrap_length must be LOWER than or equal to line_length.", + ) + output_group.add_argument( + "--case-sensitive", + dest="case_sensitive", + action="store_true", + help="Tells isort to include casing when sorting module names", + ) + output_group.add_argument( + "--remove-redundant-aliases", + dest="remove_redundant_aliases", + action="store_true", + help=( + "Tells isort to remove redundant aliases from imports, such as `import os as os`." + " This defaults to `False` simply because some projects use these seemingly useless " + " aliases to signify intent and change behaviour." 
+ ), + ) + output_group.add_argument( + "--honor-noqa", + dest="honor_noqa", + action="store_true", + help="Tells isort to honor noqa comments to enforce skipping those comments.", + ) + output_group.add_argument( + "--treat-comment-as-code", + dest="treat_comments_as_code", + action="append", + help="Tells isort to treat the specified single line comment(s) as if they are code.", + ) + output_group.add_argument( + "--treat-all-comment-as-code", + dest="treat_all_comments_as_code", + action="store_true", + help="Tells isort to treat all single line comments as if they are code.", + ) + output_group.add_argument( + "--formatter", + dest="formatter", + type=str, + help="Specifies the name of a formatting plugin to use when producing output.", + ) + output_group.add_argument( + "--color", + dest="color_output", + action="store_true", + help="Tells isort to use color in terminal output.", + ) + output_group.add_argument( + "--ext-format", + dest="ext_format", + help="Tells isort to format the given files according to an extensions formatting rules.", + ) + output_group.add_argument( + "--star-first", + help="Forces star imports above others to avoid overriding directly imported variables.", + dest="star_first", + action="store_true", + ) + + section_group.add_argument( + "--sd", + "--section-default", + dest="default_section", + help="Sets the default section for import options: " + str(sections.DEFAULT), + ) + section_group.add_argument( + "--only-sections", + "--os", + dest="only_sections", + action="store_true", + help="Causes imports to be sorted based on their sections like STDLIB, THIRDPARTY, etc. " + "Within sections, the imports are ordered by their import style and the imports with " + "the same style maintain their relative positions.", + ) + section_group.add_argument( + "--ds", + "--no-sections", + help="Put all imports into the same section bucket", + dest="no_sections", + action="store_true", + ) + section_group.add_argument( + "--fas", + "--force-alphabetical-sort", + action="store_true", + dest="force_alphabetical_sort", + help="Force all imports to be sorted as a single section", + ) + section_group.add_argument( + "--fss", + "--force-sort-within-sections", + action="store_true", + dest="force_sort_within_sections", + help="Don't sort straight-style imports (like import sys) before from-style imports " + "(like from itertools import groupby). Instead, sort the imports by module, " + "independent of import style.", + ) + section_group.add_argument( + "--hcss", + "--honor-case-in-force-sorted-sections", + action="store_true", + dest="honor_case_in_force_sorted_sections", + help="Honor `--case-sensitive` when `--force-sort-within-sections` is being used. 
" + "Without this option set, `--order-by-type` decides module name ordering too.", + ) + section_group.add_argument( + "--srss", + "--sort-relative-in-force-sorted-sections", + action="store_true", + dest="sort_relative_in_force_sorted_sections", + help="When using `--force-sort-within-sections`, sort relative imports the same " + "way as they are sorted when not using that setting.", + ) + section_group.add_argument( + "--fass", + "--force-alphabetical-sort-within-sections", + action="store_true", + dest="force_alphabetical_sort_within_sections", + help="Force all imports to be sorted alphabetically within a section", + ) + section_group.add_argument( + "-t", + "--top", + help="Force specific imports to the top of their appropriate section.", + dest="force_to_top", + action="append", + ) + section_group.add_argument( + "--combine-straight-imports", + "--csi", + dest="combine_straight_imports", + action="store_true", + help="Combines all the bare straight imports of the same section in a single line. " + "Won't work with sections which have 'as' imports", + ) + section_group.add_argument( + "--nlb", + "--no-lines-before", + help="Sections which should not be split with previous by empty lines", + dest="no_lines_before", + action="append", + ) + section_group.add_argument( + "--src", + "--src-path", + dest="src_paths", + action="append", + help="Add an explicitly defined source path " + "(modules within src paths have their imports automatically categorized as first_party)." + " Glob expansion (`*` and `**`) is supported for this option.", + ) + section_group.add_argument( + "-b", + "--builtin", + dest="known_standard_library", + action="append", + help="Force isort to recognize a module as part of Python's standard library.", + ) + section_group.add_argument( + "--extra-builtin", + dest="extra_standard_library", + action="append", + help="Extra modules to be included in the list of ones in Python's standard library.", + ) + section_group.add_argument( + "-f", + "--future", + dest="known_future_library", + action="append", + help="Force isort to recognize a module as part of Python's internal future compatibility " + "libraries. WARNING: this overrides the behavior of __future__ handling and therefore" + " can result in code that can't execute. If you're looking to add dependencies such " + "as six, a better option is to create another section below --future using custom " + "sections. See: https://github.com/PyCQA/isort#custom-sections-and-ordering and the " + "discussion here: https://github.com/PyCQA/isort/issues/1463.", + ) + section_group.add_argument( + "-o", + "--thirdparty", + dest="known_third_party", + action="append", + help="Force isort to recognize a module as being part of a third party library.", + ) + section_group.add_argument( + "-p", + "--project", + dest="known_first_party", + action="append", + help="Force isort to recognize a module as being part of the current python project.", + ) + section_group.add_argument( + "--known-local-folder", + dest="known_local_folder", + action="append", + help="Force isort to recognize a module as being a local folder. " + "Generally, this is reserved for relative imports (from . 
import module).", + ) + section_group.add_argument( + "--virtual-env", + dest="virtual_env", + help="Virtual environment to use for determining whether a package is third-party", + ) + section_group.add_argument( + "--conda-env", + dest="conda_env", + help="Conda environment to use for determining whether a package is third-party", + ) + section_group.add_argument( + "--py", + "--python-version", + action="store", + dest="py_version", + choices=tuple(VALID_PY_TARGETS) + ("auto",), + help="Tells isort to set the known standard library based on the specified Python " + "version. Default is to assume any Python 3 version could be the target, and use a union " + "of all stdlib modules across versions. If auto is specified, the version of the " + "interpreter used to run isort " + f"(currently: {sys.version_info.major}{sys.version_info.minor}) will be used.", + ) + + # deprecated options + deprecated_group.add_argument( + "--recursive", + dest="deprecated_flags", + action="append_const", + const="--recursive", + help=argparse.SUPPRESS, + ) + deprecated_group.add_argument( + "-rc", dest="deprecated_flags", action="append_const", const="-rc", help=argparse.SUPPRESS + ) + deprecated_group.add_argument( + "--dont-skip", + dest="deprecated_flags", + action="append_const", + const="--dont-skip", + help=argparse.SUPPRESS, + ) + deprecated_group.add_argument( + "-ns", dest="deprecated_flags", action="append_const", const="-ns", help=argparse.SUPPRESS + ) + deprecated_group.add_argument( + "--apply", + dest="deprecated_flags", + action="append_const", + const="--apply", + help=argparse.SUPPRESS, + ) + deprecated_group.add_argument( + "-k", + "--keep-direct-and-as", + dest="deprecated_flags", + action="append_const", + const="--keep-direct-and-as", + help=argparse.SUPPRESS, + ) + + return parser + + +def parse_args(argv: Optional[Sequence[str]] = None) -> Dict[str, Any]: + argv = sys.argv[1:] if argv is None else list(argv) + remapped_deprecated_args = [] + for index, arg in enumerate(argv): + if arg in DEPRECATED_SINGLE_DASH_ARGS: + remapped_deprecated_args.append(arg) + argv[index] = f"-{arg}" + + parser = _build_arg_parser() + arguments = {key: value for key, value in vars(parser.parse_args(argv)).items() if value} + if remapped_deprecated_args: + arguments["remapped_deprecated_args"] = remapped_deprecated_args + if "dont_order_by_type" in arguments: + arguments["order_by_type"] = False + del arguments["dont_order_by_type"] + if "dont_follow_links" in arguments: + arguments["follow_links"] = False + del arguments["dont_follow_links"] + if "dont_float_to_top" in arguments: + del arguments["dont_float_to_top"] + if arguments.get("float_to_top", False): + sys.exit("Can't set both --float-to-top and --dont-float-to-top.") + else: + arguments["float_to_top"] = False + multi_line_output = arguments.get("multi_line_output", None) + if multi_line_output: + if multi_line_output.isdigit(): + arguments["multi_line_output"] = WrapModes(int(multi_line_output)) + else: + arguments["multi_line_output"] = WrapModes[multi_line_output] + + return arguments + + +def _preconvert(item: Any) -> Union[str, List[Any]]: + """Preconverts objects from native types into JSONifyiable types""" + if isinstance(item, (set, frozenset)): + return list(item) + if isinstance(item, WrapModes): + return str(item.name) + if isinstance(item, Path): + return str(item) + if callable(item) and hasattr(item, "__name__"): + return str(item.__name__) + raise TypeError("Unserializable object {} of type {}".format(item, type(item))) + + +def 
identify_imports_main( + argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] = None +) -> None: + parser = argparse.ArgumentParser( + description="Get all import definitions from a given file." + "Use `-` as the first argument to represent stdin." + ) + parser.add_argument( + "files", nargs="+", help="One or more Python source files that need their imports sorted." + ) + parser.add_argument( + "--top-only", + action="store_true", + default=False, + help="Only identify imports that occur in before functions or classes.", + ) + + target_group = parser.add_argument_group("target options") + target_group.add_argument( + "--follow-links", + action="store_true", + default=False, + help="Tells isort to follow symlinks that are encountered when running recursively.", + ) + + uniqueness = parser.add_mutually_exclusive_group() + uniqueness.add_argument( + "--unique", + action="store_true", + default=False, + help="If true, isort will only identify unique imports.", + ) + uniqueness.add_argument( + "--packages", + dest="unique", + action="store_const", + const=api.ImportKey.PACKAGE, + default=False, + help="If true, isort will only identify the unique top level modules imported.", + ) + uniqueness.add_argument( + "--modules", + dest="unique", + action="store_const", + const=api.ImportKey.MODULE, + default=False, + help="If true, isort will only identify the unique modules imported.", + ) + uniqueness.add_argument( + "--attributes", + dest="unique", + action="store_const", + const=api.ImportKey.ATTRIBUTE, + default=False, + help="If true, isort will only identify the unique attributes imported.", + ) + + arguments = parser.parse_args(argv) + + file_names = arguments.files + if file_names == ["-"]: + identified_imports = api.find_imports_in_stream( + sys.stdin if stdin is None else stdin, + unique=arguments.unique, + top_only=arguments.top_only, + follow_links=arguments.follow_links, + ) + else: + identified_imports = api.find_imports_in_paths( + file_names, + unique=arguments.unique, + top_only=arguments.top_only, + follow_links=arguments.follow_links, + ) + + for identified_import in identified_imports: + if arguments.unique == api.ImportKey.PACKAGE: + print(identified_import.module.split(".")[0]) + elif arguments.unique == api.ImportKey.MODULE: + print(identified_import.module) + elif arguments.unique == api.ImportKey.ATTRIBUTE: + print(f"{identified_import.module}.{identified_import.attribute}") + else: + print(str(identified_import)) + + +def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] = None) -> None: + arguments = parse_args(argv) + if arguments.get("show_version"): + print(ASCII_ART) + return + + show_config: bool = arguments.pop("show_config", False) + show_files: bool = arguments.pop("show_files", False) + if show_config and show_files: + sys.exit("Error: either specify show-config or show-files not both.") + + if "settings_path" in arguments: + if os.path.isfile(arguments["settings_path"]): + arguments["settings_file"] = os.path.abspath(arguments["settings_path"]) + arguments["settings_path"] = os.path.dirname(arguments["settings_file"]) + else: + arguments["settings_path"] = os.path.abspath(arguments["settings_path"]) + + if "virtual_env" in arguments: + venv = arguments["virtual_env"] + arguments["virtual_env"] = os.path.abspath(venv) + if not os.path.isdir(arguments["virtual_env"]): + warn(f"virtual_env dir does not exist: {arguments['virtual_env']}") + + file_names = arguments.pop("files", []) + if not file_names and not show_config: + 
print(QUICK_GUIDE) + if arguments: + sys.exit("Error: arguments passed in without any paths or content.") + return + if "settings_path" not in arguments: + arguments["settings_path"] = ( + os.path.abspath(file_names[0] if file_names else ".") or os.getcwd() + ) + if not os.path.isdir(arguments["settings_path"]): + arguments["settings_path"] = os.path.dirname(arguments["settings_path"]) + + config_dict = arguments.copy() + ask_to_apply = config_dict.pop("ask_to_apply", False) + jobs = config_dict.pop("jobs", None) + check = config_dict.pop("check", False) + show_diff = config_dict.pop("show_diff", False) + write_to_stdout = config_dict.pop("write_to_stdout", False) + deprecated_flags = config_dict.pop("deprecated_flags", False) + remapped_deprecated_args = config_dict.pop("remapped_deprecated_args", False) + stream_filename = config_dict.pop("filename", None) + ext_format = config_dict.pop("ext_format", None) + allow_root = config_dict.pop("allow_root", None) + resolve_all_configs = config_dict.pop("resolve_all_configs", False) + wrong_sorted_files = False + all_attempt_broken = False + no_valid_encodings = False + + config_trie: Optional[Trie] = None + if resolve_all_configs: + config_trie = find_all_configs(config_dict.pop("config_root", ".")) + + if "src_paths" in config_dict: + config_dict["src_paths"] = { + Path(src_path).resolve() for src_path in config_dict.get("src_paths", ()) + } + + config = Config(**config_dict) + if show_config: + print(json.dumps(config.__dict__, indent=4, separators=(",", ": "), default=_preconvert)) + return + if file_names == ["-"]: + file_path = Path(stream_filename) if stream_filename else None + if show_files: + sys.exit("Error: can't show files for streaming input.") + + input_stream = sys.stdin if stdin is None else stdin + if check: + incorrectly_sorted = not api.check_stream( + input_stream=input_stream, + config=config, + show_diff=show_diff, + file_path=file_path, + extension=ext_format, + ) + + wrong_sorted_files = incorrectly_sorted + else: + try: + api.sort_stream( + input_stream=input_stream, + output_stream=sys.stdout, + config=config, + show_diff=show_diff, + file_path=file_path, + extension=ext_format, + raise_on_skip=False, + ) + except FileSkipped: + sys.stdout.write(input_stream.read()) + elif "/" in file_names and not allow_root: + printer = create_terminal_printer( + color=config.color_output, error=config.format_error, success=config.format_success + ) + printer.error("it is dangerous to operate recursively on '/'") + printer.error("use --allow-root to override this failsafe") + sys.exit(1) + else: + if stream_filename: + printer = create_terminal_printer( + color=config.color_output, error=config.format_error, success=config.format_success + ) + printer.error("Filename override is intended only for stream (-) sorting.") + sys.exit(1) + skipped: List[str] = [] + broken: List[str] = [] + + if config.filter_files: + filtered_files = [] + for file_name in file_names: + if config.is_skipped(Path(file_name)): + skipped.append(file_name) + else: + filtered_files.append(file_name) + file_names = filtered_files + + file_names = files.find(file_names, config, skipped, broken) + if show_files: + for file_name in file_names: + print(file_name) + return + num_skipped = 0 + num_broken = 0 + num_invalid_encoding = 0 + if config.verbose: + print(ASCII_ART) + + if jobs: + import multiprocessing + + executor = multiprocessing.Pool(jobs if jobs > 0 else multiprocessing.cpu_count()) + attempt_iterator = executor.imap( + functools.partial( + sort_imports, 
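+                    # shared options are bound via functools.partial so the worker pool can map sort_imports over each file name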
+ config=config, + check=check, + ask_to_apply=ask_to_apply, + write_to_stdout=write_to_stdout, + extension=ext_format, + config_trie=config_trie, + ), + file_names, + ) + else: + # https://github.com/python/typeshed/pull/2814 + attempt_iterator = ( + sort_imports( # type: ignore + file_name, + config=config, + check=check, + ask_to_apply=ask_to_apply, + show_diff=show_diff, + write_to_stdout=write_to_stdout, + extension=ext_format, + config_trie=config_trie, + ) + for file_name in file_names + ) + + # If any files passed in are missing considered as error, should be removed + is_no_attempt = True + any_encoding_valid = False + for sort_attempt in attempt_iterator: + if not sort_attempt: + continue # pragma: no cover - shouldn't happen, satisfies type constraint + incorrectly_sorted = sort_attempt.incorrectly_sorted + if arguments.get("check", False) and incorrectly_sorted: + wrong_sorted_files = True + if sort_attempt.skipped: + num_skipped += ( + 1 # pragma: no cover - shouldn't happen, due to skip in iter_source_code + ) + + if not sort_attempt.supported_encoding: + num_invalid_encoding += 1 + else: + any_encoding_valid = True + + is_no_attempt = False + + num_skipped += len(skipped) + if num_skipped and not config.quiet: + if config.verbose: + for was_skipped in skipped: + print( + f"{was_skipped} was skipped as it's listed in 'skip' setting, " + "matches a glob in 'skip_glob' setting, or is in a .gitignore file with " + "--skip-gitignore enabled." + ) + print(f"Skipped {num_skipped} files") + + num_broken += len(broken) + if num_broken and not config.quiet: + if config.verbose: + for was_broken in broken: + warn(f"{was_broken} was broken path, make sure it exists correctly") + print(f"Broken {num_broken} paths") + + if num_broken > 0 and is_no_attempt: + all_attempt_broken = True + if num_invalid_encoding > 0 and not any_encoding_valid: + no_valid_encodings = True + + if not config.quiet and (remapped_deprecated_args or deprecated_flags): + if remapped_deprecated_args: + warn( + "W0502: The following deprecated single dash CLI flags were used and translated: " + f"{', '.join(remapped_deprecated_args)}!" + ) + if deprecated_flags: + warn( + "W0501: The following deprecated CLI flags were used and ignored: " + f"{', '.join(deprecated_flags)}!" + ) + warn( + "W0500: Please see the 5.0.0 Upgrade guide: " + "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html" + ) + + if wrong_sorted_files: + sys.exit(1) + + if all_attempt_broken: + sys.exit(1) + + if no_valid_encodings: + printer = create_terminal_printer( + color=config.color_output, error=config.format_error, success=config.format_success + ) + printer.error("No valid encodings.") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/myenv/lib/python3.9/site-packages/isort/output.py b/myenv/lib/python3.9/site-packages/isort/output.py new file mode 100644 index 0000000..d049daf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/output.py @@ -0,0 +1,655 @@ +import copy +import itertools +from functools import partial +from typing import Any, Iterable, List, Optional, Set, Tuple, Type + +from isort.format import format_simplified + +from . import parse, sorting, wrap +from .comments import add_to_line as with_comments +from .identify import STATEMENT_DECLARATIONS +from .settings import DEFAULT_CONFIG, Config + + +def sorted_imports( + parsed: parse.ParsedContent, + config: Config = DEFAULT_CONFIG, + extension: str = "py", + import_type: str = "import", +) -> str: + """Adds the imports back to the file. 
+ + (at the index of the first import) sorted alphabetically and split between groups + + """ + if parsed.import_index == -1: + return _output_as_string(parsed.lines_without_imports, parsed.line_separator) + + formatted_output: List[str] = parsed.lines_without_imports.copy() + remove_imports = [format_simplified(removal) for removal in config.remove_imports] + + sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) + + if config.no_sections: + parsed.imports["no_sections"] = {"straight": {}, "from": {}} + base_sections: Tuple[str, ...] = () + for section in sections: + if section == "FUTURE": + base_sections = ("FUTURE",) + continue + parsed.imports["no_sections"]["straight"].update( + parsed.imports[section].get("straight", {}) + ) + parsed.imports["no_sections"]["from"].update(parsed.imports[section].get("from", {})) + sections = base_sections + ("no_sections",) + + output: List[str] = [] + seen_headings: Set[str] = set() + pending_lines_before = False + for section in sections: + straight_modules = parsed.imports[section]["straight"] + if not config.only_sections: + straight_modules = sorting.sort( + config, + straight_modules, + key=lambda key: sorting.module_key( + key, config, section_name=section, straight_import=True + ), + reverse=config.reverse_sort, + ) + + from_modules = parsed.imports[section]["from"] + if not config.only_sections: + from_modules = sorting.sort( + config, + from_modules, + key=lambda key: sorting.module_key(key, config, section_name=section), + reverse=config.reverse_sort, + ) + + if config.star_first: + star_modules = [] + other_modules = [] + for module in from_modules: + if "*" in parsed.imports[section]["from"][module]: + star_modules.append(module) + else: + other_modules.append(module) + from_modules = star_modules + other_modules + + straight_imports = _with_straight_imports( + parsed, config, straight_modules, section, remove_imports, import_type + ) + from_imports = _with_from_imports( + parsed, config, from_modules, section, remove_imports, import_type + ) + + lines_between = [""] * ( + config.lines_between_types if from_modules and straight_modules else 0 + ) + if config.from_first: + section_output = from_imports + lines_between + straight_imports + else: + section_output = straight_imports + lines_between + from_imports + + if config.force_sort_within_sections: + # collapse comments + comments_above = [] + new_section_output: List[str] = [] + for line in section_output: + if not line: + continue + if line.startswith("#"): + comments_above.append(line) + elif comments_above: + new_section_output.append(_LineWithComments(line, comments_above)) + comments_above = [] + else: + new_section_output.append(line) + # only_sections options is not imposed if force_sort_within_sections is True + new_section_output = sorting.sort( + config, + new_section_output, + key=partial(sorting.section_key, config=config), + reverse=config.reverse_sort, + ) + + # uncollapse comments + section_output = [] + for line in new_section_output: + comments = getattr(line, "comments", ()) + if comments: + section_output.extend(comments) + section_output.append(str(line)) + + section_name = section + no_lines_before = section_name in config.no_lines_before + + if section_output: + if section_name in parsed.place_imports: + parsed.place_imports[section_name] = section_output + continue + + section_title = config.import_headings.get(section_name.lower(), "") + if section_title and section_title not in seen_headings: + if config.dedup_headings: + 
seen_headings.add(section_title) + section_comment = f"# {section_title}" + if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch + section_output.insert(0, section_comment) + + section_footer = config.import_footers.get(section_name.lower(), "") + if section_footer and section_footer not in seen_headings: + if config.dedup_headings: + seen_headings.add(section_footer) + section_comment_end = f"# {section_footer}" + if ( + section_comment_end not in parsed.lines_without_imports[-1:] + ): # pragma: no branch + section_output.append("") # Empty line for black compatibility + section_output.append(section_comment_end) + + if pending_lines_before or not no_lines_before: + output += [""] * config.lines_between_sections + + output += section_output + + pending_lines_before = False + else: + pending_lines_before = pending_lines_before or not no_lines_before + + if config.ensure_newline_before_comments: + output = _ensure_newline_before_comment(output) + + while output and output[-1].strip() == "": + output.pop() # pragma: no cover + while output and output[0].strip() == "": + output.pop(0) + + if config.formatting_function: + output = config.formatting_function( + parsed.line_separator.join(output), extension, config + ).splitlines() + + output_at = 0 + if parsed.import_index < parsed.original_line_count: + output_at = parsed.import_index + formatted_output[output_at:0] = output + + if output: + imports_tail = output_at + len(output) + while [ + character.strip() for character in formatted_output[imports_tail : imports_tail + 1] + ] == [""]: + formatted_output.pop(imports_tail) + + if len(formatted_output) > imports_tail: + next_construct = "" + tail = formatted_output[imports_tail:] + + for index, line in enumerate(tail): # pragma: no branch + should_skip, in_quote, *_ = parse.skip_line( + line, + in_quote="", + index=len(formatted_output), + section_comments=config.section_comments, + needs_import=False, + ) + if not should_skip and line.strip(): + if ( + line.strip().startswith("#") + and len(tail) > (index + 1) + and tail[index + 1].strip() + ): + continue + next_construct = line + break + if in_quote: # pragma: no branch + next_construct = line + break + + if config.lines_after_imports != -1: + formatted_output[imports_tail:0] = [ + "" for line in range(config.lines_after_imports) + ] + elif extension != "pyi" and next_construct.startswith(STATEMENT_DECLARATIONS): + formatted_output[imports_tail:0] = ["", ""] + else: + formatted_output[imports_tail:0] = [""] + + if config.lines_before_imports != -1: + formatted_output[:0] = ["" for line in range(config.lines_before_imports)] + + if parsed.place_imports: + new_out_lines = [] + for index, line in enumerate(formatted_output): + new_out_lines.append(line) + if line in parsed.import_placements: + new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) + if ( + len(formatted_output) <= (index + 1) + or formatted_output[index + 1].strip() != "" + ): + new_out_lines.append("") + formatted_output = new_out_lines + + return _output_as_string(formatted_output, parsed.line_separator) + + +def _with_from_imports( + parsed: parse.ParsedContent, + config: Config, + from_modules: Iterable[str], + section: str, + remove_imports: List[str], + import_type: str, +) -> List[str]: + output: List[str] = [] + for module in from_modules: + if module in remove_imports: + continue + + import_start = f"from {module} {import_type} " + from_imports = list(parsed.imports[section]["from"][module]) + if ( + not 
config.no_inline_sort + or (config.force_single_line and module not in config.single_line_exclusions) + ) and not config.only_sections: + from_imports = sorting.sort( + config, + from_imports, + key=lambda key: sorting.module_key( + key, + config, + True, + config.force_alphabetical_sort_within_sections, + section_name=section, + ), + reverse=config.reverse_sort, + ) + if remove_imports: + from_imports = [ + line for line in from_imports if f"{module}.{line}" not in remove_imports + ] + + sub_modules = [f"{module}.{from_import}" for from_import in from_imports] + as_imports = { + from_import: [ + f"{from_import} as {as_module}" for as_module in parsed.as_map["from"][sub_module] + ] + for from_import, sub_module in zip(from_imports, sub_modules) + if sub_module in parsed.as_map["from"] + } + if config.combine_as_imports and not ("*" in from_imports and config.combine_star): + if not config.no_inline_sort: + for as_import in as_imports: + if not config.only_sections: + as_imports[as_import] = sorting.sort(config, as_imports[as_import]) + for from_import in copy.copy(from_imports): + if from_import in as_imports: + idx = from_imports.index(from_import) + if parsed.imports[section]["from"][module][from_import]: + from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import) + else: + from_imports[idx : (idx + 1)] = as_imports.pop(from_import) + + only_show_as_imports = False + comments = parsed.categorized_comments["from"].pop(module, ()) + above_comments = parsed.categorized_comments["above"]["from"].pop(module, None) + while from_imports: + if above_comments: + output.extend(above_comments) + above_comments = None + + if "*" in from_imports and config.combine_star: + import_statement = wrap.line( + with_comments( + _with_star_comments(parsed, module, list(comments or ())), + f"{import_start}*", + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ), + parsed.line_separator, + config, + ) + from_imports = [ + from_import for from_import in from_imports if from_import in as_imports + ] + only_show_as_imports = True + elif config.force_single_line and module not in config.single_line_exclusions: + import_statement = "" + while from_imports: + from_import = from_imports.pop(0) + single_import_line = with_comments( + comments, + import_start + from_import, + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + comment = ( + parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None) + ) + if comment: + single_import_line += ( + f"{comments and ';' or config.comment_prefix} " f"{comment}" + ) + if from_import in as_imports: + if ( + parsed.imports[section]["from"][module][from_import] + and not only_show_as_imports + ): + output.append( + wrap.line(single_import_line, parsed.line_separator, config) + ) + from_comments = parsed.categorized_comments["straight"].get( + f"{module}.{from_import}" + ) + + if not config.only_sections: + output.extend( + with_comments( + from_comments, + wrap.line( + import_start + as_import, parsed.line_separator, config + ), + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + for as_import in sorting.sort(config, as_imports[from_import]) + ) + + else: + output.extend( + with_comments( + from_comments, + wrap.line( + import_start + as_import, parsed.line_separator, config + ), + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + for as_import in as_imports[from_import] + ) + else: + output.append(wrap.line(single_import_line, parsed.line_separator, 
config)) + comments = None + else: + while from_imports and from_imports[0] in as_imports: + from_import = from_imports.pop(0) + + if not config.only_sections: + as_imports[from_import] = sorting.sort(config, as_imports[from_import]) + from_comments = ( + parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or [] + ) + if ( + parsed.imports[section]["from"][module][from_import] + and not only_show_as_imports + ): + specific_comment = ( + parsed.categorized_comments["nested"] + .get(module, {}) + .pop(from_import, None) + ) + if specific_comment: + from_comments.append(specific_comment) + output.append( + wrap.line( + with_comments( + from_comments, + import_start + from_import, + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ), + parsed.line_separator, + config, + ) + ) + from_comments = [] + + for as_import in as_imports[from_import]: + specific_comment = ( + parsed.categorized_comments["nested"] + .get(module, {}) + .pop(as_import, None) + ) + if specific_comment: + from_comments.append(specific_comment) + + output.append( + wrap.line( + with_comments( + from_comments, + import_start + as_import, + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ), + parsed.line_separator, + config, + ) + ) + + from_comments = [] + + if "*" in from_imports: + output.append( + with_comments( + _with_star_comments(parsed, module, []), + f"{import_start}*", + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + ) + from_imports.remove("*") + + for from_import in copy.copy(from_imports): + comment = ( + parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None) + ) + if comment: + from_imports.remove(from_import) + if from_imports: + use_comments = [] + else: + use_comments = comments + comments = None + single_import_line = with_comments( + use_comments, + import_start + from_import, + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + single_import_line += ( + f"{use_comments and ';' or config.comment_prefix} " f"{comment}" + ) + output.append(wrap.line(single_import_line, parsed.line_separator, config)) + + from_import_section = [] + while from_imports and ( + from_imports[0] not in as_imports + or ( + config.combine_as_imports + and parsed.imports[section]["from"][module][from_import] + ) + ): + from_import_section.append(from_imports.pop(0)) + if config.combine_as_imports: + comments = (comments or []) + list( + parsed.categorized_comments["from"].pop(f"{module}.__combined_as__", ()) + ) + import_statement = with_comments( + comments, + import_start + (", ").join(from_import_section), + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + if not from_import_section: + import_statement = "" + + do_multiline_reformat = False + + force_grid_wrap = config.force_grid_wrap + if force_grid_wrap and len(from_import_section) >= force_grid_wrap: + do_multiline_reformat = True + + if len(import_statement) > config.line_length and len(from_import_section) > 1: + do_multiline_reformat = True + + # If line too long AND have imports AND we are + # NOT using GRID or VERTICAL wrap modes + if ( + len(import_statement) > config.line_length + and len(from_import_section) > 0 + and config.multi_line_output + not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore + ): + do_multiline_reformat = True + + if do_multiline_reformat: + import_statement = wrap.import_statement( + import_start=import_start, + from_imports=from_import_section, + comments=comments, 
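+                    # statement is too long for one line: re-wrap it using the configured multi_line_output mode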
+ line_separator=parsed.line_separator, + config=config, + ) + if config.multi_line_output == wrap.Modes.GRID: # type: ignore + other_import_statement = wrap.import_statement( + import_start=import_start, + from_imports=from_import_section, + comments=comments, + line_separator=parsed.line_separator, + config=config, + multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore + ) + if ( + max( + len(import_line) + for import_line in import_statement.split(parsed.line_separator) + ) + > config.line_length + ): + import_statement = other_import_statement + if not do_multiline_reformat and len(import_statement) > config.line_length: + import_statement = wrap.line(import_statement, parsed.line_separator, config) + + if import_statement: + output.append(import_statement) + return output + + +def _with_straight_imports( + parsed: parse.ParsedContent, + config: Config, + straight_modules: Iterable[str], + section: str, + remove_imports: List[str], + import_type: str, +) -> List[str]: + output: List[str] = [] + + as_imports = any((module in parsed.as_map["straight"] for module in straight_modules)) + + # combine_straight_imports only works for bare imports, 'as' imports not included + if config.combine_straight_imports and not as_imports: + if not straight_modules: + return [] + + above_comments: List[str] = [] + inline_comments: List[str] = [] + + for module in straight_modules: + if module in parsed.categorized_comments["above"]["straight"]: + above_comments.extend(parsed.categorized_comments["above"]["straight"].pop(module)) + if module in parsed.categorized_comments["straight"]: + inline_comments.extend(parsed.categorized_comments["straight"][module]) + + combined_straight_imports = ", ".join(straight_modules) + if inline_comments: + combined_inline_comments = " ".join(inline_comments) + else: + combined_inline_comments = "" + + output.extend(above_comments) + + if combined_inline_comments: + output.append( + f"{import_type} {combined_straight_imports} # {combined_inline_comments}" + ) + else: + output.append(f"{import_type} {combined_straight_imports}") + + return output + + for module in straight_modules: + if module in remove_imports: + continue + + import_definition = [] + if module in parsed.as_map["straight"]: + if parsed.imports[section]["straight"][module]: + import_definition.append((f"{import_type} {module}", module)) + import_definition.extend( + (f"{import_type} {module} as {as_import}", f"{module} as {as_import}") + for as_import in parsed.as_map["straight"][module] + ) + else: + import_definition.append((f"{import_type} {module}", module)) + + comments_above = parsed.categorized_comments["above"]["straight"].pop(module, None) + if comments_above: + output.extend(comments_above) + output.extend( + with_comments( + parsed.categorized_comments["straight"].get(imodule), + idef, + removed=config.ignore_comments, + comment_prefix=config.comment_prefix, + ) + for idef, imodule in import_definition + ) + + return output + + +def _output_as_string(lines: List[str], line_separator: str) -> str: + return line_separator.join(_normalize_empty_lines(lines)) + + +def _normalize_empty_lines(lines: List[str]) -> List[str]: + while lines and lines[-1].strip() == "": + lines.pop(-1) + + lines.append("") + return lines + + +class _LineWithComments(str): + comments: List[str] + + def __new__( + cls: Type["_LineWithComments"], value: Any, comments: List[str] + ) -> "_LineWithComments": + instance = super().__new__(cls, value) + instance.comments = comments + return instance + + +def 
_ensure_newline_before_comment(output: List[str]) -> List[str]: + new_output: List[str] = [] + + def is_comment(line: Optional[str]) -> bool: + return line.startswith("#") if line else False + + for line, prev_line in zip(output, [None] + output): # type: ignore + if is_comment(line) and prev_line != "" and not is_comment(prev_line): + new_output.append("") + new_output.append(line) + return new_output + + +def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]: + star_comment = parsed.categorized_comments["nested"].get(module, {}).pop("*", None) + if star_comment: + return comments + [star_comment] + return comments diff --git a/myenv/lib/python3.9/site-packages/isort/parse.py b/myenv/lib/python3.9/site-packages/isort/parse.py new file mode 100644 index 0000000..7fc6c8e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/parse.py @@ -0,0 +1,590 @@ +"""Defines parsing functions used by isort for parsing import definitions""" +from collections import OrderedDict, defaultdict +from functools import partial +from itertools import chain +from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple +from warnings import warn + +from . import place +from .comments import parse as parse_comments +from .exceptions import MissingSection +from .settings import DEFAULT_CONFIG, Config + +if TYPE_CHECKING: + from mypy_extensions import TypedDict + + CommentsAboveDict = TypedDict( + "CommentsAboveDict", {"straight": Dict[str, Any], "from": Dict[str, Any]} + ) + + CommentsDict = TypedDict( + "CommentsDict", + { + "from": Dict[str, Any], + "straight": Dict[str, Any], + "nested": Dict[str, Any], + "above": CommentsAboveDict, + }, + ) + + +def _infer_line_separator(contents: str) -> str: + if "\r\n" in contents: + return "\r\n" + if "\r" in contents: + return "\r" + return "\n" + + +def _normalize_line(raw_line: str) -> Tuple[str, str]: + """Normalizes import related statements in the provided line. + + Returns (normalized_line: str, raw_line: str) + """ + line = raw_line.replace("from.import ", "from . import ") + line = line.replace("from.cimport ", "from . cimport ") + line = line.replace("import*", "import *") + line = line.replace(" .import ", " . import ") + line = line.replace(" .cimport ", " . 
cimport ") + line = line.replace("\t", " ") + return (line, raw_line) + + +def import_type(line: str, config: Config = DEFAULT_CONFIG) -> Optional[str]: + """If the current line is an import line it will return its type (from or straight)""" + if config.honor_noqa and line.lower().rstrip().endswith("noqa"): + return None + if "isort:skip" in line or "isort: skip" in line or "isort: split" in line: + return None + if line.startswith(("import ", "cimport ")): + return "straight" + if line.startswith("from "): + return "from" + return None + + +def _strip_syntax(import_string: str) -> str: + import_string = import_string.replace("_import", "[[i]]") + import_string = import_string.replace("_cimport", "[[ci]]") + for remove_syntax in ["\\", "(", ")", ","]: + import_string = import_string.replace(remove_syntax, " ") + import_list = import_string.split() + for key in ("from", "import", "cimport"): + if key in import_list: + import_list.remove(key) + import_string = " ".join(import_list) + import_string = import_string.replace("[[i]]", "_import") + import_string = import_string.replace("[[ci]]", "_cimport") + return import_string.replace("{ ", "{|").replace(" }", "|}") + + +def skip_line( + line: str, + in_quote: str, + index: int, + section_comments: Tuple[str, ...], + needs_import: bool = True, +) -> Tuple[bool, str]: + """Determine if a given line should be skipped. + + Returns back a tuple containing: + + (skip_line: bool, + in_quote: str,) + """ + should_skip = bool(in_quote) + if '"' in line or "'" in line: + char_index = 0 + while char_index < len(line): + if line[char_index] == "\\": + char_index += 1 + elif in_quote: + if line[char_index : char_index + len(in_quote)] == in_quote: + in_quote = "" + elif line[char_index] in ("'", '"'): + long_quote = line[char_index : char_index + 3] + if long_quote in ('"""', "'''"): + in_quote = long_quote + char_index += 2 + else: + in_quote = line[char_index] + elif line[char_index] == "#": + break + char_index += 1 + + if ";" in line.split("#")[0] and needs_import: + for part in (part.strip() for part in line.split(";")): + if ( + part + and not part.startswith("from ") + and not part.startswith(("import ", "cimport ")) + ): + should_skip = True + + return (bool(should_skip or in_quote), in_quote) + + +class ParsedContent(NamedTuple): + in_lines: List[str] + lines_without_imports: List[str] + import_index: int + place_imports: Dict[str, List[str]] + import_placements: Dict[str, str] + as_map: Dict[str, Dict[str, List[str]]] + imports: Dict[str, Dict[str, Any]] + categorized_comments: "CommentsDict" + change_count: int + original_line_count: int + line_separator: str + sections: Any + verbose_output: List[str] + + +def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedContent: + """Parses a python file taking out and categorizing imports.""" + line_separator: str = config.line_ending or _infer_line_separator(contents) + in_lines = contents.splitlines() + if contents and contents[-1] in ("\n", "\r"): + in_lines.append("") + + out_lines = [] + original_line_count = len(in_lines) + if config.old_finders: + from .deprecated.finders import FindersManager + + finder = FindersManager(config=config).find + else: + finder = partial(place.module, config=config) + + line_count = len(in_lines) + + place_imports: Dict[str, List[str]] = {} + import_placements: Dict[str, str] = {} + as_map: Dict[str, Dict[str, List[str]]] = { + "straight": defaultdict(list), + "from": defaultdict(list), + } + imports: OrderedDict[str, Dict[str, Any]] = 
OrderedDict() + verbose_output: List[str] = [] + + for section in chain(config.sections, config.forced_separate): + imports[section] = {"straight": OrderedDict(), "from": OrderedDict()} + categorized_comments: CommentsDict = { + "from": {}, + "straight": {}, + "nested": {}, + "above": {"straight": {}, "from": {}}, + } + + index = 0 + import_index = -1 + in_quote = "" + while index < line_count: + line = in_lines[index] + index += 1 + statement_index = index + (skipping_line, in_quote) = skip_line( + line, in_quote=in_quote, index=index, section_comments=config.section_comments + ) + + if ( + line in config.section_comments or line in config.section_comments_end + ) and not skipping_line: + if import_index == -1: # pragma: no branch + import_index = index - 1 + continue + + if "isort:imports-" in line and line.startswith("#"): + section = line.split("isort:imports-")[-1].split()[0].upper() + place_imports[section] = [] + import_placements[line] = section + elif "isort: imports-" in line and line.startswith("#"): + section = line.split("isort: imports-")[-1].split()[0].upper() + place_imports[section] = [] + import_placements[line] = section + + if skipping_line: + out_lines.append(line) + continue + + lstripped_line = line.lstrip() + if ( + config.float_to_top + and import_index == -1 + and line + and not in_quote + and not lstripped_line.startswith("#") + and not lstripped_line.startswith("'''") + and not lstripped_line.startswith('"""') + ): + if not lstripped_line.startswith("import") and not lstripped_line.startswith("from"): + import_index = index - 1 + while import_index and not in_lines[import_index - 1]: + import_index -= 1 + else: + commentless = line.split("#", 1)[0].strip() + if ( + ("isort:skip" in line or "isort: skip" in line) + and "(" in commentless + and ")" not in commentless + ): + import_index = index + + starting_line = line + while "isort:skip" in starting_line or "isort: skip" in starting_line: + commentless = starting_line.split("#", 1)[0] + if ( + "(" in commentless + and not commentless.rstrip().endswith(")") + and import_index < line_count + ): + + while import_index < line_count and not commentless.rstrip().endswith( + ")" + ): + commentless = in_lines[import_index].split("#", 1)[0] + import_index += 1 + else: + import_index += 1 + + if import_index >= line_count: + break + + starting_line = in_lines[import_index] + + line, *end_of_line_comment = line.split("#", 1) + if ";" in line: + statements = [line.strip() for line in line.split(";")] + else: + statements = [line] + if end_of_line_comment: + statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}" + + for statement in statements: + line, raw_line = _normalize_line(statement) + type_of_import = import_type(line, config) or "" + raw_lines = [raw_line] + if not type_of_import: + out_lines.append(raw_line) + continue + + if import_index == -1: + import_index = index - 1 + nested_comments = {} + import_string, comment = parse_comments(line) + comments = [comment] if comment else [] + line_parts = [part for part in _strip_syntax(import_string).strip().split(" ") if part] + if type_of_import == "from" and len(line_parts) == 2 and comments: + nested_comments[line_parts[-1]] = comments[0] + + if "(" in line.split("#", 1)[0] and index < line_count: + while not line.split("#")[0].strip().endswith(")") and index < line_count: + line, new_comment = parse_comments(in_lines[index]) + index += 1 + if new_comment: + comments.append(new_comment) + stripped_line = _strip_syntax(line).strip() + if ( + type_of_import == 
"from" + and stripped_line + and " " not in stripped_line.replace(" as ", "") + and new_comment + ): + nested_comments[stripped_line] = comments[-1] + import_string += line_separator + line + raw_lines.append(line) + else: + while line.strip().endswith("\\"): + line, new_comment = parse_comments(in_lines[index]) + line = line.lstrip() + index += 1 + if new_comment: + comments.append(new_comment) + + # Still need to check for parentheses after an escaped line + if ( + "(" in line.split("#")[0] + and ")" not in line.split("#")[0] + and index < line_count + ): + stripped_line = _strip_syntax(line).strip() + if ( + type_of_import == "from" + and stripped_line + and " " not in stripped_line.replace(" as ", "") + and new_comment + ): + nested_comments[stripped_line] = comments[-1] + import_string += line_separator + line + raw_lines.append(line) + + while not line.split("#")[0].strip().endswith(")") and index < line_count: + line, new_comment = parse_comments(in_lines[index]) + index += 1 + if new_comment: + comments.append(new_comment) + stripped_line = _strip_syntax(line).strip() + if ( + type_of_import == "from" + and stripped_line + and " " not in stripped_line.replace(" as ", "") + and new_comment + ): + nested_comments[stripped_line] = comments[-1] + import_string += line_separator + line + raw_lines.append(line) + + stripped_line = _strip_syntax(line).strip() + if ( + type_of_import == "from" + and stripped_line + and " " not in stripped_line.replace(" as ", "") + and new_comment + ): + nested_comments[stripped_line] = comments[-1] + if import_string.strip().endswith( + (" import", " cimport") + ) or line.strip().startswith(("import ", "cimport ")): + import_string += line_separator + line + else: + import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip() + + if type_of_import == "from": + cimports: bool + import_string = ( + import_string.replace("import(", "import (") + .replace("\\", " ") + .replace("\n", " ") + ) + if "import " not in import_string: + out_lines.extend(raw_lines) + continue + + if " cimport " in import_string: + parts = import_string.split(" cimport ") + cimports = True + + else: + parts = import_string.split(" import ") + cimports = False + + from_import = parts[0].split(" ") + import_string = (" cimport " if cimports else " import ").join( + [from_import[0] + " " + "".join(from_import[1:])] + parts[1:] + ) + + just_imports = [ + item.replace("{|", "{ ").replace("|}", " }") + for item in _strip_syntax(import_string).split() + ] + + attach_comments_to: Optional[List[Any]] = None + direct_imports = just_imports[1:] + straight_import = True + top_level_module = "" + if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports): + straight_import = False + while "as" in just_imports: + nested_module = None + as_index = just_imports.index("as") + if type_of_import == "from": + nested_module = just_imports[as_index - 1] + top_level_module = just_imports[0] + module = top_level_module + "." 
+ nested_module + as_name = just_imports[as_index + 1] + direct_imports.remove(nested_module) + direct_imports.remove(as_name) + direct_imports.remove("as") + if nested_module == as_name and config.remove_redundant_aliases: + pass + elif as_name not in as_map["from"][module]: # pragma: no branch + as_map["from"][module].append(as_name) + + full_name = f"{nested_module} as {as_name}" + associated_comment = nested_comments.get(full_name) + if associated_comment: + categorized_comments["nested"].setdefault(top_level_module, {})[ + full_name + ] = associated_comment + if associated_comment in comments: # pragma: no branch + comments.pop(comments.index(associated_comment)) + else: + module = just_imports[as_index - 1] + as_name = just_imports[as_index + 1] + if module == as_name and config.remove_redundant_aliases: + pass + elif as_name not in as_map["straight"][module]: + as_map["straight"][module].append(as_name) + + if comments and attach_comments_to is None: + if nested_module and config.combine_as_imports: + attach_comments_to = categorized_comments["from"].setdefault( + f"{top_level_module}.__combined_as__", [] + ) + else: + if type_of_import == "from" or ( + config.remove_redundant_aliases and as_name == module.split(".")[-1] + ): + attach_comments_to = categorized_comments["straight"].setdefault( + module, [] + ) + else: + attach_comments_to = categorized_comments["straight"].setdefault( + f"{module} as {as_name}", [] + ) + del just_imports[as_index : as_index + 2] + + if type_of_import == "from": + import_from = just_imports.pop(0) + placed_module = finder(import_from) + if config.verbose and not config.only_modified: + print(f"from-type place_module for {import_from} returned {placed_module}") + + elif config.verbose: + verbose_output.append( + f"from-type place_module for {import_from} returned {placed_module}" + ) + if placed_module == "": + warn( + f"could not place module {import_from} of line {line} --" + " Do you need to define a default section?" 
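+                        # emitted when place_module returned an empty section name for this import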
+ ) + + if placed_module and placed_module not in imports: + raise MissingSection(import_module=import_from, section=placed_module) + + root = imports[placed_module][type_of_import] # type: ignore + for import_name in just_imports: + associated_comment = nested_comments.get(import_name) + if associated_comment: + categorized_comments["nested"].setdefault(import_from, {})[ + import_name + ] = associated_comment + if associated_comment in comments: # pragma: no branch + comments.pop(comments.index(associated_comment)) + if ( + config.force_single_line + and comments + and attach_comments_to is None + and len(just_imports) == 1 + ): + nested_from_comments = categorized_comments["nested"].setdefault( + import_from, {} + ) + existing_comment = nested_from_comments.get(just_imports[0], "") + nested_from_comments[ + just_imports[0] + ] = f"{existing_comment}{'; ' if existing_comment else ''}{'; '.join(comments)}" + comments = [] + + if comments and attach_comments_to is None: + attach_comments_to = categorized_comments["from"].setdefault(import_from, []) + + if len(out_lines) > max(import_index, 1) - 1: + last = out_lines[-1].rstrip() if out_lines else "" + while ( + last.startswith("#") + and not last.endswith('"""') + and not last.endswith("'''") + and "isort:imports-" not in last + and "isort: imports-" not in last + and not config.treat_all_comments_as_code + and not last.strip() in config.treat_comments_as_code + ): + categorized_comments["above"]["from"].setdefault(import_from, []).insert( + 0, out_lines.pop(-1) + ) + if out_lines: + last = out_lines[-1].rstrip() + else: + last = "" + if statement_index - 1 == import_index: # pragma: no cover + import_index -= len( + categorized_comments["above"]["from"].get(import_from, []) + ) + + if import_from not in root: + root[import_from] = OrderedDict( + (module, module in direct_imports) for module in just_imports + ) + else: + root[import_from].update( + (module, root[import_from].get(module, False) or module in direct_imports) + for module in just_imports + ) + + if comments and attach_comments_to is not None: + attach_comments_to.extend(comments) + else: + if comments and attach_comments_to is not None: + attach_comments_to.extend(comments) + comments = [] + + for module in just_imports: + if comments: + categorized_comments["straight"][module] = comments + comments = [] + + if len(out_lines) > max(import_index, +1, 1) - 1: + + last = out_lines[-1].rstrip() if out_lines else "" + while ( + last.startswith("#") + and not last.endswith('"""') + and not last.endswith("'''") + and "isort:imports-" not in last + and "isort: imports-" not in last + and not config.treat_all_comments_as_code + and not last.strip() in config.treat_comments_as_code + ): + categorized_comments["above"]["straight"].setdefault(module, []).insert( + 0, out_lines.pop(-1) + ) + if out_lines: + last = out_lines[-1].rstrip() + else: + last = "" + if index - 1 == import_index: + import_index -= len( + categorized_comments["above"]["straight"].get(module, []) + ) + placed_module = finder(module) + if config.verbose and not config.only_modified: + print(f"else-type place_module for {module} returned {placed_module}") + + elif config.verbose: + verbose_output.append( + f"else-type place_module for {module} returned {placed_module}" + ) + if placed_module == "": + warn( + f"could not place module {module} of line {line} --" + " Do you need to define a default section?" 
+ ) + imports.setdefault("", {"straight": OrderedDict(), "from": OrderedDict()}) + + if placed_module and placed_module not in imports: + raise MissingSection(import_module=module, section=placed_module) + + straight_import |= imports[placed_module][type_of_import].get( # type: ignore + module, False + ) + imports[placed_module][type_of_import][module] = straight_import # type: ignore + + change_count = len(out_lines) - original_line_count + + return ParsedContent( + in_lines=in_lines, + lines_without_imports=out_lines, + import_index=import_index, + place_imports=place_imports, + import_placements=import_placements, + as_map=as_map, + imports=imports, + categorized_comments=categorized_comments, + change_count=change_count, + original_line_count=original_line_count, + line_separator=line_separator, + sections=config.sections, + verbose_output=verbose_output, + ) diff --git a/myenv/lib/python3.9/site-packages/isort/place.py b/myenv/lib/python3.9/site-packages/isort/place.py new file mode 100644 index 0000000..47a68c7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/place.py @@ -0,0 +1,145 @@ +"""Contains all logic related to placing an import within a certain section.""" +import importlib +from fnmatch import fnmatch +from functools import lru_cache +from pathlib import Path +from typing import FrozenSet, Iterable, Optional, Tuple + +from isort import sections +from isort.settings import DEFAULT_CONFIG, Config +from isort.utils import exists_case_sensitive + +LOCAL = "LOCALFOLDER" + + +def module(name: str, config: Config = DEFAULT_CONFIG) -> str: + """Returns the section placement for the given module name.""" + return module_with_reason(name, config)[0] + + +@lru_cache(maxsize=1000) +def module_with_reason(name: str, config: Config = DEFAULT_CONFIG) -> Tuple[str, str]: + """Returns the section placement for the given module name alongside the reasoning.""" + return ( + _forced_separate(name, config) + or _local(name, config) + or _known_pattern(name, config) + or _src_path(name, config) + or (config.default_section, "Default option in Config or universal default.") + ) + + +def _forced_separate(name: str, config: Config) -> Optional[Tuple[str, str]]: + for forced_separate in config.forced_separate: + # Ensure all forced_separate patterns will match to end of string + path_glob = forced_separate + if not forced_separate.endswith("*"): + path_glob = "%s*" % forced_separate + + if fnmatch(name, path_glob) or fnmatch(name, "." + path_glob): + return (forced_separate, f"Matched forced_separate ({forced_separate}) config value.") + + return None + + +def _local(name: str, config: Config) -> Optional[Tuple[str, str]]: + if name.startswith("."): + return (LOCAL, "Module name started with a dot.") + + return None + + +def _known_pattern(name: str, config: Config) -> Optional[Tuple[str, str]]: + parts = name.split(".") + module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1)) + for module_name_to_check in module_names_to_check: + for pattern, placement in config.known_patterns: + if placement in config.sections and pattern.match(module_name_to_check): + return (placement, f"Matched configured known pattern {pattern}") + + return None + + +def _src_path( + name: str, + config: Config, + src_paths: Optional[Iterable[Path]] = None, + prefix: Tuple[str, ...] 
= (), +) -> Optional[Tuple[str, str]]: + if src_paths is None: + src_paths = config.src_paths + + root_module_name, *nested_module = name.split(".", 1) + new_prefix = prefix + (root_module_name,) + namespace = ".".join(new_prefix) + + for src_path in src_paths: + module_path = (src_path / root_module_name).resolve() + if not prefix and not module_path.is_dir() and src_path.name == root_module_name: + module_path = src_path.resolve() + if nested_module and ( + namespace in config.namespace_packages + or ( + config.auto_identify_namespace_packages + and _is_namespace_package(module_path, config.supported_extensions) + ) + ): + return _src_path(nested_module[0], config, (module_path,), new_prefix) + if ( + _is_module(module_path) + or _is_package(module_path) + or _src_path_is_module(src_path, root_module_name) + ): + return (sections.FIRSTPARTY, f"Found in one of the configured src_paths: {src_path}.") + + return None + + +def _is_module(path: Path) -> bool: + return ( + exists_case_sensitive(str(path.with_suffix(".py"))) + or any( + exists_case_sensitive(str(path.with_suffix(ext_suffix))) + for ext_suffix in importlib.machinery.EXTENSION_SUFFIXES + ) + or exists_case_sensitive(str(path / "__init__.py")) + ) + + +def _is_package(path: Path) -> bool: + return exists_case_sensitive(str(path)) and path.is_dir() + + +def _is_namespace_package(path: Path, src_extensions: FrozenSet[str]) -> bool: + if not _is_package(path): + return False + + init_file = path / "__init__.py" + if not init_file.exists(): + filenames = [ + filepath + for filepath in path.iterdir() + if filepath.suffix.lstrip(".") in src_extensions + or filepath.name.lower() in ("setup.cfg", "pyproject.toml") + ] + if filenames: + return False + else: + with init_file.open("rb") as open_init_file: + file_start = open_init_file.read(4096) + if ( + b"__import__('pkg_resources').declare_namespace(__name__)" not in file_start + and b'__import__("pkg_resources").declare_namespace(__name__)' not in file_start + and b"__path__ = __import__('pkgutil').extend_path(__path__, __name__)" + not in file_start + and b'__path__ = __import__("pkgutil").extend_path(__path__, __name__)' + not in file_start + ): + return False + return True + + +def _src_path_is_module(src_path: Path, module_name: str) -> bool: + return ( + module_name == src_path.name and src_path.is_dir() and exists_case_sensitive(str(src_path)) + ) diff --git a/myenv/lib/python3.9/site-packages/isort/profiles.py b/myenv/lib/python3.9/site-packages/isort/profiles.py new file mode 100644 index 0000000..21d0646 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/profiles.py @@ -0,0 +1,86 @@ +"""Common profiles are defined here to be easily used within a project using --profile {name}""" +from typing import Any, Dict + +black = { + "multi_line_output": 3, + "include_trailing_comma": True, + "force_grid_wrap": 0, + "use_parentheses": True, + "ensure_newline_before_comments": True, + "line_length": 88, +} +django = { + "combine_as_imports": True, + "include_trailing_comma": True, + "multi_line_output": 5, + "line_length": 79, +} +pycharm = { + "multi_line_output": 3, + "force_grid_wrap": 2, + "lines_after_imports": 2, +} +google = { + "force_single_line": True, + "force_sort_within_sections": True, + "lexicographical": True, + "single_line_exclusions": ("typing",), + "order_by_type": False, + "group_by_package": True, +} +open_stack = { + "force_single_line": True, + "force_sort_within_sections": True, + "lexicographical": True, +} +plone = { + "force_alphabetical_sort": True, + 
"force_single_line": True, + "lines_after_imports": 2, + "line_length": 200, +} +attrs = { + "atomic": True, + "force_grid_wrap": 0, + "include_trailing_comma": True, + "lines_after_imports": 2, + "lines_between_types": 1, + "multi_line_output": 3, + "use_parentheses": True, +} +hug = { + "multi_line_output": 3, + "include_trailing_comma": True, + "force_grid_wrap": 0, + "use_parentheses": True, + "line_length": 100, +} +wemake = { + "multi_line_output": 3, + "include_trailing_comma": True, + "use_parentheses": True, + "line_length": 80, +} +appnexus = { + **black, + "force_sort_within_sections": True, + "order_by_type": False, + "case_sensitive": False, + "reverse_relative": True, + "sort_relative_in_force_sorted_sections": True, + "sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"], + "no_lines_before": "LOCALFOLDER", +} + +profiles: Dict[str, Dict[str, Any]] = { + "black": black, + "django": django, + "pycharm": pycharm, + "google": google, + "open_stack": open_stack, + "plone": plone, + "attrs": attrs, + "hug": hug, + "wemake": wemake, + "appnexus": appnexus, +} diff --git a/myenv/lib/python3.9/site-packages/isort/py.typed b/myenv/lib/python3.9/site-packages/isort/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/isort/pylama_isort.py b/myenv/lib/python3.9/site-packages/isort/pylama_isort.py new file mode 100644 index 0000000..52da535 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/pylama_isort.py @@ -0,0 +1,45 @@ +import os +import sys +from contextlib import contextmanager +from typing import Any, Dict, Iterator, List, Optional + +from pylama.lint import Linter as BaseLinter # type: ignore + +from isort.exceptions import FileSkipped + +from . import api + + +@contextmanager +def suppress_stdout() -> Iterator[None]: + stdout = sys.stdout + with open(os.devnull, "w") as devnull: + sys.stdout = devnull + yield + sys.stdout = stdout + + +class Linter(BaseLinter): # type: ignore + def allow(self, path: str) -> bool: + """Determine if this path should be linted.""" + return path.endswith(".py") + + def run( + self, path: str, params: Optional[Dict[str, Any]] = None, **meta: Any + ) -> List[Dict[str, Any]]: + """Lint the file. Return an array of error dicts if appropriate.""" + with suppress_stdout(): + try: + if not api.check_file(path, disregard_skip=False, **params or {}): + return [ + { + "lnum": 0, + "col": 0, + "text": "Incorrectly sorted imports.", + "type": "ISORT", + } + ] + except FileSkipped: + pass + + return [] diff --git a/myenv/lib/python3.9/site-packages/isort/sections.py b/myenv/lib/python3.9/site-packages/isort/sections.py new file mode 100644 index 0000000..f59db69 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/sections.py @@ -0,0 +1,9 @@ +"""Defines all sections isort uses by default""" +from typing import Tuple + +FUTURE: str = "FUTURE" +STDLIB: str = "STDLIB" +THIRDPARTY: str = "THIRDPARTY" +FIRSTPARTY: str = "FIRSTPARTY" +LOCALFOLDER: str = "LOCALFOLDER" +DEFAULT: Tuple[str, ...] = (FUTURE, STDLIB, THIRDPARTY, FIRSTPARTY, LOCALFOLDER) diff --git a/myenv/lib/python3.9/site-packages/isort/settings.py b/myenv/lib/python3.9/site-packages/isort/settings.py new file mode 100644 index 0000000..681d7b4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/settings.py @@ -0,0 +1,925 @@ +"""isort/settings.py. 
+ +Defines how the default settings for isort should be loaded +""" +import configparser +import fnmatch +import os +import posixpath +import re +import stat +import subprocess # nosec: Needed for gitignore support. +import sys +from functools import lru_cache +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + Union, +) +from warnings import warn + +from . import sorting, stdlibs +from ._future import dataclass, field +from .exceptions import ( + FormattingPluginDoesNotExist, + InvalidSettingsPath, + ProfileDoesNotExist, + SortingFunctionDoesNotExist, + UnsupportedSettings, +) +from .profiles import profiles +from .sections import DEFAULT as SECTION_DEFAULTS +from .sections import FIRSTPARTY, FUTURE, LOCALFOLDER, STDLIB, THIRDPARTY +from .utils import Trie +from .wrap_modes import WrapModes +from .wrap_modes import from_string as wrap_mode_from_string + +if TYPE_CHECKING: + tomli: Any +else: + from ._vendored import tomli + +_SHEBANG_RE = re.compile(br"^#!.*\bpython[23w]?\b") +CYTHON_EXTENSIONS = frozenset({"pyx", "pxd"}) +SUPPORTED_EXTENSIONS = frozenset({"py", "pyi", *CYTHON_EXTENSIONS}) +BLOCKED_EXTENSIONS = frozenset({"pex"}) +FILE_SKIP_COMMENTS: Tuple[str, ...] = ( + "isort:" + "skip_file", + "isort: " + "skip_file", +) # Concatenated to avoid this file being skipped +MAX_CONFIG_SEARCH_DEPTH: int = 25 # The number of parent directories to for a config file within +STOP_CONFIG_SEARCH_ON_DIRS: Tuple[str, ...] = (".git", ".hg") +VALID_PY_TARGETS: Tuple[str, ...] = tuple( + target.replace("py", "") for target in dir(stdlibs) if not target.startswith("_") +) +CONFIG_SOURCES: Tuple[str, ...] = ( + ".isort.cfg", + "pyproject.toml", + "setup.cfg", + "tox.ini", + ".editorconfig", +) +DEFAULT_SKIP: FrozenSet[str] = frozenset( + { + ".venv", + "venv", + ".tox", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".svn", + ".bzr", + "_build", + "buck-out", + "build", + "dist", + ".pants.d", + ".direnv", + "node_modules", + "__pypackages__", + } +) + +CONFIG_SECTIONS: Dict[str, Tuple[str, ...]] = { + ".isort.cfg": ("settings", "isort"), + "pyproject.toml": ("tool.isort",), + "setup.cfg": ("isort", "tool:isort"), + "tox.ini": ("isort", "tool:isort"), + ".editorconfig": ("*", "*.py", "**.py", "*.{py}"), +} +FALLBACK_CONFIG_SECTIONS: Tuple[str, ...] = ("isort", "tool:isort", "tool.isort") + +IMPORT_HEADING_PREFIX = "import_heading_" +IMPORT_FOOTER_PREFIX = "import_footer_" +KNOWN_PREFIX = "known_" +KNOWN_SECTION_MAPPING: Dict[str, str] = { + STDLIB: "STANDARD_LIBRARY", + FUTURE: "FUTURE_LIBRARY", + FIRSTPARTY: "FIRST_PARTY", + THIRDPARTY: "THIRD_PARTY", + LOCALFOLDER: "LOCAL_FOLDER", +} + +RUNTIME_SOURCE = "runtime" + +DEPRECATED_SETTINGS = ("not_skip", "keep_direct_and_as_imports") + +_STR_BOOLEAN_MAPPING = { + "y": True, + "yes": True, + "t": True, + "on": True, + "1": True, + "true": True, + "n": False, + "no": False, + "f": False, + "off": False, + "0": False, + "false": False, +} + + +@dataclass(frozen=True) +class _Config: + """Defines the data schema and defaults used for isort configuration. + + NOTE: known lists, such as known_standard_library, are intentionally not complete as they are + dynamically determined later on. 
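The constants above (CONFIG_SOURCES, MAX_CONFIG_SEARCH_DEPTH, STOP_CONFIG_SEARCH_ON_DIRS) drive the upward search used when no explicit settings file is given. A simplified, illustrative re-implementation of that walk (not the code _find_config below actually runs):

    import os
    from typing import Optional

    def find_config_dir(start: str, max_depth: int = 25) -> Optional[str]:
        """Walk upwards until a known config file or a repository root is found."""
        sources = (".isort.cfg", "pyproject.toml", "setup.cfg", "tox.ini", ".editorconfig")
        current = os.path.abspath(start)
        for _ in range(max_depth):
            if any(os.path.isfile(os.path.join(current, name)) for name in sources):
                return current
            # A .git or .hg directory marks a sensible stopping point.
            if any(os.path.isdir(os.path.join(current, stop)) for stop in (".git", ".hg")):
                return None
            parent = os.path.dirname(current)
            if parent == current:  # reached the filesystem root
                return None
            current = parent
        return None
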
+ """ + + py_version: str = "3" + force_to_top: FrozenSet[str] = frozenset() + skip: FrozenSet[str] = DEFAULT_SKIP + extend_skip: FrozenSet[str] = frozenset() + skip_glob: FrozenSet[str] = frozenset() + extend_skip_glob: FrozenSet[str] = frozenset() + skip_gitignore: bool = False + line_length: int = 79 + wrap_length: int = 0 + line_ending: str = "" + sections: Tuple[str, ...] = SECTION_DEFAULTS + no_sections: bool = False + known_future_library: FrozenSet[str] = frozenset(("__future__",)) + known_third_party: FrozenSet[str] = frozenset() + known_first_party: FrozenSet[str] = frozenset() + known_local_folder: FrozenSet[str] = frozenset() + known_standard_library: FrozenSet[str] = frozenset() + extra_standard_library: FrozenSet[str] = frozenset() + known_other: Dict[str, FrozenSet[str]] = field(default_factory=dict) + multi_line_output: WrapModes = WrapModes.GRID # type: ignore + forced_separate: Tuple[str, ...] = () + indent: str = " " * 4 + comment_prefix: str = " #" + length_sort: bool = False + length_sort_straight: bool = False + length_sort_sections: FrozenSet[str] = frozenset() + add_imports: FrozenSet[str] = frozenset() + remove_imports: FrozenSet[str] = frozenset() + append_only: bool = False + reverse_relative: bool = False + force_single_line: bool = False + single_line_exclusions: Tuple[str, ...] = () + default_section: str = THIRDPARTY + import_headings: Dict[str, str] = field(default_factory=dict) + import_footers: Dict[str, str] = field(default_factory=dict) + balanced_wrapping: bool = False + use_parentheses: bool = False + order_by_type: bool = True + atomic: bool = False + lines_before_imports: int = -1 + lines_after_imports: int = -1 + lines_between_sections: int = 1 + lines_between_types: int = 0 + combine_as_imports: bool = False + combine_star: bool = False + include_trailing_comma: bool = False + from_first: bool = False + verbose: bool = False + quiet: bool = False + force_adds: bool = False + force_alphabetical_sort_within_sections: bool = False + force_alphabetical_sort: bool = False + force_grid_wrap: int = 0 + force_sort_within_sections: bool = False + lexicographical: bool = False + group_by_package: bool = False + ignore_whitespace: bool = False + no_lines_before: FrozenSet[str] = frozenset() + no_inline_sort: bool = False + ignore_comments: bool = False + case_sensitive: bool = False + sources: Tuple[Dict[str, Any], ...] = () + virtual_env: str = "" + conda_env: str = "" + ensure_newline_before_comments: bool = False + directory: str = "" + profile: str = "" + honor_noqa: bool = False + src_paths: Tuple[Path, ...] 
= () + old_finders: bool = False + remove_redundant_aliases: bool = False + float_to_top: bool = False + filter_files: bool = False + formatter: str = "" + formatting_function: Optional[Callable[[str, str, object], str]] = None + color_output: bool = False + treat_comments_as_code: FrozenSet[str] = frozenset() + treat_all_comments_as_code: bool = False + supported_extensions: FrozenSet[str] = SUPPORTED_EXTENSIONS + blocked_extensions: FrozenSet[str] = BLOCKED_EXTENSIONS + constants: FrozenSet[str] = frozenset() + classes: FrozenSet[str] = frozenset() + variables: FrozenSet[str] = frozenset() + dedup_headings: bool = False + only_sections: bool = False + only_modified: bool = False + combine_straight_imports: bool = False + auto_identify_namespace_packages: bool = True + namespace_packages: FrozenSet[str] = frozenset() + follow_links: bool = True + indented_import_headings: bool = True + honor_case_in_force_sorted_sections: bool = False + sort_relative_in_force_sorted_sections: bool = False + overwrite_in_place: bool = False + reverse_sort: bool = False + star_first: bool = False + import_dependencies = Dict[str, str] + git_ignore: Dict[Path, Set[Path]] = field(default_factory=dict) + format_error: str = "{error}: {message}" + format_success: str = "{success}: {message}" + sort_order: str = "natural" + + def __post_init__(self) -> None: + py_version = self.py_version + if py_version == "auto": # pragma: no cover + if sys.version_info.major == 2 and sys.version_info.minor <= 6: + py_version = "2" + elif sys.version_info.major == 3 and ( + sys.version_info.minor <= 5 or sys.version_info.minor >= 10 + ): + py_version = "3" + else: + py_version = f"{sys.version_info.major}{sys.version_info.minor}" + + if py_version not in VALID_PY_TARGETS: + raise ValueError( + f"The python version {py_version} is not supported. " + "You can set a python version with the -py or --python-version flag. " + f"The following versions are supported: {VALID_PY_TARGETS}" + ) + + if py_version != "all": + object.__setattr__(self, "py_version", f"py{py_version}") + + if not self.known_standard_library: + object.__setattr__( + self, "known_standard_library", frozenset(getattr(stdlibs, self.py_version).stdlib) + ) + + if self.multi_line_output == WrapModes.VERTICAL_GRID_GROUPED_NO_COMMA: # type: ignore + vertical_grid_grouped = WrapModes.VERTICAL_GRID_GROUPED # type: ignore + object.__setattr__(self, "multi_line_output", vertical_grid_grouped) + if self.force_alphabetical_sort: + object.__setattr__(self, "force_alphabetical_sort_within_sections", True) + object.__setattr__(self, "no_sections", True) + object.__setattr__(self, "lines_between_types", 1) + object.__setattr__(self, "from_first", True) + if self.wrap_length > self.line_length: + raise ValueError( + "wrap_length must be set lower than or equal to line_length: " + f"{self.wrap_length} > {self.line_length}." 
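__post_init__ above normalizes py_version before a stdlib table is selected: "auto" maps the running interpreter onto one of the bundled targets, and unsupported values raise immediately. A condensed sketch of that mapping (illustrative only, assuming a CPython 3 interpreter):

    import sys

    def resolve_py_version(py_version: str = "auto") -> str:
        if py_version == "auto":
            major, minor = sys.version_info.major, sys.version_info.minor
            if major == 3 and 6 <= minor <= 9:
                return f"py{major}{minor}"  # a dedicated stdlib table exists
            return "py3"                    # otherwise fall back to the 3.x union
        # "all" is kept as-is; everything else gets the "py" prefix.
        return py_version if py_version == "all" else f"py{py_version}"

    print(resolve_py_version())  # e.g. 'py39' on Python 3.9
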
+ ) + + def __hash__(self) -> int: + return id(self) + + +_DEFAULT_SETTINGS = {**vars(_Config()), "source": "defaults"} + + +class Config(_Config): + def __init__( + self, + settings_file: str = "", + settings_path: str = "", + config: Optional[_Config] = None, + **config_overrides: Any, + ): + self._known_patterns: Optional[List[Tuple[Pattern[str], str]]] = None + self._section_comments: Optional[Tuple[str, ...]] = None + self._section_comments_end: Optional[Tuple[str, ...]] = None + self._skips: Optional[FrozenSet[str]] = None + self._skip_globs: Optional[FrozenSet[str]] = None + self._sorting_function: Optional[Callable[..., List[str]]] = None + + if config: + config_vars = vars(config).copy() + config_vars.update(config_overrides) + config_vars["py_version"] = config_vars["py_version"].replace("py", "") + config_vars.pop("_known_patterns") + config_vars.pop("_section_comments") + config_vars.pop("_section_comments_end") + config_vars.pop("_skips") + config_vars.pop("_skip_globs") + config_vars.pop("_sorting_function") + super().__init__(**config_vars) # type: ignore + return + + # We can't use self.quiet to conditionally show warnings before super.__init__() is called + # at the end of this method. _Config is also frozen so setting self.quiet isn't possible. + # Therefore we extract quiet early here in a variable and use that in warning conditions. + quiet = config_overrides.get("quiet", False) + + sources: List[Dict[str, Any]] = [_DEFAULT_SETTINGS] + + config_settings: Dict[str, Any] + project_root: str + if settings_file: + config_settings = _get_config_data( + settings_file, + CONFIG_SECTIONS.get(os.path.basename(settings_file), FALLBACK_CONFIG_SECTIONS), + ) + project_root = os.path.dirname(settings_file) + if not config_settings and not quiet: + warn( + f"A custom settings file was specified: {settings_file} but no configuration " + "was found inside. This can happen when [settings] is used as the config " + "header instead of [isort]. " + "See: https://pycqa.github.io/isort/docs/configuration/config_files" + "/#custom_config_files for more information." 
+ ) + elif settings_path: + if not os.path.exists(settings_path): + raise InvalidSettingsPath(settings_path) + + settings_path = os.path.abspath(settings_path) + project_root, config_settings = _find_config(settings_path) + else: + config_settings = {} + project_root = os.getcwd() + + profile_name = config_overrides.get("profile", config_settings.get("profile", "")) + profile: Dict[str, Any] = {} + if profile_name: + if profile_name not in profiles: + import pkg_resources + + for plugin in pkg_resources.iter_entry_points("isort.profiles"): + profiles.setdefault(plugin.name, plugin.load()) + + if profile_name not in profiles: + raise ProfileDoesNotExist(profile_name) + + profile = profiles[profile_name].copy() + profile["source"] = f"{profile_name} profile" + sources.append(profile) + + if config_settings: + sources.append(config_settings) + if config_overrides: + config_overrides["source"] = RUNTIME_SOURCE + sources.append(config_overrides) + + combined_config = {**profile, **config_settings, **config_overrides} + if "indent" in combined_config: + indent = str(combined_config["indent"]) + if indent.isdigit(): + indent = " " * int(indent) + else: + indent = indent.strip("'").strip('"') + if indent.lower() == "tab": + indent = "\t" + combined_config["indent"] = indent + + known_other = {} + import_headings = {} + import_footers = {} + for key, value in tuple(combined_config.items()): + # Collect all known sections beyond those that have direct entries + if key.startswith(KNOWN_PREFIX) and key not in ( + "known_standard_library", + "known_future_library", + "known_third_party", + "known_first_party", + "known_local_folder", + ): + import_heading = key[len(KNOWN_PREFIX) :].lower() + maps_to_section = import_heading.upper() + combined_config.pop(key) + if maps_to_section in KNOWN_SECTION_MAPPING: + section_name = f"known_{KNOWN_SECTION_MAPPING[maps_to_section].lower()}" + if section_name in combined_config and not quiet: + warn( + f"Can't set both {key} and {section_name} in the same config file.\n" + f"Default to {section_name} if unsure." + "\n\n" + "See: https://pycqa.github.io/isort/" + "#custom-sections-and-ordering." + ) + else: + combined_config[section_name] = frozenset(value) + else: + known_other[import_heading] = frozenset(value) + if maps_to_section not in combined_config.get("sections", ()) and not quiet: + warn( + f"`{key}` setting is defined, but {maps_to_section} is not" + " included in `sections` config option:" + f" {combined_config.get('sections', SECTION_DEFAULTS)}.\n\n" + "See: https://pycqa.github.io/isort/" + "#custom-sections-and-ordering." + ) + if key.startswith(IMPORT_HEADING_PREFIX): + import_headings[key[len(IMPORT_HEADING_PREFIX) :].lower()] = str(value) + if key.startswith(IMPORT_FOOTER_PREFIX): + import_footers[key[len(IMPORT_FOOTER_PREFIX) :].lower()] = str(value) + + # Coerce all provided config values into their correct type + default_value = _DEFAULT_SETTINGS.get(key, None) + if default_value is None: + continue + + combined_config[key] = type(default_value)(value) + + for section in combined_config.get("sections", ()): + if section in SECTION_DEFAULTS: + continue + + if not section.lower() in known_other: + config_keys = ", ".join(known_other.keys()) + warn( + f"`sections` setting includes {section}, but no known_{section.lower()} " + "is defined. " + f"The following known_SECTION config options are defined: {config_keys}." 
+ ) + + if "directory" not in combined_config: + combined_config["directory"] = ( + os.path.dirname(config_settings["source"]) + if config_settings.get("source", None) + else os.getcwd() + ) + + path_root = Path(combined_config.get("directory", project_root)).resolve() + path_root = path_root if path_root.is_dir() else path_root.parent + if "src_paths" not in combined_config: + combined_config["src_paths"] = (path_root / "src", path_root) + else: + src_paths: List[Path] = [] + for src_path in combined_config.get("src_paths", ()): + full_paths = ( + path_root.glob(src_path) if "*" in str(src_path) else [path_root / src_path] + ) + for path in full_paths: + if path not in src_paths: + src_paths.append(path) + + combined_config["src_paths"] = tuple(src_paths) + + if "formatter" in combined_config: + import pkg_resources + + for plugin in pkg_resources.iter_entry_points("isort.formatters"): + if plugin.name == combined_config["formatter"]: + combined_config["formatting_function"] = plugin.load() + break + else: + raise FormattingPluginDoesNotExist(combined_config["formatter"]) + + # Remove any config values that are used for creating config object but + # aren't defined in dataclass + combined_config.pop("source", None) + combined_config.pop("sources", None) + combined_config.pop("runtime_src_paths", None) + + deprecated_options_used = [ + option for option in combined_config if option in DEPRECATED_SETTINGS + ] + if deprecated_options_used: + for deprecated_option in deprecated_options_used: + combined_config.pop(deprecated_option) + if not quiet: + warn( + "W0503: Deprecated config options were used: " + f"{', '.join(deprecated_options_used)}." + "Please see the 5.0.0 upgrade guide: " + "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html" + ) + + if known_other: + combined_config["known_other"] = known_other + if import_headings: + for import_heading_key in import_headings: + combined_config.pop(f"{IMPORT_HEADING_PREFIX}{import_heading_key}") + combined_config["import_headings"] = import_headings + if import_footers: + for import_footer_key in import_footers: + combined_config.pop(f"{IMPORT_FOOTER_PREFIX}{import_footer_key}") + combined_config["import_footers"] = import_footers + + unsupported_config_errors = {} + for option in set(combined_config.keys()).difference( + getattr(_Config, "__dataclass_fields__", {}).keys() + ): + for source in reversed(sources): + if option in source: + unsupported_config_errors[option] = { + "value": source[option], + "source": source["source"], + } + if unsupported_config_errors: + raise UnsupportedSettings(unsupported_config_errors) + + super().__init__(sources=tuple(sources), **combined_config) # type: ignore + + def is_supported_filetype(self, file_name: str) -> bool: + _root, ext = os.path.splitext(file_name) + ext = ext.lstrip(".") + if ext in self.supported_extensions: + return True + if ext in self.blocked_extensions: + return False + + # Skip editor backup files. 
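With all sources collected, precedence in Config is: runtime keyword overrides beat config-file values, which beat profile values, which beat the _Config defaults. A small illustration (values hypothetical):

    from isort.settings import Config

    cfg = Config(profile="black", line_length=100)
    print(cfg.line_length)             # 100, the runtime override wins over black's 88
    print(cfg.include_trailing_comma)  # True, inherited from the black profile
    print(cfg.py_version)              # 'py3', the untouched default
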
+ if file_name.endswith("~"): + return False + + try: + if stat.S_ISFIFO(os.stat(file_name).st_mode): + return False + except OSError: + pass + + try: + with open(file_name, "rb") as fp: + line = fp.readline(100) + except OSError: + return False + else: + return bool(_SHEBANG_RE.match(line)) + + def _check_folder_gitignore(self, folder: str) -> Optional[Path]: + env = {**os.environ, "LANG": "C.UTF-8"} + try: + topfolder_result = subprocess.check_output( # nosec # skipcq: PYL-W1510 + ["git", "-C", folder, "rev-parse", "--show-toplevel"], encoding="utf-8", env=env + ) + except subprocess.CalledProcessError: + return None + + git_folder = Path(topfolder_result.rstrip()).resolve() + + files: List[str] = [] + # don't check symlinks; either part of the repo and would be checked + # twice, or is external to the repo and git won't know anything about it + for root, _dirs, git_files in os.walk(git_folder, followlinks=False): + if ".git" in _dirs: + _dirs.remove(".git") + for git_file in git_files: + files.append(os.path.join(root, git_file)) + git_options = ["-C", str(git_folder), "-c", "core.quotePath="] + try: + ignored = subprocess.check_output( # nosec # skipcq: PYL-W1510 + ["git", *git_options, "check-ignore", "-z", "--stdin", "--no-index"], + encoding="utf-8", + env=env, + input="\0".join(files), + ) + except subprocess.CalledProcessError: + return None + + self.git_ignore[git_folder] = {Path(f) for f in ignored.rstrip("\0").split("\0")} + return git_folder + + def is_skipped(self, file_path: Path) -> bool: + """Returns True if the file and/or folder should be skipped based on current settings.""" + if self.directory and Path(self.directory) in file_path.resolve().parents: + file_name = os.path.relpath(file_path.resolve(), self.directory) + else: + file_name = str(file_path) + + os_path = str(file_path) + + normalized_path = os_path.replace("\\", "/") + if normalized_path[1:2] == ":": + normalized_path = normalized_path[2:] + + for skip_path in self.skips: + if posixpath.abspath(normalized_path) == posixpath.abspath( + skip_path.replace("\\", "/") + ): + return True + + position = os.path.split(file_name) + while position[1]: + if position[1] in self.skips: + return True + position = os.path.split(position[0]) + + for sglob in self.skip_globs: + if fnmatch.fnmatch(file_name, sglob) or fnmatch.fnmatch("/" + file_name, sglob): + return True + + if not (os.path.isfile(os_path) or os.path.isdir(os_path) or os.path.islink(os_path)): + return True + + if self.skip_gitignore: + if file_path.name == ".git": # pragma: no cover + return True + + git_folder = None + + file_paths = [file_path, file_path.resolve()] + for folder in self.git_ignore: + if any(folder in path.parents for path in file_paths): + git_folder = folder + break + else: + git_folder = self._check_folder_gitignore(str(file_path.parent)) + + if git_folder and any(path in self.git_ignore[git_folder] for path in file_paths): + return True + + return False + + @property + def known_patterns(self) -> List[Tuple[Pattern[str], str]]: + if self._known_patterns is not None: + return self._known_patterns + + self._known_patterns = [] + pattern_sections = [STDLIB] + [section for section in self.sections if section != STDLIB] + for placement in reversed(pattern_sections): + known_placement = KNOWN_SECTION_MAPPING.get(placement, placement).lower() + config_key = f"{KNOWN_PREFIX}{known_placement}" + known_modules = getattr(self, config_key, self.known_other.get(known_placement, ())) + extra_modules = getattr(self, f"extra_{known_placement}", ()) + 
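is_skipped above combines three mechanisms: exact skip names matched against every path component, skip_glob patterns, and per-repository .gitignore results cached in git_ignore. A rough example (paths hypothetical; note the method also reports paths that do not exist on disk as skipped):

    from pathlib import Path
    from isort.settings import Config

    cfg = Config(extend_skip={"migrations"}, skip_glob=("*_pb2.py",))
    # Any path component named "migrations" is skipped...
    print(cfg.is_skipped(Path("app/migrations/0001_initial.py")))  # True
    # ...as is anything matching a skip glob, e.g. generated protobuf modules.
    print(cfg.is_skipped(Path("app/schema_pb2.py")))               # True
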
all_modules = set(extra_modules).union(known_modules) + known_patterns = [ + pattern + for known_pattern in all_modules + for pattern in self._parse_known_pattern(known_pattern) + ] + for known_pattern in known_patterns: + regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$" + self._known_patterns.append((re.compile(regexp), placement)) + + return self._known_patterns + + @property + def section_comments(self) -> Tuple[str, ...]: + if self._section_comments is not None: + return self._section_comments + + self._section_comments = tuple(f"# {heading}" for heading in self.import_headings.values()) + return self._section_comments + + @property + def section_comments_end(self) -> Tuple[str, ...]: + if self._section_comments_end is not None: + return self._section_comments_end + + self._section_comments_end = tuple(f"# {footer}" for footer in self.import_footers.values()) + return self._section_comments_end + + @property + def skips(self) -> FrozenSet[str]: + if self._skips is not None: + return self._skips + + self._skips = self.skip.union(self.extend_skip) + return self._skips + + @property + def skip_globs(self) -> FrozenSet[str]: + if self._skip_globs is not None: + return self._skip_globs + + self._skip_globs = self.skip_glob.union(self.extend_skip_glob) + return self._skip_globs + + @property + def sorting_function(self) -> Callable[..., List[str]]: + if self._sorting_function is not None: + return self._sorting_function + + if self.sort_order == "natural": + self._sorting_function = sorting.naturally + elif self.sort_order == "native": + self._sorting_function = sorted + else: + available_sort_orders = ["natural", "native"] + import pkg_resources + + for sort_plugin in pkg_resources.iter_entry_points("isort.sort_function"): + available_sort_orders.append(sort_plugin.name) + if sort_plugin.name == self.sort_order: + self._sorting_function = sort_plugin.load() + break + else: + raise SortingFunctionDoesNotExist(self.sort_order, available_sort_orders) + + return self._sorting_function + + def _parse_known_pattern(self, pattern: str) -> List[str]: + """Expand pattern if identified as a directory and return found sub packages""" + if pattern.endswith(os.path.sep): + patterns = [ + filename + for filename in os.listdir(os.path.join(self.directory, pattern)) + if os.path.isdir(os.path.join(self.directory, pattern, filename)) + ] + else: + patterns = [pattern] + + return patterns + + +def _get_str_to_type_converter(setting_name: str) -> Union[Callable[[str], Any], Type[Any]]: + type_converter: Union[Callable[[str], Any], Type[Any]] = type( + _DEFAULT_SETTINGS.get(setting_name, "") + ) + if type_converter == WrapModes: + type_converter = wrap_mode_from_string + return type_converter + + +def _as_list(value: str) -> List[str]: + if isinstance(value, list): + return [item.strip() for item in value] + filtered = [item.strip() for item in value.replace("\n", ",").split(",") if item.strip()] + return filtered + + +def _abspaths(cwd: str, values: Iterable[str]) -> Set[str]: + paths = { + os.path.join(cwd, value) + if not value.startswith(os.path.sep) and value.endswith(os.path.sep) + else value + for value in values + } + return paths + + +@lru_cache() +def _find_config(path: str) -> Tuple[str, Dict[str, Any]]: + current_directory = path + tries = 0 + while current_directory and tries < MAX_CONFIG_SEARCH_DEPTH: + for config_file_name in CONFIG_SOURCES: + potential_config_file = os.path.join(current_directory, config_file_name) + if os.path.isfile(potential_config_file): + 
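The known_patterns property above compiles every configured module name into an anchored regular expression, so a setting such as known_first_party = ["myapp*"] behaves like a glob. The same translation in isolation (assumed values):

    import re

    def to_pattern(known: str) -> re.Pattern:
        # '*' -> '.*' and '?' -> '.?', anchored at both ends, as in known_patterns.
        return re.compile("^" + known.replace("*", ".*").replace("?", ".?") + "$")

    pattern = to_pattern("myapp*")
    print(bool(pattern.match("myapp")))        # True
    print(bool(pattern.match("myapp_utils")))  # True
    print(bool(pattern.match("notmyapp")))     # False
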
config_data: Dict[str, Any] + try: + config_data = _get_config_data( + potential_config_file, CONFIG_SECTIONS[config_file_name] + ) + except Exception: + warn(f"Failed to pull configuration information from {potential_config_file}") + config_data = {} + if config_data: + return (current_directory, config_data) + + for stop_dir in STOP_CONFIG_SEARCH_ON_DIRS: + if os.path.isdir(os.path.join(current_directory, stop_dir)): + return (current_directory, {}) + + new_directory = os.path.split(current_directory)[0] + if new_directory == current_directory: + break + + current_directory = new_directory + tries += 1 + + return (path, {}) + + +@lru_cache() +def find_all_configs(path: str) -> Trie: + """ + Looks for config files in the path provided and in all of its sub-directories. + Parses and stores any config file encountered in a trie and returns the root of + the trie + """ + trie_root = Trie("default", {}) + + for (dirpath, _, _) in os.walk(path): + for config_file_name in CONFIG_SOURCES: + potential_config_file = os.path.join(dirpath, config_file_name) + if os.path.isfile(potential_config_file): + config_data: Dict[str, Any] + try: + config_data = _get_config_data( + potential_config_file, CONFIG_SECTIONS[config_file_name] + ) + except Exception: + warn(f"Failed to pull configuration information from {potential_config_file}") + config_data = {} + + if config_data: + trie_root.insert(potential_config_file, config_data) + break + + return trie_root + + +@lru_cache() +def _get_config_data(file_path: str, sections: Tuple[str]) -> Dict[str, Any]: + settings: Dict[str, Any] = {} + + if file_path.endswith(".toml"): + with open(file_path, "rb") as bin_config_file: + config = tomli.load(bin_config_file) + for section in sections: + config_section = config + for key in section.split("."): + config_section = config_section.get(key, {}) + settings.update(config_section) + else: + with open(file_path, encoding="utf-8") as config_file: + if file_path.endswith(".editorconfig"): + line = "\n" + last_position = config_file.tell() + while line: + line = config_file.readline() + if "[" in line: + config_file.seek(last_position) + break + last_position = config_file.tell() + + config = configparser.ConfigParser(strict=False) + config.read_file(config_file) + for section in sections: + if section.startswith("*.{") and section.endswith("}"): + extension = section[len("*.{") : -1] + for config_key in config.keys(): + if ( + config_key.startswith("*.{") + and config_key.endswith("}") + and extension + in map( + lambda text: text.strip(), config_key[len("*.{") : -1].split(",") # type: ignore # noqa + ) + ): + settings.update(config.items(config_key)) + + elif config.has_section(section): + settings.update(config.items(section)) + + if settings: + settings["source"] = file_path + + if file_path.endswith(".editorconfig"): + indent_style = settings.pop("indent_style", "").strip() + indent_size = settings.pop("indent_size", "").strip() + if indent_size == "tab": + indent_size = settings.pop("tab_width", "").strip() + + if indent_style == "space": + settings["indent"] = " " * (indent_size and int(indent_size) or 4) + + elif indent_style == "tab": + settings["indent"] = "\t" * (indent_size and int(indent_size) or 1) + + max_line_length = settings.pop("max_line_length", "").strip() + if max_line_length and (max_line_length == "off" or max_line_length.isdigit()): + settings["line_length"] = ( + float("inf") if max_line_length == "off" else int(max_line_length) + ) + settings = { + key: value + for key, value in 
settings.items() + if key in _DEFAULT_SETTINGS.keys() or key.startswith(KNOWN_PREFIX) + } + + for key, value in settings.items(): + existing_value_type = _get_str_to_type_converter(key) + if existing_value_type == tuple: + settings[key] = tuple(_as_list(value)) + elif existing_value_type == frozenset: + settings[key] = frozenset(_as_list(settings.get(key))) # type: ignore + elif existing_value_type == bool: + # Only some configuration formats support native boolean values. + if not isinstance(value, bool): + value = _as_bool(value) + settings[key] = value + elif key.startswith(KNOWN_PREFIX): + settings[key] = _abspaths(os.path.dirname(file_path), _as_list(value)) + elif key == "force_grid_wrap": + try: + result = existing_value_type(value) + except ValueError: # backwards compatibility for true / false force grid wrap + result = 0 if value.lower().strip() == "false" else 2 + settings[key] = result + elif key == "comment_prefix": + settings[key] = str(value).strip("'").strip('"') + else: + settings[key] = existing_value_type(value) + + return settings + + +def _as_bool(value: str) -> bool: + """Given a string value that represents True or False, returns the Boolean equivalent. + Heavily inspired from distutils strtobool. + """ + try: + return _STR_BOOLEAN_MAPPING[value.lower()] + except KeyError: + raise ValueError(f"invalid truth value {value}") + + +DEFAULT_CONFIG = Config() diff --git a/myenv/lib/python3.9/site-packages/isort/setuptools_commands.py b/myenv/lib/python3.9/site-packages/isort/setuptools_commands.py new file mode 100644 index 0000000..d60deda --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/setuptools_commands.py @@ -0,0 +1,61 @@ +import glob +import os +import sys +from typing import Any, Dict, Iterator, List +from warnings import warn + +import setuptools # type: ignore + +from . import api +from .settings import DEFAULT_CONFIG + + +class ISortCommand(setuptools.Command): # type: ignore + """The :class:`ISortCommand` class is used by setuptools to perform + imports checks on registered modules. 
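Back in _get_config_data above, every raw string read from an INI or TOML file is coerced to the type of its default value: list-like settings go through _as_list, booleans through _as_bool, and force_grid_wrap keeps backwards compatibility with old true/false spellings. For instance (using the module's own private helpers, so treat as illustrative):

    from isort.settings import _as_bool, _as_list

    print(_as_list("os,\nsys, re"))  # ['os', 'sys', 're']
    print(_as_bool("Yes"))           # True
    print(_as_bool("off"))           # False
    # _as_bool("perhaps") raises ValueError, mirroring distutils' strtobool.
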
+ """ + + description = "Run isort on modules registered in setuptools" + user_options: List[Any] = [] + + def initialize_options(self) -> None: + default_settings = vars(DEFAULT_CONFIG).copy() + for key, value in default_settings.items(): + setattr(self, key, value) + + def finalize_options(self) -> None: + """Get options from config files.""" + self.arguments: Dict[str, Any] = {} # skipcq: PYL-W0201 + self.arguments["settings_path"] = os.getcwd() + + def distribution_files(self) -> Iterator[str]: + """Find distribution packages.""" + # This is verbatim from flake8 + if self.distribution.packages: # pragma: no cover + package_dirs = self.distribution.package_dir or {} + for package in self.distribution.packages: + pkg_dir = package + if package in package_dirs: + pkg_dir = package_dirs[package] + elif "" in package_dirs: # pragma: no cover + pkg_dir = package_dirs[""] + os.path.sep + pkg_dir + yield pkg_dir.replace(".", os.path.sep) + + if self.distribution.py_modules: + for filename in self.distribution.py_modules: + yield "%s.py" % filename + # Don't miss the setup.py file itself + yield "setup.py" + + def run(self) -> None: + arguments = self.arguments + wrong_sorted_files = False + for path in self.distribution_files(): + for python_file in glob.iglob(os.path.join(path, "*.py")): + try: + if not api.check_file(python_file, **arguments): + wrong_sorted_files = True # pragma: no cover + except OSError as error: # pragma: no cover + warn(f"Unable to parse file {python_file} due to {error}") + if wrong_sorted_files: + sys.exit(1) # pragma: no cover diff --git a/myenv/lib/python3.9/site-packages/isort/sorting.py b/myenv/lib/python3.9/site-packages/isort/sorting.py new file mode 100644 index 0000000..22fdc38 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/sorting.py @@ -0,0 +1,130 @@ +import re +from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional + +if TYPE_CHECKING: + from .settings import Config +else: + Config = Any + +_import_line_intro_re = re.compile("^(?:from|import) ") +_import_line_midline_import_re = re.compile(" import ") + + +def module_key( + module_name: str, + config: Config, + sub_imports: bool = False, + ignore_case: bool = False, + section_name: Optional[Any] = None, + straight_import: Optional[bool] = False, +) -> str: + match = re.match(r"^(\.+)\s*(.*)", module_name) + if match: + sep = " " if config.reverse_relative else "_" + module_name = sep.join(match.groups()) + + prefix = "" + if ignore_case: + module_name = str(module_name).lower() + else: + module_name = str(module_name) + + if sub_imports and config.order_by_type: + if module_name in config.constants: + prefix = "A" + elif module_name in config.classes: + prefix = "B" + elif module_name in config.variables: + prefix = "C" + elif module_name.isupper() and len(module_name) > 1: # see issue #376 + prefix = "A" + elif module_name in config.classes or module_name[0:1].isupper(): + prefix = "B" + else: + prefix = "C" + if not config.case_sensitive: + module_name = module_name.lower() + + length_sort = ( + config.length_sort + or (config.length_sort_straight and straight_import) + or str(section_name).lower() in config.length_sort_sections + ) + _length_sort_maybe = (str(len(module_name)) + ":" + module_name) if length_sort else module_name + return f"{module_name in config.force_to_top and 'A' or 'B'}{prefix}{_length_sort_maybe}" + + +def section_key(line: str, config: Config) -> str: + section = "B" + + if ( + not config.sort_relative_in_force_sorted_sections + and 
config.reverse_relative + and line.startswith("from .") + ): + match = re.match(r"^from (\.+)\s*(.*)", line) + if match: # pragma: no cover - regex always matches if line starts with "from ." + line = f"from {' '.join(match.groups())}" + if config.group_by_package and line.strip().startswith("from"): + line = line.split(" import", 1)[0] + + if config.lexicographical: + line = _import_line_intro_re.sub("", _import_line_midline_import_re.sub(".", line)) + else: + line = re.sub("^from ", "", line) + line = re.sub("^import ", "", line) + if config.sort_relative_in_force_sorted_sections: + sep = " " if config.reverse_relative else "_" + line = re.sub(r"^(\.+)", fr"\1{sep}", line) + if line.split(" ")[0] in config.force_to_top: + section = "A" + # * If honor_case_in_force_sorted_sections is true, and case_sensitive and + # order_by_type are different, only ignore case in part of the line. + # * Otherwise, let order_by_type decide the sorting of the whole line. This + # is only "correct" if case_sensitive and order_by_type have the same value. + if config.honor_case_in_force_sorted_sections and config.case_sensitive != config.order_by_type: + split_module = line.split(" import ", 1) + if len(split_module) > 1: + module_name, names = split_module + if not config.case_sensitive: + module_name = module_name.lower() + if not config.order_by_type: + names = names.lower() + line = " import ".join([module_name, names]) + elif not config.case_sensitive: + line = line.lower() + elif not config.order_by_type: + line = line.lower() + + return f"{section}{len(line) if config.length_sort else ''}{line}" + + +def sort( + config: Config, + to_sort: Iterable[str], + key: Optional[Callable[[str], Any]] = None, + reverse: bool = False, +) -> List[str]: + return config.sorting_function(to_sort, key=key, reverse=reverse) + + +def naturally( + to_sort: Iterable[str], key: Optional[Callable[[str], Any]] = None, reverse: bool = False +) -> List[str]: + """Returns a naturally sorted list""" + if key is None: + key_callback = _natural_keys + else: + + def key_callback(text: str) -> List[Any]: + return _natural_keys(key(text)) # type: ignore + + return sorted(to_sort, key=key_callback, reverse=reverse) + + +def _atoi(text: str) -> Any: + return int(text) if text.isdigit() else text + + +def _natural_keys(text: str) -> List[Any]: + return [_atoi(c) for c in re.split(r"(\d+)", text)] diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/__init__.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/__init__.py new file mode 100644 index 0000000..7b00716 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/__init__.py @@ -0,0 +1,2 @@ +from . import all as _all +from . import py2, py3, py27, py35, py36, py37, py38, py39, py310 diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/all.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/all.py new file mode 100644 index 0000000..08a365e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/all.py @@ -0,0 +1,3 @@ +from . import py2, py3 + +stdlib = py2.stdlib | py3.stdlib diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py2.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py2.py new file mode 100644 index 0000000..74af019 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py2.py @@ -0,0 +1,3 @@ +from . 
import py27 + +stdlib = py27.stdlib diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py27.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py27.py new file mode 100644 index 0000000..a9bc99d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py27.py @@ -0,0 +1,301 @@ +""" +File contains the standard library of Python 2.7. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. +""" + +stdlib = { + "AL", + "BaseHTTPServer", + "Bastion", + "CGIHTTPServer", + "Carbon", + "ColorPicker", + "ConfigParser", + "Cookie", + "DEVICE", + "DocXMLRPCServer", + "EasyDialogs", + "FL", + "FrameWork", + "GL", + "HTMLParser", + "MacOS", + "MimeWriter", + "MiniAEFrame", + "Nav", + "PixMapWrapper", + "Queue", + "SUNAUDIODEV", + "ScrolledText", + "SimpleHTTPServer", + "SimpleXMLRPCServer", + "SocketServer", + "StringIO", + "Tix", + "Tkinter", + "UserDict", + "UserList", + "UserString", + "W", + "__builtin__", + "_ast", + "_winreg", + "abc", + "aepack", + "aetools", + "aetypes", + "aifc", + "al", + "anydbm", + "applesingle", + "argparse", + "array", + "ast", + "asynchat", + "asyncore", + "atexit", + "audioop", + "autoGIL", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "bsddb", + "buildtools", + "bz2", + "cPickle", + "cProfile", + "cStringIO", + "calendar", + "cd", + "cfmfile", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "commands", + "compileall", + "compiler", + "contextlib", + "cookielib", + "copy", + "copy_reg", + "crypt", + "csv", + "ctypes", + "curses", + "datetime", + "dbhash", + "dbm", + "decimal", + "difflib", + "dircache", + "dis", + "distutils", + "dl", + "doctest", + "dumbdbm", + "dummy_thread", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "errno", + "exceptions", + "fcntl", + "filecmp", + "fileinput", + "findertools", + "fl", + "flp", + "fm", + "fnmatch", + "formatter", + "fpectl", + "fpformat", + "fractions", + "ftplib", + "functools", + "future_builtins", + "gc", + "gdbm", + "gensuitemodule", + "getopt", + "getpass", + "gettext", + "gl", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "hotshot", + "htmlentitydefs", + "htmllib", + "httplib", + "ic", + "icopen", + "imageop", + "imaplib", + "imgfile", + "imghdr", + "imp", + "importlib", + "imputil", + "inspect", + "io", + "itertools", + "jpeg", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "macerrors", + "macostools", + "macpath", + "macresource", + "mailbox", + "mailcap", + "marshal", + "math", + "md5", + "mhlib", + "mimetools", + "mimetypes", + "mimify", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multifile", + "multiprocessing", + "mutex", + "netrc", + "new", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "popen2", + "poplib", + "posix", + "posixfile", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "quopri", + "random", + "re", + "readline", + "resource", + "rexec", + "rfc822", + "rlcompleter", + "robotparser", + "runpy", + "sched", + "select", + "sets", + "sgmllib", + "sha", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + 
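Looking back at sorting.py, naturally() is what the default sort_order = "natural" resolves to: digit runs are split out of each string so that numbered names order numerically rather than lexicographically. For example:

    from isort.sorting import naturally

    modules = ["step10", "step2", "step1"]
    print(sorted(modules))     # ['step1', 'step10', 'step2']  (lexicographic)
    print(naturally(modules))  # ['step1', 'step2', 'step10']  (natural)
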
"statvfs", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "sunaudiodev", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "thread", + "threading", + "time", + "timeit", + "token", + "tokenize", + "trace", + "traceback", + "ttk", + "tty", + "turtle", + "types", + "unicodedata", + "unittest", + "urllib", + "urllib2", + "urlparse", + "user", + "uu", + "uuid", + "videoreader", + "warnings", + "wave", + "weakref", + "webbrowser", + "whichdb", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpclib", + "zipfile", + "zipimport", + "zlib", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py3.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py3.py new file mode 100644 index 0000000..4e0b5ee --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py3.py @@ -0,0 +1,3 @@ +from . import py35, py36, py37, py38, py39, py310 + +stdlib = py35.stdlib | py36.stdlib | py37.stdlib | py38.stdlib | py39.stdlib | py310.stdlib diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py310.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py310.py new file mode 100644 index 0000000..1db2755 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py310.py @@ -0,0 +1,221 @@ +""" +File contains the standard library of Python 3.10. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. +""" + +stdlib = { + "_ast", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "graphlib", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + 
"string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", + "zoneinfo", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py35.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py35.py new file mode 100644 index 0000000..29ab9ae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py35.py @@ -0,0 +1,223 @@ +""" +File contains the standard library of Python 3.5. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. +""" + +stdlib = { + "_ast", + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fpectl", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "macpath", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + 
"unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py36.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py36.py new file mode 100644 index 0000000..59ebd24 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py36.py @@ -0,0 +1,224 @@ +""" +File contains the standard library of Python 3.6. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. +""" + +stdlib = { + "_ast", + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fpectl", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "macpath", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py37.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py37.py new file mode 100644 index 
0000000..e0ad122 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py37.py @@ -0,0 +1,225 @@ +""" +File contains the standard library of Python 3.7. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. +""" + +stdlib = { + "_ast", + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "macpath", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py38.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py38.py new file mode 100644 index 0000000..3d89fd2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py38.py @@ -0,0 +1,224 @@ +""" +File contains the standard library of Python 3.8. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. 
+""" + +stdlib = { + "_ast", + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} diff --git a/myenv/lib/python3.9/site-packages/isort/stdlibs/py39.py b/myenv/lib/python3.9/site-packages/isort/stdlibs/py39.py new file mode 100644 index 0000000..4b7dd59 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/stdlibs/py39.py @@ -0,0 +1,224 @@ +""" +File contains the standard library of Python 3.9. + +DO NOT EDIT. If the standard library changes, a new list should be created +using the mkstdlibs.py script. 
+""" + +stdlib = { + "_ast", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "graphlib", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", + "zoneinfo", +} diff --git a/myenv/lib/python3.9/site-packages/isort/utils.py b/myenv/lib/python3.9/site-packages/isort/utils.py new file mode 100644 index 0000000..339c86f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/utils.py @@ -0,0 +1,72 @@ +import os +import sys +from pathlib import Path +from typing import Any, Dict, Optional, Tuple + + +class TrieNode: + def __init__(self, config_file: str = "", config_data: Optional[Dict[str, Any]] = None) -> None: + if not config_data: + config_data = {} + + self.nodes: Dict[str, TrieNode] = {} + self.config_info: Tuple[str, Dict[str, Any]] = (config_file, config_data) + + +class Trie: + """ + A prefix tree to store the paths of all config files and to search the nearest config + associated with each file + """ + + def __init__(self, config_file: str = "", config_data: 
Optional[Dict[str, Any]] = None) -> None: + self.root: TrieNode = TrieNode(config_file, config_data) + + def insert(self, config_file: str, config_data: Dict[str, Any]) -> None: + resolved_config_path_as_tuple = Path(config_file).parent.resolve().parts + + temp = self.root + + for path in resolved_config_path_as_tuple: + if path not in temp.nodes: + temp.nodes[path] = TrieNode() + + temp = temp.nodes[path] + + temp.config_info = (config_file, config_data) + + def search(self, filename: str) -> Tuple[str, Dict[str, Any]]: + """ + Returns the closest config relative to filename by doing a depth + first search on the prefix tree. + """ + resolved_file_path_as_tuple = Path(filename).resolve().parts + + temp = self.root + + last_stored_config: Tuple[str, Dict[str, Any]] = ("", {}) + + for path in resolved_file_path_as_tuple: + if temp.config_info[0]: + last_stored_config = temp.config_info + + if path not in temp.nodes: + break + + temp = temp.nodes[path] + + return last_stored_config + + +def exists_case_sensitive(path: str) -> bool: + """Returns if the given path exists and also matches the case on Windows. + + When finding files that can be imported, it is important for the cases to match because while + file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, + Python can only import using the case of the real file. + """ + result = os.path.exists(path) + if (sys.platform.startswith("win") or sys.platform == "darwin") and result: # pragma: no cover + directory, basename = os.path.split(path) + result = basename in os.listdir(directory) + return result diff --git a/myenv/lib/python3.9/site-packages/isort/wrap.py b/myenv/lib/python3.9/site-packages/isort/wrap.py new file mode 100644 index 0000000..5fb4631 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/wrap.py @@ -0,0 +1,139 @@ +import copy +import re +from typing import List, Optional, Sequence + +from .settings import DEFAULT_CONFIG, Config +from .wrap_modes import WrapModes as Modes +from .wrap_modes import formatter_from_string + + +def import_statement( + import_start: str, + from_imports: List[str], + comments: Sequence[str] = (), + line_separator: str = "\n", + config: Config = DEFAULT_CONFIG, + multi_line_output: Optional[Modes] = None, +) -> str: + """Returns a multi-line wrapped form of the provided from import statement.""" + formatter = formatter_from_string((multi_line_output or config.multi_line_output).name) + dynamic_indent = " " * (len(import_start) + 1) + indent = config.indent + line_length = config.wrap_length or config.line_length + statement = formatter( + statement=import_start, + imports=copy.copy(from_imports), + white_space=dynamic_indent, + indent=indent, + line_length=line_length, + comments=comments, + line_separator=line_separator, + comment_prefix=config.comment_prefix, + include_trailing_comma=config.include_trailing_comma, + remove_comments=config.ignore_comments, + ) + if config.balanced_wrapping: + lines = statement.split(line_separator) + line_count = len(lines) + if len(lines) > 1: + minimum_length = min(len(line) for line in lines[:-1]) + else: + minimum_length = 0 + new_import_statement = statement + while len(lines[-1]) < minimum_length and len(lines) == line_count and line_length > 10: + statement = new_import_statement + line_length -= 1 + new_import_statement = formatter( + statement=import_start, + imports=copy.copy(from_imports), + white_space=dynamic_indent, + indent=indent, + line_length=line_length, + comments=comments, + 
line_separator=line_separator, + comment_prefix=config.comment_prefix, + include_trailing_comma=config.include_trailing_comma, + remove_comments=config.ignore_comments, + ) + lines = new_import_statement.split(line_separator) + if statement.count(line_separator) == 0: + return _wrap_line(statement, line_separator, config) + return statement + + +def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) -> str: + """Returns a line wrapped to the specified line-length, if possible.""" + wrap_mode = config.multi_line_output + if len(content) > config.line_length and wrap_mode != Modes.NOQA: # type: ignore + line_without_comment = content + comment = None + if "#" in content: + line_without_comment, comment = content.split("#", 1) + for splitter in ("import ", ".", "as "): + exp = r"\b" + re.escape(splitter) + r"\b" + if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith( + splitter + ): + line_parts = re.split(exp, line_without_comment) + if comment and not (config.use_parentheses and "noqa" in comment): + _comma_maybe = ( + "," + if ( + config.include_trailing_comma + and config.use_parentheses + and not line_without_comment.rstrip().endswith(",") + ) + else "" + ) + line_parts[ + -1 + ] = f"{line_parts[-1].strip()}{_comma_maybe}{config.comment_prefix}{comment}" + next_line = [] + while (len(content) + 2) > ( + config.wrap_length or config.line_length + ) and line_parts: + next_line.append(line_parts.pop()) + content = splitter.join(line_parts) + if not content: + content = next_line.pop() + + cont_line = _wrap_line( + config.indent + splitter.join(next_line).lstrip(), + line_separator, + config, + ) + if config.use_parentheses: + if splitter == "as ": + output = f"{content}{splitter}{cont_line.lstrip()}" + else: + _comma = "," if config.include_trailing_comma and not comment else "" + + if wrap_mode in ( + Modes.VERTICAL_HANGING_INDENT, # type: ignore + Modes.VERTICAL_GRID_GROUPED, # type: ignore + ): + _separator = line_separator + else: + _separator = "" + _comment = "" + if comment and "noqa" in comment: + _comment = f"{config.comment_prefix}{comment}" + cont_line = cont_line.rstrip() + _comma = "," if config.include_trailing_comma else "" + output = ( + f"{content}{splitter}({_comment}" + f"{line_separator}{cont_line}{_comma}{_separator})" + ) + lines = output.split(line_separator) + if config.comment_prefix in lines[-1] and lines[-1].endswith(")"): + content, comment = lines[-1].split(config.comment_prefix, 1) + lines[-1] = content + ")" + config.comment_prefix + comment[:-1] + return line_separator.join(lines) + return f"{content}{splitter}\\{line_separator}{cont_line}" + elif len(content) > config.line_length and wrap_mode == Modes.NOQA and "# NOQA" not in content: # type: ignore + return f"{content}{config.comment_prefix} NOQA" + + return content + + +_wrap_line = line diff --git a/myenv/lib/python3.9/site-packages/isort/wrap_modes.py b/myenv/lib/python3.9/site-packages/isort/wrap_modes.py new file mode 100644 index 0000000..6ea2801 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/isort/wrap_modes.py @@ -0,0 +1,376 @@ +"""Defines all wrap modes that can be used when outputting formatted imports""" +import enum +from inspect import signature +from typing import Any, Callable, Dict, List + +import isort.comments + +_wrap_modes: Dict[str, Callable[..., str]] = {} + + +def from_string(value: str) -> "WrapModes": + return getattr(WrapModes, str(value), None) or WrapModes(int(value)) + + +def formatter_from_string(name: str) -> 
Callable[..., str]: + return _wrap_modes.get(name.upper(), grid) + + +def _wrap_mode_interface( + statement: str, + imports: List[str], + white_space: str, + indent: str, + line_length: int, + comments: List[str], + line_separator: str, + comment_prefix: str, + include_trailing_comma: bool, + remove_comments: bool, +) -> str: + """Defines the common interface used by all wrap mode functions""" + return "" + + +def _wrap_mode(function: Callable[..., str]) -> Callable[..., str]: + """Registers an individual wrap mode. Function name and order are significant and used for + creating enum. + """ + _wrap_modes[function.__name__.upper()] = function + function.__signature__ = signature(_wrap_mode_interface) # type: ignore + function.__annotations__ = _wrap_mode_interface.__annotations__ + return function + + +@_wrap_mode +def grid(**interface: Any) -> str: + if not interface["imports"]: + return "" + + interface["statement"] += "(" + interface["imports"].pop(0) + while interface["imports"]: + next_import = interface["imports"].pop(0) + next_statement = isort.comments.add_to_line( + interface["comments"], + interface["statement"] + ", " + next_import, + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + if ( + len(next_statement.split(interface["line_separator"])[-1]) + 1 + > interface["line_length"] + ): + lines = [f"{interface['white_space']}{next_import.split(' ')[0]}"] + for part in next_import.split(" ")[1:]: + new_line = f"{lines[-1]} {part}" + if len(new_line) + 1 > interface["line_length"]: + lines.append(f"{interface['white_space']}{part}") + else: + lines[-1] = new_line + next_import = interface["line_separator"].join(lines) + interface["statement"] = ( + isort.comments.add_to_line( + interface["comments"], + f"{interface['statement']},", + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + f"{interface['line_separator']}{next_import}" + ) + interface["comments"] = [] + else: + interface["statement"] += ", " + next_import + return f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''})" + + +@_wrap_mode +def vertical(**interface: Any) -> str: + if not interface["imports"]: + return "" + + first_import = ( + isort.comments.add_to_line( + interface["comments"], + interface["imports"].pop(0) + ",", + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + interface["line_separator"] + + interface["white_space"] + ) + + _imports = ("," + interface["line_separator"] + interface["white_space"]).join( + interface["imports"] + ) + _comma_maybe = "," if interface["include_trailing_comma"] else "" + return f"{interface['statement']}({first_import}{_imports}{_comma_maybe})" + + +def _hanging_indent_end_line(line: str) -> str: + if not line.endswith(" "): + line += " " + return line + "\\" + + +@_wrap_mode +def hanging_indent(**interface: Any) -> str: + if not interface["imports"]: + return "" + + line_length_limit = interface["line_length"] - 3 + + next_import = interface["imports"].pop(0) + next_statement = interface["statement"] + next_import + # Check for first import + if len(next_statement) > line_length_limit: + next_statement = ( + _hanging_indent_end_line(interface["statement"]) + + interface["line_separator"] + + interface["indent"] + + next_import + ) + + interface["statement"] = next_statement + while interface["imports"]: + next_import = interface["imports"].pop(0) + next_statement = interface["statement"] + ", " + next_import + if 
len(next_statement.split(interface["line_separator"])[-1]) > line_length_limit: + next_statement = ( + _hanging_indent_end_line(interface["statement"] + ",") + + f"{interface['line_separator']}{interface['indent']}{next_import}" + ) + interface["statement"] = next_statement + + interface[ + "statement" + ] = f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''}" + if interface["comments"]: + statement_with_comments = isort.comments.add_to_line( + interface["comments"], + interface["statement"], + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + if len(statement_with_comments.split(interface["line_separator"])[-1]) <= ( + line_length_limit + 2 + ): + return statement_with_comments + return ( + _hanging_indent_end_line(interface["statement"]) + + str(interface["line_separator"]) + + isort.comments.add_to_line( + interface["comments"], + interface["indent"], + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"].lstrip(), + ) + ) + return str(interface["statement"]) + + +@_wrap_mode +def vertical_hanging_indent(**interface: Any) -> str: + _line_with_comments = isort.comments.add_to_line( + interface["comments"], + "", + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + _imports = ("," + interface["line_separator"] + interface["indent"]).join(interface["imports"]) + _comma_maybe = "," if interface["include_trailing_comma"] else "" + return ( + f"{interface['statement']}({_line_with_comments}{interface['line_separator']}" + f"{interface['indent']}{_imports}{_comma_maybe}{interface['line_separator']})" + ) + + +def _vertical_grid_common(need_trailing_char: bool, **interface: Any) -> str: + if not interface["imports"]: + return "" + + interface["statement"] += ( + isort.comments.add_to_line( + interface["comments"], + "(", + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + interface["line_separator"] + + interface["indent"] + + interface["imports"].pop(0) + ) + while interface["imports"]: + next_import = interface["imports"].pop(0) + next_statement = f"{interface['statement']}, {next_import}" + current_line_length = len(next_statement.split(interface["line_separator"])[-1]) + if interface["imports"] or interface["include_trailing_comma"]: + # We need to account for a comma after this import. + current_line_length += 1 + if not interface["imports"] and need_trailing_char: + # We need to account for a closing ) we're going to add. + current_line_length += 1 + if current_line_length > interface["line_length"]: + next_statement = ( + f"{interface['statement']},{interface['line_separator']}" + f"{interface['indent']}{next_import}" + ) + interface["statement"] = next_statement + if interface["include_trailing_comma"]: + interface["statement"] += "," + return str(interface["statement"]) + + +@_wrap_mode +def vertical_grid(**interface: Any) -> str: + return _vertical_grid_common(need_trailing_char=True, **interface) + ")" + + +@_wrap_mode +def vertical_grid_grouped(**interface: Any) -> str: + return ( + _vertical_grid_common(need_trailing_char=False, **interface) + + str(interface["line_separator"]) + + ")" + ) + + +@_wrap_mode +def vertical_grid_grouped_no_comma(**interface: Any) -> str: + # This is a deprecated alias for vertical_grid_grouped above. This function + # needs to exist for backwards compatibility but should never get called. 
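# --- usage sketch (illustrative; not part of the vendored isort sources) ---
# Every registered wrap mode above is an ordinary function taking the keyword
# interface defined by _wrap_mode_interface, so it can be exercised directly.
# The statement and import names below are made up for the example.
from isort import wrap_modes

wrapped = wrap_modes.vertical_hanging_indent(
    statement="from os import ",
    imports=["path", "sep", "getcwd"],
    white_space="    ",
    indent="    ",
    line_length=79,
    comments=[],
    line_separator="\n",
    comment_prefix="  #",
    include_trailing_comma=True,
    remove_comments=False,
)
# wrapped ==
# from os import (
#     path,
#     sep,
#     getcwd,
# )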
+ raise NotImplementedError + + +@_wrap_mode +def noqa(**interface: Any) -> str: + _imports = ", ".join(interface["imports"]) + retval = f"{interface['statement']}{_imports}" + comment_str = " ".join(interface["comments"]) + if interface["comments"]: + if ( + len(retval) + len(interface["comment_prefix"]) + 1 + len(comment_str) + <= interface["line_length"] + ): + return f"{retval}{interface['comment_prefix']} {comment_str}" + if "NOQA" in interface["comments"]: + return f"{retval}{interface['comment_prefix']} {comment_str}" + return f"{retval}{interface['comment_prefix']} NOQA {comment_str}" + + if len(retval) <= interface["line_length"]: + return retval + return f"{retval}{interface['comment_prefix']} NOQA" + + +@_wrap_mode +def vertical_hanging_indent_bracket(**interface: Any) -> str: + if not interface["imports"]: + return "" + statement = vertical_hanging_indent(**interface) + return f'{statement[:-1]}{interface["indent"]})' + + +@_wrap_mode +def vertical_prefix_from_module_import(**interface: Any) -> str: + if not interface["imports"]: + return "" + + prefix_statement = interface["statement"] + output_statement = prefix_statement + interface["imports"].pop(0) + comments = interface["comments"] + + statement = output_statement + statement_with_comments = "" + for next_import in interface["imports"]: + statement = statement + ", " + next_import + statement_with_comments = isort.comments.add_to_line( + comments, + statement, + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + if ( + len(statement_with_comments.split(interface["line_separator"])[-1]) + 1 + > interface["line_length"] + ): + statement = ( + isort.comments.add_to_line( + comments, + output_statement, + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + f"{interface['line_separator']}{prefix_statement}{next_import}" + ) + comments = [] + output_statement = statement + + if comments and statement_with_comments: + output_statement = statement_with_comments + return str(output_statement) + + +@_wrap_mode +def hanging_indent_with_parentheses(**interface: Any) -> str: + if not interface["imports"]: + return "" + + line_length_limit = interface["line_length"] - 1 + + interface["statement"] += "(" + next_import = interface["imports"].pop(0) + next_statement = interface["statement"] + next_import + # Check for first import + if len(next_statement) > line_length_limit: + next_statement = ( + isort.comments.add_to_line( + interface["comments"], + interface["statement"], + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + f"{interface['line_separator']}{interface['indent']}{next_import}" + ) + interface["comments"] = [] + interface["statement"] = next_statement + while interface["imports"]: + next_import = interface["imports"].pop(0) + if ( + not interface["line_separator"] in interface["statement"] + and "#" in interface["statement"] + ): # pragma: no cover # TODO: fix, this is because of test run inconsistency. 
+ line, comments = interface["statement"].split("#", 1) + next_statement = ( + f"{line.rstrip()}, {next_import}{interface['comment_prefix']}{comments}" + ) + else: + next_statement = isort.comments.add_to_line( + interface["comments"], + interface["statement"] + ", " + next_import, + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + current_line = next_statement.split(interface["line_separator"])[-1] + if len(current_line) > line_length_limit: + next_statement = ( + isort.comments.add_to_line( + interface["comments"], + interface["statement"] + ",", + removed=interface["remove_comments"], + comment_prefix=interface["comment_prefix"], + ) + + f"{interface['line_separator']}{interface['indent']}{next_import}" + ) + interface["comments"] = [] + interface["statement"] = next_statement + return f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''})" + + +@_wrap_mode +def backslash_grid(**interface: Any) -> str: + interface["indent"] = interface["white_space"][:-1] + return hanging_indent(**interface) + + +WrapModes = enum.Enum( # type: ignore + "WrapModes", {wrap_mode: index for index, wrap_mode in enumerate(_wrap_modes.keys())} +) diff --git a/myenv/lib/python3.9/site-packages/jose/__init__.py b/myenv/lib/python3.9/site-packages/jose/__init__.py new file mode 100644 index 0000000..054baa7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/__init__.py @@ -0,0 +1,10 @@ +__version__ = "3.3.0" +__author__ = "Michael Davis" +__license__ = "MIT" +__copyright__ = "Copyright 2016 Michael Davis" + + +from .exceptions import ExpiredSignatureError # noqa: F401 +from .exceptions import JOSEError # noqa: F401 +from .exceptions import JWSError # noqa: F401 +from .exceptions import JWTError # noqa: F401 diff --git a/myenv/lib/python3.9/site-packages/jose/backends/__init__.py b/myenv/lib/python3.9/site-packages/jose/backends/__init__.py new file mode 100644 index 0000000..e7bba69 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/__init__.py @@ -0,0 +1,32 @@ +try: + from jose.backends.cryptography_backend import get_random_bytes # noqa: F401 +except ImportError: + try: + from jose.backends.pycrypto_backend import get_random_bytes # noqa: F401 + except ImportError: + from jose.backends.native import get_random_bytes # noqa: F401 + +try: + from jose.backends.cryptography_backend import CryptographyRSAKey as RSAKey # noqa: F401 +except ImportError: + try: + from jose.backends.rsa_backend import RSAKey # noqa: F401 + except ImportError: + RSAKey = None + +try: + from jose.backends.cryptography_backend import CryptographyECKey as ECKey # noqa: F401 +except ImportError: + from jose.backends.ecdsa_backend import ECDSAECKey as ECKey # noqa: F401 + +try: + from jose.backends.cryptography_backend import CryptographyAESKey as AESKey # noqa: F401 +except ImportError: + AESKey = None + +try: + from jose.backends.cryptography_backend import CryptographyHMACKey as HMACKey # noqa: F401 +except ImportError: + from jose.backends.native import HMACKey # noqa: F401 + +from .base import DIRKey # noqa: F401 diff --git a/myenv/lib/python3.9/site-packages/jose/backends/_asn1.py b/myenv/lib/python3.9/site-packages/jose/backends/_asn1.py new file mode 100644 index 0000000..af5fa8b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/_asn1.py @@ -0,0 +1,83 @@ +"""ASN1 encoding helpers for converting between PKCS1 and PKCS8. + +Required by rsa_backend but not cryptography_backend. 
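# --- usage sketch (illustrative; not part of the vendored python-jose sources) ---
# The jose/backends/__init__.py added above resolves each key class to whichever
# backend is importable (cryptography first, then the pycrypto/ecdsa/rsa
# fallbacks, then the native implementation), so application code can stay
# backend-agnostic:
from jose.backends import AESKey, ECKey, HMACKey, RSAKey

# With the cryptography package installed these resolve to the Cryptography*
# classes defined later in this diff; without it they fall back to the ecdsa /
# rsa / native implementations (and AESKey may be None).
print(RSAKey, ECKey, HMACKey, AESKey)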
+""" +from pyasn1.codec.der import decoder, encoder +from pyasn1.type import namedtype, univ + +RSA_ENCRYPTION_ASN1_OID = "1.2.840.113549.1.1.1" + + +class RsaAlgorithmIdentifier(univ.Sequence): + """ASN1 structure for recording RSA PrivateKeyAlgorithm identifiers.""" + + componentType = namedtype.NamedTypes( + namedtype.NamedType("rsaEncryption", univ.ObjectIdentifier()), namedtype.NamedType("parameters", univ.Null()) + ) + + +class PKCS8PrivateKey(univ.Sequence): + """ASN1 structure for recording PKCS8 private keys.""" + + componentType = namedtype.NamedTypes( + namedtype.NamedType("version", univ.Integer()), + namedtype.NamedType("privateKeyAlgorithm", RsaAlgorithmIdentifier()), + namedtype.NamedType("privateKey", univ.OctetString()), + ) + + +class PublicKeyInfo(univ.Sequence): + """ASN1 structure for recording PKCS8 public keys.""" + + componentType = namedtype.NamedTypes( + namedtype.NamedType("algorithm", RsaAlgorithmIdentifier()), namedtype.NamedType("publicKey", univ.BitString()) + ) + + +def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key): + """Convert a PKCS8-encoded RSA private key to PKCS1.""" + decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey()) + + try: + decoded_key = decoded_values[0] + except IndexError: + raise ValueError("Invalid private key encoding") + + return decoded_key["privateKey"] + + +def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key): + """Convert a PKCS1-encoded RSA private key to PKCS8.""" + algorithm = RsaAlgorithmIdentifier() + algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID + + pkcs8_key = PKCS8PrivateKey() + pkcs8_key["version"] = 0 + pkcs8_key["privateKeyAlgorithm"] = algorithm + pkcs8_key["privateKey"] = pkcs1_key + + return encoder.encode(pkcs8_key) + + +def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key): + """Convert a PKCS1-encoded RSA private key to PKCS8.""" + algorithm = RsaAlgorithmIdentifier() + algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID + + pkcs8_key = PublicKeyInfo() + pkcs8_key["algorithm"] = algorithm + pkcs8_key["publicKey"] = univ.BitString.fromOctetString(pkcs1_key) + + return encoder.encode(pkcs8_key) + + +def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key): + """Convert a PKCS8-encoded RSA private key to PKCS1.""" + decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo()) + + try: + decoded_key = decoded_values[0] + except IndexError: + raise ValueError("Invalid public key encoding.") + + return decoded_key["publicKey"].asOctets() diff --git a/myenv/lib/python3.9/site-packages/jose/backends/base.py b/myenv/lib/python3.9/site-packages/jose/backends/base.py new file mode 100644 index 0000000..b000a52 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/base.py @@ -0,0 +1,89 @@ +from ..utils import base64url_encode, ensure_binary + + +class Key: + """ + A simple interface for implementing JWK keys. 
+ """ + + def __init__(self, key, algorithm): + pass + + def sign(self, msg): + raise NotImplementedError() + + def verify(self, msg, sig): + raise NotImplementedError() + + def public_key(self): + raise NotImplementedError() + + def to_pem(self): + raise NotImplementedError() + + def to_dict(self): + raise NotImplementedError() + + def encrypt(self, plain_text, aad=None): + """ + Encrypt the plain text and generate an auth tag if appropriate + + Args: + plain_text (bytes): Data to encrypt + aad (bytes, optional): Authenticated Additional Data if key's algorithm supports auth mode + + Returns: + (bytes, bytes, bytes): IV, cipher text, and auth tag + """ + raise NotImplementedError() + + def decrypt(self, cipher_text, iv=None, aad=None, tag=None): + """ + Decrypt the cipher text and validate the auth tag if present + Args: + cipher_text (bytes): Cipher text to decrypt + iv (bytes): IV if block mode + aad (bytes): Additional Authenticated Data to verify if auth mode + tag (bytes): Authentication tag if auth mode + + Returns: + bytes: Decrypted value + """ + raise NotImplementedError() + + def wrap_key(self, key_data): + """ + Wrap the the plain text key data + + Args: + key_data (bytes): Key data to wrap + + Returns: + bytes: Wrapped key + """ + raise NotImplementedError() + + def unwrap_key(self, wrapped_key): + """ + Unwrap the the wrapped key data + + Args: + wrapped_key (bytes): Wrapped key data to unwrap + + Returns: + bytes: Unwrapped key + """ + raise NotImplementedError() + + +class DIRKey(Key): + def __init__(self, key_data, algorithm): + self._key = ensure_binary(key_data) + self._alg = algorithm + + def to_dict(self): + return { + "alg": self._alg, + "kty": "oct", + "k": base64url_encode(self._key), + } diff --git a/myenv/lib/python3.9/site-packages/jose/backends/cryptography_backend.py b/myenv/lib/python3.9/site-packages/jose/backends/cryptography_backend.py new file mode 100644 index 0000000..abd2426 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/cryptography_backend.py @@ -0,0 +1,605 @@ +import math +import warnings + +from cryptography.exceptions import InvalidSignature, InvalidTag +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.bindings.openssl.binding import Binding +from cryptography.hazmat.primitives import hashes, hmac, serialization +from cryptography.hazmat.primitives.asymmetric import ec, padding, rsa +from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature, encode_dss_signature +from cryptography.hazmat.primitives.ciphers import Cipher, aead, algorithms, modes +from cryptography.hazmat.primitives.keywrap import InvalidUnwrap, aes_key_unwrap, aes_key_wrap +from cryptography.hazmat.primitives.padding import PKCS7 +from cryptography.hazmat.primitives.serialization import load_pem_private_key, load_pem_public_key +from cryptography.utils import int_to_bytes +from cryptography.x509 import load_pem_x509_certificate + +from ..constants import ALGORITHMS +from ..exceptions import JWEError, JWKError +from ..utils import base64_to_long, base64url_decode, base64url_encode, ensure_binary, long_to_base64 +from .base import Key + +_binding = None + + +def get_random_bytes(num_bytes): + """ + Get random bytes + + Currently, Cryptography returns OS random bytes. 
If you want OpenSSL + generated random bytes, you'll have to switch the RAND engine after + initializing the OpenSSL backend + Args: + num_bytes (int): Number of random bytes to generate and return + Returns: + bytes: Random bytes + """ + global _binding + + if _binding is None: + _binding = Binding() + + buf = _binding.ffi.new("char[]", num_bytes) + _binding.lib.RAND_bytes(buf, num_bytes) + rand_bytes = _binding.ffi.buffer(buf, num_bytes)[:] + return rand_bytes + + +class CryptographyECKey(Key): + SHA256 = hashes.SHA256 + SHA384 = hashes.SHA384 + SHA512 = hashes.SHA512 + + def __init__(self, key, algorithm, cryptography_backend=default_backend): + if algorithm not in ALGORITHMS.EC: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + + self.hash_alg = { + ALGORITHMS.ES256: self.SHA256, + ALGORITHMS.ES384: self.SHA384, + ALGORITHMS.ES512: self.SHA512, + }.get(algorithm) + self._algorithm = algorithm + + self.cryptography_backend = cryptography_backend + + if hasattr(key, "public_bytes") or hasattr(key, "private_bytes"): + self.prepared_key = key + return + + if hasattr(key, "to_pem"): + # convert to PEM and let cryptography below load it as PEM + key = key.to_pem().decode("utf-8") + + if isinstance(key, dict): + self.prepared_key = self._process_jwk(key) + return + + if isinstance(key, str): + key = key.encode("utf-8") + + if isinstance(key, bytes): + # Attempt to load key. We don't know if it's + # a Public Key or a Private Key, so we try + # the Public Key first. + try: + try: + key = load_pem_public_key(key, self.cryptography_backend()) + except ValueError: + key = load_pem_private_key(key, password=None, backend=self.cryptography_backend()) + except Exception as e: + raise JWKError(e) + + self.prepared_key = key + return + + raise JWKError("Unable to parse an ECKey from key: %s" % key) + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "EC": + raise JWKError("Incorrect key type. Expected: 'EC', Received: %s" % jwk_dict.get("kty")) + + if not all(k in jwk_dict for k in ["x", "y", "crv"]): + raise JWKError("Mandatory parameters are missing") + + x = base64_to_long(jwk_dict.get("x")) + y = base64_to_long(jwk_dict.get("y")) + curve = { + "P-256": ec.SECP256R1, + "P-384": ec.SECP384R1, + "P-521": ec.SECP521R1, + }[jwk_dict["crv"]] + + public = ec.EllipticCurvePublicNumbers(x, y, curve()) + + if "d" in jwk_dict: + d = base64_to_long(jwk_dict.get("d")) + private = ec.EllipticCurvePrivateNumbers(d, public) + + return private.private_key(self.cryptography_backend()) + else: + return public.public_key(self.cryptography_backend()) + + def _sig_component_length(self): + """Determine the correct serialization length for an encoded signature component. + + This is the number of bytes required to encode the maximum key value. 
+ """ + return int(math.ceil(self.prepared_key.key_size / 8.0)) + + def _der_to_raw(self, der_signature): + """Convert signature from DER encoding to RAW encoding.""" + r, s = decode_dss_signature(der_signature) + component_length = self._sig_component_length() + return int_to_bytes(r, component_length) + int_to_bytes(s, component_length) + + def _raw_to_der(self, raw_signature): + """Convert signature from RAW encoding to DER encoding.""" + component_length = self._sig_component_length() + if len(raw_signature) != int(2 * component_length): + raise ValueError("Invalid signature") + + r_bytes = raw_signature[:component_length] + s_bytes = raw_signature[component_length:] + r = int.from_bytes(r_bytes, "big") + s = int.from_bytes(s_bytes, "big") + return encode_dss_signature(r, s) + + def sign(self, msg): + if self.hash_alg.digest_size * 8 > self.prepared_key.curve.key_size: + raise TypeError( + "this curve (%s) is too short " + "for your digest (%d)" % (self.prepared_key.curve.name, 8 * self.hash_alg.digest_size) + ) + signature = self.prepared_key.sign(msg, ec.ECDSA(self.hash_alg())) + return self._der_to_raw(signature) + + def verify(self, msg, sig): + try: + signature = self._raw_to_der(sig) + self.prepared_key.verify(signature, msg, ec.ECDSA(self.hash_alg())) + return True + except Exception: + return False + + def is_public(self): + return hasattr(self.prepared_key, "public_bytes") + + def public_key(self): + if self.is_public(): + return self + return self.__class__(self.prepared_key.public_key(), self._algorithm) + + def to_pem(self): + if self.is_public(): + pem = self.prepared_key.public_bytes( + encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo + ) + return pem + pem = self.prepared_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + return pem + + def to_dict(self): + if not self.is_public(): + public_key = self.prepared_key.public_key() + else: + public_key = self.prepared_key + + crv = { + "secp256r1": "P-256", + "secp384r1": "P-384", + "secp521r1": "P-521", + }[self.prepared_key.curve.name] + + # Calculate the key size in bytes. Section 6.2.1.2 and 6.2.1.3 of + # RFC7518 prescribes that the 'x', 'y' and 'd' parameters of the curve + # points must be encoded as octed-strings of this length. 
+ key_size = (self.prepared_key.curve.key_size + 7) // 8 + + data = { + "alg": self._algorithm, + "kty": "EC", + "crv": crv, + "x": long_to_base64(public_key.public_numbers().x, size=key_size).decode("ASCII"), + "y": long_to_base64(public_key.public_numbers().y, size=key_size).decode("ASCII"), + } + + if not self.is_public(): + private_value = self.prepared_key.private_numbers().private_value + data["d"] = long_to_base64(private_value, size=key_size).decode("ASCII") + + return data + + +class CryptographyRSAKey(Key): + SHA256 = hashes.SHA256 + SHA384 = hashes.SHA384 + SHA512 = hashes.SHA512 + + RSA1_5 = padding.PKCS1v15() + RSA_OAEP = padding.OAEP(padding.MGF1(hashes.SHA1()), hashes.SHA1(), None) + RSA_OAEP_256 = padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None) + + def __init__(self, key, algorithm, cryptography_backend=default_backend): + if algorithm not in ALGORITHMS.RSA: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + + self.hash_alg = { + ALGORITHMS.RS256: self.SHA256, + ALGORITHMS.RS384: self.SHA384, + ALGORITHMS.RS512: self.SHA512, + }.get(algorithm) + self._algorithm = algorithm + + self.padding = { + ALGORITHMS.RSA1_5: self.RSA1_5, + ALGORITHMS.RSA_OAEP: self.RSA_OAEP, + ALGORITHMS.RSA_OAEP_256: self.RSA_OAEP_256, + }.get(algorithm) + + self.cryptography_backend = cryptography_backend + + # if it conforms to RSAPublicKey interface + if hasattr(key, "public_bytes") and hasattr(key, "public_numbers"): + self.prepared_key = key + return + + if isinstance(key, dict): + self.prepared_key = self._process_jwk(key) + return + + if isinstance(key, str): + key = key.encode("utf-8") + + if isinstance(key, bytes): + try: + if key.startswith(b"-----BEGIN CERTIFICATE-----"): + self._process_cert(key) + return + + try: + self.prepared_key = load_pem_public_key(key, self.cryptography_backend()) + except ValueError: + self.prepared_key = load_pem_private_key(key, password=None, backend=self.cryptography_backend()) + except Exception as e: + raise JWKError(e) + return + + raise JWKError("Unable to parse an RSA_JWK from key: %s" % key) + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "RSA": + raise JWKError("Incorrect key type. Expected: 'RSA', Received: %s" % jwk_dict.get("kty")) + + e = base64_to_long(jwk_dict.get("e", 256)) + n = base64_to_long(jwk_dict.get("n")) + public = rsa.RSAPublicNumbers(e, n) + + if "d" not in jwk_dict: + return public.public_key(self.cryptography_backend()) + else: + # This is a private key. + d = base64_to_long(jwk_dict.get("d")) + + extra_params = ["p", "q", "dp", "dq", "qi"] + + if any(k in jwk_dict for k in extra_params): + # Precomputed private key parameters are available. + if not all(k in jwk_dict for k in extra_params): + # These values must be present when 'p' is according to + # Section 6.3.2 of RFC7518, so if they are not we raise + # an error. + raise JWKError("Precomputed private key parameters are incomplete.") + + p = base64_to_long(jwk_dict["p"]) + q = base64_to_long(jwk_dict["q"]) + dp = base64_to_long(jwk_dict["dp"]) + dq = base64_to_long(jwk_dict["dq"]) + qi = base64_to_long(jwk_dict["qi"]) + else: + # The precomputed private key parameters are not available, + # so we use cryptography's API to fill them in. 
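# --- usage sketch (illustrative; not part of the vendored python-jose sources) ---
# When a JWK carries only n, e and d, the fallback branch here recovers p, q and
# the CRT values via cryptography's rsa_recover_prime_factors / rsa_crt_*
# helpers. The round trip below demonstrates that; the key-generation parameters
# are arbitrary example values.
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from jose.backends.cryptography_backend import CryptographyRSAKey
from jose.constants import ALGORITHMS

pem = rsa.generate_private_key(public_exponent=65537, key_size=2048).private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
full_jwk = CryptographyRSAKey(pem, ALGORITHMS.RS256).to_dict()
minimal_jwk = {k: full_jwk[k] for k in ("kty", "n", "e", "d")}
recovered = CryptographyRSAKey(minimal_jwk, ALGORITHMS.RS256)
assert recovered.to_dict().keys() == full_jwk.keys()   # p, q, dp, dq, qi recomputed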
+ p, q = rsa.rsa_recover_prime_factors(n, e, d) + dp = rsa.rsa_crt_dmp1(d, p) + dq = rsa.rsa_crt_dmq1(d, q) + qi = rsa.rsa_crt_iqmp(p, q) + + private = rsa.RSAPrivateNumbers(p, q, d, dp, dq, qi, public) + + return private.private_key(self.cryptography_backend()) + + def _process_cert(self, key): + key = load_pem_x509_certificate(key, self.cryptography_backend()) + self.prepared_key = key.public_key() + + def sign(self, msg): + try: + signature = self.prepared_key.sign(msg, padding.PKCS1v15(), self.hash_alg()) + except Exception as e: + raise JWKError(e) + return signature + + def verify(self, msg, sig): + if not self.is_public(): + warnings.warn("Attempting to verify a message with a private key. " "This is not recommended.") + + try: + self.public_key().prepared_key.verify(sig, msg, padding.PKCS1v15(), self.hash_alg()) + return True + except InvalidSignature: + return False + + def is_public(self): + return hasattr(self.prepared_key, "public_bytes") + + def public_key(self): + if self.is_public(): + return self + return self.__class__(self.prepared_key.public_key(), self._algorithm) + + def to_pem(self, pem_format="PKCS8"): + if self.is_public(): + if pem_format == "PKCS8": + fmt = serialization.PublicFormat.SubjectPublicKeyInfo + elif pem_format == "PKCS1": + fmt = serialization.PublicFormat.PKCS1 + else: + raise ValueError("Invalid format specified: %r" % pem_format) + pem = self.prepared_key.public_bytes(encoding=serialization.Encoding.PEM, format=fmt) + return pem + + if pem_format == "PKCS8": + fmt = serialization.PrivateFormat.PKCS8 + elif pem_format == "PKCS1": + fmt = serialization.PrivateFormat.TraditionalOpenSSL + else: + raise ValueError("Invalid format specified: %r" % pem_format) + + return self.prepared_key.private_bytes( + encoding=serialization.Encoding.PEM, format=fmt, encryption_algorithm=serialization.NoEncryption() + ) + + def to_dict(self): + if not self.is_public(): + public_key = self.prepared_key.public_key() + else: + public_key = self.prepared_key + + data = { + "alg": self._algorithm, + "kty": "RSA", + "n": long_to_base64(public_key.public_numbers().n).decode("ASCII"), + "e": long_to_base64(public_key.public_numbers().e).decode("ASCII"), + } + + if not self.is_public(): + data.update( + { + "d": long_to_base64(self.prepared_key.private_numbers().d).decode("ASCII"), + "p": long_to_base64(self.prepared_key.private_numbers().p).decode("ASCII"), + "q": long_to_base64(self.prepared_key.private_numbers().q).decode("ASCII"), + "dp": long_to_base64(self.prepared_key.private_numbers().dmp1).decode("ASCII"), + "dq": long_to_base64(self.prepared_key.private_numbers().dmq1).decode("ASCII"), + "qi": long_to_base64(self.prepared_key.private_numbers().iqmp).decode("ASCII"), + } + ) + + return data + + def wrap_key(self, key_data): + try: + wrapped_key = self.prepared_key.encrypt(key_data, self.padding) + except Exception as e: + raise JWEError(e) + + return wrapped_key + + def unwrap_key(self, wrapped_key): + try: + unwrapped_key = self.prepared_key.decrypt(wrapped_key, self.padding) + return unwrapped_key + except Exception as e: + raise JWEError(e) + + +class CryptographyAESKey(Key): + KEY_128 = (ALGORITHMS.A128GCM, ALGORITHMS.A128GCMKW, ALGORITHMS.A128KW, ALGORITHMS.A128CBC) + KEY_192 = (ALGORITHMS.A192GCM, ALGORITHMS.A192GCMKW, ALGORITHMS.A192KW, ALGORITHMS.A192CBC) + KEY_256 = ( + ALGORITHMS.A256GCM, + ALGORITHMS.A256GCMKW, + ALGORITHMS.A256KW, + ALGORITHMS.A128CBC_HS256, + ALGORITHMS.A256CBC, + ) + KEY_384 = (ALGORITHMS.A192CBC_HS384,) + KEY_512 = 
(ALGORITHMS.A256CBC_HS512,) + + AES_KW_ALGS = (ALGORITHMS.A128KW, ALGORITHMS.A192KW, ALGORITHMS.A256KW) + + MODES = { + ALGORITHMS.A128GCM: modes.GCM, + ALGORITHMS.A192GCM: modes.GCM, + ALGORITHMS.A256GCM: modes.GCM, + ALGORITHMS.A128CBC_HS256: modes.CBC, + ALGORITHMS.A192CBC_HS384: modes.CBC, + ALGORITHMS.A256CBC_HS512: modes.CBC, + ALGORITHMS.A128CBC: modes.CBC, + ALGORITHMS.A192CBC: modes.CBC, + ALGORITHMS.A256CBC: modes.CBC, + ALGORITHMS.A128GCMKW: modes.GCM, + ALGORITHMS.A192GCMKW: modes.GCM, + ALGORITHMS.A256GCMKW: modes.GCM, + ALGORITHMS.A128KW: None, + ALGORITHMS.A192KW: None, + ALGORITHMS.A256KW: None, + } + + def __init__(self, key, algorithm): + if algorithm not in ALGORITHMS.AES: + raise JWKError("%s is not a valid AES algorithm" % algorithm) + if algorithm not in ALGORITHMS.SUPPORTED.union(ALGORITHMS.AES_PSEUDO): + raise JWKError("%s is not a supported algorithm" % algorithm) + + self._algorithm = algorithm + self._mode = self.MODES.get(self._algorithm) + + if algorithm in self.KEY_128 and len(key) != 16: + raise JWKError(f"Key must be 128 bit for alg {algorithm}") + elif algorithm in self.KEY_192 and len(key) != 24: + raise JWKError(f"Key must be 192 bit for alg {algorithm}") + elif algorithm in self.KEY_256 and len(key) != 32: + raise JWKError(f"Key must be 256 bit for alg {algorithm}") + elif algorithm in self.KEY_384 and len(key) != 48: + raise JWKError(f"Key must be 384 bit for alg {algorithm}") + elif algorithm in self.KEY_512 and len(key) != 64: + raise JWKError(f"Key must be 512 bit for alg {algorithm}") + + self._key = key + + def to_dict(self): + data = {"alg": self._algorithm, "kty": "oct", "k": base64url_encode(self._key)} + return data + + def encrypt(self, plain_text, aad=None): + plain_text = ensure_binary(plain_text) + try: + iv = get_random_bytes(algorithms.AES.block_size // 8) + mode = self._mode(iv) + if mode.name == "GCM": + cipher = aead.AESGCM(self._key) + cipher_text_and_tag = cipher.encrypt(iv, plain_text, aad) + cipher_text = cipher_text_and_tag[: len(cipher_text_and_tag) - 16] + auth_tag = cipher_text_and_tag[-16:] + else: + cipher = Cipher(algorithms.AES(self._key), mode, backend=default_backend()) + encryptor = cipher.encryptor() + padder = PKCS7(algorithms.AES.block_size).padder() + padded_data = padder.update(plain_text) + padded_data += padder.finalize() + cipher_text = encryptor.update(padded_data) + encryptor.finalize() + auth_tag = None + return iv, cipher_text, auth_tag + except Exception as e: + raise JWEError(e) + + def decrypt(self, cipher_text, iv=None, aad=None, tag=None): + cipher_text = ensure_binary(cipher_text) + try: + iv = ensure_binary(iv) + mode = self._mode(iv) + if mode.name == "GCM": + if tag is None: + raise ValueError("tag cannot be None") + cipher = aead.AESGCM(self._key) + cipher_text_and_tag = cipher_text + tag + try: + plain_text = cipher.decrypt(iv, cipher_text_and_tag, aad) + except InvalidTag: + raise JWEError("Invalid JWE Auth Tag") + else: + cipher = Cipher(algorithms.AES(self._key), mode, backend=default_backend()) + decryptor = cipher.decryptor() + padded_plain_text = decryptor.update(cipher_text) + padded_plain_text += decryptor.finalize() + unpadder = PKCS7(algorithms.AES.block_size).unpadder() + plain_text = unpadder.update(padded_plain_text) + plain_text += unpadder.finalize() + + return plain_text + except Exception as e: + raise JWEError(e) + + def wrap_key(self, key_data): + key_data = ensure_binary(key_data) + cipher_text = aes_key_wrap(self._key, key_data, default_backend()) + return cipher_text # IV, 
cipher text, auth tag + + def unwrap_key(self, wrapped_key): + wrapped_key = ensure_binary(wrapped_key) + try: + plain_text = aes_key_unwrap(self._key, wrapped_key, default_backend()) + except InvalidUnwrap as cause: + raise JWEError(cause) + return plain_text + + +class CryptographyHMACKey(Key): + """ + Performs signing and verification operations using HMAC + and the specified hash function. + """ + + ALG_MAP = {ALGORITHMS.HS256: hashes.SHA256(), ALGORITHMS.HS384: hashes.SHA384(), ALGORITHMS.HS512: hashes.SHA512()} + + def __init__(self, key, algorithm): + if algorithm not in ALGORITHMS.HMAC: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + self._algorithm = algorithm + self._hash_alg = self.ALG_MAP.get(algorithm) + + if isinstance(key, dict): + self.prepared_key = self._process_jwk(key) + return + + if not isinstance(key, str) and not isinstance(key, bytes): + raise JWKError("Expecting a string- or bytes-formatted key.") + + if isinstance(key, str): + key = key.encode("utf-8") + + invalid_strings = [ + b"-----BEGIN PUBLIC KEY-----", + b"-----BEGIN RSA PUBLIC KEY-----", + b"-----BEGIN CERTIFICATE-----", + b"ssh-rsa", + ] + + if any(string_value in key for string_value in invalid_strings): + raise JWKError( + "The specified key is an asymmetric key or x509 certificate and" + " should not be used as an HMAC secret." + ) + + self.prepared_key = key + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "oct": + raise JWKError("Incorrect key type. Expected: 'oct', Received: %s" % jwk_dict.get("kty")) + + k = jwk_dict.get("k") + k = k.encode("utf-8") + k = bytes(k) + k = base64url_decode(k) + + return k + + def to_dict(self): + return { + "alg": self._algorithm, + "kty": "oct", + "k": base64url_encode(self.prepared_key).decode("ASCII"), + } + + def sign(self, msg): + msg = ensure_binary(msg) + h = hmac.HMAC(self.prepared_key, self._hash_alg, backend=default_backend()) + h.update(msg) + signature = h.finalize() + return signature + + def verify(self, msg, sig): + msg = ensure_binary(msg) + sig = ensure_binary(sig) + h = hmac.HMAC(self.prepared_key, self._hash_alg, backend=default_backend()) + h.update(msg) + try: + h.verify(sig) + verified = True + except InvalidSignature: + verified = False + return verified diff --git a/myenv/lib/python3.9/site-packages/jose/backends/ecdsa_backend.py b/myenv/lib/python3.9/site-packages/jose/backends/ecdsa_backend.py new file mode 100644 index 0000000..756c7ea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/ecdsa_backend.py @@ -0,0 +1,150 @@ +import hashlib + +import ecdsa + +from jose.backends.base import Key +from jose.constants import ALGORITHMS +from jose.exceptions import JWKError +from jose.utils import base64_to_long, long_to_base64 + + +class ECDSAECKey(Key): + """ + Performs signing and verification operations using + ECDSA and the specified hash function + + This class requires the ecdsa package to be installed. 
+ + This is based off of the implementation in PyJWT 0.3.2 + """ + + SHA256 = hashlib.sha256 + SHA384 = hashlib.sha384 + SHA512 = hashlib.sha512 + + CURVE_MAP = { + SHA256: ecdsa.curves.NIST256p, + SHA384: ecdsa.curves.NIST384p, + SHA512: ecdsa.curves.NIST521p, + } + CURVE_NAMES = ( + (ecdsa.curves.NIST256p, "P-256"), + (ecdsa.curves.NIST384p, "P-384"), + (ecdsa.curves.NIST521p, "P-521"), + ) + + def __init__(self, key, algorithm): + if algorithm not in ALGORITHMS.EC: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + + self.hash_alg = { + ALGORITHMS.ES256: self.SHA256, + ALGORITHMS.ES384: self.SHA384, + ALGORITHMS.ES512: self.SHA512, + }.get(algorithm) + self._algorithm = algorithm + + self.curve = self.CURVE_MAP.get(self.hash_alg) + + if isinstance(key, (ecdsa.SigningKey, ecdsa.VerifyingKey)): + self.prepared_key = key + return + + if isinstance(key, dict): + self.prepared_key = self._process_jwk(key) + return + + if isinstance(key, str): + key = key.encode("utf-8") + + if isinstance(key, bytes): + # Attempt to load key. We don't know if it's + # a Signing Key or a Verifying Key, so we try + # the Verifying Key first. + try: + key = ecdsa.VerifyingKey.from_pem(key) + except ecdsa.der.UnexpectedDER: + key = ecdsa.SigningKey.from_pem(key) + except Exception as e: + raise JWKError(e) + + self.prepared_key = key + return + + raise JWKError("Unable to parse an ECKey from key: %s" % key) + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "EC": + raise JWKError("Incorrect key type. Expected: 'EC', Received: %s" % jwk_dict.get("kty")) + + if not all(k in jwk_dict for k in ["x", "y", "crv"]): + raise JWKError("Mandatory parameters are missing") + + if "d" in jwk_dict: + # We are dealing with a private key; the secret exponent is enough + # to create an ecdsa key. + d = base64_to_long(jwk_dict.get("d")) + return ecdsa.keys.SigningKey.from_secret_exponent(d, self.curve) + else: + x = base64_to_long(jwk_dict.get("x")) + y = base64_to_long(jwk_dict.get("y")) + + if not ecdsa.ecdsa.point_is_valid(self.curve.generator, x, y): + raise JWKError(f"Point: {x}, {y} is not a valid point") + + point = ecdsa.ellipticcurve.Point(self.curve.curve, x, y, self.curve.order) + return ecdsa.keys.VerifyingKey.from_public_point(point, self.curve) + + def sign(self, msg): + return self.prepared_key.sign( + msg, hashfunc=self.hash_alg, sigencode=ecdsa.util.sigencode_string, allow_truncate=False + ) + + def verify(self, msg, sig): + try: + return self.prepared_key.verify( + sig, msg, hashfunc=self.hash_alg, sigdecode=ecdsa.util.sigdecode_string, allow_truncate=False + ) + except Exception: + return False + + def is_public(self): + return isinstance(self.prepared_key, ecdsa.VerifyingKey) + + def public_key(self): + if self.is_public(): + return self + return self.__class__(self.prepared_key.get_verifying_key(), self._algorithm) + + def to_pem(self): + return self.prepared_key.to_pem() + + def to_dict(self): + if not self.is_public(): + public_key = self.prepared_key.get_verifying_key() + else: + public_key = self.prepared_key + crv = None + for key, value in self.CURVE_NAMES: + if key == self.prepared_key.curve: + crv = value + if not crv: + raise KeyError(f"Can't match {self.prepared_key.curve}") + + # Calculate the key size in bytes. Section 6.2.1.2 and 6.2.1.3 of + # RFC7518 prescribes that the 'x', 'y' and 'd' parameters of the curve + # points must be encoded as octed-strings of this length. 
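# --- usage sketch (illustrative; not part of the vendored python-jose sources) ---
# ECDSAECKey wraps a pure-python ecdsa SigningKey/VerifyingKey; signatures use
# sigencode_string, i.e. the same raw r||s layout as the cryptography backend.
# The payload bytes below are made up for the example.
import ecdsa
from jose.backends.ecdsa_backend import ECDSAECKey
from jose.constants import ALGORITHMS

signing_key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
es_key = ECDSAECKey(signing_key, ALGORITHMS.ES256)

raw_sig = es_key.sign(b"payload")
assert es_key.public_key().verify(b"payload", raw_sig)
print(es_key.to_dict()["crv"])                  # "P-256"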
+ key_size = self.prepared_key.curve.baselen + + data = { + "alg": self._algorithm, + "kty": "EC", + "crv": crv, + "x": long_to_base64(public_key.pubkey.point.x(), size=key_size).decode("ASCII"), + "y": long_to_base64(public_key.pubkey.point.y(), size=key_size).decode("ASCII"), + } + + if not self.is_public(): + data["d"] = long_to_base64(self.prepared_key.privkey.secret_multiplier, size=key_size).decode("ASCII") + + return data diff --git a/myenv/lib/python3.9/site-packages/jose/backends/native.py b/myenv/lib/python3.9/site-packages/jose/backends/native.py new file mode 100644 index 0000000..eb3a6ae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/native.py @@ -0,0 +1,76 @@ +import hashlib +import hmac +import os + +from jose.backends.base import Key +from jose.constants import ALGORITHMS +from jose.exceptions import JWKError +from jose.utils import base64url_decode, base64url_encode + + +def get_random_bytes(num_bytes): + return bytes(os.urandom(num_bytes)) + + +class HMACKey(Key): + """ + Performs signing and verification operations using HMAC + and the specified hash function. + """ + + HASHES = {ALGORITHMS.HS256: hashlib.sha256, ALGORITHMS.HS384: hashlib.sha384, ALGORITHMS.HS512: hashlib.sha512} + + def __init__(self, key, algorithm): + if algorithm not in ALGORITHMS.HMAC: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + self._algorithm = algorithm + self._hash_alg = self.HASHES.get(algorithm) + + if isinstance(key, dict): + self.prepared_key = self._process_jwk(key) + return + + if not isinstance(key, str) and not isinstance(key, bytes): + raise JWKError("Expecting a string- or bytes-formatted key.") + + if isinstance(key, str): + key = key.encode("utf-8") + + invalid_strings = [ + b"-----BEGIN PUBLIC KEY-----", + b"-----BEGIN RSA PUBLIC KEY-----", + b"-----BEGIN CERTIFICATE-----", + b"ssh-rsa", + ] + + if any(string_value in key for string_value in invalid_strings): + raise JWKError( + "The specified key is an asymmetric key or x509 certificate and" + " should not be used as an HMAC secret." + ) + + self.prepared_key = key + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "oct": + raise JWKError("Incorrect key type. 
Expected: 'oct', Received: %s" % jwk_dict.get("kty")) + + k = jwk_dict.get("k") + k = k.encode("utf-8") + k = bytes(k) + k = base64url_decode(k) + + return k + + def sign(self, msg): + return hmac.new(self.prepared_key, msg, self._hash_alg).digest() + + def verify(self, msg, sig): + return hmac.compare_digest(sig, self.sign(msg)) + + def to_dict(self): + return { + "alg": self._algorithm, + "kty": "oct", + "k": base64url_encode(self.prepared_key).decode("ASCII"), + } diff --git a/myenv/lib/python3.9/site-packages/jose/backends/rsa_backend.py b/myenv/lib/python3.9/site-packages/jose/backends/rsa_backend.py new file mode 100644 index 0000000..4e8ccf1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/backends/rsa_backend.py @@ -0,0 +1,284 @@ +import binascii +import warnings + +import rsa as pyrsa +import rsa.pem as pyrsa_pem +from pyasn1.error import PyAsn1Error +from rsa import DecryptionError + +from jose.backends._asn1 import ( + rsa_private_key_pkcs1_to_pkcs8, + rsa_private_key_pkcs8_to_pkcs1, + rsa_public_key_pkcs1_to_pkcs8, +) +from jose.backends.base import Key +from jose.constants import ALGORITHMS +from jose.exceptions import JWEError, JWKError +from jose.utils import base64_to_long, long_to_base64 + +ALGORITHMS.SUPPORTED.remove(ALGORITHMS.RSA_OAEP) # RSA OAEP not supported + +LEGACY_INVALID_PKCS8_RSA_HEADER = binascii.unhexlify( + "30" # sequence + "8204BD" # DER-encoded sequence contents length of 1213 bytes -- INCORRECT STATIC LENGTH + "020100" # integer: 0 -- Version + "30" # sequence + "0D" # DER-encoded sequence contents length of 13 bytes -- PrivateKeyAlgorithmIdentifier + "06092A864886F70D010101" # OID -- rsaEncryption + "0500" # NULL -- parameters +) +ASN1_SEQUENCE_ID = binascii.unhexlify("30") +RSA_ENCRYPTION_ASN1_OID = "1.2.840.113549.1.1.1" + +# Functions gcd and rsa_recover_prime_factors were copied from cryptography 1.9 +# to enable pure python rsa module to be in compliance with section 6.3.1 of RFC7518 +# which requires only private exponent (d) for private key. + + +def _gcd(a, b): + """Calculate the Greatest Common Divisor of a and b. + + Unless b==0, the result will have the same sign as b (so that when + b is divided by it, the result comes out positive). + """ + while b: + a, b = b, (a % b) + return a + + +# Controls the number of iterations rsa_recover_prime_factors will perform +# to obtain the prime factors. Each iteration increments by 2 so the actual +# maximum attempts is half this number. +_MAX_RECOVERY_ATTEMPTS = 1000 + + +def _rsa_recover_prime_factors(n, e, d): + """ + Compute factors p and q from the private exponent d. We assume that n has + no more than two factors. This function is adapted from code in PyCrypto. + """ + # See 8.2.2(i) in Handbook of Applied Cryptography. + ktot = d * e - 1 + # The quantity d*e-1 is a multiple of phi(n), even, + # and can be represented as t*2^s. + t = ktot + while t % 2 == 0: + t = t // 2 + # Cycle through all multiplicative inverses in Zn. + # The algorithm is non-deterministic, but there is a 50% chance + # any candidate a leads to successful factoring. + # See "Digitalized Signatures and Public Key Functions as Intractable + # as Factorization", M. 
Rabin, 1979 + spotted = False + a = 2 + while not spotted and a < _MAX_RECOVERY_ATTEMPTS: + k = t + # Cycle through all values a^{t*2^i}=a^k + while k < ktot: + cand = pow(a, k, n) + # Check if a^k is a non-trivial root of unity (mod n) + if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1: + # We have found a number such that (cand-1)(cand+1)=0 (mod n). + # Either of the terms divides n. + p = _gcd(cand + 1, n) + spotted = True + break + k *= 2 + # This value was not any good... let's try another! + a += 2 + if not spotted: + raise ValueError("Unable to compute factors p and q from exponent d.") + # Found ! + q, r = divmod(n, p) + assert r == 0 + p, q = sorted((p, q), reverse=True) + return (p, q) + + +def pem_to_spki(pem, fmt="PKCS8"): + key = RSAKey(pem, ALGORITHMS.RS256) + return key.to_pem(fmt) + + +def _legacy_private_key_pkcs8_to_pkcs1(pkcs8_key): + """Legacy RSA private key PKCS8-to-PKCS1 conversion. + + .. warning:: + + This is incorrect parsing and only works because the legacy PKCS1-to-PKCS8 + encoding was also incorrect. + """ + # Only allow this processing if the prefix matches + # AND the following byte indicates an ASN1 sequence, + # as we would expect with the legacy encoding. + if not pkcs8_key.startswith(LEGACY_INVALID_PKCS8_RSA_HEADER + ASN1_SEQUENCE_ID): + raise ValueError("Invalid private key encoding") + + return pkcs8_key[len(LEGACY_INVALID_PKCS8_RSA_HEADER) :] + + +class RSAKey(Key): + SHA256 = "SHA-256" + SHA384 = "SHA-384" + SHA512 = "SHA-512" + + def __init__(self, key, algorithm): + if algorithm not in ALGORITHMS.RSA: + raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) + + if algorithm in ALGORITHMS.RSA_KW and algorithm != ALGORITHMS.RSA1_5: + raise JWKError("alg: %s is not supported by the RSA backend" % algorithm) + + self.hash_alg = { + ALGORITHMS.RS256: self.SHA256, + ALGORITHMS.RS384: self.SHA384, + ALGORITHMS.RS512: self.SHA512, + }.get(algorithm) + self._algorithm = algorithm + + if isinstance(key, dict): + self._prepared_key = self._process_jwk(key) + return + + if isinstance(key, (pyrsa.PublicKey, pyrsa.PrivateKey)): + self._prepared_key = key + return + + if isinstance(key, str): + key = key.encode("utf-8") + + if isinstance(key, bytes): + try: + self._prepared_key = pyrsa.PublicKey.load_pkcs1(key) + except ValueError: + try: + self._prepared_key = pyrsa.PublicKey.load_pkcs1_openssl_pem(key) + except ValueError: + try: + self._prepared_key = pyrsa.PrivateKey.load_pkcs1(key) + except ValueError: + try: + der = pyrsa_pem.load_pem(key, b"PRIVATE KEY") + try: + pkcs1_key = rsa_private_key_pkcs8_to_pkcs1(der) + except PyAsn1Error: + # If the key was encoded using the old, invalid, + # encoding then pyasn1 will throw an error attempting + # to parse the key. + pkcs1_key = _legacy_private_key_pkcs8_to_pkcs1(der) + self._prepared_key = pyrsa.PrivateKey.load_pkcs1(pkcs1_key, format="DER") + except ValueError as e: + raise JWKError(e) + return + raise JWKError("Unable to parse an RSA_JWK from key: %s" % key) + + def _process_jwk(self, jwk_dict): + if not jwk_dict.get("kty") == "RSA": + raise JWKError("Incorrect key type. Expected: 'RSA', Received: %s" % jwk_dict.get("kty")) + + e = base64_to_long(jwk_dict.get("e")) + n = base64_to_long(jwk_dict.get("n")) + + if "d" not in jwk_dict: + return pyrsa.PublicKey(e=e, n=n) + else: + d = base64_to_long(jwk_dict.get("d")) + extra_params = ["p", "q", "dp", "dq", "qi"] + + if any(k in jwk_dict for k in extra_params): + # Precomputed private key parameters are available. 
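+                # (Editorial note, not upstream: per RFC 7518 §6.3.2 these are
+                # the CRT values dp = d mod (p - 1), dq = d mod (q - 1) and
+                # qi, the CRT inverse of q mod p; python-rsa recomputes them
+                # from p and q itself, so only p and q are passed below.)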
+ if not all(k in jwk_dict for k in extra_params): + # These values must be present when 'p' is according to + # Section 6.3.2 of RFC7518, so if they are not we raise + # an error. + raise JWKError("Precomputed private key parameters are incomplete.") + + p = base64_to_long(jwk_dict["p"]) + q = base64_to_long(jwk_dict["q"]) + return pyrsa.PrivateKey(e=e, n=n, d=d, p=p, q=q) + else: + p, q = _rsa_recover_prime_factors(n, e, d) + return pyrsa.PrivateKey(n=n, e=e, d=d, p=p, q=q) + + def sign(self, msg): + return pyrsa.sign(msg, self._prepared_key, self.hash_alg) + + def verify(self, msg, sig): + if not self.is_public(): + warnings.warn("Attempting to verify a message with a private key. " "This is not recommended.") + try: + pyrsa.verify(msg, sig, self._prepared_key) + return True + except pyrsa.pkcs1.VerificationError: + return False + + def is_public(self): + return isinstance(self._prepared_key, pyrsa.PublicKey) + + def public_key(self): + if isinstance(self._prepared_key, pyrsa.PublicKey): + return self + return self.__class__(pyrsa.PublicKey(n=self._prepared_key.n, e=self._prepared_key.e), self._algorithm) + + def to_pem(self, pem_format="PKCS8"): + + if isinstance(self._prepared_key, pyrsa.PrivateKey): + der = self._prepared_key.save_pkcs1(format="DER") + if pem_format == "PKCS8": + pkcs8_der = rsa_private_key_pkcs1_to_pkcs8(der) + pem = pyrsa_pem.save_pem(pkcs8_der, pem_marker="PRIVATE KEY") + elif pem_format == "PKCS1": + pem = pyrsa_pem.save_pem(der, pem_marker="RSA PRIVATE KEY") + else: + raise ValueError(f"Invalid pem format specified: {pem_format!r}") + else: + if pem_format == "PKCS8": + pkcs1_der = self._prepared_key.save_pkcs1(format="DER") + pkcs8_der = rsa_public_key_pkcs1_to_pkcs8(pkcs1_der) + pem = pyrsa_pem.save_pem(pkcs8_der, pem_marker="PUBLIC KEY") + elif pem_format == "PKCS1": + der = self._prepared_key.save_pkcs1(format="DER") + pem = pyrsa_pem.save_pem(der, pem_marker="RSA PUBLIC KEY") + else: + raise ValueError(f"Invalid pem format specified: {pem_format!r}") + return pem + + def to_dict(self): + if not self.is_public(): + public_key = self.public_key()._prepared_key + else: + public_key = self._prepared_key + + data = { + "alg": self._algorithm, + "kty": "RSA", + "n": long_to_base64(public_key.n).decode("ASCII"), + "e": long_to_base64(public_key.e).decode("ASCII"), + } + + if not self.is_public(): + data.update( + { + "d": long_to_base64(self._prepared_key.d).decode("ASCII"), + "p": long_to_base64(self._prepared_key.p).decode("ASCII"), + "q": long_to_base64(self._prepared_key.q).decode("ASCII"), + "dp": long_to_base64(self._prepared_key.exp1).decode("ASCII"), + "dq": long_to_base64(self._prepared_key.exp2).decode("ASCII"), + "qi": long_to_base64(self._prepared_key.coef).decode("ASCII"), + } + ) + + return data + + def wrap_key(self, key_data): + if not self.is_public(): + warnings.warn("Attempting to encrypt a message with a private key." 
" This is not recommended.") + wrapped_key = pyrsa.encrypt(key_data, self._prepared_key) + return wrapped_key + + def unwrap_key(self, wrapped_key): + try: + unwrapped_key = pyrsa.decrypt(wrapped_key, self._prepared_key) + except DecryptionError as e: + raise JWEError(e) + return unwrapped_key diff --git a/myenv/lib/python3.9/site-packages/jose/constants.py b/myenv/lib/python3.9/site-packages/jose/constants.py new file mode 100644 index 0000000..ab4d74d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/constants.py @@ -0,0 +1,98 @@ +import hashlib + + +class Algorithms: + # DS Algorithms + NONE = "none" + HS256 = "HS256" + HS384 = "HS384" + HS512 = "HS512" + RS256 = "RS256" + RS384 = "RS384" + RS512 = "RS512" + ES256 = "ES256" + ES384 = "ES384" + ES512 = "ES512" + + # Content Encryption Algorithms + A128CBC_HS256 = "A128CBC-HS256" + A192CBC_HS384 = "A192CBC-HS384" + A256CBC_HS512 = "A256CBC-HS512" + A128GCM = "A128GCM" + A192GCM = "A192GCM" + A256GCM = "A256GCM" + + # Pseudo algorithm for encryption + A128CBC = "A128CBC" + A192CBC = "A192CBC" + A256CBC = "A256CBC" + + # CEK Encryption Algorithms + DIR = "dir" + RSA1_5 = "RSA1_5" + RSA_OAEP = "RSA-OAEP" + RSA_OAEP_256 = "RSA-OAEP-256" + A128KW = "A128KW" + A192KW = "A192KW" + A256KW = "A256KW" + ECDH_ES = "ECDH-ES" + ECDH_ES_A128KW = "ECDH-ES+A128KW" + ECDH_ES_A192KW = "ECDH-ES+A192KW" + ECDH_ES_A256KW = "ECDH-ES+A256KW" + A128GCMKW = "A128GCMKW" + A192GCMKW = "A192GCMKW" + A256GCMKW = "A256GCMKW" + PBES2_HS256_A128KW = "PBES2-HS256+A128KW" + PBES2_HS384_A192KW = "PBES2-HS384+A192KW" + PBES2_HS512_A256KW = "PBES2-HS512+A256KW" + + # Compression Algorithms + DEF = "DEF" + + HMAC = {HS256, HS384, HS512} + RSA_DS = {RS256, RS384, RS512} + RSA_KW = {RSA1_5, RSA_OAEP, RSA_OAEP_256} + RSA = RSA_DS.union(RSA_KW) + EC_DS = {ES256, ES384, ES512} + EC_KW = {ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW} + EC = EC_DS.union(EC_KW) + AES_PSEUDO = {A128CBC, A192CBC, A256CBC, A128GCM, A192GCM, A256GCM} + AES_JWE_ENC = {A128CBC_HS256, A192CBC_HS384, A256CBC_HS512, A128GCM, A192GCM, A256GCM} + AES_ENC = AES_JWE_ENC.union(AES_PSEUDO) + AES_KW = {A128KW, A192KW, A256KW} + AEC_GCM_KW = {A128GCMKW, A192GCMKW, A256GCMKW} + AES = AES_ENC.union(AES_KW) + PBES2_KW = {PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW} + + HMAC_AUTH_TAG = {A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} + GCM = {A128GCM, A192GCM, A256GCM} + + SUPPORTED = HMAC.union(RSA_DS).union(EC_DS).union([DIR]).union(AES_JWE_ENC).union(RSA_KW).union(AES_KW) + + ALL = SUPPORTED.union([NONE]).union(AEC_GCM_KW).union(EC_KW).union(PBES2_KW) + + HASHES = { + HS256: hashlib.sha256, + HS384: hashlib.sha384, + HS512: hashlib.sha512, + RS256: hashlib.sha256, + RS384: hashlib.sha384, + RS512: hashlib.sha512, + ES256: hashlib.sha256, + ES384: hashlib.sha384, + ES512: hashlib.sha512, + } + + KEYS = {} + + +ALGORITHMS = Algorithms() + + +class Zips: + DEF = "DEF" + NONE = None + SUPPORTED = {DEF, NONE} + + +ZIPS = Zips() diff --git a/myenv/lib/python3.9/site-packages/jose/exceptions.py b/myenv/lib/python3.9/site-packages/jose/exceptions.py new file mode 100644 index 0000000..e8edc3b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/exceptions.py @@ -0,0 +1,59 @@ +class JOSEError(Exception): + pass + + +class JWSError(JOSEError): + pass + + +class JWSSignatureError(JWSError): + pass + + +class JWSAlgorithmError(JWSError): + pass + + +class JWTError(JOSEError): + pass + + +class JWTClaimsError(JWTError): + pass + + +class ExpiredSignatureError(JWTError): + pass + + +class 
JWKError(JOSEError):
+    pass
+
+
+class JWEError(JOSEError):
+    """Base error for all JWE errors"""
+
+    pass
+
+
+class JWEParseError(JWEError):
+    """Could not parse the JWE string provided"""
+
+    pass
+
+
+class JWEInvalidAuth(JWEError):
+    """
+    The authentication tag did not match the protected sections of the
+    JWE string provided
+    """
+
+    pass
+
+
+class JWEAlgorithmUnsupportedError(JWEError):
+    """
+    The JWE algorithm is not supported by the backend
+    """
+
+    pass
diff --git a/myenv/lib/python3.9/site-packages/jose/jwe.py b/myenv/lib/python3.9/site-packages/jose/jwe.py
new file mode 100644
index 0000000..2c387ff
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/jose/jwe.py
@@ -0,0 +1,607 @@
+import binascii
+import json
+import zlib
+from collections.abc import Mapping
+from struct import pack
+
+from . import jwk
+from .backends import get_random_bytes
+from .constants import ALGORITHMS, ZIPS
+from .exceptions import JWEError, JWEParseError
+from .utils import base64url_decode, base64url_encode, ensure_binary
+
+
+def encrypt(plaintext, key, encryption=ALGORITHMS.A256GCM, algorithm=ALGORITHMS.DIR, zip=None, cty=None, kid=None):
+    """Encrypts plaintext and returns a JWE compact serialization string.
+
+    Args:
+        plaintext (bytes): A bytes object to encrypt
+        key (str or dict): The key(s) to use for encrypting the content. Can be
+            individual JWK or JWK set.
+        encryption (str, optional): The content encryption algorithm used to
+            perform authenticated encryption on the plaintext to produce the
+            ciphertext and the Authentication Tag. Defaults to A256GCM.
+        algorithm (str, optional): The cryptographic algorithm used
+            to encrypt or determine the value of the CEK. Defaults to dir.
+        zip (str, optional): The compression algorithm applied to the
+            plaintext before encryption. Defaults to None.
+        cty (str, optional): The media type for the secured content.
+            See http://www.iana.org/assignments/media-types/media-types.xhtml
+        kid (str, optional): Key ID for the provided key
+
+    Returns:
+        bytes: The string representation of the header, encrypted key,
+            initialization vector, ciphertext, and authentication tag.
+
+    Raises:
+        JWEError: If there is an error encrypting the token.
+
+    Examples:
+        >>> from jose import jwe
+        >>> jwe.encrypt('Hello, World!', 'asecret128bitkey', algorithm='dir', encryption='A128GCM')
+        'eyJhbGciOiJkaXIiLCJlbmMiOiJBMTI4R0NNIn0..McILMB3dYsNJSuhcDzQshA.OfX9H_mcUpHDeRM4IA.CcnTWqaqxNsjT4eCaUABSg'
+
+    """
+    plaintext = ensure_binary(plaintext)  # Make sure it's bytes
+    if algorithm not in ALGORITHMS.SUPPORTED:
+        raise JWEError("Algorithm %s not supported." % algorithm)
+    if encryption not in ALGORITHMS.SUPPORTED:
+        raise JWEError("Algorithm %s not supported." % encryption)
+    key = jwk.construct(key, algorithm)
+    encoded_header = _encoded_header(algorithm, encryption, zip, cty, kid)
+
+    plaintext = _compress(zip, plaintext)
+    enc_cek, iv, cipher_text, auth_tag = _encrypt_and_auth(key, algorithm, encryption, zip, plaintext, encoded_header)
+
+    jwe_string = _jwe_compact_serialize(encoded_header, enc_cek, iv, cipher_text, auth_tag)
+    return jwe_string
+
+
+def decrypt(jwe_str, key):
+    """Decrypts a JWE compact serialized string and returns the plaintext.
+
+    Args:
+        jwe_str (str): A JWE to be decrypted.
+        key (str or dict): A key to attempt to decrypt the payload with. Can be
+            individual JWK or JWK set.
+
+    Returns:
+        bytes: The plaintext bytes, assuming the authentication tag is valid.
+
+    Raises:
+        JWEError: If there is an exception verifying the token.
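+
+    Note:
+        (Editorial addition) If the wrapped CEK cannot be recovered, the
+        implementation below substitutes a random CEK to blunt the timing
+        attacks described in RFC 3218 and returns None instead of raising.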
+ + Examples: + >>> from jose import jwe + >>> jwe.decrypt(jwe_string, 'asecret128bitkey') + 'Hello, World!' + """ + header, encoded_header, encrypted_key, iv, cipher_text, auth_tag = _jwe_compact_deserialize(jwe_str) + + # Verify that the implementation understands and can process all + # fields that it is required to support, whether required by this + # specification, by the algorithms being used, or by the "crit" + # Header Parameter value, and that the values of those parameters + # are also understood and supported. + + try: + # Determine the Key Management Mode employed by the algorithm + # specified by the "alg" (algorithm) Header Parameter. + alg = header["alg"] + enc = header["enc"] + if alg not in ALGORITHMS.SUPPORTED: + raise JWEError("Algorithm %s not supported." % alg) + if enc not in ALGORITHMS.SUPPORTED: + raise JWEError("Algorithm %s not supported." % enc) + + except KeyError: + raise JWEParseError("alg and enc headers are required!") + + # Verify that the JWE uses a key known to the recipient. + key = jwk.construct(key, alg) + + # When Direct Key Agreement or Key Agreement with Key Wrapping are + # employed, use the key agreement algorithm to compute the value + # of the agreed upon key. When Direct Key Agreement is employed, + # let the CEK be the agreed upon key. When Key Agreement with Key + # Wrapping is employed, the agreed upon key will be used to + # decrypt the JWE Encrypted Key. + # + # When Key Wrapping, Key Encryption, or Key Agreement with Key + # Wrapping are employed, decrypt the JWE Encrypted Key to produce + # the CEK. The CEK MUST have a length equal to that required for + # the content encryption algorithm. Note that when there are + # multiple recipients, each recipient will only be able to decrypt + # JWE Encrypted Key values that were encrypted to a key in that + # recipient's possession. It is therefore normal to only be able + # to decrypt one of the per-recipient JWE Encrypted Key values to + # obtain the CEK value. Also, see Section 11.5 for security + # considerations on mitigating timing attacks. + if alg == ALGORITHMS.DIR: + # When Direct Key Agreement or Direct Encryption are employed, + # verify that the JWE Encrypted Key value is an empty octet + # sequence. + + # Record whether the CEK could be successfully determined for this + # recipient or not. + cek_valid = encrypted_key == b"" + + # When Direct Encryption is employed, let the CEK be the shared + # symmetric key. + cek_bytes = _get_key_bytes_from_key(key) + else: + try: + cek_bytes = key.unwrap_key(encrypted_key) + + # Record whether the CEK could be successfully determined for this + # recipient or not. + cek_valid = True + except NotImplementedError: + raise JWEError(f"alg {alg} is not implemented") + except Exception: + # Record whether the CEK could be successfully determined for this + # recipient or not. + cek_valid = False + + # To mitigate the attacks described in RFC 3218 [RFC3218], the + # recipient MUST NOT distinguish between format, padding, and length + # errors of encrypted keys. It is strongly recommended, in the event + # of receiving an improperly formatted key, that the recipient + # substitute a randomly generated CEK and proceed to the next step, to + # mitigate timing attacks. + cek_bytes = _get_random_cek_bytes_for_enc(enc) + + # Compute the Encoded Protected Header value BASE64URL(UTF8(JWE + # Protected Header)). 
If the JWE Protected Header is not present + # (which can only happen when using the JWE JSON Serialization and + # no "protected" member is present), let this value be the empty + # string. + protected_header = encoded_header + + # Let the Additional Authenticated Data encryption parameter be + # ASCII(Encoded Protected Header). However, if a JWE AAD value is + # present (which can only be the case when using the JWE JSON + # Serialization), instead let the Additional Authenticated Data + # encryption parameter be ASCII(Encoded Protected Header || '.' || + # BASE64URL(JWE AAD)). + aad = protected_header + + # Decrypt the JWE Ciphertext using the CEK, the JWE Initialization + # Vector, the Additional Authenticated Data value, and the JWE + # Authentication Tag (which is the Authentication Tag input to the + # calculation) using the specified content encryption algorithm, + # returning the decrypted plaintext and validating the JWE + # Authentication Tag in the manner specified for the algorithm, + # rejecting the input without emitting any decrypted output if the + # JWE Authentication Tag is incorrect. + try: + plain_text = _decrypt_and_auth(cek_bytes, enc, cipher_text, iv, aad, auth_tag) + except NotImplementedError: + raise JWEError(f"enc {enc} is not implemented") + except Exception as e: + raise JWEError(e) + + # If a "zip" parameter was included, uncompress the decrypted + # plaintext using the specified compression algorithm. + if plain_text is not None: + plain_text = _decompress(header.get("zip"), plain_text) + + return plain_text if cek_valid else None + + +def get_unverified_header(jwe_str): + """Returns the decoded headers without verification of any kind. + + Args: + jwe_str (str): A compact serialized JWE to decode the headers from. + + Returns: + dict: The dict representation of the JWE headers. + + Raises: + JWEError: If there is an exception decoding the JWE. 
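+
+    Examples:
+        (Editorial sketch: jwe_string as produced by the encrypt() example)
+
+        >>> from jose import jwe
+        >>> jwe.get_unverified_header(jwe_string)
+        {'alg': 'dir', 'enc': 'A128GCM'}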
+    """
+    header = _jwe_compact_deserialize(jwe_str)[0]
+    return header
+
+
+def _decrypt_and_auth(cek_bytes, enc, cipher_text, iv, aad, auth_tag):
+    """
+    Decrypt and verify the data
+
+    Args:
+        cek_bytes (bytes): cek to derive encryption and possible auth key to
+            verify the auth tag
+        enc (str): The content encryption algorithm in use
+        cipher_text (bytes): Encrypted data
+        iv (bytes): Initialization vector (iv) used to encrypt data
+        aad (bytes): Additional Authenticated Data used to verify the data
+        auth_tag (bytes): Authentication tag to verify the data
+
+    Returns:
+        (bytes): Decrypted data
+    """
+    # Decrypt the JWE Ciphertext using the CEK, the JWE Initialization
+    # Vector, the Additional Authenticated Data value, and the JWE
+    # Authentication Tag (which is the Authentication Tag input to the
+    # calculation) using the specified content encryption algorithm,
+    # returning the decrypted plaintext and validating the JWE
+    # Authentication Tag in the manner specified for the algorithm.
+    if enc in ALGORITHMS.HMAC_AUTH_TAG:
+        encryption_key, mac_key, key_len = _get_encryption_key_mac_key_and_key_length_from_cek(cek_bytes, enc)
+        auth_tag_check = _auth_tag(cipher_text, iv, aad, mac_key, key_len)
+    elif enc in ALGORITHMS.GCM:
+        encryption_key = jwk.construct(cek_bytes, enc)
+        auth_tag_check = auth_tag  # GCM checks auth on decrypt
+    else:
+        raise NotImplementedError(f"enc {enc} is not implemented!")
+
+    plaintext = encryption_key.decrypt(cipher_text, iv, aad, auth_tag)
+    if auth_tag != auth_tag_check:
+        raise JWEError("Invalid JWE Auth Tag")
+
+    return plaintext
+
+
+def _get_encryption_key_mac_key_and_key_length_from_cek(cek_bytes, enc):
+    derived_key_len = len(cek_bytes) // 2
+    mac_key_bytes = cek_bytes[0:derived_key_len]
+    mac_key = _get_hmac_key(enc, mac_key_bytes)
+    encryption_key_bytes = cek_bytes[-derived_key_len:]
+    encryption_alg, _ = enc.split("-")
+    encryption_key = jwk.construct(encryption_key_bytes, encryption_alg)
+    return encryption_key, mac_key, derived_key_len
+
+
+def _jwe_compact_deserialize(jwe_bytes):
+    """
+    Deserialize and verify the header and segments are appropriate.
+
+    Args:
+        jwe_bytes (bytes): The compact serialized JWE
+    Returns:
+        (dict, bytes, bytes, bytes, bytes, bytes)
+    """
+
+    # Base64url decode the encoded representations of the JWE
+    # Protected Header, the JWE Encrypted Key, the JWE Initialization
+    # Vector, the JWE Ciphertext, the JWE Authentication Tag, and the
+    # JWE AAD, following the restriction that no line breaks,
+    # whitespace, or other additional characters have been used.
+    jwe_bytes = ensure_binary(jwe_bytes)
+    try:
+        header_segment, encrypted_key_segment, iv_segment, cipher_text_segment, auth_tag_segment = jwe_bytes.split(
+            b".", 4
+        )
+        header_data = base64url_decode(header_segment)
+    except ValueError:
+        raise JWEParseError("Not enough segments")
+    except (TypeError, binascii.Error):
+        raise JWEParseError("Invalid header")
+
+    # Verify that the octet sequence resulting from decoding the
+    # encoded JWE Protected Header is a UTF-8-encoded representation
+    # of a completely valid JSON object conforming to RFC 7159
+    # [RFC7159]; let the JWE Protected Header be this JSON object.
+    #
+    # If using the JWE Compact Serialization, let the JOSE Header be
+    # the JWE Protected Header.  Otherwise, when using the JWE JSON
+    # Serialization, let the JOSE Header be the union of the members
+    # of the JWE Protected Header, the JWE Shared Unprotected Header
+    # and the corresponding JWE Per-Recipient Unprotected Header, all
+    # of which must be completely valid JSON objects. 
During this + # step, verify that the resulting JOSE Header does not contain + # duplicate Header Parameter names. When using the JWE JSON + # Serialization, this restriction includes that the same Header + # Parameter name also MUST NOT occur in distinct JSON object + # values that together comprise the JOSE Header. + + try: + header = json.loads(header_data) + except ValueError as e: + raise JWEParseError(f"Invalid header string: {e}") + + if not isinstance(header, Mapping): + raise JWEParseError("Invalid header string: must be a json object") + + try: + encrypted_key = base64url_decode(encrypted_key_segment) + except (TypeError, binascii.Error): + raise JWEParseError("Invalid encrypted key") + + try: + iv = base64url_decode(iv_segment) + except (TypeError, binascii.Error): + raise JWEParseError("Invalid IV") + + try: + ciphertext = base64url_decode(cipher_text_segment) + except (TypeError, binascii.Error): + raise JWEParseError("Invalid cyphertext") + + try: + auth_tag = base64url_decode(auth_tag_segment) + except (TypeError, binascii.Error): + raise JWEParseError("Invalid auth tag") + + return header, header_segment, encrypted_key, iv, ciphertext, auth_tag + + +def _encoded_header(alg, enc, zip, cty, kid): + """ + Generate an appropriate JOSE header based on the values provided + Args: + alg (str): Key wrap/negotiation algorithm + enc (str): Encryption algorithm + zip (str): Compression method + cty (str): Content type of the encrypted data + kid (str): ID for the key used for the operation + + Returns: + bytes: JSON object of header based on input + """ + header = {"alg": alg, "enc": enc} + if zip: + header["zip"] = zip + if cty: + header["cty"] = cty + if kid: + header["kid"] = kid + json_header = json.dumps( + header, + separators=(",", ":"), + sort_keys=True, + ).encode("utf-8") + return base64url_encode(json_header) + + +def _big_endian(int_val): + return pack("!Q", int_val) + + +def _encrypt_and_auth(key, alg, enc, zip, plaintext, aad): + """ + Generate a content encryption key (cek) and initialization + vector (iv) based on enc and alg, compress the plaintext based on zip, + encrypt the compressed plaintext using the cek and iv based on enc + + Args: + key (Key): The key provided for encryption + alg (str): The algorithm use for key wrap/negotiation + enc (str): The encryption algorithm with which to encrypt the plaintext + zip (str): The compression algorithm with which to compress the plaintext + plaintext (bytes): The data to encrypt + aad (str): Additional authentication data utilized for generating an + auth tag + + Returns: + (bytes, bytes, bytes, bytes): A tuple of the following data + (key wrapped cek, iv, cipher text, auth tag) + """ + try: + cek_bytes, kw_cek = _get_cek(enc, alg, key) + except NotImplementedError: + raise JWEError(f"alg {alg} is not implemented") + + if enc in ALGORITHMS.HMAC_AUTH_TAG: + encryption_key, mac_key, key_len = _get_encryption_key_mac_key_and_key_length_from_cek(cek_bytes, enc) + iv, ciphertext, tag = encryption_key.encrypt(plaintext, aad) + auth_tag = _auth_tag(ciphertext, iv, aad, mac_key, key_len) + elif enc in ALGORITHMS.GCM: + encryption_key = jwk.construct(cek_bytes, enc) + iv, ciphertext, auth_tag = encryption_key.encrypt(plaintext, aad) + else: + raise NotImplementedError(f"enc {enc} is not implemented!") + + return kw_cek, iv, ciphertext, auth_tag + + +def _get_hmac_key(enc, mac_key_bytes): + """ + Get an HMACKey for the provided encryption algorithm and key bytes + + Args: + enc (str): Encryption algorithm + mac_key_bytes (bytes): 
bytes for the HMAC key
+
+    Returns:
+        (HMACKey): The key to perform HMAC actions
+    """
+    _, hash_alg = enc.split("-")
+    mac_key = jwk.construct(mac_key_bytes, hash_alg)
+    return mac_key
+
+
+def _compress(zip, plaintext):
+    """
+    Compress the plaintext based on the algorithm supplied
+
+    Args:
+        zip (str): Compression Algorithm
+        plaintext (bytes): plaintext to compress
+
+    Returns:
+        (bytes): Compressed plaintext
+    """
+    if zip not in ZIPS.SUPPORTED:
+        raise NotImplementedError("ZIP {} is not supported!".format(zip))
+    if zip is None:
+        compressed = plaintext
+    elif zip == ZIPS.DEF:
+        compressed = zlib.compress(plaintext)
+    else:
+        raise NotImplementedError("ZIP {} is not implemented!".format(zip))
+    return compressed
+
+
+def _decompress(zip, compressed):
+    """
+    Decompress the data based on the algorithm supplied
+
+    Args:
+        zip (str): Compression Algorithm
+        compressed (bytes): data to decompress
+
+    Returns:
+        (bytes): Decompressed plaintext
+    """
+    if zip not in ZIPS.SUPPORTED:
+        raise NotImplementedError("ZIP {} is not supported!".format(zip))
+    if zip is None:
+        decompressed = compressed
+    elif zip == ZIPS.DEF:
+        decompressed = zlib.decompress(compressed)
+    else:
+        raise NotImplementedError("ZIP {} is not implemented!".format(zip))
+    return decompressed
+
+
+def _get_cek(enc, alg, key):
+    """
+    Get the content encryption key
+
+    Args:
+        enc (str): Encryption algorithm
+        alg (str): key wrap/negotiation algorithm
+        key (Key): Key provided to encryption method
+
+    Return:
+        (bytes, bytes): Tuple of (cek bytes and wrapped cek)
+    """
+    if alg == ALGORITHMS.DIR:
+        cek, wrapped_cek = _get_direct_key_wrap_cek(key)
+    else:
+        cek, wrapped_cek = _get_key_wrap_cek(enc, key)
+
+    return cek, wrapped_cek
+
+
+def _get_direct_key_wrap_cek(key):
+    """
+    Get the cek and wrapped cek for direct encryption
+
+    Args:
+        key (Key): Key provided to encryption method
+
+    Return:
+        (Key, bytes): Tuple of (cek Key object and wrapped cek)
+    """
+    # Get the JWK data to determine how to derive the cek
+    jwk_data = key.to_dict()
+    if jwk_data["kty"] == "oct":
+        # Use the raw bytes of the 'oct' key directly as the cek
+        cek_bytes = _get_key_bytes_from_key(key)
+        wrapped_cek = b""
+    else:
+        raise NotImplementedError("JWK type {} not supported!".format(jwk_data["kty"]))
+    return cek_bytes, wrapped_cek
+
+
+def _get_key_bytes_from_key(key):
+    """
+    Get the raw key bytes from a Key object
+
+    Args:
+        key (Key): Key from which to extract the raw key bytes
+    Returns:
+        (bytes) key data
+    """
+    jwk_data = key.to_dict()
+    encoded_key = jwk_data["k"]
+    cek_bytes = base64url_decode(encoded_key)
+    return cek_bytes
+
+
+def _get_key_wrap_cek(enc, key):
+    """
+    Get the content encryption key for key wrapping
+
+    Args:
+        enc (str): Encryption algorithm
+        key (Key): Key provided to encryption method
+
+    Returns:
+        (Key, bytes): Tuple of (cek Key object and wrapped cek)
+    """
+    cek_bytes = _get_random_cek_bytes_for_enc(enc)
+    wrapped_cek = key.wrap_key(cek_bytes)
+    return cek_bytes, wrapped_cek
+
+
+def _get_random_cek_bytes_for_enc(enc):
+    """
+    Get the random cek bytes based on the encryption algorithm
+
+    Args:
+        enc (str): Encryption algorithm
+
+    Returns:
+        (bytes) random bytes for cek key
+    """
+    if enc == ALGORITHMS.A128GCM:
+        num_bits = 128
+    elif enc == ALGORITHMS.A192GCM:
+        num_bits = 192
+    elif enc in (ALGORITHMS.A128CBC_HS256, ALGORITHMS.A256GCM):
+        num_bits = 256
+    elif enc == ALGORITHMS.A192CBC_HS384:
+        num_bits = 384
+    elif enc == ALGORITHMS.A256CBC_HS512:
+        num_bits = 512
+    else:
+        raise NotImplementedError(f"{enc} not supported")
+    cek_bytes = get_random_bytes(num_bits // 8)
+    return cek_bytes
+
+
+def _auth_tag(ciphertext, iv, aad, mac_key, tag_length):
+    """
+    Get an auth tag from the provided data
+
+    Args:
+        ciphertext (bytes): Encrypted value
+        iv (bytes): Initialization vector
+        aad (bytes): Additional Authenticated Data
+        mac_key (bytes): Key to use in generating the MAC
+        tag_length (int): How long the tag should be
+
+    Returns:
+        (bytes) Auth tag
+    """
+    al = _big_endian(len(aad) * 8)
+    auth_tag_input = aad + iv + ciphertext + al
+    signature = mac_key.sign(auth_tag_input)
+    auth_tag = signature[0:tag_length]
+    return auth_tag
+
+
+def _jwe_compact_serialize(encoded_header, encrypted_cek, iv, cipher_text, auth_tag):
+    """
+    Generate a compact serialized JWE
+
+    Args:
+        encoded_header (bytes): Base64 URL Encoded JWE header JSON
+        encrypted_cek (bytes): Encrypted content encryption key (cek)
+        iv (bytes): Initialization vector (IV)
+        cipher_text (bytes): Cipher text
+        auth_tag (bytes): JWE Auth Tag
+
+    Returns:
+        (str): JWE compact serialized string
+    """
+    cipher_text = ensure_binary(cipher_text)
+    encoded_encrypted_cek = base64url_encode(encrypted_cek)
+    encoded_iv = base64url_encode(iv)
+    encoded_cipher_text = base64url_encode(cipher_text)
+    encoded_auth_tag = base64url_encode(auth_tag)
+    return (
+        encoded_header
+        + b"."
+        + encoded_encrypted_cek
+        + b"."
+        + encoded_iv
+        + b"."
+        + encoded_cipher_text
+        + b"."
+        + encoded_auth_tag
+    )
diff --git a/myenv/lib/python3.9/site-packages/jose/jwk.py b/myenv/lib/python3.9/site-packages/jose/jwk.py
new file mode 100644
index 0000000..7afc054
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/jose/jwk.py
@@ -0,0 +1,79 @@
+from jose.backends.base import Key
+from jose.constants import ALGORITHMS
+from jose.exceptions import JWKError
+
+try:
+    from jose.backends import RSAKey  # noqa: F401
+except ImportError:
+    pass
+
+try:
+    from jose.backends import ECKey  # noqa: F401
+except ImportError:
+    pass
+
+try:
+    from jose.backends import AESKey  # noqa: F401
+except ImportError:
+    pass
+
+try:
+    from jose.backends import DIRKey  # noqa: F401
+except ImportError:
+    pass
+
+try:
+    from jose.backends import HMACKey  # noqa: F401
+except ImportError:
+    pass
+
+
+def get_key(algorithm):
+    if algorithm in ALGORITHMS.KEYS:
+        return ALGORITHMS.KEYS[algorithm]
+    elif algorithm in ALGORITHMS.HMAC:  # noqa: F811
+        return HMACKey
+    elif algorithm in ALGORITHMS.RSA:
+        from jose.backends import RSAKey  # noqa: F811
+
+        return RSAKey
+    elif algorithm in ALGORITHMS.EC:
+        from jose.backends import ECKey  # noqa: F811
+
+        return ECKey
+    elif algorithm in ALGORITHMS.AES:
+        from jose.backends import AESKey  # noqa: F811
+
+        return AESKey
+    elif algorithm == ALGORITHMS.DIR:
+        from jose.backends import DIRKey  # noqa: F811
+
+        return DIRKey
+    return None
+
+
+def register_key(algorithm, key_class):
+    if not issubclass(key_class, Key):
+        raise TypeError("Key class is not a subclass of jwk.Key")
+    ALGORITHMS.KEYS[algorithm] = key_class
+    ALGORITHMS.SUPPORTED.add(algorithm)
+    return True
+
+
+def construct(key_data, algorithm=None):
+    """
+    Construct a Key object for the given algorithm with the given
+    key_data.
+    """
+
+    # Allow for pulling the algorithm off the passed-in jwk.
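+    # (Editorial example, not upstream: construct({"kty": "oct",
+    # "k": "c2VjcmV0", "alg": "HS256"}) infers HS256 from the JWK itself,
+    # while construct("secret", "HS256") relies on the explicit argument.)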
+ if not algorithm and isinstance(key_data, dict): + algorithm = key_data.get("alg", None) + + if not algorithm: + raise JWKError("Unable to find an algorithm for key: %s" % key_data) + + key_class = get_key(algorithm) + if not key_class: + raise JWKError("Unable to find an algorithm for key: %s" % key_data) + return key_class(key_data, algorithm) diff --git a/myenv/lib/python3.9/site-packages/jose/jws.py b/myenv/lib/python3.9/site-packages/jose/jws.py new file mode 100644 index 0000000..bfaf6bd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/jws.py @@ -0,0 +1,266 @@ +import binascii +import json +from collections.abc import Iterable, Mapping + +from jose import jwk +from jose.backends.base import Key +from jose.constants import ALGORITHMS +from jose.exceptions import JWSError, JWSSignatureError +from jose.utils import base64url_decode, base64url_encode + + +def sign(payload, key, headers=None, algorithm=ALGORITHMS.HS256): + """Signs a claims set and returns a JWS string. + + Args: + payload (str or dict): A string to sign + key (str or dict): The key to use for signing the claim set. Can be + individual JWK or JWK set. + headers (dict, optional): A set of headers that will be added to + the default headers. Any headers that are added as additional + headers will override the default headers. + algorithm (str, optional): The algorithm to use for signing the + the claims. Defaults to HS256. + + Returns: + str: The string representation of the header, claims, and signature. + + Raises: + JWSError: If there is an error signing the token. + + Examples: + + >>> jws.sign({'a': 'b'}, 'secret', algorithm='HS256') + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' + + """ + + if algorithm not in ALGORITHMS.SUPPORTED: + raise JWSError("Algorithm %s not supported." % algorithm) + + encoded_header = _encode_header(algorithm, additional_headers=headers) + encoded_payload = _encode_payload(payload) + signed_output = _sign_header_and_claims(encoded_header, encoded_payload, algorithm, key) + + return signed_output + + +def verify(token, key, algorithms, verify=True): + """Verifies a JWS string's signature. + + Args: + token (str): A signed JWS to be verified. + key (str or dict): A key to attempt to verify the payload with. Can be + individual JWK or JWK set. + algorithms (str or list): Valid algorithms that should be used to verify the JWS. + + Returns: + str: The str representation of the payload, assuming the signature is valid. + + Raises: + JWSError: If there is an exception verifying a token. + + Examples: + + >>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' + >>> jws.verify(token, 'secret', algorithms='HS256') + + """ + + header, payload, signing_input, signature = _load(token) + + if verify: + _verify_signature(signing_input, header, signature, key, algorithms) + + return payload + + +def get_unverified_header(token): + """Returns the decoded headers without verification of any kind. + + Args: + token (str): A signed JWS to decode the headers from. + + Returns: + dict: The dict representation of the token headers. + + Raises: + JWSError: If there is an exception decoding the token. + """ + header, claims, signing_input, signature = _load(token) + return header + + +def get_unverified_headers(token): + """Returns the decoded headers without verification of any kind. + + This is simply a wrapper of get_unverified_header() for backwards + compatibility. 
+ + Args: + token (str): A signed JWS to decode the headers from. + + Returns: + dict: The dict representation of the token headers. + + Raises: + JWSError: If there is an exception decoding the token. + """ + return get_unverified_header(token) + + +def get_unverified_claims(token): + """Returns the decoded claims without verification of any kind. + + Args: + token (str): A signed JWS to decode the headers from. + + Returns: + str: The str representation of the token claims. + + Raises: + JWSError: If there is an exception decoding the token. + """ + header, claims, signing_input, signature = _load(token) + return claims + + +def _encode_header(algorithm, additional_headers=None): + header = {"typ": "JWT", "alg": algorithm} + + if additional_headers: + header.update(additional_headers) + + json_header = json.dumps( + header, + separators=(",", ":"), + sort_keys=True, + ).encode("utf-8") + + return base64url_encode(json_header) + + +def _encode_payload(payload): + if isinstance(payload, Mapping): + try: + payload = json.dumps( + payload, + separators=(",", ":"), + ).encode("utf-8") + except ValueError: + pass + + return base64url_encode(payload) + + +def _sign_header_and_claims(encoded_header, encoded_claims, algorithm, key): + signing_input = b".".join([encoded_header, encoded_claims]) + try: + if not isinstance(key, Key): + key = jwk.construct(key, algorithm) + signature = key.sign(signing_input) + except Exception as e: + raise JWSError(e) + + encoded_signature = base64url_encode(signature) + + encoded_string = b".".join([encoded_header, encoded_claims, encoded_signature]) + + return encoded_string.decode("utf-8") + + +def _load(jwt): + if isinstance(jwt, str): + jwt = jwt.encode("utf-8") + try: + signing_input, crypto_segment = jwt.rsplit(b".", 1) + header_segment, claims_segment = signing_input.split(b".", 1) + header_data = base64url_decode(header_segment) + except ValueError: + raise JWSError("Not enough segments") + except (TypeError, binascii.Error): + raise JWSError("Invalid header padding") + + try: + header = json.loads(header_data.decode("utf-8")) + except ValueError as e: + raise JWSError("Invalid header string: %s" % e) + + if not isinstance(header, Mapping): + raise JWSError("Invalid header string: must be a json object") + + try: + payload = base64url_decode(claims_segment) + except (TypeError, binascii.Error): + raise JWSError("Invalid payload padding") + + try: + signature = base64url_decode(crypto_segment) + except (TypeError, binascii.Error): + raise JWSError("Invalid crypto padding") + + return (header, payload, signing_input, signature) + + +def _sig_matches_keys(keys, signing_input, signature, alg): + for key in keys: + if not isinstance(key, Key): + key = jwk.construct(key, alg) + try: + if key.verify(signing_input, signature): + return True + except Exception: + pass + return False + + +def _get_keys(key): + + if isinstance(key, Key): + return (key,) + + try: + key = json.loads(key, parse_int=str, parse_float=str) + except Exception: + pass + + if isinstance(key, Mapping): + if "keys" in key: + # JWK Set per RFC 7517 + return key["keys"] + elif "kty" in key: + # Individual JWK per RFC 7517 + return (key,) + else: + # Some other mapping. Firebase uses just dict of kid, cert pairs + values = key.values() + if values: + return values + return (key,) + + # Iterable but not text or mapping => list- or tuple-like + elif isinstance(key, Iterable) and not (isinstance(key, str) or isinstance(key, bytes)): + return key + + # Scalar value, wrap in tuple. 
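+    # (Editorial summary, not upstream: _get_keys thus accepts a Key instance,
+    # a JWK dict, a JWK Set {"keys": [...]}, a JSON string of either, a bare
+    # secret str/bytes, or an iterable of any of these.)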
+ else: + return (key,) + + +def _verify_signature(signing_input, header, signature, key="", algorithms=None): + + alg = header.get("alg") + if not alg: + raise JWSError("No algorithm was specified in the JWS header.") + + if algorithms is not None and alg not in algorithms: + raise JWSError("The specified alg value is not allowed") + + keys = _get_keys(key) + try: + if not _sig_matches_keys(keys, signing_input, signature, alg): + raise JWSSignatureError() + except JWSSignatureError: + raise JWSError("Signature verification failed.") + except JWSError: + raise JWSError("Invalid or unsupported algorithm: %s" % alg) diff --git a/myenv/lib/python3.9/site-packages/jose/jwt.py b/myenv/lib/python3.9/site-packages/jose/jwt.py new file mode 100644 index 0000000..3f2142e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/jwt.py @@ -0,0 +1,496 @@ +import json +from calendar import timegm +from collections.abc import Mapping +from datetime import datetime, timedelta + +from jose import jws + +from .constants import ALGORITHMS +from .exceptions import ExpiredSignatureError, JWSError, JWTClaimsError, JWTError +from .utils import calculate_at_hash, timedelta_total_seconds + + +def encode(claims, key, algorithm=ALGORITHMS.HS256, headers=None, access_token=None): + """Encodes a claims set and returns a JWT string. + + JWTs are JWS signed objects with a few reserved claims. + + Args: + claims (dict): A claims set to sign + key (str or dict): The key to use for signing the claim set. Can be + individual JWK or JWK set. + algorithm (str, optional): The algorithm to use for signing the + the claims. Defaults to HS256. + headers (dict, optional): A set of headers that will be added to + the default headers. Any headers that are added as additional + headers will override the default headers. + access_token (str, optional): If present, the 'at_hash' claim will + be calculated and added to the claims present in the 'claims' + parameter. + + Returns: + str: The string representation of the header, claims, and signature. + + Raises: + JWTError: If there is an error encoding the claims. + + Examples: + + >>> jwt.encode({'a': 'b'}, 'secret', algorithm='HS256') + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' + + """ + + for time_claim in ["exp", "iat", "nbf"]: + + # Convert datetime to a intDate value in known time-format claims + if isinstance(claims.get(time_claim), datetime): + claims[time_claim] = timegm(claims[time_claim].utctimetuple()) + + if access_token: + claims["at_hash"] = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm]) + + return jws.sign(claims, key, headers=headers, algorithm=algorithm) + + +def decode(token, key, algorithms=None, options=None, audience=None, issuer=None, subject=None, access_token=None): + """Verifies a JWT string's signature and validates reserved claims. + + Args: + token (str): A signed JWS to be verified. + key (str or dict): A key to attempt to verify the payload with. Can be + individual JWK or JWK set. + algorithms (str or list): Valid algorithms that should be used to verify the JWS. + audience (str): The intended audience of the token. If the "aud" claim is + included in the claim set, then the audience must be included and must equal + the provided claim. + issuer (str or iterable): Acceptable value(s) for the issuer of the token. + If the "iss" claim is included in the claim set, then the issuer must be + given and the claim in the token must be among the acceptable values. 
+ subject (str): The subject of the token. If the "sub" claim is + included in the claim set, then the subject must be included and must equal + the provided claim. + access_token (str): An access token string. If the "at_hash" claim is included in the + claim set, then the access_token must be included, and it must match + the "at_hash" claim. + options (dict): A dictionary of options for skipping validation steps. + + defaults = { + 'verify_signature': True, + 'verify_aud': True, + 'verify_iat': True, + 'verify_exp': True, + 'verify_nbf': True, + 'verify_iss': True, + 'verify_sub': True, + 'verify_jti': True, + 'verify_at_hash': True, + 'require_aud': False, + 'require_iat': False, + 'require_exp': False, + 'require_nbf': False, + 'require_iss': False, + 'require_sub': False, + 'require_jti': False, + 'require_at_hash': False, + 'leeway': 0, + } + + Returns: + dict: The dict representation of the claims set, assuming the signature is valid + and all requested data validation passes. + + Raises: + JWTError: If the signature is invalid in any way. + ExpiredSignatureError: If the signature has expired. + JWTClaimsError: If any claim is invalid in any way. + + Examples: + + >>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' + >>> jwt.decode(payload, 'secret', algorithms='HS256') + + """ + + defaults = { + "verify_signature": True, + "verify_aud": True, + "verify_iat": True, + "verify_exp": True, + "verify_nbf": True, + "verify_iss": True, + "verify_sub": True, + "verify_jti": True, + "verify_at_hash": True, + "require_aud": False, + "require_iat": False, + "require_exp": False, + "require_nbf": False, + "require_iss": False, + "require_sub": False, + "require_jti": False, + "require_at_hash": False, + "leeway": 0, + } + + if options: + defaults.update(options) + + verify_signature = defaults.get("verify_signature", True) + + try: + payload = jws.verify(token, key, algorithms, verify=verify_signature) + except JWSError as e: + raise JWTError(e) + + # Needed for at_hash verification + algorithm = jws.get_unverified_header(token)["alg"] + + try: + claims = json.loads(payload.decode("utf-8")) + except ValueError as e: + raise JWTError("Invalid payload string: %s" % e) + + if not isinstance(claims, Mapping): + raise JWTError("Invalid payload string: must be a json object") + + _validate_claims( + claims, + audience=audience, + issuer=issuer, + subject=subject, + algorithm=algorithm, + access_token=access_token, + options=defaults, + ) + + return claims + + +def get_unverified_header(token): + """Returns the decoded headers without verification of any kind. + + Args: + token (str): A signed JWT to decode the headers from. + + Returns: + dict: The dict representation of the token headers. + + Raises: + JWTError: If there is an exception decoding the token. + """ + try: + headers = jws.get_unverified_headers(token) + except Exception: + raise JWTError("Error decoding token headers.") + + return headers + + +def get_unverified_headers(token): + """Returns the decoded headers without verification of any kind. + + This is simply a wrapper of get_unverified_header() for backwards + compatibility. + + Args: + token (str): A signed JWT to decode the headers from. + + Returns: + dict: The dict representation of the token headers. + + Raises: + JWTError: If there is an exception decoding the token. + """ + return get_unverified_header(token) + + +def get_unverified_claims(token): + """Returns the decoded claims without verification of any kind. 
+ + Args: + token (str): A signed JWT to decode the headers from. + + Returns: + dict: The dict representation of the token claims. + + Raises: + JWTError: If there is an exception decoding the token. + """ + try: + claims = jws.get_unverified_claims(token) + except Exception: + raise JWTError("Error decoding token claims.") + + try: + claims = json.loads(claims.decode("utf-8")) + except ValueError as e: + raise JWTError("Invalid claims string: %s" % e) + + if not isinstance(claims, Mapping): + raise JWTError("Invalid claims string: must be a json object") + + return claims + + +def _validate_iat(claims): + """Validates that the 'iat' claim is valid. + + The "iat" (issued at) claim identifies the time at which the JWT was + issued. This claim can be used to determine the age of the JWT. Its + value MUST be a number containing a NumericDate value. Use of this + claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + """ + + if "iat" not in claims: + return + + try: + int(claims["iat"]) + except ValueError: + raise JWTClaimsError("Issued At claim (iat) must be an integer.") + + +def _validate_nbf(claims, leeway=0): + """Validates that the 'nbf' claim is valid. + + The "nbf" (not before) claim identifies the time before which the JWT + MUST NOT be accepted for processing. The processing of the "nbf" + claim requires that the current date/time MUST be after or equal to + the not-before date/time listed in the "nbf" claim. Implementers MAY + provide for some small leeway, usually no more than a few minutes, to + account for clock skew. Its value MUST be a number containing a + NumericDate value. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + leeway (int): The number of seconds of skew that is allowed. + """ + + if "nbf" not in claims: + return + + try: + nbf = int(claims["nbf"]) + except ValueError: + raise JWTClaimsError("Not Before claim (nbf) must be an integer.") + + now = timegm(datetime.utcnow().utctimetuple()) + + if nbf > (now + leeway): + raise JWTClaimsError("The token is not yet valid (nbf)") + + +def _validate_exp(claims, leeway=0): + """Validates that the 'exp' claim is valid. + + The "exp" (expiration time) claim identifies the expiration time on + or after which the JWT MUST NOT be accepted for processing. The + processing of the "exp" claim requires that the current date/time + MUST be before the expiration date/time listed in the "exp" claim. + Implementers MAY provide for some small leeway, usually no more than + a few minutes, to account for clock skew. Its value MUST be a number + containing a NumericDate value. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + leeway (int): The number of seconds of skew that is allowed. + """ + + if "exp" not in claims: + return + + try: + exp = int(claims["exp"]) + except ValueError: + raise JWTClaimsError("Expiration Time claim (exp) must be an integer.") + + now = timegm(datetime.utcnow().utctimetuple()) + + if exp < (now - leeway): + raise ExpiredSignatureError("Signature has expired.") + + +def _validate_aud(claims, audience=None): + """Validates that the 'aud' claim is valid. + + The "aud" (audience) claim identifies the recipients that the JWT is + intended for. Each principal intended to process the JWT MUST + identify itself with a value in the audience claim. 
If the principal + processing the claim does not identify itself with a value in the + "aud" claim when this claim is present, then the JWT MUST be + rejected. In the general case, the "aud" value is an array of case- + sensitive strings, each containing a StringOrURI value. In the + special case when the JWT has one audience, the "aud" value MAY be a + single case-sensitive string containing a StringOrURI value. The + interpretation of audience values is generally application specific. + Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + audience (str): The audience that is verifying the token. + """ + + if "aud" not in claims: + # if audience: + # raise JWTError('Audience claim expected, but not in claims') + return + + audience_claims = claims["aud"] + if isinstance(audience_claims, str): + audience_claims = [audience_claims] + if not isinstance(audience_claims, list): + raise JWTClaimsError("Invalid claim format in token") + if any(not isinstance(c, str) for c in audience_claims): + raise JWTClaimsError("Invalid claim format in token") + if audience not in audience_claims: + raise JWTClaimsError("Invalid audience") + + +def _validate_iss(claims, issuer=None): + """Validates that the 'iss' claim is valid. + + The "iss" (issuer) claim identifies the principal that issued the + JWT. The processing of this claim is generally application specific. + The "iss" value is a case-sensitive string containing a StringOrURI + value. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + issuer (str or iterable): Acceptable value(s) for the issuer that + signed the token. + """ + + if issuer is not None: + if isinstance(issuer, str): + issuer = (issuer,) + if claims.get("iss") not in issuer: + raise JWTClaimsError("Invalid issuer") + + +def _validate_sub(claims, subject=None): + """Validates that the 'sub' claim is valid. + + The "sub" (subject) claim identifies the principal that is the + subject of the JWT. The claims in a JWT are normally statements + about the subject. The subject value MUST either be scoped to be + locally unique in the context of the issuer or be globally unique. + The processing of this claim is generally application specific. The + "sub" value is a case-sensitive string containing a StringOrURI + value. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + subject (str): The subject of the token. + """ + + if "sub" not in claims: + return + + if not isinstance(claims["sub"], str): + raise JWTClaimsError("Subject must be a string.") + + if subject is not None: + if claims.get("sub") != subject: + raise JWTClaimsError("Invalid subject") + + +def _validate_jti(claims): + """Validates that the 'jti' claim is valid. + + The "jti" (JWT ID) claim provides a unique identifier for the JWT. + The identifier value MUST be assigned in a manner that ensures that + there is a negligible probability that the same value will be + accidentally assigned to a different data object; if the application + uses multiple issuers, collisions MUST be prevented among values + produced by different issuers as well. The "jti" claim can be used + to prevent the JWT from being replayed. The "jti" value is a case- + sensitive string. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. 
+ """ + if "jti" not in claims: + return + + if not isinstance(claims["jti"], str): + raise JWTClaimsError("JWT ID must be a string.") + + +def _validate_at_hash(claims, access_token, algorithm): + """ + Validates that the 'at_hash' is valid. + + Its value is the base64url encoding of the left-most half of the hash + of the octets of the ASCII representation of the access_token value, + where the hash algorithm used is the hash algorithm used in the alg + Header Parameter of the ID Token's JOSE Header. For instance, if the + alg is RS256, hash the access_token value with SHA-256, then take the + left-most 128 bits and base64url encode them. The at_hash value is a + case sensitive string. Use of this claim is OPTIONAL. + + Args: + claims (dict): The claims dictionary to validate. + access_token (str): The access token returned by the OpenID Provider. + algorithm (str): The algorithm used to sign the JWT, as specified by + the token headers. + """ + if "at_hash" not in claims: + return + + if not access_token: + msg = "No access_token provided to compare against at_hash claim." + raise JWTClaimsError(msg) + + try: + expected_hash = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm]) + except (TypeError, ValueError): + msg = "Unable to calculate at_hash to verify against token claims." + raise JWTClaimsError(msg) + + if claims["at_hash"] != expected_hash: + raise JWTClaimsError("at_hash claim does not match access_token.") + + +def _validate_claims(claims, audience=None, issuer=None, subject=None, algorithm=None, access_token=None, options=None): + + leeway = options.get("leeway", 0) + + if isinstance(leeway, timedelta): + leeway = timedelta_total_seconds(leeway) + required_claims = [e[len("require_") :] for e in options.keys() if e.startswith("require_") and options[e]] + for require_claim in required_claims: + if require_claim not in claims: + raise JWTError('missing required key "%s" among claims' % require_claim) + else: + options["verify_" + require_claim] = True # override verify when required + + if not isinstance(audience, ((str,), type(None))): + raise JWTError("audience must be a string or None") + + if options.get("verify_iat"): + _validate_iat(claims) + + if options.get("verify_nbf"): + _validate_nbf(claims, leeway=leeway) + + if options.get("verify_exp"): + _validate_exp(claims, leeway=leeway) + + if options.get("verify_aud"): + _validate_aud(claims, audience=audience) + + if options.get("verify_iss"): + _validate_iss(claims, issuer=issuer) + + if options.get("verify_sub"): + _validate_sub(claims, subject=subject) + + if options.get("verify_jti"): + _validate_jti(claims) + + if options.get("verify_at_hash"): + _validate_at_hash(claims, access_token, algorithm) diff --git a/myenv/lib/python3.9/site-packages/jose/utils.py b/myenv/lib/python3.9/site-packages/jose/utils.py new file mode 100644 index 0000000..fcef885 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/jose/utils.py @@ -0,0 +1,108 @@ +import base64 +import struct + +# Piggyback of the backends implementation of the function that converts a long +# to a bytes stream. Some plumbing is necessary to have the signatures match. 
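+# (Editorial example, not upstream: with either backend
+# long_to_bytes(65537, 4) == b"\x00\x01\x00\x01", while blocksize=0 yields
+# the minimal b"\x01\x00\x01".)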
+try: + from cryptography.utils import int_to_bytes as _long_to_bytes + + def long_to_bytes(n, blocksize=0): + return _long_to_bytes(n, blocksize or None) + + +except ImportError: + from ecdsa.ecdsa import int_to_string as _long_to_bytes + + def long_to_bytes(n, blocksize=0): + ret = _long_to_bytes(n) + if blocksize == 0: + return ret + else: + assert len(ret) <= blocksize + padding = blocksize - len(ret) + return b"\x00" * padding + ret + + +def long_to_base64(data, size=0): + return base64.urlsafe_b64encode(long_to_bytes(data, size)).strip(b"=") + + +def int_arr_to_long(arr): + return int("".join(["%02x" % byte for byte in arr]), 16) + + +def base64_to_long(data): + if isinstance(data, str): + data = data.encode("ascii") + + # urlsafe_b64decode will happily convert b64encoded data + _d = base64.urlsafe_b64decode(bytes(data) + b"==") + return int_arr_to_long(struct.unpack("%sB" % len(_d), _d)) + + +def calculate_at_hash(access_token, hash_alg): + """Helper method for calculating an access token + hash, as described in http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken + + Its value is the base64url encoding of the left-most half of the hash of the octets + of the ASCII representation of the access_token value, where the hash algorithm + used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE + Header. For instance, if the alg is RS256, hash the access_token value with SHA-256, + then take the left-most 128 bits and base64url encode them. The at_hash value is a + case sensitive string. + + Args: + access_token (str): An access token string. + hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256 + + """ + hash_digest = hash_alg(access_token.encode("utf-8")).digest() + cut_at = int(len(hash_digest) / 2) + truncated = hash_digest[:cut_at] + at_hash = base64url_encode(truncated) + return at_hash.decode("utf-8") + + +def base64url_decode(input): + """Helper method to base64url_decode a string. + + Args: + input (str): A base64url_encoded string to decode. + + """ + rem = len(input) % 4 + + if rem > 0: + input += b"=" * (4 - rem) + + return base64.urlsafe_b64decode(input) + + +def base64url_encode(input): + """Helper method to base64url_encode a string. + + Args: + input (str): A base64url_encoded string to encode. + + """ + return base64.urlsafe_b64encode(input).replace(b"=", b"") + + +def timedelta_total_seconds(delta): + """Helper method to determine the total number of seconds + from a timedelta. + + Args: + delta (timedelta): A timedelta to convert to seconds. + """ + return delta.days * 24 * 60 * 60 + delta.seconds + + +def ensure_binary(s): + """Coerce **s** to bytes.""" + + if isinstance(s, bytes): + return s + if isinstance(s, str): + return s.encode("utf-8", "strict") + raise TypeError(f"not expecting type '{type(s)}'") diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/AUTHORS.rst b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/AUTHORS.rst new file mode 100644 index 0000000..dbc0324 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/AUTHORS.rst @@ -0,0 +1,10 @@ + +Authors +======= + +* Ionel Cristian Mărieș - https://blog.ionelmc.ro +* Alvin Chow - https://github.com/alvinchow86 +* Astrum Kuo - https://github.com/xowenx +* Erik M. 
Bray - http://iguananaut.net +* Ran Benita - https://github.com/bluetech +* "hugovk" - https://github.com/hugovk diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/LICENSE new file mode 100644 index 0000000..de39b84 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +BSD 2-Clause License + +Copyright (c) 2014-2019, Ionel Cristian Mărieș +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/METADATA b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/METADATA new file mode 100644 index 0000000..c7a3bd4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/METADATA @@ -0,0 +1,219 @@ +Metadata-Version: 2.1 +Name: lazy-object-proxy +Version: 1.7.1 +Summary: A fast and thorough lazy object proxy. 
+Home-page: https://github.com/ionelmc/python-lazy-object-proxy +Author: Ionel Cristian Mărieș +Author-email: contact@ionelmc.ro +License: BSD-2-Clause +Project-URL: Documentation, https://python-lazy-object-proxy.readthedocs.io/ +Project-URL: Changelog, https://python-lazy-object-proxy.readthedocs.io/en/latest/changelog.html +Project-URL: Issue Tracker, https://github.com/ionelmc/python-lazy-object-proxy/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Utilities +Requires-Python: >=3.6 +License-File: LICENSE +License-File: AUTHORS.rst + +======== +Overview +======== + + + +A fast and thorough lazy object proxy. + +* Free software: BSD 2-Clause License + +Note that this is based on `wrapt`_'s ObjectProxy with one big change: it calls a function the first time the proxy object is +used, while `wrapt.ObjectProxy` just forwards the method calls to the target object. + +In other words, you use `lazy-object-proxy` when you only have the object way later and you use `wrapt.ObjectProxy` when you +want to override few methods (by subclassing) and forward everything else to the target object. + +Example:: + + import lazy_object_proxy + + def expensive_func(): + from time import sleep + print('starting calculation') + # just as example for a very slow computation + sleep(2) + print('finished calculation') + # return the result of the calculation + return 10 + + obj = lazy_object_proxy.Proxy(expensive_func) + # function is called only when object is actually used + print(obj) # now expensive_func is called + + print(obj) # the result without calling the expensive_func + +Installation +============ + +:: + + pip install lazy-object-proxy + +Documentation +============= + +https://python-lazy-object-proxy.readthedocs.io/ + +Development +=========== + +To run all the tests run:: + + tox + +Acknowledgements +================ + +This project is based on some code from `wrapt`_ as you can see in the git history. + +.. _wrapt: https://github.com/GrahamDumpleton/wrapt + + +Changelog +========= + +1.7.1 (2021-12-15) +------------------ + + +* Removed most of the Python 2 support code and fixed ``python_requires`` to require at least Python 3.6. + + Note that 1.7.0 has been yanked because it could not install on Python 2.7. + Installing lazy-object-proxy on Python 2.7 should automatically fall back to the 1.6.0 release now. + +1.7.0 (2021-12-15) +------------------ + +* Switched CI to GitHub Actions, this has a couple consequences: + + * Support for Python 2.7 is dropped. You can still install it there but it's not tested anymore and + Python 2 specific handling will be removed at some point. + * Linux wheels are now provided in `musllinux` and `manylinux2014` variants. 
+ +* Fixed ``__index__`` to fallback to ``int`` if the wrapped object doesn't have an ``__index__`` method. + This prevents situations where code using a proxy would otherwise likely just call ``int`` had the object + not have an ``__index__`` method. + +1.6.0 (2021-03-22) +------------------ + +* Added support for async special methods (``__aiter__``, ``__anext__``, + ``__await__``, ``__aenter__``, ``__aexit__``). + These are used in the ``async for``, ``await` and ``async with`` statements. + + Note that ``__await__`` returns a wrapper that tries to emulate the crazy + stuff going on in the ceval loop, so there will be a small performance overhead. +* Added the ``__resolved__`` property. You can use it to check if the factory has + been called. + +1.5.2 (2020-11-26) +------------------ + +* Added Python 3.9 wheels. +* Removed Python 2.7 Windows wheels + (not supported on newest image with Python 3.9). + +1.5.1 (2020-07-22) +------------------ + +* Added ARM64 wheels (manylinux2014). + +1.5.0 (2020-06-05) +------------------ + +* Added support for ``__fspath__``. +* Dropped support for Python 3.4. + +1.4.3 (2019-10-26) +------------------ + +* Added binary wheels for Python 3.8. +* Fixed license metadata. + +1.4.2 (2019-08-22) +------------------ + +* Included a ``pyproject.toml`` to allow users install the sdist with old python/setuptools, as the + setuptools-scm dep will be fetched by pip instead of setuptools. + Fixes `#30 `_. + +1.4.1 (2019-05-10) +------------------ + +* Fixed wheels being built with ``-coverage`` cflags. No more issues about bogus ``cext.gcda`` files. +* Removed useless C file from wheels. +* Changed ``setup.py`` to use setuptools-scm. + +1.4.0 (2019-05-05) +------------------ + +* Fixed ``__mod__`` for the slots backend. Contributed by Ran Benita in + `#28 `_. +* Dropped support for Python 2.6 and 3.3. Contributed by "hugovk" in + `#24 `_. + +1.3.1 (2017-05-05) +------------------ + +* Fix broken release (``sdist`` had a broken ``MANIFEST.in``). + +1.3.0 (2017-05-02) +------------------ + +* Speed up arithmetic operations involving ``cext.Proxy`` subclasses. + +1.2.2 (2016-04-14) +------------------ + +* Added `manylinux `_ wheels. +* Minor cleanup in readme. + +1.2.1 (2015-08-18) +------------------ + +* Fix a memory leak (the wrapped object would get bogus references). Contributed by Astrum Kuo in + `#10 `_. + +1.2.0 (2015-07-06) +------------------ + +* Don't instantiate the object when __repr__ is called. This aids with debugging (allows one to see exactly in + what state the proxy is). + +1.1.0 (2015-07-05) +------------------ + +* Added support for pickling. The pickled value is going to be the wrapped object *without* any Proxy container. +* Fixed a memory management issue in the C extension (reference cycles weren't garbage collected due to improper + handling in the C extension). Contributed by Alvin Chow in + `#8 `_. + +1.0.2 (2015-04-11) +----------------------------------------- + +* First release on PyPI. 
+ + diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/RECORD b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/RECORD new file mode 100644 index 0000000..ecf8fb5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/RECORD @@ -0,0 +1,15 @@ +lazy_object_proxy-1.7.1.dist-info/LICENSE,sha256=W-1KNkH2bsSNuN7SNqKV8z2H0CkxXzYXZVhUzw1wxUA,1329 +lazy_object_proxy-1.7.1.dist-info/WHEEL,sha256=JIE30nfOWUuazI4Vcfiuv_cYm-SkZCh6YOqQQjhm90A,109 +lazy_object_proxy-1.7.1.dist-info/AUTHORS.rst,sha256=8CeCjODba0S8UczLyZBPhpO_J6NMZ9Hz_fE1A1uNe9Y,278 +lazy_object_proxy-1.7.1.dist-info/top_level.txt,sha256=UNH-FQB-j_8bYqPz3gD90kHvaC42TQqY0thHSnbaa0k,18 +lazy_object_proxy-1.7.1.dist-info/METADATA,sha256=KJk7PYeMIqprrKYkxWq-fpA821iYHa8A91WgF2g57Zo,6826 +lazy_object_proxy/compat.py,sha256=W9iIrb9SWePDvo5tYCyY_VMoFoZ84nUux_tyLoDqonw,286 +lazy_object_proxy/_version.py,sha256=zX3KLJe3jAcA2G4CTqjCQedyBec7fH8geOYCwS-1kEM,142 +lazy_object_proxy/slots.py,sha256=X9W_-wZ8_9-ielG6xZ9IKAysKQnutd5ErqXN2CsgfX4,12502 +lazy_object_proxy/__init__.py,sha256=XSO_JFYBxkLI52LzafR6-VCPdnABiRzo34pPeSIMt8Y,410 +lazy_object_proxy/cext.cpython-39-darwin.so,sha256=y3lZfsCYdvy6eLKBtYgQovoJKQ0Rs5fjhjhG4x8b7b8,65584 +lazy_object_proxy/utils.py,sha256=i-7kGag3tORs8M1nYFColM-ZZLPur7KKRxVG7ytEJSI,587 +lazy_object_proxy/utils_py3.py,sha256=aFemNo5BMbOscJWwRII8CUOORSJQmMKwCzpDxmfgHJo,912 +lazy_object_proxy/simple.py,sha256=uFmj46FZ-xukOaDUYnL6nUKK5P33Zzh683irFrswpF0,9092 +lazy_object_proxy-1.7.1.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +lazy_object_proxy-1.7.1.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/WHEEL new file mode 100644 index 0000000..9c3644e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_10_9_x86_64 + diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/top_level.txt new file mode 100644 index 0000000..bdf032e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy-1.7.1.dist-info/top_level.txt @@ -0,0 +1 @@ +lazy_object_proxy diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/__init__.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/__init__.py new file mode 100644 index 0000000..0c1ef47 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/__init__.py @@ -0,0 +1,23 @@ +try: + import copy_reg as copyreg +except ImportError: + import copyreg + +from .utils import identity + +copyreg.constructor(identity) + +try: + from .cext import Proxy + from .cext import identity +except ImportError: + from .slots import Proxy +else: + copyreg.constructor(identity) + +try: + from ._version import version as __version__ +except ImportError: + __version__ = '1.7.1' + +__all__ = "Proxy", diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/_version.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/_version.py new file mode 100644 index 0000000..e00aa2f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = 
'1.7.1' +version_tuple = (1, 7, 1) diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/cext.cpython-39-darwin.so b/myenv/lib/python3.9/site-packages/lazy_object_proxy/cext.cpython-39-darwin.so new file mode 100755 index 0000000..423f95a Binary files /dev/null and b/myenv/lib/python3.9/site-packages/lazy_object_proxy/cext.cpython-39-darwin.so differ diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/compat.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/compat.py new file mode 100644 index 0000000..e950fdf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/compat.py @@ -0,0 +1,14 @@ +import sys + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, bytes +else: + string_types = basestring, # noqa: F821 + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + return meta("NewBase", bases, {}) diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/simple.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/simple.py new file mode 100644 index 0000000..724998d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/simple.py @@ -0,0 +1,278 @@ +import operator + +from .compat import PY2 +from .compat import PY3 +from .compat import string_types +from .compat import with_metaclass +from .utils import await_ +from .utils import cached_property +from .utils import identity + + +def make_proxy_method(code): + def proxy_wrapper(self, *args): + return code(self.__wrapped__, *args) + + return proxy_wrapper + + +class _ProxyMethods(object): + # We use properties to override the values of __module__ and + # __doc__. If we add these in ObjectProxy, the derived class + # __dict__ will still be setup to have string variants of these + # attributes and the rules of descriptors means that they appear to + # take precedence over the properties in the base class. To avoid + # that, we copy the properties into the derived class type itself + # via a meta class. In that way the properties will always take + # precedence. + + @property + def __module__(self): + return self.__wrapped__.__module__ + + @__module__.setter + def __module__(self, value): + self.__wrapped__.__module__ = value + + @property + def __doc__(self): + return self.__wrapped__.__doc__ + + @__doc__.setter + def __doc__(self, value): + self.__wrapped__.__doc__ = value + + # Need to also propagate the special __weakref__ attribute for case + # where decorating classes which will define this. If do not define + # it and use a function like inspect.getmembers() on a decorator + # class it will fail. This can't be in the derived classes. + + @property + def __weakref__(self): + return self.__wrapped__.__weakref__ + + +class _ProxyMetaType(type): + def __new__(cls, name, bases, dictionary): + # Copy our special properties into the class so that they + # always take precedence over attributes of the same name added + # during construction of a derived class. This is to save + # duplicating the implementation for them in all derived classes. 
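+        #
+        # vars() also picks up the '__dict__' descriptor that Python adds to
+        # _ProxyMethods itself; copying it would shadow the new class's own
+        # __dict__, hence the pop() below.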
+ + dictionary.update(vars(_ProxyMethods)) + dictionary.pop('__dict__') + + return type.__new__(cls, name, bases, dictionary) + + +class Proxy(with_metaclass(_ProxyMetaType)): + __factory__ = None + + def __init__(self, factory): + self.__dict__['__factory__'] = factory + + @property + def __resolved__(self): + return '__wrapped__' in self.__dict__ + + @cached_property + def __wrapped__(self): + self = self.__dict__ + if '__factory__' in self: + factory = self['__factory__'] + return factory() + else: + raise ValueError("Proxy hasn't been initiated: __factory__ is missing.") + + __name__ = property(make_proxy_method(operator.attrgetter('__name__'))) + __class__ = property(make_proxy_method(operator.attrgetter('__class__'))) + __annotations__ = property(make_proxy_method(operator.attrgetter('__anotations__'))) + __dir__ = make_proxy_method(dir) + __str__ = make_proxy_method(str) + + if PY3: + __bytes__ = make_proxy_method(bytes) + + def __repr__(self, __getattr__=object.__getattribute__): + if '__wrapped__' in self.__dict__: + return '<{} at 0x{:x} wrapping {!r} at 0x{:x} with factory {!r}>'.format( + type(self).__name__, id(self), + self.__wrapped__, id(self.__wrapped__), + self.__factory__ + ) + else: + return '<{} at 0x{:x} with factory {!r}>'.format( + type(self).__name__, id(self), + self.__factory__ + ) + + def __fspath__(self): + wrapped = self.__wrapped__ + if isinstance(wrapped, string_types): + return wrapped + else: + fspath = getattr(wrapped, '__fspath__', None) + if fspath is None: + return wrapped + else: + return fspath() + + __reversed__ = make_proxy_method(reversed) + + if PY3: + __round__ = make_proxy_method(round) + + __lt__ = make_proxy_method(operator.lt) + __le__ = make_proxy_method(operator.le) + __eq__ = make_proxy_method(operator.eq) + __ne__ = make_proxy_method(operator.ne) + __gt__ = make_proxy_method(operator.gt) + __ge__ = make_proxy_method(operator.ge) + __hash__ = make_proxy_method(hash) + __nonzero__ = make_proxy_method(bool) + __bool__ = make_proxy_method(bool) + + def __setattr__(self, name, value): + if hasattr(type(self), name): + self.__dict__[name] = value + else: + setattr(self.__wrapped__, name, value) + + def __getattr__(self, name): + if name in ('__wrapped__', '__factory__'): + raise AttributeError(name) + else: + return getattr(self.__wrapped__, name) + + def __delattr__(self, name): + if hasattr(type(self), name): + del self.__dict__[name] + else: + delattr(self.__wrapped__, name) + + __add__ = make_proxy_method(operator.add) + __sub__ = make_proxy_method(operator.sub) + __mul__ = make_proxy_method(operator.mul) + __div__ = make_proxy_method(operator.div if PY2 else operator.truediv) + __truediv__ = make_proxy_method(operator.truediv) + __floordiv__ = make_proxy_method(operator.floordiv) + __mod__ = make_proxy_method(operator.mod) + __divmod__ = make_proxy_method(divmod) + __pow__ = make_proxy_method(pow) + __lshift__ = make_proxy_method(operator.lshift) + __rshift__ = make_proxy_method(operator.rshift) + __and__ = make_proxy_method(operator.and_) + __xor__ = make_proxy_method(operator.xor) + __or__ = make_proxy_method(operator.or_) + + def __radd__(self, other): + return other + self.__wrapped__ + + def __rsub__(self, other): + return other - self.__wrapped__ + + def __rmul__(self, other): + return other * self.__wrapped__ + + def __rdiv__(self, other): + return operator.div(other, self.__wrapped__) + + def __rtruediv__(self, other): + return operator.truediv(other, self.__wrapped__) + + def __rfloordiv__(self, other): + return other // 
self.__wrapped__ + + def __rmod__(self, other): + return other % self.__wrapped__ + + def __rdivmod__(self, other): + return divmod(other, self.__wrapped__) + + def __rpow__(self, other, *args): + return pow(other, self.__wrapped__, *args) + + def __rlshift__(self, other): + return other << self.__wrapped__ + + def __rrshift__(self, other): + return other >> self.__wrapped__ + + def __rand__(self, other): + return other & self.__wrapped__ + + def __rxor__(self, other): + return other ^ self.__wrapped__ + + def __ror__(self, other): + return other | self.__wrapped__ + + __iadd__ = make_proxy_method(operator.iadd) + __isub__ = make_proxy_method(operator.isub) + __imul__ = make_proxy_method(operator.imul) + __idiv__ = make_proxy_method(operator.idiv if PY2 else operator.itruediv) + __itruediv__ = make_proxy_method(operator.itruediv) + __ifloordiv__ = make_proxy_method(operator.ifloordiv) + __imod__ = make_proxy_method(operator.imod) + __ipow__ = make_proxy_method(operator.ipow) + __ilshift__ = make_proxy_method(operator.ilshift) + __irshift__ = make_proxy_method(operator.irshift) + __iand__ = make_proxy_method(operator.iand) + __ixor__ = make_proxy_method(operator.ixor) + __ior__ = make_proxy_method(operator.ior) + __neg__ = make_proxy_method(operator.neg) + __pos__ = make_proxy_method(operator.pos) + __abs__ = make_proxy_method(operator.abs) + __invert__ = make_proxy_method(operator.invert) + + __int__ = make_proxy_method(int) + + if PY2: + __long__ = make_proxy_method(long) # noqa + + __float__ = make_proxy_method(float) + __oct__ = make_proxy_method(oct) + __hex__ = make_proxy_method(hex) + + def __index__(self): + if hasattr(self.__wrapped__, '__index__'): + return operator.index(self.__wrapped__) + else: + return int(self.__wrapped__) + + __len__ = make_proxy_method(len) + __contains__ = make_proxy_method(operator.contains) + __getitem__ = make_proxy_method(operator.getitem) + __setitem__ = make_proxy_method(operator.setitem) + __delitem__ = make_proxy_method(operator.delitem) + + if PY2: + __getslice__ = make_proxy_method(operator.getslice) + __setslice__ = make_proxy_method(operator.setslice) + __delslice__ = make_proxy_method(operator.delslice) + + def __enter__(self): + return self.__wrapped__.__enter__() + + def __exit__(self, *args, **kwargs): + return self.__wrapped__.__exit__(*args, **kwargs) + + __iter__ = make_proxy_method(iter) + + def __call__(self, *args, **kwargs): + return self.__wrapped__(*args, **kwargs) + + def __reduce__(self): + return identity, (self.__wrapped__,) + + def __reduce_ex__(self, protocol): + return identity, (self.__wrapped__,) + + if await_: + from .utils import __aenter__ + from .utils import __aexit__ + from .utils import __aiter__ + from .utils import __anext__ + from .utils import __await__ + + __aiter__, __anext__, __await__, __aenter__, __aexit__ # noqa diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/slots.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/slots.py new file mode 100644 index 0000000..24d2f7e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/slots.py @@ -0,0 +1,452 @@ +import operator + +from .compat import PY2 +from .compat import PY3 +from .compat import string_types +from .compat import with_metaclass +from .utils import await_ +from .utils import identity + + +class _ProxyMethods(object): + # We use properties to override the values of __module__ and + # __doc__. 
If we add these in ObjectProxy, the derived class + # __dict__ will still be setup to have string variants of these + # attributes and the rules of descriptors means that they appear to + # take precedence over the properties in the base class. To avoid + # that, we copy the properties into the derived class type itself + # via a meta class. In that way the properties will always take + # precedence. + + @property + def __module__(self): + return self.__wrapped__.__module__ + + @__module__.setter + def __module__(self, value): + self.__wrapped__.__module__ = value + + @property + def __doc__(self): + return self.__wrapped__.__doc__ + + @__doc__.setter + def __doc__(self, value): + self.__wrapped__.__doc__ = value + + # We similar use a property for __dict__. We need __dict__ to be + # explicit to ensure that vars() works as expected. + + @property + def __dict__(self): + return self.__wrapped__.__dict__ + + # Need to also propagate the special __weakref__ attribute for case + # where decorating classes which will define this. If do not define + # it and use a function like inspect.getmembers() on a decorator + # class it will fail. This can't be in the derived classes. + + @property + def __weakref__(self): + return self.__wrapped__.__weakref__ + + +class _ProxyMetaType(type): + def __new__(cls, name, bases, dictionary): + # Copy our special properties into the class so that they + # always take precedence over attributes of the same name added + # during construction of a derived class. This is to save + # duplicating the implementation for them in all derived classes. + + dictionary.update(vars(_ProxyMethods)) + + return type.__new__(cls, name, bases, dictionary) + + +class Proxy(with_metaclass(_ProxyMetaType)): + """ + A proxy implementation in pure Python, using slots. You can subclass this to add + local methods or attributes, or enable __dict__. + + The most important internals: + + * ``__factory__`` is the callback that "materializes" the object we proxy to. + * ``__target__`` will contain the object we proxy to, once it's "materialized". + * ``__resolved__`` is a boolean, `True` if factory was called. + * ``__wrapped__`` is a property that does either: + + * return ``__target__`` if it's set. + * calls ``__factory__``, saves result to ``__target__`` and returns said result. 
+ """ + + __slots__ = '__target__', '__factory__' + + def __init__(self, factory): + object.__setattr__(self, '__factory__', factory) + + @property + def __resolved__(self, __getattr__=object.__getattribute__): + try: + __getattr__(self, '__target__') + except AttributeError: + return False + else: + return True + + @property + def __wrapped__(self, __getattr__=object.__getattribute__, __setattr__=object.__setattr__, + __delattr__=object.__delattr__): + try: + return __getattr__(self, '__target__') + except AttributeError: + try: + factory = __getattr__(self, '__factory__') + except AttributeError: + raise ValueError("Proxy hasn't been initiated: __factory__ is missing.") + target = factory() + __setattr__(self, '__target__', target) + return target + + @__wrapped__.deleter + def __wrapped__(self, __delattr__=object.__delattr__): + __delattr__(self, '__target__') + + @__wrapped__.setter + def __wrapped__(self, target, __setattr__=object.__setattr__): + __setattr__(self, '__target__', target) + + @property + def __name__(self): + return self.__wrapped__.__name__ + + @__name__.setter + def __name__(self, value): + self.__wrapped__.__name__ = value + + @property + def __class__(self): + return self.__wrapped__.__class__ + + @__class__.setter # noqa: F811 + def __class__(self, value): # noqa: F811 + self.__wrapped__.__class__ = value + + @property + def __annotations__(self): + return self.__wrapped__.__anotations__ + + @__annotations__.setter + def __annotations__(self, value): + self.__wrapped__.__annotations__ = value + + def __dir__(self): + return dir(self.__wrapped__) + + def __str__(self): + return str(self.__wrapped__) + + if PY3: + def __bytes__(self): + return bytes(self.__wrapped__) + + def __repr__(self, __getattr__=object.__getattribute__): + try: + target = __getattr__(self, '__target__') + except AttributeError: + return '<{} at 0x{:x} with factory {!r}>'.format( + type(self).__name__, id(self), + self.__factory__ + ) + else: + return '<{} at 0x{:x} wrapping {!r} at 0x{:x} with factory {!r}>'.format( + type(self).__name__, id(self), + target, id(target), + self.__factory__ + ) + + def __fspath__(self): + wrapped = self.__wrapped__ + if isinstance(wrapped, string_types): + return wrapped + else: + fspath = getattr(wrapped, '__fspath__', None) + if fspath is None: + return wrapped + else: + return fspath() + + def __reversed__(self): + return reversed(self.__wrapped__) + + if PY3: + def __round__(self): + return round(self.__wrapped__) + + def __lt__(self, other): + return self.__wrapped__ < other + + def __le__(self, other): + return self.__wrapped__ <= other + + def __eq__(self, other): + return self.__wrapped__ == other + + def __ne__(self, other): + return self.__wrapped__ != other + + def __gt__(self, other): + return self.__wrapped__ > other + + def __ge__(self, other): + return self.__wrapped__ >= other + + def __hash__(self): + return hash(self.__wrapped__) + + def __nonzero__(self): + return bool(self.__wrapped__) + + def __bool__(self): + return bool(self.__wrapped__) + + def __setattr__(self, name, value, __setattr__=object.__setattr__): + if hasattr(type(self), name): + __setattr__(self, name, value) + else: + setattr(self.__wrapped__, name, value) + + def __getattr__(self, name): + if name in ('__wrapped__', '__factory__'): + raise AttributeError(name) + else: + return getattr(self.__wrapped__, name) + + def __delattr__(self, name, __delattr__=object.__delattr__): + if hasattr(type(self), name): + __delattr__(self, name) + else: + delattr(self.__wrapped__, name) + + 
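+    # A sketch of typical behaviour (see also the package README): the factory
+    # only runs on first real use of the proxy, and ``__resolved__`` reports
+    # whether that has happened yet.
+    #
+    #     p = Proxy(lambda: [1, 2, 3])
+    #     p.__resolved__   # False - target not materialized yet
+    #     len(p)           # calls the factory, returns 3
+    #     p.__resolved__   # True
+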
def __add__(self, other): + return self.__wrapped__ + other + + def __sub__(self, other): + return self.__wrapped__ - other + + def __mul__(self, other): + return self.__wrapped__ * other + + def __div__(self, other): + return operator.div(self.__wrapped__, other) + + def __truediv__(self, other): + return operator.truediv(self.__wrapped__, other) + + def __floordiv__(self, other): + return self.__wrapped__ // other + + def __mod__(self, other): + return self.__wrapped__ % other + + def __divmod__(self, other): + return divmod(self.__wrapped__, other) + + def __pow__(self, other, *args): + return pow(self.__wrapped__, other, *args) + + def __lshift__(self, other): + return self.__wrapped__ << other + + def __rshift__(self, other): + return self.__wrapped__ >> other + + def __and__(self, other): + return self.__wrapped__ & other + + def __xor__(self, other): + return self.__wrapped__ ^ other + + def __or__(self, other): + return self.__wrapped__ | other + + def __radd__(self, other): + return other + self.__wrapped__ + + def __rsub__(self, other): + return other - self.__wrapped__ + + def __rmul__(self, other): + return other * self.__wrapped__ + + def __rdiv__(self, other): + return operator.div(other, self.__wrapped__) + + def __rtruediv__(self, other): + return operator.truediv(other, self.__wrapped__) + + def __rfloordiv__(self, other): + return other // self.__wrapped__ + + def __rmod__(self, other): + return other % self.__wrapped__ + + def __rdivmod__(self, other): + return divmod(other, self.__wrapped__) + + def __rpow__(self, other, *args): + return pow(other, self.__wrapped__, *args) + + def __rlshift__(self, other): + return other << self.__wrapped__ + + def __rrshift__(self, other): + return other >> self.__wrapped__ + + def __rand__(self, other): + return other & self.__wrapped__ + + def __rxor__(self, other): + return other ^ self.__wrapped__ + + def __ror__(self, other): + return other | self.__wrapped__ + + def __iadd__(self, other): + self.__wrapped__ += other + return self + + def __isub__(self, other): + self.__wrapped__ -= other + return self + + def __imul__(self, other): + self.__wrapped__ *= other + return self + + def __idiv__(self, other): + self.__wrapped__ = operator.idiv(self.__wrapped__, other) + return self + + def __itruediv__(self, other): + self.__wrapped__ = operator.itruediv(self.__wrapped__, other) + return self + + def __ifloordiv__(self, other): + self.__wrapped__ //= other + return self + + def __imod__(self, other): + self.__wrapped__ %= other + return self + + def __ipow__(self, other): + self.__wrapped__ **= other + return self + + def __ilshift__(self, other): + self.__wrapped__ <<= other + return self + + def __irshift__(self, other): + self.__wrapped__ >>= other + return self + + def __iand__(self, other): + self.__wrapped__ &= other + return self + + def __ixor__(self, other): + self.__wrapped__ ^= other + return self + + def __ior__(self, other): + self.__wrapped__ |= other + return self + + def __neg__(self): + return -self.__wrapped__ + + def __pos__(self): + return +self.__wrapped__ + + def __abs__(self): + return abs(self.__wrapped__) + + def __invert__(self): + return ~self.__wrapped__ + + def __int__(self): + return int(self.__wrapped__) + + if PY2: + def __long__(self): + return long(self.__wrapped__) # noqa + + def __float__(self): + return float(self.__wrapped__) + + def __oct__(self): + return oct(self.__wrapped__) + + def __hex__(self): + return hex(self.__wrapped__) + + def __index__(self): + if hasattr(self.__wrapped__, 
'__index__'): + return operator.index(self.__wrapped__) + else: + return int(self.__wrapped__) + + def __len__(self): + return len(self.__wrapped__) + + def __contains__(self, value): + return value in self.__wrapped__ + + def __getitem__(self, key): + return self.__wrapped__[key] + + def __setitem__(self, key, value): + self.__wrapped__[key] = value + + def __delitem__(self, key): + del self.__wrapped__[key] + + def __getslice__(self, i, j): + return self.__wrapped__[i:j] + + def __setslice__(self, i, j, value): + self.__wrapped__[i:j] = value + + def __delslice__(self, i, j): + del self.__wrapped__[i:j] + + def __enter__(self): + return self.__wrapped__.__enter__() + + def __exit__(self, *args, **kwargs): + return self.__wrapped__.__exit__(*args, **kwargs) + + def __iter__(self): + return iter(self.__wrapped__) + + def __next__(self): + return next(self.__wrapped__) + + def __call__(self, *args, **kwargs): + return self.__wrapped__(*args, **kwargs) + + def __reduce__(self): + return identity, (self.__wrapped__,) + + def __reduce_ex__(self, protocol): + return identity, (self.__wrapped__,) + + if await_: + from .utils import __aenter__ + from .utils import __aexit__ + from .utils import __aiter__ + from .utils import __anext__ + from .utils import __await__ + + __aiter__, __anext__, __await__, __aenter__, __aexit__ # noqa diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils.py new file mode 100644 index 0000000..3307abe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils.py @@ -0,0 +1,25 @@ + # flake8: noqa +try: + from .utils_py3 import __aenter__ + from .utils_py3 import __aexit__ + from .utils_py3 import __aiter__ + from .utils_py3 import __anext__ + from .utils_py3 import __await__ + from .utils_py3 import await_ +except (ImportError, SyntaxError): + await_ = None + + +def identity(obj): + return obj + + +class cached_property(object): + def __init__(self, func): + self.func = func + + def __get__(self, obj, cls): + if obj is None: + return self + value = obj.__dict__[self.func.__name__] = self.func(obj) + return value diff --git a/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils_py3.py b/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils_py3.py new file mode 100644 index 0000000..101f270 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/lazy_object_proxy/utils_py3.py @@ -0,0 +1,44 @@ +from collections.abc import Awaitable +from inspect import CO_ITERABLE_COROUTINE +from types import CoroutineType +from types import GeneratorType + + +async def do_await(obj): + return await obj + + +def do_yield_from(gen): + return (yield from gen) + + +def await_(obj): + obj_type = type(obj) + if ( + obj_type is CoroutineType or + obj_type is GeneratorType and bool(obj.gi_code.co_flags & CO_ITERABLE_COROUTINE) or + isinstance(obj, Awaitable) + ): + return do_await(obj).__await__() + else: + return do_yield_from(obj) + + +def __aiter__(self): + return self.__wrapped__.__aiter__() + + +async def __anext__(self): + return await self.__wrapped__.__anext__() + + +def __await__(self): + return await_(self.__wrapped__) + + +def __aenter__(self): + return self.__wrapped__.__aenter__() + + +def __aexit__(self, *args, **kwargs): + return self.__wrapped__.__aexit__(*args, **kwargs) diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null 
+++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/LICENSE new file mode 100644 index 0000000..8fd356e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/LICENSE @@ -0,0 +1,25 @@ +Copyright © Ned Batchelder +Copyright © 2011-2013 Tarek Ziade +Copyright © 2013 Florent Xicluna + +Licensed under the terms of the Expat License + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/METADATA b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/METADATA new file mode 100644 index 0000000..e25facd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/METADATA @@ -0,0 +1,199 @@ +Metadata-Version: 2.1 +Name: mccabe +Version: 0.7.0 +Summary: McCabe checker, plugin for flake8 +Home-page: https://github.com/pycqa/mccabe +Author: Tarek Ziade +Author-email: tarek@ziade.org +Maintainer: Ian Stapleton Cordasco +Maintainer-email: graffatcolmingov@gmail.com +License: Expat license +Keywords: flake8 mccabe +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Requires-Python: >=3.6 +License-File: LICENSE + +McCabe complexity checker +========================= + +Ned's script to check McCabe complexity. + +This module provides a plugin for ``flake8``, the Python code checker. 
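+
+Besides the command line and the flake8 plugin described below, the module
+exposes a small Python API; a minimal sketch using the public
+``get_code_complexity`` helper (the file name here is illustrative)::
+
+    from mccabe import get_code_complexity
+
+    code = open("myscript.py").read()
+    # Prints each block whose complexity exceeds the threshold and
+    # returns the number of violations found.
+    get_code_complexity(code, threshold=10, filename="myscript.py")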
+ + +Installation +------------ + +You can install, upgrade, or uninstall ``mccabe`` with these commands:: + + $ pip install mccabe + $ pip install --upgrade mccabe + $ pip uninstall mccabe + + +Standalone script +----------------- + +The complexity checker can be used directly:: + + $ python -m mccabe --min 5 mccabe.py + ("185:1: 'PathGraphingAstVisitor.visitIf'", 5) + ("71:1: 'PathGraph.to_dot'", 5) + ("245:1: 'McCabeChecker.run'", 5) + ("283:1: 'main'", 7) + ("203:1: 'PathGraphingAstVisitor.visitTryExcept'", 5) + ("257:1: 'get_code_complexity'", 5) + + +Plugin for Flake8 +----------------- + +When both ``flake8 2+`` and ``mccabe`` are installed, the plugin is +available in ``flake8``:: + + $ flake8 --version + 2.0 (pep8: 1.4.2, pyflakes: 0.6.1, mccabe: 0.2) + +By default the plugin is disabled. Use the ``--max-complexity`` switch to +enable it. It will emit a warning if the McCabe complexity of a function is +higher than the provided value:: + + $ flake8 --max-complexity 10 coolproject + ... + coolproject/mod.py:1204:1: C901 'CoolFactory.prepare' is too complex (14) + +This feature is quite useful for detecting over-complex code. According to McCabe, +anything that goes beyond 10 is too complex. + +Flake8 has many features that mccabe does not provide. Flake8 allows users to +ignore violations reported by plugins with ``# noqa``. Read more about this in +`their documentation +`__. +To silence violations reported by ``mccabe``, place your ``# noqa: C901`` on +the function definition line, where the error is reported for (possibly a +decorator). + + +Links +----- + +* Feedback and ideas: http://mail.python.org/mailman/listinfo/code-quality + +* Cyclomatic complexity: http://en.wikipedia.org/wiki/Cyclomatic_complexity + +* Ned Batchelder's script: + http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html + +* McCabe complexity: http://en.wikipedia.org/wiki/Cyclomatic_complexity + + +Changes +------- + +0.7.0 - 2021-01-23 +`````````````````` + +* Drop support for all versions of Python lower than 3.6 + +* Add support for Python 3.8, 3.9, and 3.10 + +* Fix option declaration for Flake8 + +0.6.1 - 2017-01-26 +`````````````````` + +* Fix signature for ``PathGraphingAstVisitor.default`` to match the signature + for ``ASTVisitor`` + +0.6.0 - 2017-01-23 +`````````````````` + +* Add support for Python 3.6 + +* Fix handling for missing statement types + +0.5.3 - 2016-12-14 +`````````````````` + +* Report actual column number of violation instead of the start of the line + +0.5.2 - 2016-07-31 +`````````````````` + +* When opening files ourselves, make sure we always name the file variable + +0.5.1 - 2016-07-28 +`````````````````` + +* Set default maximum complexity to -1 on the class itself + +0.5.0 - 2016-05-30 +`````````````````` + +* PyCon 2016 PDX release + +* Add support for Flake8 3.0 + +0.4.0 - 2016-01-27 +`````````````````` + +* Stop testing on Python 3.2 + +* Add support for async/await keywords on Python 3.5 from PEP 0492 + +0.3.1 - 2015-06-14 +`````````````````` + +* Include ``test_mccabe.py`` in releases. + +* Always coerce the ``max_complexity`` value from Flake8's entry-point to an + integer. + +0.3 - 2014-12-17 +```````````````` + +* Computation was wrong: the mccabe complexity starts at 1, not 2. + +* The ``max-complexity`` value is now inclusive. E.g.: if the + value is 10 and the reported complexity is 10, then it passes. + +* Add tests. + + +0.2.1 - 2013-04-03 +`````````````````` + +* Do not require ``setuptools`` in setup.py. 
It works around an issue + with ``pip`` and Python 3. + + +0.2 - 2013-02-22 +```````````````` + +* Rename project to ``mccabe``. + +* Provide ``flake8.extension`` setuptools entry point. + +* Read ``max-complexity`` from the configuration file. + +* Rename argument ``min_complexity`` to ``threshold``. + + +0.1 - 2013-02-11 +```````````````` +* First release + + diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/RECORD b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/RECORD new file mode 100644 index 0000000..0aba4cb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/RECORD @@ -0,0 +1,8 @@ +mccabe.py,sha256=g_kB8oPilNLemdOirPaZymQyyjqAH0kowrncUQaaw00,10654 +mccabe-0.7.0.dist-info/LICENSE,sha256=EPvAA8uvims89xlbgNrJbIba85ADmyq_bntuc1r9fXQ,1221 +mccabe-0.7.0.dist-info/METADATA,sha256=oMxU_cw4ev2Q23YTL3NRg4pebHSqlrbF_DSSs-cpfBE,5035 +mccabe-0.7.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +mccabe-0.7.0.dist-info/entry_points.txt,sha256=N2NH182GXTUyTm8r8XMgadb9C-CRa5dUr1k8OC91uGE,47 +mccabe-0.7.0.dist-info/top_level.txt,sha256=21cXuqZE-lpcfAqqANvX9EjI1ED1p8zcViv064u3RKA,7 +mccabe-0.7.0.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +mccabe-0.7.0.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/WHEEL new file mode 100644 index 0000000..0b18a28 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/entry_points.txt b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/entry_points.txt new file mode 100644 index 0000000..cc6645b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[flake8.extension] +C90 = mccabe:McCabeChecker + diff --git a/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/top_level.txt new file mode 100644 index 0000000..8831b36 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe-0.7.0.dist-info/top_level.txt @@ -0,0 +1 @@ +mccabe diff --git a/myenv/lib/python3.9/site-packages/mccabe.py b/myenv/lib/python3.9/site-packages/mccabe.py new file mode 100644 index 0000000..5746504 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mccabe.py @@ -0,0 +1,346 @@ +""" Meager code path measurement tool. + Ned Batchelder + http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html + MIT License. 
+""" +from __future__ import with_statement + +import optparse +import sys +import tokenize + +from collections import defaultdict +try: + import ast + from ast import iter_child_nodes +except ImportError: # Python 2.5 + from flake8.util import ast, iter_child_nodes + +__version__ = '0.7.0' + + +class ASTVisitor(object): + """Performs a depth-first walk of the AST.""" + + def __init__(self): + self.node = None + self._cache = {} + + def default(self, node, *args): + for child in iter_child_nodes(node): + self.dispatch(child, *args) + + def dispatch(self, node, *args): + self.node = node + klass = node.__class__ + meth = self._cache.get(klass) + if meth is None: + className = klass.__name__ + meth = getattr(self.visitor, 'visit' + className, self.default) + self._cache[klass] = meth + return meth(node, *args) + + def preorder(self, tree, visitor, *args): + """Do preorder walk of tree using visitor""" + self.visitor = visitor + visitor.visit = self.dispatch + self.dispatch(tree, *args) # XXX *args make sense? + + +class PathNode(object): + def __init__(self, name, look="circle"): + self.name = name + self.look = look + + def to_dot(self): + print('node [shape=%s,label="%s"] %d;' % ( + self.look, self.name, self.dot_id())) + + def dot_id(self): + return id(self) + + +class PathGraph(object): + def __init__(self, name, entity, lineno, column=0): + self.name = name + self.entity = entity + self.lineno = lineno + self.column = column + self.nodes = defaultdict(list) + + def connect(self, n1, n2): + self.nodes[n1].append(n2) + # Ensure that the destination node is always counted. + self.nodes[n2] = [] + + def to_dot(self): + print('subgraph {') + for node in self.nodes: + node.to_dot() + for node, nexts in self.nodes.items(): + for next in nexts: + print('%s -- %s;' % (node.dot_id(), next.dot_id())) + print('}') + + def complexity(self): + """ Return the McCabe complexity for the graph. + V-E+2 + """ + num_edges = sum([len(n) for n in self.nodes.values()]) + num_nodes = len(self.nodes) + return num_edges - num_nodes + 2 + + +class PathGraphingAstVisitor(ASTVisitor): + """ A visitor for a parsed Abstract Syntax Tree which finds executable + statements. + """ + + def __init__(self): + super(PathGraphingAstVisitor, self).__init__() + self.classname = "" + self.graphs = {} + self.reset() + + def reset(self): + self.graph = None + self.tail = None + + def dispatch_list(self, node_list): + for node in node_list: + self.dispatch(node) + + def visitFunctionDef(self, node): + + if self.classname: + entity = '%s%s' % (self.classname, node.name) + else: + entity = node.name + + name = '%d:%d: %r' % (node.lineno, node.col_offset, entity) + + if self.graph is not None: + # closure + pathnode = self.appendPathNode(name) + self.tail = pathnode + self.dispatch_list(node.body) + bottom = PathNode("", look='point') + self.graph.connect(self.tail, bottom) + self.graph.connect(pathnode, bottom) + self.tail = bottom + else: + self.graph = PathGraph(name, entity, node.lineno, node.col_offset) + pathnode = PathNode(name) + self.tail = pathnode + self.dispatch_list(node.body) + self.graphs["%s%s" % (self.classname, node.name)] = self.graph + self.reset() + + visitAsyncFunctionDef = visitFunctionDef + + def visitClassDef(self, node): + old_classname = self.classname + self.classname += node.name + "." 
+ self.dispatch_list(node.body) + self.classname = old_classname + + def appendPathNode(self, name): + if not self.tail: + return + pathnode = PathNode(name) + self.graph.connect(self.tail, pathnode) + self.tail = pathnode + return pathnode + + def visitSimpleStatement(self, node): + if node.lineno is None: + lineno = 0 + else: + lineno = node.lineno + name = "Stmt %d" % lineno + self.appendPathNode(name) + + def default(self, node, *args): + if isinstance(node, ast.stmt): + self.visitSimpleStatement(node) + else: + super(PathGraphingAstVisitor, self).default(node, *args) + + def visitLoop(self, node): + name = "Loop %d" % node.lineno + self._subgraph(node, name) + + visitAsyncFor = visitFor = visitWhile = visitLoop + + def visitIf(self, node): + name = "If %d" % node.lineno + self._subgraph(node, name) + + def _subgraph(self, node, name, extra_blocks=()): + """create the subgraphs representing any `if` and `for` statements""" + if self.graph is None: + # global loop + self.graph = PathGraph(name, name, node.lineno, node.col_offset) + pathnode = PathNode(name) + self._subgraph_parse(node, pathnode, extra_blocks) + self.graphs["%s%s" % (self.classname, name)] = self.graph + self.reset() + else: + pathnode = self.appendPathNode(name) + self._subgraph_parse(node, pathnode, extra_blocks) + + def _subgraph_parse(self, node, pathnode, extra_blocks): + """parse the body and any `else` block of `if` and `for` statements""" + loose_ends = [] + self.tail = pathnode + self.dispatch_list(node.body) + loose_ends.append(self.tail) + for extra in extra_blocks: + self.tail = pathnode + self.dispatch_list(extra.body) + loose_ends.append(self.tail) + if node.orelse: + self.tail = pathnode + self.dispatch_list(node.orelse) + loose_ends.append(self.tail) + else: + loose_ends.append(pathnode) + if pathnode: + bottom = PathNode("", look='point') + for le in loose_ends: + self.graph.connect(le, bottom) + self.tail = bottom + + def visitTryExcept(self, node): + name = "TryExcept %d" % node.lineno + self._subgraph(node, name, extra_blocks=node.handlers) + + visitTry = visitTryExcept + + def visitWith(self, node): + name = "With %d" % node.lineno + self.appendPathNode(name) + self.dispatch_list(node.body) + + visitAsyncWith = visitWith + + +class McCabeChecker(object): + """McCabe cyclomatic complexity checker.""" + name = 'mccabe' + version = __version__ + _code = 'C901' + _error_tmpl = "C901 %r is too complex (%d)" + max_complexity = -1 + + def __init__(self, tree, filename): + self.tree = tree + + @classmethod + def add_options(cls, parser): + flag = '--max-complexity' + kwargs = { + 'default': -1, + 'action': 'store', + 'type': int, + 'help': 'McCabe complexity threshold', + 'parse_from_config': 'True', + } + config_opts = getattr(parser, 'config_options', None) + if isinstance(config_opts, list): + # Flake8 2.x + kwargs.pop('parse_from_config') + parser.add_option(flag, **kwargs) + parser.config_options.append('max-complexity') + else: + parser.add_option(flag, **kwargs) + + @classmethod + def parse_options(cls, options): + cls.max_complexity = int(options.max_complexity) + + def run(self): + if self.max_complexity < 0: + return + visitor = PathGraphingAstVisitor() + visitor.preorder(self.tree, visitor) + for graph in visitor.graphs.values(): + if graph.complexity() > self.max_complexity: + text = self._error_tmpl % (graph.entity, graph.complexity()) + yield graph.lineno, graph.column, text, type(self) + + +def get_code_complexity(code, threshold=7, filename='stdin'): + try: + tree = compile(code, filename, 
"exec", ast.PyCF_ONLY_AST) + except SyntaxError: + e = sys.exc_info()[1] + sys.stderr.write("Unable to parse %s: %s\n" % (filename, e)) + return 0 + + complx = [] + McCabeChecker.max_complexity = threshold + for lineno, offset, text, check in McCabeChecker(tree, filename).run(): + complx.append('%s:%d:1: %s' % (filename, lineno, text)) + + if len(complx) == 0: + return 0 + print('\n'.join(complx)) + return len(complx) + + +def get_module_complexity(module_path, threshold=7): + """Returns the complexity of a module""" + code = _read(module_path) + return get_code_complexity(code, threshold, filename=module_path) + + +def _read(filename): + if (2, 5) < sys.version_info < (3, 0): + with open(filename, 'rU') as f: + return f.read() + elif (3, 0) <= sys.version_info < (4, 0): + """Read the source code.""" + try: + with open(filename, 'rb') as f: + (encoding, _) = tokenize.detect_encoding(f.readline) + except (LookupError, SyntaxError, UnicodeError): + # Fall back if file encoding is improperly declared + with open(filename, encoding='latin-1') as f: + return f.read() + with open(filename, 'r', encoding=encoding) as f: + return f.read() + + +def main(argv=None): + if argv is None: + argv = sys.argv[1:] + opar = optparse.OptionParser() + opar.add_option("-d", "--dot", dest="dot", + help="output a graphviz dot file", action="store_true") + opar.add_option("-m", "--min", dest="threshold", + help="minimum complexity for output", type="int", + default=1) + + options, args = opar.parse_args(argv) + + code = _read(args[0]) + tree = compile(code, args[0], "exec", ast.PyCF_ONLY_AST) + visitor = PathGraphingAstVisitor() + visitor.preorder(tree, visitor) + + if options.dot: + print('graph {') + for graph in visitor.graphs.values(): + if (not options.threshold or + graph.complexity() >= options.threshold): + graph.to_dot() + print('}') + else: + for graph in visitor.graphs.values(): + if graph.complexity() >= options.threshold: + print(graph.name, graph.complexity()) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/myenv/lib/python3.9/site-packages/multipart/__init__.py b/myenv/lib/python3.9/site-packages/multipart/__init__.py new file mode 100644 index 0000000..cbe9211 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/__init__.py @@ -0,0 +1,19 @@ +from __future__ import absolute_import +import sys + +# This is the canonical package information. +__author__ = 'Andrew Dunham' +__license__ = 'Apache' +__copyright__ = "Copyright (c) 2012-2013, Andrew Dunham" + +# We get the version from a sub-file that can be automatically generated. +from ._version import __version__ + +from .multipart import ( + FormParser, + MultipartParser, + QuerystringParser, + OctetStreamParser, + create_form_parser, + parse_form, +) diff --git a/myenv/lib/python3.9/site-packages/multipart/_version.py b/myenv/lib/python3.9/site-packages/multipart/_version.py new file mode 100644 index 0000000..eead319 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/_version.py @@ -0,0 +1 @@ +__version__ = '0.0.5' diff --git a/myenv/lib/python3.9/site-packages/multipart/decoders.py b/myenv/lib/python3.9/site-packages/multipart/decoders.py new file mode 100644 index 0000000..06cb23f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/decoders.py @@ -0,0 +1,171 @@ +import base64 +import binascii + +from .exceptions import Base64Error, DecodeError + + +class Base64Decoder(object): + """This object provides an interface to decode a stream of Base64 data. 
It + is instantiated with an "underlying object", and whenever a write() + operation is performed, it will decode the incoming data as Base64, and + call write() on the underlying object. This is primarily used for decoding + form data encoded as Base64, but can be used for other purposes:: + + from multipart.decoders import Base64Decoder + fd = open("notb64.txt", "wb") + decoder = Base64Decoder(fd) + try: + decoder.write("Zm9vYmFy") # "foobar" in Base64 + decoder.finalize() + finally: + decoder.close() + + # The contents of "notb64.txt" should be "foobar". + + This object will also pass all finalize() and close() calls to the + underlying object, if the underlying object supports them. + + Note that this class maintains a cache of base64 chunks, so that a write of + arbitrary size can be performed. You must call :meth:`finalize` on this + object after all writes are completed to ensure that all data is flushed + to the underlying object. + + :param underlying: the underlying object to pass writes to + """ + + def __init__(self, underlying): + self.cache = bytearray() + self.underlying = underlying + + def write(self, data): + """Takes any input data provided, decodes it as base64, and passes it + on to the underlying object. If the data provided is invalid base64 + data, then this method will raise + a :class:`multipart.exceptions.DecodeError` + + :param data: base64 data to decode + """ + + # Prepend any cache info to our data. + if len(self.cache) > 0: + data = self.cache + data + + # Slice off a string that's a multiple of 4. + decode_len = (len(data) // 4) * 4 + val = data[:decode_len] + + # Decode and write, if we have any. + if len(val) > 0: + try: + decoded = base64.b64decode(val) + except Base64Error: + raise DecodeError('There was an error raised while decoding ' + 'base64-encoded data.') + + self.underlying.write(decoded) + + # Get the remaining bytes and save in our cache. + remaining_len = len(data) % 4 + if remaining_len > 0: + self.cache = data[-remaining_len:] + else: + self.cache = b'' + + # Return the length of the data to indicate no error. + return len(data) + + def close(self): + """Close this decoder. If the underlying object has a `close()` + method, this function will call it. + """ + if hasattr(self.underlying, 'close'): + self.underlying.close() + + def finalize(self): + """Finalize this object. This should be called when no more data + should be written to the stream. This function can raise a + :class:`multipart.exceptions.DecodeError` if there is some remaining + data in the cache. + + If the underlying object has a `finalize()` method, this function will + call it. + """ + if len(self.cache) > 0: + raise DecodeError('There are %d bytes remaining in the ' + 'Base64Decoder cache when finalize() is called' + % len(self.cache)) + + if hasattr(self.underlying, 'finalize'): + self.underlying.finalize() + + def __repr__(self): + return "%s(underlying=%r)" % (self.__class__.__name__, self.underlying) + + +class QuotedPrintableDecoder(object): + """This object provides an interface to decode a stream of quoted-printable + data. It is instantiated with an "underlying object", in the same manner + as the :class:`multipart.decoders.Base64Decoder` class. This class behaves + in exactly the same way, including maintaining a cache of quoted-printable + chunks. 
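A runnable sketch of Base64Decoder's streaming behaviour as implemented above, with the chunk split deliberately misaligned to exercise the 4-byte cache (a file-free variant using BytesIO):

    from io import BytesIO
    from multipart.decoders import Base64Decoder

    out = BytesIO()
    decoder = Base64Decoder(out)
    # "foobar" is "Zm9vYmFy" in Base64; split it across two writes so the
    # first chunk is not a multiple of 4 bytes.
    decoder.write(b"Zm9vYm")   # decodes "Zm9v" -> b"foo", caches b"Ym"
    decoder.write(b"Fy")       # cache + chunk = "YmFy" -> b"bar"
    decoder.finalize()         # raises DecodeError if bytes remain cached
    assert out.getvalue() == b"foobar"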
+ + :param underlying: the underlying object to pass writes to + """ + def __init__(self, underlying): + self.cache = b'' + self.underlying = underlying + + def write(self, data): + """Takes any input data provided, decodes it as quoted-printable, and + passes it on to the underlying object. + + :param data: quoted-printable data to decode + """ + # Prepend any cache info to our data. + if len(self.cache) > 0: + data = self.cache + data + + # If the last 2 characters have an '=' sign in it, then we won't be + # able to decode the encoded value and we'll need to save it for the + # next decoding step. + if data[-2:].find(b'=') != -1: + enc, rest = data[:-2], data[-2:] + else: + enc = data + rest = b'' + + # Encode and write, if we have data. + if len(enc) > 0: + self.underlying.write(binascii.a2b_qp(enc)) + + # Save remaining in cache. + self.cache = rest + return len(data) + + def close(self): + """Close this decoder. If the underlying object has a `close()` + method, this function will call it. + """ + if hasattr(self.underlying, 'close'): + self.underlying.close() + + def finalize(self): + """Finalize this object. This should be called when no more data + should be written to the stream. This function will not raise any + exceptions, but it may write more data to the underlying object if + there is data remaining in the cache. + + If the underlying object has a `finalize()` method, this function will + call it. + """ + # If we have a cache, write and then remove it. + if len(self.cache) > 0: + self.underlying.write(binascii.a2b_qp(self.cache)) + self.cache = b'' + + # Finalize our underlying stream. + if hasattr(self.underlying, 'finalize'): + self.underlying.finalize() + + def __repr__(self): + return "%s(underlying=%r)" % (self.__class__.__name__, self.underlying) diff --git a/myenv/lib/python3.9/site-packages/multipart/exceptions.py b/myenv/lib/python3.9/site-packages/multipart/exceptions.py new file mode 100644 index 0000000..98c5867 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/exceptions.py @@ -0,0 +1,58 @@ +import binascii + +from six import PY3 + + +class FormParserError(ValueError): + """Base error class for our form parser.""" + pass + + +class ParseError(FormParserError): + """This exception (or a subclass) is raised when there is an error while + parsing something. + """ + + #: This is the offset in the input data chunk (*NOT* the overall stream) in + #: which the parse error occured. It will be -1 if not specified. + offset = -1 + + +class MultipartParseError(ParseError): + """This is a specific error that is raised when the MultipartParser detects + an error while parsing. + """ + pass + + +class QuerystringParseError(ParseError): + """This is a specific error that is raised when the QuerystringParser + detects an error while parsing. + """ + pass + + +class DecodeError(ParseError): + """This exception is raised when there is a decoding error - for example + with the Base64Decoder or QuotedPrintableDecoder. + """ + pass + + +# On Python 3.3, IOError is the same as OSError, so we don't want to inherit +# from both of them. We handle this case below. +if IOError is not OSError: # pragma: no cover + class FileError(FormParserError, IOError, OSError): + """Exception class for problems with the File class.""" + pass +else: # pragma: no cover + class FileError(FormParserError, OSError): + """Exception class for problems with the File class.""" + pass + +# We check which version of Python we're on to figure out what error we need +# to catch for invalid Base64. 
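A matching sketch for the QuotedPrintableDecoder above; the sample bytes are arbitrary and chosen so that a trailing "=" (a possible escape start) lands in the cache between writes:

    from io import BytesIO
    from multipart.decoders import QuotedPrintableDecoder

    out = BytesIO()
    decoder = QuotedPrintableDecoder(out)
    decoder.write(b"foo=3Dbar")  # "=3D" decodes to "="
    decoder.write(b"=")          # held in the cache: could start an escape
    decoder.write(b"20baz")      # cache + chunk = "=20baz" -> b" baz"
    decoder.finalize()           # flushes any cached bytes through a2b_qp
    assert out.getvalue() == b"foo=bar baz"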
+if PY3: # pragma: no cover + Base64Error = binascii.Error +else: # pragma: no cover + Base64Error = TypeError diff --git a/myenv/lib/python3.9/site-packages/multipart/multipart.py b/myenv/lib/python3.9/site-packages/multipart/multipart.py new file mode 100644 index 0000000..27aaecb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/multipart.py @@ -0,0 +1,1911 @@ +from __future__ import with_statement, absolute_import, print_function + +from six import ( + binary_type, + text_type, + PY3, +) + +from .decoders import * +from .exceptions import * + +try: + from urlparse import parse_qs +except ImportError: + from urllib.parse import parse_qs + +import os +import re +import sys +import shutil +import logging +import tempfile +from io import BytesIO +from numbers import Number + +# Unique missing object. +_missing = object() + +# States for the querystring parser. +STATE_BEFORE_FIELD = 0 +STATE_FIELD_NAME = 1 +STATE_FIELD_DATA = 2 + +# States for the multipart parser +STATE_START = 0 +STATE_START_BOUNDARY = 1 +STATE_HEADER_FIELD_START = 2 +STATE_HEADER_FIELD = 3 +STATE_HEADER_VALUE_START = 4 +STATE_HEADER_VALUE = 5 +STATE_HEADER_VALUE_ALMOST_DONE = 6 +STATE_HEADERS_ALMOST_DONE = 7 +STATE_PART_DATA_START = 8 +STATE_PART_DATA = 9 +STATE_PART_DATA_END = 10 +STATE_END = 11 + +STATES = [ + "START", + "START_BOUNDARY", "HEADER_FEILD_START", "HEADER_FIELD", "HEADER_VALUE_START", "HEADER_VALUE", + "HEADER_VALUE_ALMOST_DONE", "HEADRES_ALMOST_DONE", "PART_DATA_START", "PART_DATA", "PART_DATA_END", "END" +] + + +# Flags for the multipart parser. +FLAG_PART_BOUNDARY = 1 +FLAG_LAST_BOUNDARY = 2 + +# Get constants. Since iterating over a str on Python 2 gives you a 1-length +# string, but iterating over a bytes object on Python 3 gives you an integer, +# we need to save these constants. +CR = b'\r'[0] +LF = b'\n'[0] +COLON = b':'[0] +SPACE = b' '[0] +HYPHEN = b'-'[0] +AMPERSAND = b'&'[0] +SEMICOLON = b';'[0] +LOWER_A = b'a'[0] +LOWER_Z = b'z'[0] +NULL = b'\x00'[0] + +# Lower-casing a character is different, because of the difference between +# str on Py2, and bytes on Py3. Same with getting the ordinal value of a byte, +# and joining a list of bytes together. +# These functions abstract that. +if PY3: # pragma: no cover + lower_char = lambda c: c | 0x20 + ord_char = lambda c: c + join_bytes = lambda b: bytes(list(b)) +else: # pragma: no cover + lower_char = lambda c: c.lower() + ord_char = lambda c: ord(c) + join_bytes = lambda b: b''.join(list(b)) + +# These are regexes for parsing header values. +SPECIAL_CHARS = re.escape(b'()<>@,;:\\"/[]?={} \t') +QUOTED_STR = br'"(?:\\.|[^"])*"' +VALUE_STR = br'(?:[^' + SPECIAL_CHARS + br']+|' + QUOTED_STR + br')' +OPTION_RE_STR = ( + br'(?:;|^)\s*([^' + SPECIAL_CHARS + br']+)\s*=\s*(' + VALUE_STR + br')' +) +OPTION_RE = re.compile(OPTION_RE_STR) +QUOTE = b'"'[0] + + +def parse_options_header(value): + """ + Parses a Content-Type header into a value in the following format: + (content_type, {parameters}) + """ + if not value: + return (b'', {}) + + # If we are passed a string, we assume that it conforms to WSGI and does + # not contain any code point that's not in latin-1. + if isinstance(value, text_type): # pragma: no cover + value = value.encode('latin-1') + + # If we have no options, return the string as-is. + if b';' not in value: + return (value.lower().strip(), {}) + + # Split at the first semicolon, to get our value and then options. + ctype, rest = value.split(b';', 1) + options = {} + + # Parse the options. 
+ for match in OPTION_RE.finditer(rest): + key = match.group(1).lower() + value = match.group(2) + if value[0] == QUOTE and value[-1] == QUOTE: + # Unquote the value. + value = value[1:-1] + value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"') + + # If the value is a filename, we need to fix a bug on IE6 that sends + # the full file path instead of the filename. + if key == b'filename': + if value[1:3] == b':\\' or value[:2] == b'\\\\': + value = value.split(b'\\')[-1] + + options[key] = value + + return ctype, options + + +class Field(object): + """A Field object represents a (parsed) form field. It represents a single + field with a corresponding name and value. + + The name that a :class:`Field` will be instantiated with is the same name + that would be found in the following HTML:: + + + + This class defines two methods, :meth:`on_data` and :meth:`on_end`, that + will be called when data is written to the Field, and when the Field is + finalized, respectively. + + :param name: the name of the form field + """ + def __init__(self, name): + self._name = name + self._value = [] + + # We cache the joined version of _value for speed. + self._cache = _missing + + @classmethod + def from_value(klass, name, value): + """Create an instance of a :class:`Field`, and set the corresponding + value - either None or an actual value. This method will also + finalize the Field itself. + + :param name: the name of the form field + :param value: the value of the form field - either a bytestring or + None + """ + + f = klass(name) + if value is None: + f.set_none() + else: + f.write(value) + f.finalize() + return f + + def write(self, data): + """Write some data into the form field. + + :param data: a bytestring + """ + return self.on_data(data) + + def on_data(self, data): + """This method is a callback that will be called whenever data is + written to the Field. + + :param data: a bytestring + """ + self._value.append(data) + self._cache = _missing + return len(data) + + def on_end(self): + """This method is called whenever the Field is finalized. + """ + if self._cache is _missing: + self._cache = b''.join(self._value) + + def finalize(self): + """Finalize the form field. + """ + self.on_end() + + def close(self): + """Close the Field object. This will free any underlying cache. + """ + # Free our value array. + if self._cache is _missing: + self._cache = b''.join(self._value) + + del self._value + + def set_none(self): + """Some fields in a querystring can possibly have a value of None - for + example, the string "foo&bar=&baz=asdf" will have a field with the + name "foo" and value None, one with name "bar" and value "", and one + with name "baz" and value "asdf". Since the write() interface doesn't + support writing None, this function will set the field value to None. + """ + self._cache = None + + @property + def field_name(self): + """This property returns the name of the field.""" + return self._name + + @property + def value(self): + """This property returns the value of the form field.""" + if self._cache is _missing: + self._cache = b''.join(self._value) + + return self._cache + + def __eq__(self, other): + if isinstance(other, Field): + return ( + self.field_name == other.field_name and + self.value == other.value + ) + else: + return NotImplemented + + def __repr__(self): + if len(self.value) > 97: + # We get the repr, and then insert three dots before the final + # quote. 
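A sketch of how parse_options_header and Field fit together (the header bytes and field names here are made up):

    from multipart.multipart import parse_options_header, Field

    ctype, opts = parse_options_header(
        b'form-data; name="upload"; filename="photo.jpg"'
    )
    assert ctype == b'form-data'
    assert opts == {b'name': b'upload', b'filename': b'photo.jpg'}

    # from_value() writes the value and finalizes the Field in one step.
    f = Field.from_value(b'upload', b'hello')
    assert f.field_name == b'upload'
    assert f.value == b'hello'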
+ v = repr(self.value[:97])[:-1] + "...'" + else: + v = repr(self.value) + + return "%s(field_name=%r, value=%s)" % ( + self.__class__.__name__, + self.field_name, + v + ) + + +class File(object): + """This class represents an uploaded file. It handles writing file data to + either an in-memory file or a temporary file on-disk, if the optional + threshold is passed. + + There are some options that can be passed to the File to change behavior + of the class. Valid options are as follows: + + .. list-table:: + :widths: 15 5 5 30 + :header-rows: 1 + + * - Name + - Type + - Default + - Description + * - UPLOAD_DIR + - `str` + - None + - The directory to store uploaded files in. If this is None, a + temporary file will be created in the system's standard location. + * - UPLOAD_DELETE_TMP + - `bool` + - True + - Delete automatically created TMP file + * - UPLOAD_KEEP_FILENAME + - `bool` + - False + - Whether or not to keep the filename of the uploaded file. If True, + then the filename will be converted to a safe representation (e.g. + by removing any invalid path segments), and then saved with the + same name). Otherwise, a temporary name will be used. + * - UPLOAD_KEEP_EXTENSIONS + - `bool` + - False + - Whether or not to keep the uploaded file's extension. If False, the + file will be saved with the default temporary extension (usually + ".tmp"). Otherwise, the file's extension will be maintained. Note + that this will properly combine with the UPLOAD_KEEP_FILENAME + setting. + * - MAX_MEMORY_FILE_SIZE + - `int` + - 1 MiB + - The maximum number of bytes of a File to keep in memory. By + default, the contents of a File are kept into memory until a certain + limit is reached, after which the contents of the File are written + to a temporary file. This behavior can be disabled by setting this + value to an appropriately large value (or, for example, infinity, + such as `float('inf')`. + + :param file_name: The name of the file that this :class:`File` represents + + :param field_name: The field name that uploaded this file. Note that this + can be None, if, for example, the file was uploaded + with Content-Type application/octet-stream + + :param config: The configuration for this File. See above for valid + configuration keys and their corresponding values. + """ + def __init__(self, file_name, field_name=None, config={}): + # Save configuration, set other variables default. + self.logger = logging.getLogger(__name__) + self._config = config + self._in_memory = True + self._bytes_written = 0 + self._fileobj = BytesIO() + + # Save the provided field/file name. + self._field_name = field_name + self._file_name = file_name + + # Our actual file name is None by default, since, depending on our + # config, we may not actually use the provided name. + self._actual_file_name = None + + # Split the extension from the filename. + if file_name is not None: + base, ext = os.path.splitext(file_name) + self._file_base = base + self._ext = ext + + @property + def field_name(self): + """The form field associated with this file. May be None if there isn't + one, for example when we have an application/octet-stream upload. + """ + return self._field_name + + @property + def file_name(self): + """The file name given in the upload request. + """ + return self._file_name + + @property + def actual_file_name(self): + """The file name that this file is saved as. Will be None if it's not + currently saved on disk. 
+ """ + return self._actual_file_name + + @property + def file_object(self): + """The file object that we're currently writing to. Note that this + will either be an instance of a :class:`io.BytesIO`, or a regular file + object. + """ + return self._fileobj + + @property + def size(self): + """The total size of this file, counted as the number of bytes that + currently have been written to the file. + """ + return self._bytes_written + + @property + def in_memory(self): + """A boolean representing whether or not this file object is currently + stored in-memory or on-disk. + """ + return self._in_memory + + def flush_to_disk(self): + """If the file is already on-disk, do nothing. Otherwise, copy from + the in-memory buffer to a disk file, and then reassign our internal + file object to this new disk file. + + Note that if you attempt to flush a file that is already on-disk, a + warning will be logged to this module's logger. + """ + if not self._in_memory: + self.logger.warning( + "Trying to flush to disk when we're not in memory" + ) + return + + # Go back to the start of our file. + self._fileobj.seek(0) + + # Open a new file. + new_file = self._get_disk_file() + + # Copy the file objects. + shutil.copyfileobj(self._fileobj, new_file) + + # Seek to the new position in our new file. + new_file.seek(self._bytes_written) + + # Reassign the fileobject. + old_fileobj = self._fileobj + self._fileobj = new_file + + # We're no longer in memory. + self._in_memory = False + + # Close the old file object. + old_fileobj.close() + + def _get_disk_file(self): + """This function is responsible for getting a file object on-disk for us. + """ + self.logger.info("Opening a file on disk") + + file_dir = self._config.get('UPLOAD_DIR') + keep_filename = self._config.get('UPLOAD_KEEP_FILENAME', False) + keep_extensions = self._config.get('UPLOAD_KEEP_EXTENSIONS', False) + delete_tmp = self._config.get('UPLOAD_DELETE_TMP', True) + + # If we have a directory and are to keep the filename... + if file_dir is not None and keep_filename: + self.logger.info("Saving with filename in: %r", file_dir) + + # Build our filename. + # TODO: what happens if we don't have a filename? + fname = self._file_base + if keep_extensions: + fname = fname + self._ext + + path = os.path.join(file_dir, fname) + try: + self.logger.info("Opening file: %r", path) + tmp_file = open(path, 'w+b') + except (IOError, OSError) as e: + tmp_file = None + + self.logger.exception("Error opening temporary file") + raise FileError("Error opening temporary file: %r" % path) + else: + # Build options array. + # Note that on Python 3, tempfile doesn't support byte names. We + # encode our paths using the default filesystem encoding. + options = {} + if keep_extensions: + ext = self._ext + if isinstance(ext, binary_type): + ext = ext.decode(sys.getfilesystemencoding()) + + options['suffix'] = ext + if file_dir is not None: + d = file_dir + if isinstance(d, binary_type): + d = d.decode(sys.getfilesystemencoding()) + + options['dir'] = d + options['delete'] = delete_tmp + + # Create a temporary (named) file with the appropriate settings. + self.logger.info("Creating a temporary file with options: %r", + options) + try: + tmp_file = tempfile.NamedTemporaryFile(**options) + except (IOError, OSError): + self.logger.exception("Error creating named temporary file") + raise FileError("Error creating named temporary file") + + fname = tmp_file.name + + # Encode filename as bytes. 
+ if isinstance(fname, text_type): + fname = fname.encode(sys.getfilesystemencoding()) + + self._actual_file_name = fname + return tmp_file + + def write(self, data): + """Write some data to the File. + + :param data: a bytestring + """ + return self.on_data(data) + + def on_data(self, data): + """This method is a callback that will be called whenever data is + written to the File. + + :param data: a bytestring + """ + pos = self._fileobj.tell() + bwritten = self._fileobj.write(data) + # true file objects write returns None + if bwritten is None: + bwritten = self._fileobj.tell() - pos + + # If the bytes written isn't the same as the length, just return. + if bwritten != len(data): + self.logger.warning("bwritten != len(data) (%d != %d)", bwritten, + len(data)) + return bwritten + + # Keep track of how many bytes we've written. + self._bytes_written += bwritten + + # If we're in-memory and are over our limit, we create a file. + if (self._in_memory and + self._config.get('MAX_MEMORY_FILE_SIZE') is not None and + (self._bytes_written > + self._config.get('MAX_MEMORY_FILE_SIZE'))): + self.logger.info("Flushing to disk") + self.flush_to_disk() + + # Return the number of bytes written. + return bwritten + + def on_end(self): + """This method is called whenever the Field is finalized. + """ + # Flush the underlying file object + self._fileobj.flush() + + def finalize(self): + """Finalize the form file. This will not close the underlying file, + but simply signal that we are finished writing to the File. + """ + self.on_end() + + def close(self): + """Close the File object. This will actually close the underlying + file object (whether it's a :class:`io.BytesIO` or an actual file + object). + """ + self._fileobj.close() + + def __repr__(self): + return "%s(file_name=%r, field_name=%r)" % ( + self.__class__.__name__, + self.file_name, + self.field_name + ) + + +class BaseParser(object): + """This class is the base class for all parsers. It contains the logic for + calling and adding callbacks. + + A callback can be one of two different forms. "Notification callbacks" are + callbacks that are called when something happens - for example, when a new + part of a multipart message is encountered by the parser. "Data callbacks" + are called when we get some sort of data - for example, part of the body of + a multipart chunk. Notification callbacks are called with no parameters, + whereas data callbacks are called with three, as follows:: + + data_callback(data, start, end) + + The "data" parameter is a bytestring (i.e. "foo" on Python 2, or b"foo" on + Python 3). "start" and "end" are integer indexes into the "data" string + that represent the data of interest. Thus, in a data callback, the slice + `data[start:end]` represents the data that the callback is "interested in". + The callback is not passed a copy of the data, since copying severely hurts + performance. + """ + def __init__(self): + self.logger = logging.getLogger(__name__) + + def callback(self, name, data=None, start=None, end=None): + """This function calls a provided callback with some data. If the + callback is not set, will do nothing. + + :param name: The name of the callback to call (as a string). + + :param data: Data to pass to the callback. If None, then it is + assumed that the callback is a notification callback, + and no parameters are given. + + :param end: An integer that is passed to the data callback. + + :param start: An integer that is passed to the data callback. 
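A sketch of the in-memory-to-disk spill the File class above implements, using a deliberately tiny MAX_MEMORY_FILE_SIZE so the flush is easy to observe:

    from multipart.multipart import File

    f = File(b'upload.bin', field_name=b'data',
             config={'MAX_MEMORY_FILE_SIZE': 8})
    f.write(b'12345')        # 5 bytes written: still held in a BytesIO
    assert f.in_memory
    f.write(b'6789')         # 9 bytes total: flushed to a NamedTemporaryFile
    assert not f.in_memory
    f.finalize()
    f.close()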
+ """ + name = "on_" + name + func = self.callbacks.get(name) + if func is None: + return + + # Depending on whether we're given a buffer... + if data is not None: + # Don't do anything if we have start == end. + if start is not None and start == end: + return + + self.logger.debug("Calling %s with data[%d:%d]", name, start, end) + func(data, start, end) + else: + self.logger.debug("Calling %s with no data", name) + func() + + def set_callback(self, name, new_func): + """Update the function for a callback. Removes from the callbacks dict + if new_func is None. + + :param name: The name of the callback to call (as a string). + + :param new_func: The new function for the callback. If None, then the + callback will be removed (with no error if it does not + exist). + """ + if new_func is None: + self.callbacks.pop('on_' + name, None) + else: + self.callbacks['on_' + name] = new_func + + def close(self): + pass # pragma: no cover + + def finalize(self): + pass # pragma: no cover + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + +class OctetStreamParser(BaseParser): + """This parser parses an octet-stream request body and calls callbacks when + incoming data is received. Callbacks are as follows: + + .. list-table:: + :widths: 15 10 30 + :header-rows: 1 + + * - Callback Name + - Parameters + - Description + * - on_start + - None + - Called when the first data is parsed. + * - on_data + - data, start, end + - Called for each data chunk that is parsed. + * - on_end + - None + - Called when the parser is finished parsing all data. + + :param callbacks: A dictionary of callbacks. See the documentation for + :class:`BaseParser`. + + :param max_size: The maximum size of body to parse. Defaults to infinity - + i.e. unbounded. + """ + def __init__(self, callbacks={}, max_size=float('inf')): + super(OctetStreamParser, self).__init__() + self.callbacks = callbacks + self._started = False + + if not isinstance(max_size, Number) or max_size < 1: + raise ValueError("max_size must be a positive number, not %r" % + max_size) + self.max_size = max_size + self._current_size = 0 + + def write(self, data): + """Write some data to the parser, which will perform size verification, + and then pass the data to the underlying callback. + + :param data: a bytestring + """ + if not self._started: + self.callback('start') + self._started = True + + # Truncate data length. + data_len = len(data) + if (self._current_size + data_len) > self.max_size: + # We truncate the length of data that we are to process. + new_size = int(self.max_size - self._current_size) + self.logger.warning("Current size is %d (max %d), so truncating " + "data length from %d to %d", + self._current_size, self.max_size, data_len, + new_size) + data_len = new_size + + # Increment size, then callback, in case there's an exception. + self._current_size += data_len + self.callback('data', data, 0, data_len) + return data_len + + def finalize(self): + """Finalize this parser, which signals to that we are finished parsing, + and sends the on_end callback. + """ + self.callback('end') + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + +class QuerystringParser(BaseParser): + """This is a streaming querystring parser. It will consume data, and call + the callbacks given when it has data. + + .. list-table:: + :widths: 15 10 30 + :header-rows: 1 + + * - Callback Name + - Parameters + - Description + * - on_field_start + - None + - Called when a new field is encountered. 
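The callback contract is easiest to see with OctetStreamParser, the simplest of the parsers above; a sketch wiring up its on_start/on_data/on_end callbacks (the payload is arbitrary):

    from multipart.multipart import OctetStreamParser

    chunks = []
    parser = OctetStreamParser(callbacks={
        'on_start': lambda: chunks.append(b'<start>'),
        'on_data': lambda data, start, end: chunks.append(data[start:end]),
        'on_end': lambda: chunks.append(b'<end>'),
    })
    parser.write(b'hello ')
    parser.write(b'world')
    parser.finalize()
    assert b''.join(chunks) == b'<start>hello world<end>'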
+ * - on_field_name + - data, start, end + - Called when a portion of a field's name is encountered. + * - on_field_data + - data, start, end + - Called when a portion of a field's data is encountered. + * - on_field_end + - None + - Called when the end of a field is encountered. + * - on_end + - None + - Called when the parser is finished parsing all data. + + :param callbacks: A dictionary of callbacks. See the documentation for + :class:`BaseParser`. + + :param strict_parsing: Whether or not to parse the body strictly. Defaults + to False. If this is set to True, then the behavior + of the parser changes as the following: if a field + has a value with an equal sign (e.g. "foo=bar", or + "foo="), it is always included. If a field has no + equals sign (e.g. "...&name&..."), it will be + treated as an error if 'strict_parsing' is True, + otherwise included. If an error is encountered, + then a + :class:`multipart.exceptions.QuerystringParseError` + will be raised. + + :param max_size: The maximum size of body to parse. Defaults to infinity - + i.e. unbounded. + """ + def __init__(self, callbacks={}, strict_parsing=False, + max_size=float('inf')): + super(QuerystringParser, self).__init__() + self.state = STATE_BEFORE_FIELD + self._found_sep = False + + self.callbacks = callbacks + + # Max-size stuff + if not isinstance(max_size, Number) or max_size < 1: + raise ValueError("max_size must be a positive number, not %r" % + max_size) + self.max_size = max_size + self._current_size = 0 + + # Should parsing be strict? + self.strict_parsing = strict_parsing + + def write(self, data): + """Write some data to the parser, which will perform size verification, + parse into either a field name or value, and then pass the + corresponding data to the underlying callback. If an error is + encountered while parsing, a QuerystringParseError will be raised. The + "offset" attribute of the raised exception will be set to the offset in + the input data chunk (NOT the overall stream) that caused the error. + + :param data: a bytestring + """ + # Handle sizing. + data_len = len(data) + if (self._current_size + data_len) > self.max_size: + # We truncate the length of data that we are to process. + new_size = int(self.max_size - self._current_size) + self.logger.warning("Current size is %d (max %d), so truncating " + "data length from %d to %d", + self._current_size, self.max_size, data_len, + new_size) + data_len = new_size + + l = 0 + try: + l = self._internal_write(data, data_len) + finally: + self._current_size += l + + return l + + def _internal_write(self, data, length): + state = self.state + strict_parsing = self.strict_parsing + found_sep = self._found_sep + + i = 0 + while i < length: + ch = data[i] + + # Depending on our state... + if state == STATE_BEFORE_FIELD: + # If the 'found_sep' flag is set, we've already encountered + # and skipped a single seperator. If so, we check our strict + # parsing flag and decide what to do. Otherwise, we haven't + # yet reached a seperator, and thus, if we do, we need to skip + # it as it will be the boundary between fields that's supposed + # to be there. + if ch == AMPERSAND or ch == SEMICOLON: + if found_sep: + # If we're parsing strictly, we disallow blank chunks. 
+ if strict_parsing: + e = QuerystringParseError( + "Skipping duplicate ampersand/semicolon at " + "%d" % i + ) + e.offset = i + raise e + else: + self.logger.debug("Skipping duplicate ampersand/" + "semicolon at %d", i) + else: + # This case is when we're skipping the (first) + # seperator between fields, so we just set our flag + # and continue on. + found_sep = True + else: + # Emit a field-start event, and go to that state. Also, + # reset the "found_sep" flag, for the next time we get to + # this state. + self.callback('field_start') + i -= 1 + state = STATE_FIELD_NAME + found_sep = False + + elif state == STATE_FIELD_NAME: + # Try and find a seperator - we ensure that, if we do, we only + # look for the equal sign before it. + sep_pos = data.find(b'&', i) + if sep_pos == -1: + sep_pos = data.find(b';', i) + + # See if we can find an equals sign in the remaining data. If + # so, we can immedately emit the field name and jump to the + # data state. + if sep_pos != -1: + equals_pos = data.find(b'=', i, sep_pos) + else: + equals_pos = data.find(b'=', i) + + if equals_pos != -1: + # Emit this name. + self.callback('field_name', data, i, equals_pos) + + # Jump i to this position. Note that it will then have 1 + # added to it below, which means the next iteration of this + # loop will inspect the character after the equals sign. + i = equals_pos + state = STATE_FIELD_DATA + else: + # No equals sign found. + if not strict_parsing: + # See also comments in the STATE_FIELD_DATA case below. + # If we found the seperator, we emit the name and just + # end - there's no data callback at all (not even with + # a blank value). + if sep_pos != -1: + self.callback('field_name', data, i, sep_pos) + self.callback('field_end') + + i = sep_pos - 1 + state = STATE_BEFORE_FIELD + else: + # Otherwise, no seperator in this block, so the + # rest of this chunk must be a name. + self.callback('field_name', data, i, length) + i = length + + else: + # We're parsing strictly. If we find a seperator, + # this is an error - we require an equals sign. + if sep_pos != -1: + e = QuerystringParseError( + "When strict_parsing is True, we require an " + "equals sign in all field chunks. Did not " + "find one in the chunk that starts at %d" % + (i,) + ) + e.offset = i + raise e + + # No seperator in the rest of this chunk, so it's just + # a field name. + self.callback('field_name', data, i, length) + i = length + + elif state == STATE_FIELD_DATA: + # Try finding either an ampersand or a semicolon after this + # position. + sep_pos = data.find(b'&', i) + if sep_pos == -1: + sep_pos = data.find(b';', i) + + # If we found it, callback this bit as data and then go back + # to expecting to find a field. + if sep_pos != -1: + self.callback('field_data', data, i, sep_pos) + self.callback('field_end') + + # Note that we go to the seperator, which brings us to the + # "before field" state. This allows us to properly emit + # "field_start" events only when we actually have data for + # a field of some sort. + i = sep_pos - 1 + state = STATE_BEFORE_FIELD + + # Otherwise, emit the rest as data and finish. 
+ else: + self.callback('field_data', data, i, length) + i = length + + else: # pragma: no cover (error case) + msg = "Reached an unknown state %d at %d" % (state, i) + self.logger.warning(msg) + e = QuerystringParseError(msg) + e.offset = i + raise e + + i += 1 + + self.state = state + self._found_sep = found_sep + return len(data) + + def finalize(self): + """Finalize this parser, which signals to that we are finished parsing, + if we're still in the middle of a field, an on_field_end callback, and + then the on_end callback. + """ + # If we're currently in the middle of a field, we finish it. + if self.state == STATE_FIELD_DATA: + self.callback('field_end') + self.callback('end') + + def __repr__(self): + return "%s(keep_blank_values=%r, strict_parsing=%r, max_size=%r)" % ( + self.__class__.__name__, + self.keep_blank_values, self.strict_parsing, self.max_size + ) + + +class MultipartParser(BaseParser): + """This class is a streaming multipart/form-data parser. + + .. list-table:: + :widths: 15 10 30 + :header-rows: 1 + + * - Callback Name + - Parameters + - Description + * - on_part_begin + - None + - Called when a new part of the multipart message is encountered. + * - on_part_data + - data, start, end + - Called when a portion of a part's data is encountered. + * - on_part_end + - None + - Called when the end of a part is reached. + * - on_header_begin + - None + - Called when we've found a new header in a part of a multipart + message + * - on_header_field + - data, start, end + - Called each time an additional portion of a header is read (i.e. the + part of the header that is before the colon; the "Foo" in + "Foo: Bar"). + * - on_header_value + - data, start, end + - Called when we get data for a header. + * - on_header_end + - None + - Called when the current header is finished - i.e. we've reached the + newline at the end of the header. + * - on_headers_finished + - None + - Called when all headers are finished, and before the part data + starts. + * - on_end + - None + - Called when the parser is finished parsing all data. + + + :param boundary: The multipart boundary. This is required, and must match + what is given in the HTTP request - usually in the + Content-Type header. + + :param callbacks: A dictionary of callbacks. See the documentation for + :class:`BaseParser`. + + :param max_size: The maximum size of body to parse. Defaults to infinity - + i.e. unbounded. + """ + + def __init__(self, boundary, callbacks={}, max_size=float('inf')): + # Initialize parser state. + super(MultipartParser, self).__init__() + self.state = STATE_START + self.index = self.flags = 0 + + self.callbacks = callbacks + + if not isinstance(max_size, Number) or max_size < 1: + raise ValueError("max_size must be a positive number, not %r" % + max_size) + self.max_size = max_size + self._current_size = 0 + + # Setup marks. These are used to track the state of data recieved. + self.marks = {} + + # TODO: Actually use this rather than the dumb version we currently use + # # Precompute the skip table for the Boyer-Moore-Horspool algorithm. + # skip = [len(boundary) for x in range(256)] + # for i in range(len(boundary) - 1): + # skip[ord_char(boundary[i])] = len(boundary) - i - 1 + # + # # We use a tuple since it's a constant, and marginally faster. + # self.skip = tuple(skip) + + # Save our boundary. + if isinstance(boundary, text_type): # pragma: no cover + boundary = boundary.encode('latin-1') + self.boundary = b'\r\n--' + boundary + + # Get a set of characters that belong to our boundary. 
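A sketch of the QuerystringParser above reassembling fields across an arbitrary chunk boundary (the field names and the split point are made up):

    from multipart.multipart import QuerystringParser

    fields, name, value = [], [], []
    parser = QuerystringParser(callbacks={
        'on_field_start': lambda: (name.clear(), value.clear()),
        'on_field_name': lambda d, s, e: name.append(d[s:e]),
        'on_field_data': lambda d, s, e: value.append(d[s:e]),
        'on_field_end': lambda: fields.append((b''.join(name), b''.join(value))),
    })
    parser.write(b'a=1&b=')  # a chunk may end mid-field...
    parser.write(b'2')       # ...the saved state carries over to this write
    parser.finalize()        # emits field_end for the field still in progress
    assert fields == [(b'a', b'1'), (b'b', b'2')]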
+ self.boundary_chars = frozenset(self.boundary) + + # We also create a lookbehind list. + # Note: the +8 is since we can have, at maximum, "\r\n--" + boundary + + # "--\r\n" at the final boundary, and the length of '\r\n--' and + # '--\r\n' is 8 bytes. + self.lookbehind = [NULL for x in range(len(boundary) + 8)] + + def write(self, data): + """Write some data to the parser, which will perform size verification, + and then parse the data into the appropriate location (e.g. header, + data, etc.), and pass this on to the underlying callback. If an error + is encountered, a MultipartParseError will be raised. The "offset" + attribute on the raised exception will be set to the offset of the byte + in the input chunk that caused the error. + + :param data: a bytestring + """ + # Handle sizing. + data_len = len(data) + if (self._current_size + data_len) > self.max_size: + # We truncate the length of data that we are to process. + new_size = int(self.max_size - self._current_size) + self.logger.warning("Current size is %d (max %d), so truncating " + "data length from %d to %d", + self._current_size, self.max_size, data_len, + new_size) + data_len = new_size + + l = 0 + try: + l = self._internal_write(data, data_len) + finally: + self._current_size += l + + return l + + def _internal_write(self, data, length): + # Get values from locals. + boundary = self.boundary + + # Get our state, flags and index. These are persisted between calls to + # this function. + state = self.state + index = self.index + flags = self.flags + + # Our index defaults to 0. + i = 0 + + # Set a mark. + def set_mark(name): + self.marks[name] = i + + # Remove a mark. + def delete_mark(name, reset=False): + self.marks.pop(name, None) + + # Helper function that makes calling a callback with data easier. The + # 'remaining' parameter will callback from the marked value until the + # end of the buffer, and reset the mark, instead of deleting it. This + # is used at the end of the function to call our callbacks with any + # remaining data in this chunk. + def data_callback(name, remaining=False): + marked_index = self.marks.get(name) + if marked_index is None: + return + + # If we're getting remaining data, we ignore the current i value + # and just call with the remaining data. + if remaining: + self.callback(name, data, marked_index, length) + self.marks[name] = 0 + + # Otherwise, we call it from the mark to the current byte we're + # processing. + else: + self.callback(name, data, marked_index, i) + self.marks.pop(name, None) + + # For each byte... + while i < length: + c = data[i] + + if state == STATE_START: + # Skip leading newlines + if c == CR or c == LF: + i += 1 + self.logger.debug("Skipping leading CR/LF at %d", i) + continue + + # index is used as in index into our boundary. Set to 0. + index = 0 + + # Move to the next state, but decrement i so that we re-process + # this character. + state = STATE_START_BOUNDARY + i -= 1 + + elif state == STATE_START_BOUNDARY: + # Check to ensure that the last 2 characters in our boundary + # are CRLF. + if index == len(boundary) - 2: + if c != CR: + # Error! + msg = "Did not find CR at end of boundary (%d)" % (i,) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + index += 1 + + elif index == len(boundary) - 2 + 1: + if c != LF: + msg = "Did not find LF at end of boundary (%d)" % (i,) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + # The index is now used for indexing into our boundary. 
+ index = 0 + + # Callback for the start of a part. + self.callback('part_begin') + + # Move to the next character and state. + state = STATE_HEADER_FIELD_START + + else: + # Check to ensure our boundary matches + if c != boundary[index + 2]: + msg = "Did not find boundary character %r at index " \ + "%d" % (c, index + 2) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + # Increment index into boundary and continue. + index += 1 + + elif state == STATE_HEADER_FIELD_START: + # Mark the start of a header field here, reset the index, and + # continue parsing our header field. + index = 0 + + # Set a mark of our header field. + set_mark('header_field') + + # Move to parsing header fields. + state = STATE_HEADER_FIELD + i -= 1 + + elif state == STATE_HEADER_FIELD: + # If we've reached a CR at the beginning of a header, it means + # that we've reached the second of 2 newlines, and so there are + # no more headers to parse. + if c == CR: + delete_mark('header_field') + state = STATE_HEADERS_ALMOST_DONE + i += 1 + continue + + # Increment our index in the header. + index += 1 + + # Do nothing if we encounter a hyphen. + if c == HYPHEN: + pass + + # If we've reached a colon, we're done with this header. + elif c == COLON: + # A 0-length header is an error. + if index == 1: + msg = "Found 0-length header at %d" % (i,) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + # Call our callback with the header field. + data_callback('header_field') + + # Move to parsing the header value. + state = STATE_HEADER_VALUE_START + + else: + # Lower-case this character, and ensure that it is in fact + # a valid letter. If not, it's an error. + cl = lower_char(c) + if cl < LOWER_A or cl > LOWER_Z: + msg = "Found non-alphanumeric character %r in " \ + "header at %d" % (c, i) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + elif state == STATE_HEADER_VALUE_START: + # Skip leading spaces. + if c == SPACE: + i += 1 + continue + + # Mark the start of the header value. + set_mark('header_value') + + # Move to the header-value state, reprocessing this character. + state = STATE_HEADER_VALUE + i -= 1 + + elif state == STATE_HEADER_VALUE: + # If we've got a CR, we're nearly done our headers. Otherwise, + # we do nothing and just move past this character. + if c == CR: + data_callback('header_value') + self.callback('header_end') + state = STATE_HEADER_VALUE_ALMOST_DONE + + elif state == STATE_HEADER_VALUE_ALMOST_DONE: + # The last character should be a LF. If not, it's an error. + if c != LF: + msg = "Did not find LF character at end of header " \ + "(found %r)" % (c,) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + # Move back to the start of another header. Note that if that + # state detects ANOTHER newline, it'll trigger the end of our + # headers. + state = STATE_HEADER_FIELD_START + + elif state == STATE_HEADERS_ALMOST_DONE: + # We're almost done our headers. This is reached when we parse + # a CR at the beginning of a header, so our next character + # should be a LF, or it's an error. + if c != LF: + msg = "Did not find LF at end of headers (found %r)" % (c,) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + self.callback('headers_finished') + state = STATE_PART_DATA_START + + elif state == STATE_PART_DATA_START: + # Mark the start of our part data. + set_mark('part_data') + + # Start processing part data, including this character. 
+ state = STATE_PART_DATA + i -= 1 + + elif state == STATE_PART_DATA: + # We're processing our part data right now. During this, we + # need to efficiently search for our boundary, since any data + # on any number of lines can be a part of the current data. + # We use the Boyer-Moore-Horspool algorithm to efficiently + # search through the remainder of the buffer looking for our + # boundary. + + # Save the current value of our index. We use this in case we + # find part of a boundary, but it doesn't match fully. + prev_index = index + + # Set up variables. + boundary_length = len(boundary) + boundary_end = boundary_length - 1 + data_length = length + boundary_chars = self.boundary_chars + + # If our index is 0, we're starting a new part, so start our + # search. + if index == 0: + # Search forward until we either hit the end of our buffer, + # or reach a character that's in our boundary. + i += boundary_end + while i < data_length - 1 and data[i] not in boundary_chars: + i += boundary_length + + # Reset i back the length of our boundary, which is the + # earliest possible location that could be our match (i.e. + # if we've just broken out of our loop since we saw the + # last character in our boundary) + i -= boundary_end + c = data[i] + + # Now, we have a couple of cases here. If our index is before + # the end of the boundary... + if index < boundary_length: + # If the character matches... + if boundary[index] == c: + # If we found a match for our boundary, we send the + # existing data. + if index == 0: + data_callback('part_data') + + # The current character matches, so continue! + index += 1 + else: + index = 0 + + # Our index is equal to the length of our boundary! + elif index == boundary_length: + # First we increment it. + index += 1 + + # Now, if we've reached a newline, we need to set this as + # the potential end of our boundary. + if c == CR: + flags |= FLAG_PART_BOUNDARY + + # Otherwise, if this is a hyphen, we might be at the last + # of all boundaries. + elif c == HYPHEN: + flags |= FLAG_LAST_BOUNDARY + + # Otherwise, we reset our index, since this isn't either a + # newline or a hyphen. + else: + index = 0 + + # Our index is right after the part boundary, which should be + # a LF. + elif index == boundary_length + 1: + # If we're at a part boundary (i.e. we've seen a CR + # character already)... + if flags & FLAG_PART_BOUNDARY: + # We need a LF character next. + if c == LF: + # Unset the part boundary flag. + flags &= (~FLAG_PART_BOUNDARY) + + # Callback indicating that we've reached the end of + # a part, and are starting a new one. + self.callback('part_end') + self.callback('part_begin') + + # Move to parsing new headers. + index = 0 + state = STATE_HEADER_FIELD_START + i += 1 + continue + + # We didn't find an LF character, so no match. Reset + # our index and clear our flag. + index = 0 + flags &= (~FLAG_PART_BOUNDARY) + + # Otherwise, if we're at the last boundary (i.e. we've + # seen a hyphen already)... + elif flags & FLAG_LAST_BOUNDARY: + # We need a second hyphen here. + if c == HYPHEN: + # Callback to end the current part, and then the + # message. + self.callback('part_end') + self.callback('end') + state = STATE_END + else: + # No match, so reset index. + index = 0 + + # If we have an index, we need to keep this byte for later, in + # case we can't match the full boundary. + if index > 0: + self.lookbehind[index - 1] = c + + # Otherwise, our index is 0. 
If the previous index is not, it + # means we reset something, and we need to take the data we + # thought was part of our boundary and send it along as actual + # data. + elif prev_index > 0: + # Callback to write the saved data. + lb_data = join_bytes(self.lookbehind) + self.callback('part_data', lb_data, 0, prev_index) + + # Overwrite our previous index. + prev_index = 0 + + # Re-set our mark for part data. + set_mark('part_data') + + # Re-consider the current character, since this could be + # the start of the boundary itself. + i -= 1 + + elif state == STATE_END: + # Do nothing and just consume a byte in the end state. + if c not in (CR, LF): + self.logger.warning("Consuming a byte '0x%x' in the end state", c) + + else: # pragma: no cover (error case) + # We got into a strange state somehow! Just stop processing. + msg = "Reached an unknown state %d at %d" % (state, i) + self.logger.warning(msg) + e = MultipartParseError(msg) + e.offset = i + raise e + + # Move to the next byte. + i += 1 + + # We call our callbacks with any remaining data. Note that we pass + # the 'remaining' flag, which sets the mark back to 0 instead of + # deleting it, if it's found. This is because, if the mark is found + # at this point, we assume that there's data for one of these things + # that has been parsed, but not yet emitted. And, as such, it implies + # that we haven't yet reached the end of this 'thing'. So, by setting + # the mark to 0, we cause any data callbacks that take place in future + # calls to this function to start from the beginning of that buffer. + data_callback('header_field', True) + data_callback('header_value', True) + data_callback('part_data', True) + + # Save values to locals. + self.state = state + self.index = index + self.flags = flags + + # Return our data length to indicate no errors, and that we processed + # all of it. + return length + + def finalize(self): + """Finalize this parser, which signals to that we are finished parsing. + + Note: It does not currently, but in the future, it will verify that we + are in the final state of the parser (i.e. the end of the multipart + message is well-formed), and, if not, throw an error. + """ + # TODO: verify that we're in the state STATE_END, otherwise throw an + # error or otherwise state that we're not finished parsing. + pass + + def __repr__(self): + return "%s(boundary=%r)" % (self.__class__.__name__, self.boundary) + + +class FormParser(object): + """This class is the all-in-one form parser. Given all the information + necessary to parse a form, it will instantiate the correct parser, create + the proper :class:`Field` and :class:`File` classes to store the data that + is parsed, and call the two given callbacks with each field and file as + they become available. + + :param content_type: The Content-Type of the incoming request. This is + used to select the appropriate parser. + + :param on_field: The callback to call when a field has been parsed and is + ready for usage. See above for parameters. + + :param on_file: The callback to call when a file has been parsed and is + ready for usage. See above for parameters. + + :param on_end: An optional callback to call when all fields and files in a + request has been parsed. Can be None. + + :param boundary: If the request is a multipart/form-data request, this + should be the boundary of the request, as given in the + Content-Type header, as a bytestring. 
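Putting the state machine above to work: a sketch feeding MultipartParser a minimal single-part body (the boundary and field name are arbitrary):

    from multipart.multipart import MultipartParser

    parts = []
    parser = MultipartParser(b'boundary', callbacks={
        'on_part_data': lambda d, s, e: parts.append(d[s:e]),
    })
    body = (b'--boundary\r\n'
            b'Content-Disposition: form-data; name="field"\r\n'
            b'\r\n'
            b'value\r\n'
            b'--boundary--\r\n')
    parser.write(body)
    parser.finalize()
    # The boundary bytes never reach the data callback; only the payload does.
    assert b''.join(parts) == b'value'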
+ + :param file_name: If the request is of type application/octet-stream, then + the body of the request will not contain any information + about the uploaded file. In such cases, you can provide + the file name of the uploaded file manually. + + :param FileClass: The class to use for uploaded files. Defaults to + :class:`File`, but you can provide your own class if you + wish to customize behaviour. The class will be + instantiated as FileClass(file_name, field_name), and it + must provide the folllowing functions:: + file_instance.write(data) + file_instance.finalize() + file_instance.close() + + :param FieldClass: The class to use for uploaded fields. Defaults to + :class:`Field`, but you can provide your own class if + you wish to customize behaviour. The class will be + instantiated as FieldClass(field_name), and it must + provide the folllowing functions:: + field_instance.write(data) + field_instance.finalize() + field_instance.close() + + :param config: Configuration to use for this FormParser. The default + values are taken from the DEFAULT_CONFIG value, and then + any keys present in this dictionary will overwrite the + default values. + + """ + #: This is the default configuration for our form parser. + #: Note: all file sizes should be in bytes. + DEFAULT_CONFIG = { + 'MAX_BODY_SIZE': float('inf'), + 'MAX_MEMORY_FILE_SIZE': 1 * 1024 * 1024, + 'UPLOAD_DIR': None, + 'UPLOAD_KEEP_FILENAME': False, + 'UPLOAD_KEEP_EXTENSIONS': False, + + # Error on invalid Content-Transfer-Encoding? + 'UPLOAD_ERROR_ON_BAD_CTE': False, + } + + def __init__(self, content_type, on_field, on_file, on_end=None, + boundary=None, file_name=None, FileClass=File, + FieldClass=Field, config={}): + + self.logger = logging.getLogger(__name__) + + # Save variables. + self.content_type = content_type + self.boundary = boundary + self.bytes_received = 0 + self.parser = None + + # Save callbacks. + self.on_field = on_field + self.on_file = on_file + self.on_end = on_end + + # Save classes. + self.FileClass = File + self.FieldClass = Field + + # Set configuration options. + self.config = self.DEFAULT_CONFIG.copy() + self.config.update(config) + + # Depending on the Content-Type, we instantiate the correct parser. + if content_type == 'application/octet-stream': + # Work around the lack of 'nonlocal' in Py2 + class vars(object): + f = None + + def on_start(): + vars.f = FileClass(file_name, None, config=self.config) + + def on_data(data, start, end): + vars.f.write(data[start:end]) + + def on_end(): + # Finalize the file itself. + vars.f.finalize() + + # Call our callback. + on_file(vars.f) + + # Call the on-end callback. + if self.on_end is not None: + self.on_end() + + callbacks = { + 'on_start': on_start, + 'on_data': on_data, + 'on_end': on_end, + } + + # Instantiate an octet-stream parser + parser = OctetStreamParser(callbacks, + max_size=self.config['MAX_BODY_SIZE']) + + elif (content_type == 'application/x-www-form-urlencoded' or + content_type == 'application/x-url-encoded'): + + name_buffer = [] + + class vars(object): + f = None + + def on_field_start(): + pass + + def on_field_name(data, start, end): + name_buffer.append(data[start:end]) + + def on_field_data(data, start, end): + if vars.f is None: + vars.f = FieldClass(b''.join(name_buffer)) + del name_buffer[:] + vars.f.write(data[start:end]) + + def on_field_end(): + # Finalize and call callback. + if vars.f is None: + # If we get here, it's because there was no field data. + # We create a field, set it to None, and then continue. 
+ vars.f = FieldClass(b''.join(name_buffer)) + del name_buffer[:] + vars.f.set_none() + + vars.f.finalize() + on_field(vars.f) + vars.f = None + + def on_end(): + if self.on_end is not None: + self.on_end() + + # Setup callbacks. + callbacks = { + 'on_field_start': on_field_start, + 'on_field_name': on_field_name, + 'on_field_data': on_field_data, + 'on_field_end': on_field_end, + 'on_end': on_end, + } + + # Instantiate parser. + parser = QuerystringParser( + callbacks=callbacks, + max_size=self.config['MAX_BODY_SIZE'] + ) + + elif content_type == 'multipart/form-data': + if boundary is None: + self.logger.error("No boundary given") + raise FormParserError("No boundary given") + + header_name = [] + header_value = [] + headers = {} + + # No 'nonlocal' on Python 2 :-( + class vars(object): + f = None + writer = None + is_file = False + + def on_part_begin(): + pass + + def on_part_data(data, start, end): + bytes_processed = vars.writer.write(data[start:end]) + # TODO: check for error here. + return bytes_processed + + def on_part_end(): + vars.f.finalize() + if vars.is_file: + on_file(vars.f) + else: + on_field(vars.f) + + def on_header_field(data, start, end): + header_name.append(data[start:end]) + + def on_header_value(data, start, end): + header_value.append(data[start:end]) + + def on_header_end(): + headers[b''.join(header_name)] = b''.join(header_value) + del header_name[:] + del header_value[:] + + def on_headers_finished(): + # Reset the 'is file' flag. + vars.is_file = False + + # Parse the content-disposition header. + # TODO: handle mixed case + content_disp = headers.get(b'Content-Disposition') + disp, options = parse_options_header(content_disp) + + # Get the field and filename. + field_name = options.get(b'name') + file_name = options.get(b'filename') + # TODO: check for errors + + # Create the proper class. + if file_name is None: + vars.f = FieldClass(field_name) + else: + vars.f = FileClass(file_name, field_name, config=self.config) + vars.is_file = True + + # Parse the given Content-Transfer-Encoding to determine what + # we need to do with the incoming data. + # TODO: check that we properly handle 8bit / 7bit encoding. + transfer_encoding = headers.get(b'Content-Transfer-Encoding', + b'7bit') + + if (transfer_encoding == b'binary' or + transfer_encoding == b'8bit' or + transfer_encoding == b'7bit'): + vars.writer = vars.f + + elif transfer_encoding == b'base64': + vars.writer = Base64Decoder(vars.f) + + elif transfer_encoding == b'quoted-printable': + vars.writer = QuotedPrintableDecoder(vars.f) + + else: + self.logger.warning("Unknown Content-Transfer-Encoding: " + "%r", transfer_encoding) + if self.config['UPLOAD_ERROR_ON_BAD_CTE']: + raise FormParserError( + 'Unknown Content-Transfer-Encoding "{0}"'.format( + transfer_encoding + ) + ) + else: + # If we aren't erroring, then we just treat this as an + # unencoded Content-Transfer-Encoding. + vars.writer = vars.f + + def on_end(): + vars.writer.finalize() + if self.on_end is not None: + self.on_end() + + # These are our callbacks for the parser. + callbacks = { + 'on_part_begin': on_part_begin, + 'on_part_data': on_part_data, + 'on_part_end': on_part_end, + 'on_header_field': on_header_field, + 'on_header_value': on_header_value, + 'on_header_end': on_header_end, + 'on_headers_finished': on_headers_finished, + 'on_end': on_end, + } + + # Instantiate a multipart parser. 
+ parser = MultipartParser(boundary, callbacks, + max_size=self.config['MAX_BODY_SIZE']) + + else: + self.logger.warning("Unknown Content-Type: %r", content_type) + raise FormParserError("Unknown Content-Type: {0}".format( + content_type + )) + + self.parser = parser + + def write(self, data): + """Write some data. The parser will forward this to the appropriate + underlying parser. + + :param data: a bytestring + """ + self.bytes_received += len(data) + # TODO: check the parser's return value for errors? + return self.parser.write(data) + + def finalize(self): + """Finalize the parser.""" + if self.parser is not None and hasattr(self.parser, 'finalize'): + self.parser.finalize() + + def close(self): + """Close the parser.""" + if self.parser is not None and hasattr(self.parser, 'close'): + self.parser.close() + + def __repr__(self): + return "%s(content_type=%r, parser=%r)" % ( + self.__class__.__name__, + self.content_type, + self.parser, + ) + + +def create_form_parser(headers, on_field, on_file, trust_x_headers=False, + config={}): + """This function is a helper function to aid in creating a FormParser + instances. Given a dictionary-like headers object, it will determine + the correct information needed, instantiate a FormParser with the + appropriate values and given callbacks, and then return the corresponding + parser. + + :param headers: A dictionary-like object of HTTP headers. The only + required header is Content-Type. + + :param on_field: Callback to call with each parsed field. + + :param on_file: Callback to call with each parsed file. + + :param trust_x_headers: Whether or not to trust information received from + certain X-Headers - for example, the file name from + X-File-Name. + + :param config: Configuration variables to pass to the FormParser. + """ + content_type = headers.get('Content-Type') + if content_type is None: + logging.getLogger(__name__).warning("No Content-Type header given") + raise ValueError("No Content-Type header given!") + + # Boundaries are optional (the FormParser will raise if one is needed + # but not given). + content_type, params = parse_options_header(content_type) + boundary = params.get(b'boundary') + + # We need content_type to be a string, not a bytes object. + content_type = content_type.decode('latin-1') + + # File names are optional. + file_name = headers.get('X-File-Name') + + # Instantiate a form parser. + form_parser = FormParser(content_type, + on_field, + on_file, + boundary=boundary, + file_name=file_name, + config=config) + + # Return our parser. + return form_parser + + +def parse_form(headers, input_stream, on_field, on_file, chunk_size=1048576, + **kwargs): + """This function is useful if you just want to parse a request body, + without too much work. Pass it a dictionary-like object of the request's + headers, and a file-like object for the input stream, along with two + callbacks that will get called whenever a field or file is parsed. + + :param headers: A dictionary-like object of HTTP headers. The only + required header is Content-Type. + + :param input_stream: A file-like object that represents the request body. + The read() method must return bytestrings. + + :param on_field: Callback to call with each parsed field. + + :param on_file: Callback to call with each parsed file. + + :param chunk_size: The maximum size to read from the input stream and write + to the parser at one time. Defaults to 1 MiB. + """ + + # Create our form parser. 
+ parser = create_form_parser(headers, on_field, on_file) + + # Read chunks of 100KiB and write to the parser, but never read more than + # the given Content-Length, if any. + content_length = headers.get('Content-Length') + if content_length is not None: + content_length = int(content_length) + else: + content_length = float('inf') + bytes_read = 0 + + while True: + # Read only up to the Content-Length given. + max_readable = min(content_length - bytes_read, 1048576) + buff = input_stream.read(max_readable) + + # Write to the parser and update our length. + parser.write(buff) + bytes_read += len(buff) + + # If we get a buffer that's smaller than the size requested, or if we + # have read up to our content length, we're done. + if len(buff) != max_readable or bytes_read == content_length: + break + + # Tell our parser that we're done writing data. + parser.finalize() diff --git a/myenv/lib/python3.9/site-packages/multipart/tests/__init__.py b/myenv/lib/python3.9/site-packages/multipart/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/multipart/tests/compat.py b/myenv/lib/python3.9/site-packages/multipart/tests/compat.py new file mode 100644 index 0000000..7a3479e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/tests/compat.py @@ -0,0 +1,138 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest + +import os +import re +import sys +import types +import functools + + +def ensure_in_path(path): + """ + Ensure that a given path is in the sys.path array + """ + if not os.path.isdir(path): + raise RuntimeError('Tried to add nonexisting path') + + def _samefile(x, y): + try: + return os.path.samefile(x, y) + except (IOError, OSError): + return False + except AttributeError: + # Probably on Windows. + path1 = os.path.abspath(x).lower() + path2 = os.path.abspath(y).lower() + return path1 == path2 + + # Remove existing copies of it. + for pth in sys.path: + if _samefile(pth, path): + sys.path.remove(pth) + + # Add it at the beginning. + sys.path.insert(0, path) + + +# Check if pytest is imported. If so, we use it to create marking decorators. +# If not, we just create a function that does nothing. +try: + import pytest +except ImportError: + pytest = None + +if pytest is not None: + slow_test = pytest.mark.slow_test + xfail = pytest.mark.xfail + +else: + slow_test = lambda x: x + + def xfail(*args, **kwargs): + if len(args) > 0 and isinstance(args[0], types.FunctionType): + return args[0] + + return lambda x: x + + +# We don't use the py.test parametrizing function, since it seems to break +# with unittest.TestCase subclasses. +def parametrize(field_names, field_values): + # If we're not given a list of field names, we make it. + if not isinstance(field_names, (tuple, list)): + field_names = (field_names,) + field_values = [(val,) for val in field_values] + + # Create a decorator that saves this list of field names and values on the + # function for later parametrizing. + def decorator(func): + func.__dict__['param_names'] = field_names + func.__dict__['param_values'] = field_values + return func + + return decorator + + +# This is a metaclass that actually performs the parametrization. 
+class ParametrizingMetaclass(type): + IDENTIFIER_RE = re.compile('[^A-Za-z0-9]') + + def __new__(klass, name, bases, attrs): + new_attrs = attrs.copy() + for attr_name, attr in attrs.items(): + # We only care about functions + if not isinstance(attr, types.FunctionType): + continue + + param_names = attr.__dict__.pop('param_names', None) + param_values = attr.__dict__.pop('param_values', None) + if param_names is None or param_values is None: + continue + + # Create multiple copies of the function. + for i, values in enumerate(param_values): + assert len(param_names) == len(values) + + # Get a repr of the values, and fix it to be a valid identifier + human = '_'.join( + [klass.IDENTIFIER_RE.sub('', repr(x)) for x in values] + ) + + # Create a new name. + # new_name = attr.__name__ + "_%d" % i + new_name = attr.__name__ + "__" + human + + # Create a replacement function. + def create_new_func(func, names, values): + # Create a kwargs dictionary. + kwargs = dict(zip(names, values)) + + @functools.wraps(func) + def new_func(self): + return func(self, **kwargs) + + # Manually set the name and return the new function. + new_func.__name__ = new_name + return new_func + + # Actually create the new function. + new_func = create_new_func(attr, param_names, values) + + # Save this new function in our attrs dict. + new_attrs[new_name] = new_func + + # Remove the old attribute from our new dictionary. + del new_attrs[attr_name] + + # We create the class as normal, except we use our new attributes. + return type.__new__(klass, name, bases, new_attrs) + + +# This is a class decorator that actually applies the above metaclass. +def parametrize_class(klass): + return ParametrizingMetaclass(klass.__name__, + klass.__bases__, + klass.__dict__) diff --git a/myenv/lib/python3.9/site-packages/multipart/tests/test_multipart.py b/myenv/lib/python3.9/site-packages/multipart/tests/test_multipart.py new file mode 100644 index 0000000..dbb7ff1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/multipart/tests/test_multipart.py @@ -0,0 +1,1306 @@ +# -*- coding: utf-8 -*- + +import os +import sys +import glob +import yaml +import base64 +import random +import tempfile +from .compat import ( + parametrize, + parametrize_class, + slow_test, + unittest, +) +from io import BytesIO +from six import binary_type, text_type + +from mock import MagicMock, Mock, patch + +from ..multipart import * + + +# Get the current directory for our later test cases. 
+curr_dir = os.path.abspath(os.path.dirname(__file__)) + + +def force_bytes(val): + if isinstance(val, text_type): + val = val.encode(sys.getfilesystemencoding()) + + return val + + +class TestField(unittest.TestCase): + def setUp(self): + self.f = Field('foo') + + def test_name(self): + self.assertEqual(self.f.field_name, 'foo') + + def test_data(self): + self.f.write(b'test123') + self.assertEqual(self.f.value, b'test123') + + def test_cache_expiration(self): + self.f.write(b'test') + self.assertEqual(self.f.value, b'test') + self.f.write(b'123') + self.assertEqual(self.f.value, b'test123') + + def test_finalize(self): + self.f.write(b'test123') + self.f.finalize() + self.assertEqual(self.f.value, b'test123') + + def test_close(self): + self.f.write(b'test123') + self.f.close() + self.assertEqual(self.f.value, b'test123') + + def test_from_value(self): + f = Field.from_value(b'name', b'value') + self.assertEqual(f.field_name, b'name') + self.assertEqual(f.value, b'value') + + f2 = Field.from_value(b'name', None) + self.assertEqual(f2.value, None) + + def test_equality(self): + f1 = Field.from_value(b'name', b'value') + f2 = Field.from_value(b'name', b'value') + + self.assertEqual(f1, f2) + + def test_equality_with_other(self): + f = Field.from_value(b'foo', b'bar') + self.assertFalse(f == b'foo') + self.assertFalse(b'foo' == f) + + def test_set_none(self): + f = Field(b'foo') + self.assertEqual(f.value, b'') + + f.set_none() + self.assertEqual(f.value, None) + + +class TestFile(unittest.TestCase): + def setUp(self): + self.c = {} + self.d = force_bytes(tempfile.mkdtemp()) + self.f = File(b'foo.txt', config=self.c) + + def assert_data(self, data): + f = self.f.file_object + f.seek(0) + self.assertEqual(f.read(), data) + f.seek(0) + f.truncate() + + def assert_exists(self): + full_path = os.path.join(self.d, self.f.actual_file_name) + self.assertTrue(os.path.exists(full_path)) + + def test_simple(self): + self.f.write(b'foobar') + self.assert_data(b'foobar') + + def test_invalid_write(self): + m = Mock() + m.write.return_value = 5 + self.f._fileobj = m + v = self.f.write(b'foobar') + self.assertEqual(v, 5) + + def test_file_fallback(self): + self.c['MAX_MEMORY_FILE_SIZE'] = 1 + + self.f.write(b'1') + self.assertTrue(self.f.in_memory) + self.assert_data(b'1') + + self.f.write(b'123') + self.assertFalse(self.f.in_memory) + self.assert_data(b'123') + + # Test flushing too. + old_obj = self.f.file_object + self.f.flush_to_disk() + self.assertFalse(self.f.in_memory) + self.assertIs(self.f.file_object, old_obj) + + def test_file_fallback_with_data(self): + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + self.f.write(b'1' * 10) + self.assertTrue(self.f.in_memory) + + self.f.write(b'2' * 10) + self.assertFalse(self.f.in_memory) + + self.assert_data(b'11111111112222222222') + + def test_file_name(self): + # Write to this dir. + self.c['UPLOAD_DIR'] = self.d + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + # Write. + self.f.write(b'12345678901') + self.assertFalse(self.f.in_memory) + + # Assert that the file exists + self.assertIsNotNone(self.f.actual_file_name) + self.assert_exists() + + def test_file_full_name(self): + # Write to this dir. + self.c['UPLOAD_DIR'] = self.d + self.c['UPLOAD_KEEP_FILENAME'] = True + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + # Write. 
+ self.f.write(b'12345678901') + self.assertFalse(self.f.in_memory) + + # Assert that the file exists + self.assertEqual(self.f.actual_file_name, b'foo') + self.assert_exists() + + def test_file_full_name_with_ext(self): + self.c['UPLOAD_DIR'] = self.d + self.c['UPLOAD_KEEP_FILENAME'] = True + self.c['UPLOAD_KEEP_EXTENSIONS'] = True + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + # Write. + self.f.write(b'12345678901') + self.assertFalse(self.f.in_memory) + + # Assert that the file exists + self.assertEqual(self.f.actual_file_name, b'foo.txt') + self.assert_exists() + + def test_file_full_name_with_ext(self): + self.c['UPLOAD_DIR'] = self.d + self.c['UPLOAD_KEEP_FILENAME'] = True + self.c['UPLOAD_KEEP_EXTENSIONS'] = True + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + # Write. + self.f.write(b'12345678901') + self.assertFalse(self.f.in_memory) + + # Assert that the file exists + self.assertEqual(self.f.actual_file_name, b'foo.txt') + self.assert_exists() + + def test_no_dir_with_extension(self): + self.c['UPLOAD_KEEP_EXTENSIONS'] = True + self.c['MAX_MEMORY_FILE_SIZE'] = 10 + + # Write. + self.f.write(b'12345678901') + self.assertFalse(self.f.in_memory) + + # Assert that the file exists + ext = os.path.splitext(self.f.actual_file_name)[1] + self.assertEqual(ext, b'.txt') + self.assert_exists() + + def test_invalid_dir_with_name(self): + # Write to this dir. + self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting')) + self.c['UPLOAD_KEEP_FILENAME'] = True + self.c['MAX_MEMORY_FILE_SIZE'] = 5 + + # Write. + with self.assertRaises(FileError): + self.f.write(b'1234567890') + + def test_invalid_dir_no_name(self): + # Write to this dir. + self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting')) + self.c['UPLOAD_KEEP_FILENAME'] = False + self.c['MAX_MEMORY_FILE_SIZE'] = 5 + + # Write. + with self.assertRaises(FileError): + self.f.write(b'1234567890') + + # TODO: test uploading two files with the same name. 
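The TestFile cases above exercise the vendored File class: writes are buffered in memory until MAX_MEMORY_FILE_SIZE is exceeded, after which the data is flushed to an on-disk temporary file (optionally into UPLOAD_DIR, keeping the original name and/or extension). A minimal sketch of that behaviour, assuming the vendored module is importable as multipart.multipart; the file and field names are illustrative only:

from multipart.multipart import File

# Spill to a temporary file once more than 4 bytes have been written.
config = {'MAX_MEMORY_FILE_SIZE': 4}
f = File(b'upload.txt', b'file', config=config)

f.write(b'abcd')
print(f.in_memory)   # True: still buffered in a BytesIO
f.write(b'efgh')
print(f.in_memory)   # False: flushed to a real temporary file on disk
f.finalize()
f.close()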
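For callers that only have a header mapping and a body stream, the parse_form helper defined earlier in this diff selects the right parser from the Content-Type header and streams the body through it in chunks. A minimal usage sketch, again assuming the vendored module is importable as multipart.multipart; the boundary, field and file names mirror the test fixtures above and are illustrative only:

from io import BytesIO
from multipart.multipart import parse_form

body = (b'--boundary\r\n'
        b'Content-Disposition: form-data; name="field"\r\n'
        b'\r\n'
        b'test1\r\n'
        b'--boundary\r\n'
        b'Content-Disposition: form-data; name="file"; filename="file.txt"\r\n'
        b'\r\n'
        b'test2\r\n'
        b'--boundary--\r\n')

def on_field(field):
    # Called once per completed form field.
    print('field:', field.field_name, field.value)

def on_file(f):
    # Called once per completed file upload; small files stay in memory.
    f.file_object.seek(0)
    print('file:', f.file_name, f.file_object.read())

parse_form({'Content-Type': 'multipart/form-data; boundary=boundary'},
           BytesIO(body), on_field, on_file)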
+ + +class TestParseOptionsHeader(unittest.TestCase): + def test_simple(self): + t, p = parse_options_header('application/json') + self.assertEqual(t, b'application/json') + self.assertEqual(p, {}) + + def test_blank(self): + t, p = parse_options_header('') + self.assertEqual(t, b'') + self.assertEqual(p, {}) + + def test_single_param(self): + t, p = parse_options_header('application/json;par=val') + self.assertEqual(t, b'application/json') + self.assertEqual(p, {b'par': b'val'}) + + def test_single_param_with_spaces(self): + t, p = parse_options_header(b'application/json; par=val') + self.assertEqual(t, b'application/json') + self.assertEqual(p, {b'par': b'val'}) + + def test_multiple_params(self): + t, p = parse_options_header(b'application/json;par=val;asdf=foo') + self.assertEqual(t, b'application/json') + self.assertEqual(p, {b'par': b'val', b'asdf': b'foo'}) + + def test_quoted_param(self): + t, p = parse_options_header(b'application/json;param="quoted"') + self.assertEqual(t, b'application/json') + self.assertEqual(p, {b'param': b'quoted'}) + + def test_quoted_param_with_semicolon(self): + t, p = parse_options_header(b'application/json;param="quoted;with;semicolons"') + self.assertEqual(p[b'param'], b'quoted;with;semicolons') + + def test_quoted_param_with_escapes(self): + t, p = parse_options_header(b'application/json;param="This \\" is \\" a \\" quote"') + self.assertEqual(p[b'param'], b'This " is " a " quote') + + def test_handles_ie6_bug(self): + t, p = parse_options_header(b'text/plain; filename="C:\\this\\is\\a\\path\\file.txt"') + + self.assertEqual(p[b'filename'], b'file.txt') + + +class TestBaseParser(unittest.TestCase): + def setUp(self): + self.b = BaseParser() + self.b.callbacks = {} + + def test_callbacks(self): + # The stupid list-ness is to get around lack of nonlocal on py2 + l = [0] + def on_foo(): + l[0] += 1 + + self.b.set_callback('foo', on_foo) + self.b.callback('foo') + self.assertEqual(l[0], 1) + + self.b.set_callback('foo', None) + self.b.callback('foo') + self.assertEqual(l[0], 1) + + +class TestQuerystringParser(unittest.TestCase): + def assert_fields(self, *args, **kwargs): + if kwargs.pop('finalize', True): + self.p.finalize() + + self.assertEqual(self.f, list(args)) + if kwargs.get('reset', True): + self.f = [] + + def setUp(self): + self.reset() + + def reset(self): + self.f = [] + + name_buffer = [] + data_buffer = [] + + def on_field_name(data, start, end): + name_buffer.append(data[start:end]) + + def on_field_data(data, start, end): + data_buffer.append(data[start:end]) + + def on_field_end(): + self.f.append(( + b''.join(name_buffer), + b''.join(data_buffer) + )) + + del name_buffer[:] + del data_buffer[:] + + callbacks = { + 'on_field_name': on_field_name, + 'on_field_data': on_field_data, + 'on_field_end': on_field_end + } + + self.p = QuerystringParser(callbacks) + + def test_simple_querystring(self): + self.p.write(b'foo=bar') + + self.assert_fields((b'foo', b'bar')) + + def test_querystring_blank_beginning(self): + self.p.write(b'&foo=bar') + + self.assert_fields((b'foo', b'bar')) + + def test_querystring_blank_end(self): + self.p.write(b'foo=bar&') + + self.assert_fields((b'foo', b'bar')) + + def test_multiple_querystring(self): + self.p.write(b'foo=bar&asdf=baz') + + self.assert_fields( + (b'foo', b'bar'), + (b'asdf', b'baz') + ) + + def test_streaming_simple(self): + self.p.write(b'foo=bar&') + self.assert_fields( + (b'foo', b'bar'), + finalize=False + ) + + self.p.write(b'asdf=baz') + self.assert_fields( + (b'asdf', b'baz') + ) + + def 
test_streaming_break(self): + self.p.write(b'foo=one') + self.assert_fields(finalize=False) + + self.p.write(b'two') + self.assert_fields(finalize=False) + + self.p.write(b'three') + self.assert_fields(finalize=False) + + self.p.write(b'&asd') + self.assert_fields( + (b'foo', b'onetwothree'), + finalize=False + ) + + self.p.write(b'f=baz') + self.assert_fields( + (b'asdf', b'baz') + ) + + def test_semicolon_seperator(self): + self.p.write(b'foo=bar;asdf=baz') + + self.assert_fields( + (b'foo', b'bar'), + (b'asdf', b'baz') + ) + + def test_too_large_field(self): + self.p.max_size = 15 + + # Note: len = 8 + self.p.write(b"foo=bar&") + self.assert_fields((b'foo', b'bar'), finalize=False) + + # Note: len = 8, only 7 bytes processed + self.p.write(b'a=123456') + self.assert_fields((b'a', b'12345')) + + def test_invalid_max_size(self): + with self.assertRaises(ValueError): + p = QuerystringParser(max_size=-100) + + def test_strict_parsing_pass(self): + data = b'foo=bar&another=asdf' + for first, last in split_all(data): + self.reset() + self.p.strict_parsing = True + + print("%r / %r" % (first, last)) + + self.p.write(first) + self.p.write(last) + self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) + + def test_strict_parsing_fail_double_sep(self): + data = b'foo=bar&&another=asdf' + for first, last in split_all(data): + self.reset() + self.p.strict_parsing = True + + cnt = 0 + with self.assertRaises(QuerystringParseError) as cm: + cnt += self.p.write(first) + cnt += self.p.write(last) + self.p.finalize() + + # The offset should occur at 8 bytes into the data (as a whole), + # so we calculate the offset into the chunk. + if cm is not None: + self.assertEqual(cm.exception.offset, 8 - cnt) + + def test_double_sep(self): + data = b'foo=bar&&another=asdf' + for first, last in split_all(data): + print(" %r / %r " % (first, last)) + self.reset() + + cnt = 0 + cnt += self.p.write(first) + cnt += self.p.write(last) + + self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) + + def test_strict_parsing_fail_no_value(self): + self.p.strict_parsing = True + with self.assertRaises(QuerystringParseError) as cm: + self.p.write(b'foo=bar&blank&another=asdf') + + if cm is not None: + self.assertEqual(cm.exception.offset, 8) + + def test_success_no_value(self): + self.p.write(b'foo=bar&blank&another=asdf') + self.assert_fields( + (b'foo', b'bar'), + (b'blank', b''), + (b'another', b'asdf') + ) + + +class TestOctetStreamParser(unittest.TestCase): + def setUp(self): + self.d = [] + self.started = 0 + self.finished = 0 + + def on_start(): + self.started += 1 + + def on_data(data, start, end): + self.d.append(data[start:end]) + + def on_end(): + self.finished += 1 + + callbacks = { + 'on_start': on_start, + 'on_data': on_data, + 'on_end': on_end + } + + self.p = OctetStreamParser(callbacks) + + def assert_data(self, data, finalize=True): + self.assertEqual(b''.join(self.d), data) + self.d = [] + + def assert_started(self, val=True): + if val: + self.assertEqual(self.started, 1) + else: + self.assertEqual(self.started, 0) + + def assert_finished(self, val=True): + if val: + self.assertEqual(self.finished, 1) + else: + self.assertEqual(self.finished, 0) + + def test_simple(self): + # Assert is not started + self.assert_started(False) + + # Write something, it should then be started + have data + self.p.write(b'foobar') + self.assert_started() + self.assert_data(b'foobar') + + # Finalize, and check + self.assert_finished(False) + self.p.finalize() + self.assert_finished() + + def test_multiple_chunks(self): 
+ self.p.write(b'foo') + self.p.write(b'bar') + self.p.write(b'baz') + self.p.finalize() + + self.assert_data(b'foobarbaz') + self.assert_finished() + + def test_max_size(self): + self.p.max_size = 5 + + self.p.write(b'0123456789') + self.p.finalize() + + self.assert_data(b'01234') + self.assert_finished() + + def test_invalid_max_size(self): + with self.assertRaises(ValueError): + q = OctetStreamParser(max_size='foo') + + +class TestBase64Decoder(unittest.TestCase): + # Note: base64('foobar') == 'Zm9vYmFy' + def setUp(self): + self.f = BytesIO() + self.d = Base64Decoder(self.f) + + def assert_data(self, data, finalize=True): + if finalize: + self.d.finalize() + + self.f.seek(0) + self.assertEqual(self.f.read(), data) + self.f.seek(0) + self.f.truncate() + + def test_simple(self): + self.d.write(b'Zm9vYmFy') + self.assert_data(b'foobar') + + def test_bad(self): + with self.assertRaises(DecodeError): + self.d.write(b'Zm9v!mFy') + + def test_split_properly(self): + self.d.write(b'Zm9v') + self.d.write(b'YmFy') + self.assert_data(b'foobar') + + def test_bad_split(self): + buff = b'Zm9v' + for i in range(1, 4): + first, second = buff[:i], buff[i:] + + self.setUp() + self.d.write(first) + self.d.write(second) + self.assert_data(b'foo') + + def test_long_bad_split(self): + buff = b'Zm9vYmFy' + for i in range(5, 8): + first, second = buff[:i], buff[i:] + + self.setUp() + self.d.write(first) + self.d.write(second) + self.assert_data(b'foobar') + + def test_close_and_finalize(self): + parser = Mock() + f = Base64Decoder(parser) + + f.finalize() + parser.finalize.assert_called_once_with() + + f.close() + parser.close.assert_called_once_with() + + def test_bad_length(self): + self.d.write(b'Zm9vYmF') # missing ending 'y' + + with self.assertRaises(DecodeError): + self.d.finalize() + + +class TestQuotedPrintableDecoder(unittest.TestCase): + def setUp(self): + self.f = BytesIO() + self.d = QuotedPrintableDecoder(self.f) + + def assert_data(self, data, finalize=True): + if finalize: + self.d.finalize() + + self.f.seek(0) + self.assertEqual(self.f.read(), data) + self.f.seek(0) + self.f.truncate() + + def test_simple(self): + self.d.write(b'foobar') + self.assert_data(b'foobar') + + def test_with_escape(self): + self.d.write(b'foo=3Dbar') + self.assert_data(b'foo=bar') + + def test_with_newline_escape(self): + self.d.write(b'foo=\r\nbar') + self.assert_data(b'foobar') + + def test_with_only_newline_escape(self): + self.d.write(b'foo=\nbar') + self.assert_data(b'foobar') + + def test_with_split_escape(self): + self.d.write(b'foo=3') + self.d.write(b'Dbar') + self.assert_data(b'foo=bar') + + def test_with_split_newline_escape_1(self): + self.d.write(b'foo=\r') + self.d.write(b'\nbar') + self.assert_data(b'foobar') + + def test_with_split_newline_escape_2(self): + self.d.write(b'foo=') + self.d.write(b'\r\nbar') + self.assert_data(b'foobar') + + def test_close_and_finalize(self): + parser = Mock() + f = QuotedPrintableDecoder(parser) + + f.finalize() + parser.finalize.assert_called_once_with() + + f.close() + parser.close.assert_called_once_with() + + def test_not_aligned(self): + """ + https://github.com/andrew-d/python-multipart/issues/6 + """ + self.d.write(b'=3AX') + self.assert_data(b':X') + + # Additional offset tests + self.d.write(b'=3') + self.d.write(b'AX') + self.assert_data(b':X') + + self.d.write(b'q=3AX') + self.assert_data(b'q:X') + + +# Load our list of HTTP test cases. +http_tests_dir = os.path.join(curr_dir, 'test_data', 'http') + +# Read in all test cases and load them. 
+NON_PARAMETRIZED_TESTS = set(['single_field_blocks']) +http_tests = [] +for f in os.listdir(http_tests_dir): + # Only load the HTTP test cases. + fname, ext = os.path.splitext(f) + if fname in NON_PARAMETRIZED_TESTS: + continue + + if ext == '.http': + # Get the YAML file and load it too. + yaml_file = os.path.join(http_tests_dir, fname + '.yaml') + + # Load both. + with open(os.path.join(http_tests_dir, f), 'rb') as f: + test_data = f.read() + + with open(yaml_file, 'rb') as f: + yaml_data = yaml.load(f) + + http_tests.append({ + 'name': fname, + 'test': test_data, + 'result': yaml_data + }) + + +def split_all(val): + """ + This function will split an array all possible ways. For example: + split_all([1,2,3,4]) + will give: + ([1], [2,3,4]), ([1,2], [3,4]), ([1,2,3], [4]) + """ + for i in range(1, len(val) - 1): + yield (val[:i], val[i:]) + + +@parametrize_class +class TestFormParser(unittest.TestCase): + def make(self, boundary, config={}): + self.ended = False + self.files = [] + self.fields = [] + + def on_field(f): + self.fields.append(f) + + def on_file(f): + self.files.append(f) + + def on_end(): + self.ended = True + + # Get a form-parser instance. + self.f = FormParser('multipart/form-data', on_field, on_file, on_end, + boundary=boundary, config=config) + + def assert_file_data(self, f, data): + o = f.file_object + o.seek(0) + file_data = o.read() + self.assertEqual(file_data, data) + + def assert_file(self, field_name, file_name, data): + # Find this file. + found = None + for f in self.files: + if f.field_name == field_name: + found = f + break + + # Assert that we found it. + self.assertIsNotNone(found) + + try: + # Assert about this file. + self.assert_file_data(found, data) + self.assertEqual(found.file_name, file_name) + + # Remove it from our list. + self.files.remove(found) + finally: + # Close our file + found.close() + + def assert_field(self, name, value): + # Find this field in our fields list. + found = None + for f in self.fields: + if f.field_name == name: + found = f + break + + # Assert that it exists and matches. + self.assertIsNotNone(found) + self.assertEqual(value, found.value) + + # Remove it for future iterations. + self.fields.remove(found) + + @parametrize('param', http_tests) + def test_http(self, param): + # Firstly, create our parser with the given boundary. + boundary = param['result']['boundary'] + if isinstance(boundary, text_type): + boundary = boundary.encode('latin-1') + self.make(boundary) + + # Now, we feed the parser with data. + exc = None + try: + processed = self.f.write(param['test']) + self.f.finalize() + except MultipartParseError as e: + processed = 0 + exc = e + + # print(repr(param)) + # print("") + # print(repr(self.fields)) + # print(repr(self.files)) + + # Do we expect an error? + if 'error' in param['result']['expected']: + self.assertIsNotNone(exc) + self.assertEqual(param['result']['expected']['error'], exc.offset) + return + + # No error! + self.assertEqual(processed, len(param['test'])) + + # Assert that the parser gave us the appropriate fields/files. + for e in param['result']['expected']: + # Get our type and name. + type = e['type'] + name = e['name'].encode('latin-1') + + if type == 'field': + self.assert_field(name, e['data']) + + elif type == 'file': + self.assert_file( + name, + e['file_name'].encode('latin-1'), + e['data'] + ) + + else: + assert False + + def test_random_splitting(self): + """ + This test runs a simple multipart body with one field and one file + through every possible split. + """ + # Load test data. 
+ test_file = 'single_field_single_file.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + # We split the file through all cases. + for first, last in split_all(test_data): + # Create form parser. + self.make('boundary') + + # Feed with data in 2 chunks. + i = 0 + i += self.f.write(first) + i += self.f.write(last) + self.f.finalize() + + # Assert we processed everything. + self.assertEqual(i, len(test_data)) + + # Assert that our file and field are here. + self.assert_field(b'field', b'test1') + self.assert_file(b'file', b'file.txt', b'test2') + + def test_feed_single_bytes(self): + """ + This test parses a simple multipart body 1 byte at a time. + """ + # Load test data. + test_file = 'single_field_single_file.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + # Create form parser. + self.make('boundary') + + # Write all bytes. + # NOTE: Can't simply do `for b in test_data`, since that gives + # an integer when iterating over a bytes object on Python 3. + i = 0 + for x in range(len(test_data)): + b = test_data[x:x + 1] + i += self.f.write(b) + + self.f.finalize() + + # Assert we processed everything. + self.assertEqual(i, len(test_data)) + + # Assert that our file and field are here. + self.assert_field(b'field', b'test1') + self.assert_file(b'file', b'file.txt', b'test2') + + def test_feed_blocks(self): + """ + This test parses a simple multipart body 1 byte at a time. + """ + # Load test data. + test_file = 'single_field_blocks.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + for c in range(1, len(test_data) + 1): + # Skip first `d` bytes - not interesting + for d in range(c): + + # Create form parser. + self.make('boundary') + # Skip + i = 0 + self.f.write(test_data[:d]) + i += d + for x in range(d, len(test_data), c): + # Write a chunk to achieve condition + # `i == data_length - 1` + # in boundary search loop (multipatr.py:1302) + b = test_data[x:x + c] + i += self.f.write(b) + + self.f.finalize() + + # Assert we processed everything. + self.assertEqual(i, len(test_data)) + + # Assert that our field is here. + self.assert_field(b'field', + b'0123456789ABCDEFGHIJ0123456789ABCDEFGHIJ') + + @slow_test + def test_request_body_fuzz(self): + """ + This test randomly fuzzes the request body to ensure that no strange + exceptions are raised and we don't end up in a strange state. The + fuzzing consists of randomly doing one of the following: + - Adding a random byte at a random offset + - Randomly deleting a single byte + - Randomly swapping two bytes + """ + # Load test data. + test_file = 'single_field_single_file.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + iterations = 1000 + successes = 0 + failures = 0 + exceptions = 0 + + print("Running %d iterations of fuzz testing:" % (iterations,)) + for i in range(iterations): + # Create a bytearray to mutate. + fuzz_data = bytearray(test_data) + + # Pick what we're supposed to do. + choice = random.choice([1, 2, 3]) + if choice == 1: + # Add a random byte. + i = random.randrange(len(test_data)) + b = random.randrange(256) + + fuzz_data.insert(i, b) + msg = "Inserting byte %r at offset %d" % (b, i) + + elif choice == 2: + # Remove a random byte. + i = random.randrange(len(test_data)) + del fuzz_data[i] + + msg = "Deleting byte at offset %d" % (i,) + + elif choice == 3: + # Swap two bytes. 
+ i = random.randrange(len(test_data) - 1) + fuzz_data[i], fuzz_data[i + 1] = fuzz_data[i + 1], fuzz_data[i] + + msg = "Swapping bytes %d and %d" % (i, i + 1) + + # Print message, so if this crashes, we can inspect the output. + print(" " + msg) + + # Create form parser. + self.make('boundary') + + # Feed with data, and ignore form parser exceptions. + i = 0 + try: + i = self.f.write(bytes(fuzz_data)) + self.f.finalize() + except FormParserError: + exceptions += 1 + else: + if i == len(fuzz_data): + successes += 1 + else: + failures += 1 + + print("--------------------------------------------------") + print("Successes: %d" % (successes,)) + print("Failures: %d" % (failures,)) + print("Exceptions: %d" % (exceptions,)) + + @slow_test + def test_request_body_fuzz_random_data(self): + """ + This test will fuzz the multipart parser with some number of iterations + of randomly-generated data. + """ + iterations = 1000 + successes = 0 + failures = 0 + exceptions = 0 + + print("Running %d iterations of fuzz testing:" % (iterations,)) + for i in range(iterations): + data_size = random.randrange(100, 4096) + data = os.urandom(data_size) + print(" Testing with %d random bytes..." % (data_size,)) + + # Create form parser. + self.make('boundary') + + # Feed with data, and ignore form parser exceptions. + i = 0 + try: + i = self.f.write(bytes(data)) + self.f.finalize() + except FormParserError: + exceptions += 1 + else: + if i == len(data): + successes += 1 + else: + failures += 1 + + print("--------------------------------------------------") + print("Successes: %d" % (successes,)) + print("Failures: %d" % (failures,)) + print("Exceptions: %d" % (exceptions,)) + + def test_bad_start_boundary(self): + self.make('boundary') + data = b'--boundary\rfoobar' + with self.assertRaises(MultipartParseError): + self.f.write(data) + + self.make('boundary') + data = b'--boundaryfoobar' + with self.assertRaises(MultipartParseError): + i = self.f.write(data) + + def test_octet_stream(self): + files = [] + def on_file(f): + files.append(f) + on_field = Mock() + on_end = Mock() + + f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt') + self.assertTrue(isinstance(f.parser, OctetStreamParser)) + + f.write(b'test') + f.write(b'1234') + f.finalize() + + # Assert that we only recieved a single file, with the right data, and that we're done. + self.assertFalse(on_field.called) + self.assertEqual(len(files), 1) + self.assert_file_data(files[0], b'test1234') + self.assertTrue(on_end.called) + + def test_querystring(self): + fields = [] + def on_field(f): + fields.append(f) + on_file = Mock() + on_end = Mock() + + def simple_test(f): + # Reset tracking. + del fields[:] + on_file.reset_mock() + on_end.reset_mock() + + # Write test data. + f.write(b'foo=bar') + f.write(b'&test=asdf') + f.finalize() + + # Assert we only recieved 2 fields... + self.assertFalse(on_file.called) + self.assertEqual(len(fields), 2) + + # ...assert that we have the correct data... + self.assertEqual(fields[0].field_name, b'foo') + self.assertEqual(fields[0].value, b'bar') + + self.assertEqual(fields[1].field_name, b'test') + self.assertEqual(fields[1].value, b'asdf') + + # ... and assert that we've finished. 
+ self.assertTrue(on_end.called) + + f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) + self.assertTrue(isinstance(f.parser, QuerystringParser)) + simple_test(f) + + f = FormParser('application/x-url-encoded', on_field, on_file, on_end=on_end) + self.assertTrue(isinstance(f.parser, QuerystringParser)) + simple_test(f) + + def test_close_methods(self): + parser = Mock() + f = FormParser('application/x-url-encoded', None, None) + f.parser = parser + + f.finalize() + parser.finalize.assert_called_once_with() + + f.close() + parser.close.assert_called_once_with() + + def test_bad_content_type(self): + # We should raise a ValueError for a bad Content-Type + with self.assertRaises(ValueError): + f = FormParser('application/bad', None, None) + + def test_no_boundary_given(self): + # We should raise a FormParserError when parsing a multipart message + # without a boundary. + with self.assertRaises(FormParserError): + f = FormParser('multipart/form-data', None, None) + + def test_bad_content_transfer_encoding(self): + data = b'----boundary\r\nContent-Disposition: form-data; name="file"; filename="test.txt"\r\nContent-Type: text/plain\r\nContent-Transfer-Encoding: badstuff\r\n\r\nTest\r\n----boundary--\r\n' + + files = [] + def on_file(f): + files.append(f) + on_field = Mock() + on_end = Mock() + + # Test with erroring. + config = {'UPLOAD_ERROR_ON_BAD_CTE': True} + f = FormParser('multipart/form-data', on_field, on_file, + on_end=on_end, boundary='--boundary', config=config) + + with self.assertRaises(FormParserError): + f.write(data) + f.finalize() + + # Test without erroring. + config = {'UPLOAD_ERROR_ON_BAD_CTE': False} + f = FormParser('multipart/form-data', on_field, on_file, + on_end=on_end, boundary='--boundary', config=config) + + f.write(data) + f.finalize() + self.assert_file_data(files[0], b'Test') + + def test_handles_None_fields(self): + fields = [] + def on_field(f): + fields.append(f) + on_file = Mock() + on_end = Mock() + + f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) + f.write(b'foo=bar&another&baz=asdf') + f.finalize() + + self.assertEqual(fields[0].field_name, b'foo') + self.assertEqual(fields[0].value, b'bar') + + self.assertEqual(fields[1].field_name, b'another') + self.assertEqual(fields[1].value, None) + + self.assertEqual(fields[2].field_name, b'baz') + self.assertEqual(fields[2].value, b'asdf') + + def test_max_size_multipart(self): + # Load test data. + test_file = 'single_field_single_file.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + # Create form parser. + self.make('boundary') + + # Set the maximum length that we can process to be halfway through the + # given data. + self.f.parser.max_size = len(test_data) / 2 + + i = self.f.write(test_data) + self.f.finalize() + + # Assert we processed the correct amount. + self.assertEqual(i, len(test_data) / 2) + + def test_max_size_form_parser(self): + # Load test data. + test_file = 'single_field_single_file.http' + with open(os.path.join(http_tests_dir, test_file), 'rb') as f: + test_data = f.read() + + # Create form parser setting the maximum length that we can process to + # be halfway through the given data. + size = len(test_data) / 2 + self.make('boundary', config={'MAX_BODY_SIZE': size}) + + i = self.f.write(test_data) + self.f.finalize() + + # Assert we processed the correct amount. 
+ self.assertEqual(i, len(test_data) / 2) + + def test_octet_stream_max_size(self): + files = [] + def on_file(f): + files.append(f) + on_field = Mock() + on_end = Mock() + + f = FormParser('application/octet-stream', on_field, on_file, + on_end=on_end, file_name=b'foo.txt', + config={'MAX_BODY_SIZE': 10}) + + f.write(b'0123456789012345689') + f.finalize() + + self.assert_file_data(files[0], b'0123456789') + + def test_invalid_max_size_multipart(self): + with self.assertRaises(ValueError): + q = MultipartParser(b'bound', max_size='foo') + + +class TestHelperFunctions(unittest.TestCase): + def test_create_form_parser(self): + r = create_form_parser({'Content-Type': 'application/octet-stream'}, + None, None) + self.assertTrue(isinstance(r, FormParser)) + + def test_create_form_parser_error(self): + headers = {} + with self.assertRaises(ValueError): + create_form_parser(headers, None, None) + + def test_parse_form(self): + on_field = Mock() + on_file = Mock() + + parse_form( + {'Content-Type': 'application/octet-stream', + }, + BytesIO(b'123456789012345'), + on_field, + on_file + ) + + on_file.assert_called_once() + + # Assert that the first argument of the call (a File object) has size + # 15 - i.e. all data is written. + self.assertEqual(on_file.call_args[0][0].size, 15) + + def test_parse_form_content_length(self): + files = [] + def on_file(file): + files.append(file) + + parse_form( + {'Content-Type': 'application/octet-stream', + 'Content-Length': '10' + }, + BytesIO(b'123456789012345'), + None, + on_file + ) + + self.assertEqual(len(files), 1) + self.assertEqual(files[0].size, 10) + + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(TestFile)) + suite.addTest(unittest.makeSuite(TestParseOptionsHeader)) + suite.addTest(unittest.makeSuite(TestBaseParser)) + suite.addTest(unittest.makeSuite(TestQuerystringParser)) + suite.addTest(unittest.makeSuite(TestOctetStreamParser)) + suite.addTest(unittest.makeSuite(TestBase64Decoder)) + suite.addTest(unittest.makeSuite(TestQuotedPrintableDecoder)) + suite.addTest(unittest.makeSuite(TestFormParser)) + suite.addTest(unittest.makeSuite(TestHelperFunctions)) + + return suite + diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/LICENSE b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/LICENSE new file mode 100644 index 0000000..bdb7786 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/LICENSE @@ -0,0 +1,27 @@ +Mypy extensions are licensed under the terms of the MIT license, reproduced below. 
+ += = = = = + +The MIT License + +Copyright (c) 2016-2017 Jukka Lehtosalo and contributors + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + += = = = = diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/METADATA b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/METADATA new file mode 100644 index 0000000..8892543 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/METADATA @@ -0,0 +1,31 @@ +Metadata-Version: 2.1 +Name: mypy-extensions +Version: 0.4.3 +Summary: Experimental type system extensions for programs checked with the mypy typechecker. +Home-page: https://github.com/python/mypy_extensions +Author: The mypy developers +Author-email: jukka.lehtosalo@iki.fi +License: MIT License +Platform: UNKNOWN +Classifier: Development Status :: 2 - Pre-Alpha +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Software Development +Requires-Dist: typing (>=3.5.3) ; python_version < "3.5" + +Mypy Extensions +=============== + +The "mypy_extensions" module defines experimental extensions to the +standard "typing" module that are supported by the mypy typechecker. 
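The metadata above only states that mypy_extensions provides experimental typing extensions; its main runtime export is TypedDict (the module source follows below, together with helpers for precise Callable types). A minimal sketch of the runtime behaviour, assuming the package is installed; the Movie name and its fields are illustrative only:

from mypy_extensions import TypedDict

# At runtime a TypedDict value is just a dict; key and value types are only checked by mypy.
Movie = TypedDict('Movie', {'title': str, 'year': int})

m: Movie = {'title': 'Metropolis', 'year': 1927}
print(m['title'], m['year'])
print(isinstance(m, dict))   # True: instances are plain dictionaries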
+ + diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/RECORD b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/RECORD new file mode 100644 index 0000000..a9a14f1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/RECORD @@ -0,0 +1,7 @@ +mypy_extensions.py,sha256=zuwlHRPtDPLvzoQv9-FSryoLKAkKaDIpu14-js2gHOE,5078 +mypy_extensions-0.4.3.dist-info/LICENSE,sha256=pQRQ2h1TzXd7gM7XfFj_lqvgzNh5cGvRQsPsIOJF8LQ,1204 +mypy_extensions-0.4.3.dist-info/METADATA,sha256=6AEHeULt1o9wXmnlSNSu3QeQcx4Ywq8OKhB689SG0p4,1155 +mypy_extensions-0.4.3.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 +mypy_extensions-0.4.3.dist-info/top_level.txt,sha256=TllnGWqDoFMhKyTiX9peoF1VC1wmkRgILHdebnubEb8,16 +mypy_extensions-0.4.3.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +mypy_extensions-0.4.3.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/WHEEL new file mode 100644 index 0000000..8b701e9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.6) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/top_level.txt new file mode 100644 index 0000000..ee21665 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions-0.4.3.dist-info/top_level.txt @@ -0,0 +1 @@ +mypy_extensions diff --git a/myenv/lib/python3.9/site-packages/mypy_extensions.py b/myenv/lib/python3.9/site-packages/mypy_extensions.py new file mode 100644 index 0000000..5f9d88e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/mypy_extensions.py @@ -0,0 +1,166 @@ +"""Defines experimental extensions to the standard "typing" module that are +supported by the mypy typechecker. + +Example usage: + from mypy_extensions import TypedDict +""" + +from typing import Any + +# NOTE: This module must support Python 2.7 in addition to Python 3.x + +import sys +# _type_check is NOT a part of public typing API, it is used here only to mimic +# the (convenient) behavior of types provided by typing module. +from typing import _type_check # type: ignore + + +def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools', 'typing']: + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + except (AttributeError, ValueError): + pass + return False + + +def _dict_new(cls, *args, **kwargs): + return dict(*args, **kwargs) + + +def _typeddict_new(cls, _typename, _fields=None, **kwargs): + total = kwargs.pop('total', True) + if _fields is None: + _fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + + ns = {'__annotations__': dict(_fields), '__total__': total} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(_typename, (), ns) + + +class _TypedDictMeta(type): + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. 
+ # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. + ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns) + + anns = ns.get('__annotations__', {}) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + anns = {n: _type_check(tp, msg) for n, tp in anns.items()} + for base in bases: + anns.update(base.__dict__.get('__annotations__', {})) + tp_dict.__annotations__ = anns + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + +TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) +TypedDict.__module__ = __name__ +TypedDict.__doc__ = \ + """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by typecheckers. + Usage:: + + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info could be accessed via Point2D.__annotations__. TypedDict + supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + + class Point2D(TypedDict): + x: int + y: int + label: str + + The latter syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + +# Argument constructors for making more-detailed Callables. These all just +# return their type argument, to make them complete noops in terms of the +# `typing` module. + + +def Arg(type=Any, name=None): + """A normal positional argument""" + return type + + +def DefaultArg(type=Any, name=None): + """A positional argument with a default value""" + return type + + +def NamedArg(type=Any, name=None): + """A keyword-only argument""" + return type + + +def DefaultNamedArg(type=Any, name=None): + """A keyword-only argument with a default value""" + return type + + +def VarArg(type=Any): + """A *args-style variadic positional argument""" + return type + + +def KwArg(type=Any): + """A **kwargs-style variadic keyword argument""" + return type + + +# Return type that indicates a function does not return +class NoReturn: pass + + +def trait(cls): + return cls + + +def mypyc_attr(*attrs, **kwattrs): + return lambda x: x + + +# TODO: We may want to try to properly apply this to any type +# variables left over... 
+class _FlexibleAliasClsApplied: + def __init__(self, val): + self.val = val + + def __getitem__(self, args): + return self.val + + +class _FlexibleAliasCls: + def __getitem__(self, args): + return _FlexibleAliasClsApplied(args[-1]) + + +FlexibleAlias = _FlexibleAliasCls() diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/INSTALLER b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/INSTALLER new file mode 100644 index 0000000..2f9ab90 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/INSTALLER @@ -0,0 +1 @@ +Poetry 1.6.1 \ No newline at end of file diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/LICENSE.txt b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/LICENSE.txt new file mode 100644 index 0000000..a274a66 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/LICENSE.txt @@ -0,0 +1,37 @@ +NetworkX is distributed with the 3-clause BSD license. + +:: + + Copyright (C) 2004-2022, NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NetworkX Developers nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
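The argument constructors defined in mypy_extensions.py above (Arg, DefaultArg, NamedArg, DefaultNamedArg, VarArg, KwArg) are runtime no-ops that simply return their type argument; they exist so that mypy can describe positional, defaulted and keyword-only parameters inside a Callable type. A minimal sketch, assuming mypy_extensions is importable; the function and parameter names are illustrative only:

from typing import Callable
from mypy_extensions import Arg, DefaultArg, NamedArg

# For mypy this reads as (x: int, y: int = ..., *, scale: float) -> float;
# at runtime each helper returns its type, so it is an ordinary Callable[[int, int, float], float].
Combine = Callable[[Arg(int, 'x'), DefaultArg(int, 'y'), NamedArg(float, 'scale')], float]

def combine(x: int, y: int = 0, *, scale: float = 1.0) -> float:
    return (x + y) * scale

fn: Combine = combine
print(fn(2, 3, scale=1.5))   # 7.5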
diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/METADATA b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/METADATA new file mode 100644 index 0000000..dc385d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/METADATA @@ -0,0 +1,131 @@ +Metadata-Version: 2.1 +Name: networkx +Version: 2.8.5 +Summary: Python package for creating and manipulating graphs and networks +Home-page: https://networkx.org/ +Author: Aric Hagberg +Author-email: hagberg@lanl.gov +Maintainer: NetworkX Developers +Maintainer-email: networkx-discuss@googlegroups.com +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: >=3.8 +License-File: LICENSE.txt +Provides-Extra: default +Requires-Dist: numpy (>=1.19) ; extra == 'default' +Requires-Dist: scipy (>=1.8) ; extra == 'default' +Requires-Dist: matplotlib (>=3.4) ; extra == 'default' +Requires-Dist: pandas (>=1.3) ; extra == 'default' +Provides-Extra: developer +Requires-Dist: pre-commit (>=2.19) ; extra == 'developer' +Requires-Dist: mypy (>=0.960) ; extra == 'developer' +Provides-Extra: doc +Requires-Dist: sphinx (>=5) ; extra == 'doc' +Requires-Dist: pydata-sphinx-theme (>=0.9) ; extra == 'doc' +Requires-Dist: sphinx-gallery (>=0.10) ; extra == 'doc' +Requires-Dist: numpydoc (>=1.4) ; extra == 'doc' +Requires-Dist: pillow (>=9.1) ; extra == 'doc' +Requires-Dist: nb2plots (>=0.6) ; extra == 'doc' +Requires-Dist: texext (>=0.6.6) ; extra == 'doc' +Provides-Extra: extra +Requires-Dist: lxml (>=4.6) ; extra == 'extra' +Requires-Dist: pygraphviz (>=1.9) ; extra == 'extra' +Requires-Dist: pydot (>=1.4.2) ; extra == 'extra' +Requires-Dist: sympy (>=1.10) ; extra == 'extra' +Provides-Extra: test +Requires-Dist: pytest (>=7.1) ; extra == 'test' +Requires-Dist: pytest-cov (>=3.0) ; extra == 'test' +Requires-Dist: codecov (>=2.1) ; extra == 'test' + +NetworkX +======== + +.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?tag=networkx-2.8.5 + :target: https://github.com/networkx/networkx/actions?query=branch%3Anetworkx-2.8.5 + +.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg + :target: https://app.codecov.io/gh/networkx/networkx/branch/main + +.. 
image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square + :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22 + + +NetworkX is a Python package for the creation, manipulation, +and study of the structure, dynamics, and functions +of complex networks. + +- **Website (including documentation):** https://networkx.org +- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss +- **Source:** https://github.com/networkx/networkx +- **Bug reports:** https://github.com/networkx/networkx/issues +- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html +- **GitHub Discussions:** https://github.com/networkx/networkx/discussions + +Simple example +-------------- + +Find the shortest path between two nodes in an undirected graph: + +.. code:: pycon + + >>> import networkx as nx + >>> G = nx.Graph() + >>> G.add_edge("A", "B", weight=4) + >>> G.add_edge("B", "D", weight=2) + >>> G.add_edge("A", "C", weight=3) + >>> G.add_edge("C", "D", weight=4) + >>> nx.shortest_path(G, "A", "D", weight="weight") + ['A', 'B', 'D'] + +Install +------- + +Install the latest version of NetworkX:: + + $ pip install networkx + +Install with all optional dependencies:: + + $ pip install networkx[all] + +For additional details, please see `INSTALL.rst`. + +Bugs +---- + +Please report any bugs that you find `here `_. +Or, even better, fork the repository on `GitHub `_ +and create a pull request (PR). We welcome all changes, big or small, and we +will help you make the PR if you are new to `git` (just ask on the issue and/or +see `CONTRIBUTING.rst`). + +License +------- + +Released under the 3-Clause BSD license (see `LICENSE.txt`):: + + Copyright (C) 2004-2022 NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/RECORD b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/RECORD new file mode 100644 index 0000000..81f3cf2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/RECORD @@ -0,0 +1,644 @@ +networkx/__init__.py,sha256=MovMyewNUEF1O6pE02yVCvVjVdZntb0rksfnBC1tgiY,2939 +networkx/conftest.py,sha256=JXFOt642CF00x-Ov8PFmw_XzTtlrpt1CNvJWzSKrG1k,12271 +networkx/convert.py,sha256=ZBOGJBt4e8hKBH-Rr7xdertkWHyv57HAU-jBdN1VBIE,15858 +networkx/convert_matrix.py,sha256=r80YqseF6JglHFwZqDixXxueom7_PChnu_esHGLw05o,56245 +networkx/exception.py,sha256=5v8tPTpYcuu3OFgSitgC8-wMUGNwfgxZog2gsBNeRPk,3537 +networkx/lazy_imports.py,sha256=3PZ69R_MO23e3U8-6M37bpfXQ6lx5ywV61sETo4k9Cg,5777 +networkx/relabel.py,sha256=WQZv_Gil6Bh5oX8imiw4foZQdw6yMQhtYWFNNRHEPJg,10010 +networkx/algorithms/__init__.py,sha256=eJ_V2reAkgQo0cvUYnADoZt5iTe0fhe9he2zrA6pGEs,6364 +networkx/algorithms/asteroidal.py,sha256=rdFr50v0T9t5VtNN1z74NQ0COgtstlBR4W1BTPoatC8,5810 +networkx/algorithms/boundary.py,sha256=UXpJDuXRvEv6GDY1BFULHyPJJK0rXx-K5XN491KYpgg,4695 +networkx/algorithms/bridges.py,sha256=a8WCc02kWwhu3F1POie0Jzhlo0yH80MlOIl9DjJq0Ws,5886 +networkx/algorithms/chains.py,sha256=NFjIqQYheEmX0Hwyk9bBt3aCQ2WSWe2eMODGXKZ0cGo,6769 +networkx/algorithms/chordal.py,sha256=D71-vEWs8-I096n_vpUuONVIHcLxNQhRkMQS8zhKcgI,14376 +networkx/algorithms/clique.py,sha256=pEBXbMVV2db69hHYXeux9IJwhzIgiyqwBu06jAmvuJs,26631 +networkx/algorithms/cluster.py,sha256=6voNVbLepilh6HQggtn0JB3iiyA0xoVEHNmdOoG_Uq4,18789 +networkx/algorithms/communicability_alg.py,sha256=ClwaQ5CH0loQJCXtutMCDwqNStNqo0B4eOQECoXs7UM,4551 
+networkx/algorithms/core.py,sha256=QsAvXtz0YtF9d10mh_4hKzJ6Hb0uWf3Aq1Sa8Xh37XY,15869 +networkx/algorithms/covering.py,sha256=my5V4PHT3zHLK0QvanEqQn65jJ5_nNVy6ul_0wHRT18,5262 +networkx/algorithms/cuts.py,sha256=HCrDvHqAAMcXqPXaW4qh5TKFM4wVbGnCVIOv4EDL6rs,9722 +networkx/algorithms/cycles.py,sha256=HKG2fERA6_6ZrMQvBvoNcOkYyKn6WE6JB-RhFKOFlmw,21677 +networkx/algorithms/d_separation.py,sha256=RhTA7HwR_NUOVvi6BOBuH5n9L6Fv7Z52DMcPmSn0GSc,4165 +networkx/algorithms/dag.py,sha256=nPMzzIKxPnaI8vo86Q3NmiSRIcMkyz8VDC-Ecu14hVE,34557 +networkx/algorithms/distance_measures.py,sha256=MXpBzHD_xoioe19B13h9ftzV40_Yf-jD7B-0phpQBEI,22803 +networkx/algorithms/distance_regular.py,sha256=DARGJQPSdf_xxoInuEBBKN4kun0Izu-LGX8c1-hiuvk,6872 +networkx/algorithms/dominance.py,sha256=lK7FRWAr6TiENtmuTniW1q7KesopvW6kJLKqNR5-SeI,3394 +networkx/algorithms/dominating.py,sha256=lvyePsQ08fGU9OdZZ5oi_d7P_lg7HR5xn8CNdpWxNP8,2647 +networkx/algorithms/efficiency_measures.py,sha256=y21IgKH5o-IZhdka0_4J0naw1XIXkxa3ymEBey75KEw,4277 +networkx/algorithms/euler.py,sha256=HMAahIPnIXzFW6tMKnWgW0jnYQ5hzR2CWByfK612pzg,13600 +networkx/algorithms/graph_hashing.py,sha256=rQN5evPVZBCnuJHjJ7Fvf0znSuS6PZHX0wTsrnvp4gI,11392 +networkx/algorithms/graphical.py,sha256=r4a7Sk8GSDYvZlKs0I8pOvpCWAZspCAFvNM9O2WtrOk,13449 +networkx/algorithms/hierarchy.py,sha256=afmel-XstPOVjn5B3Pc4G2ejclgStf4Bk8zdWOSgWHw,1502 +networkx/algorithms/hybrid.py,sha256=U6hyTathxCYfenAnDBiqQm46r1zPDbTUdRI--yzg6TA,6152 +networkx/algorithms/isolate.py,sha256=oZU25X5zEKCnwnVMS7BFXnTckS7jwXkyTuI5T6TooME,2261 +networkx/algorithms/link_prediction.py,sha256=QOei1_-_sL8FqTLyB-fEcwgUnbhQ_RdwiqpYzJBUZM8,19792 +networkx/algorithms/lowest_common_ancestors.py,sha256=Cez4vbJK2vws3LwTPiFADtkwG_eGIXSU9qTWQbe-IK0,14013 +networkx/algorithms/matching.py,sha256=fope8XNIJMJtmO7SdxNfTsmE0rdQeq0ovWjpNpx2DCk,42801 +networkx/algorithms/mis.py,sha256=oUWPZAew3zVsW9DKcblo7mOt2dUUfdrsucSlz56jdok,2325 +networkx/algorithms/moral.py,sha256=tVX5HD9h8qmygjMSufsVrcYiUkpLMUdhDE_mWLw5WYo,1475 +networkx/algorithms/non_randomness.py,sha256=-hL-zHvyjj0Bpr8E2xj0rwbr7CPYUbXdicggpenIGrg,2858 +networkx/algorithms/planar_drawing.py,sha256=vSlP1AToiIzaZPYmNJfTOLouJHndjZWzK2O7bbpNOo0,16320 +networkx/algorithms/planarity.py,sha256=nOX-8mUnFRJrZc9DtsRsMIMZ_anJoftjZU91sXxqlgs,39410 +networkx/algorithms/polynomials.py,sha256=tlbLcpQaYp1XVA7uQPasBHe15AuScZpVXPgzTX75xKI,10826 +networkx/algorithms/reciprocity.py,sha256=1iv5v1GkWu6cAorGxpiBmS5jzh_BkVi8XZaXtuLr7ms,2796 +networkx/algorithms/regular.py,sha256=oI3fx0ZoHceX_WYe1J-7IxhLtLVahkNZIhqv1HVXNJ8,6205 +networkx/algorithms/richclub.py,sha256=6twv21qc43m2N5j9zPwLOWZh7BeDwCN8XTdtx_Ph5KY,4152 +networkx/algorithms/similarity.py,sha256=UpxBU63wyQeMqcWIxTUP6jh3F4DAty9aizoEBZ8pAkE,59768 +networkx/algorithms/simple_paths.py,sha256=5rOnA5bQ_nJCoBiX9eiJQC1OHY5moe9thp9YaM0Hp8w,29763 +networkx/algorithms/smallworld.py,sha256=re54UBdlwcXH2pHqmp3AoBf-lHGeSNe5AM1t7rliObw,13085 +networkx/algorithms/smetric.py,sha256=SXz5CHN9f2fQ_k4DJAW_iGgMgUs3pKHsvLx2Yk5wGfY,1177 +networkx/algorithms/sparsifiers.py,sha256=QETW6i73_YyvmM_bCm-53A3ln_YV926Cq2DzKY526X8,10038 +networkx/algorithms/structuralholes.py,sha256=FGO23J0-m8Dgkt8RgRWLy_v1wvO8wQ0mv96R2pxf1xI,9146 +networkx/algorithms/summarization.py,sha256=HzWpV8v0nc_z9njfvjGTUTZCk177IOqE6BltbN-Z79A,22926 +networkx/algorithms/swap.py,sha256=259xQ_uOB9e9KKqlJ4wprlARSfaThJM2ax1nbPEJ2os,9819 +networkx/algorithms/threshold.py,sha256=VCME2hzhkwnXGNqfaYSrCG0x8l63i_FH-N7zNl5CeNs,31010 
+networkx/algorithms/tournament.py,sha256=hDrl96Nf5huz66YJsDuV9ruRrRFFTGKJDctQq9fj4ls,11584 +networkx/algorithms/triads.py,sha256=U5t8GatjmTraeuxltj8ocvqCY-iiostYzKgIBRmdyzc,13476 +networkx/algorithms/vitality.py,sha256=f1fAmEm1n7JE6TMBix9iFPmK9EOtgGg0YuRj8BYZnGM,2296 +networkx/algorithms/voronoi.py,sha256=BFykl7dsAXeQGDrK-Z6-sGbsyaQUYMXkkzZTcN_Pa78,3158 +networkx/algorithms/wiener.py,sha256=b_W7Beo_MKyetq6o8w2jLu0v8okmfX3n9F4Bc_NTi-M,2270 +networkx/algorithms/approximation/__init__.py,sha256=hwi6EOHU1OJEDOxYr8USLexbUOubH76aiU9P4WRDZrw,1197 +networkx/algorithms/approximation/clique.py,sha256=t-G-wOQ3SFcCOWSrmH8pKmPhtnp2UouXM4HctPshVNc,7181 +networkx/algorithms/approximation/clustering_coefficient.py,sha256=mfgwpJN1Jk9da7KLBOMNqXiU187r3WoUQLxiTwlI9gw,2009 +networkx/algorithms/approximation/connectivity.py,sha256=lfjwwTlS_JC--GJBC4Lu65XO50Jz3vpKwhAqTE2lwEs,12716 +networkx/algorithms/approximation/distance_measures.py,sha256=6q6J3VRqxAkqqh_d519mItyjfhdgTqS8xePHo4D3ZWI,5550 +networkx/algorithms/approximation/dominating_set.py,sha256=1WAJnJ8AFyFSNO7TLAsJ0fFIVcIPoYhupRDhcJsDpb0,4143 +networkx/algorithms/approximation/kcomponents.py,sha256=fsRl3wBAe5LTQGftJ71OFIMN4k1XGH9JZlsOUJC03QE,13223 +networkx/algorithms/approximation/matching.py,sha256=iN-ofr7Gs1XkCHSAxc5HkwSrHkMyUY7i7bTPQkIuR-o,1155 +networkx/algorithms/approximation/maxcut.py,sha256=DD1mktczrwD79NR3A0bAegXWSld_47rpQfJtZr09Mdk,3594 +networkx/algorithms/approximation/ramsey.py,sha256=mD3edR4mp8qyjV1oFp6vV3yX0w7LbOuOsj94LmRNDYA,1339 +networkx/algorithms/approximation/steinertree.py,sha256=X0IpQh3972Wl1qubybz3sNQb4BW6LJ1uidb9ZUEQNXU,3376 +networkx/algorithms/approximation/traveling_salesman.py,sha256=eg2zjSJimcv_pnJrdcObmEFJWN48vC-lMto1v74-VKs,54262 +networkx/algorithms/approximation/treewidth.py,sha256=YekIUcogCMMvy2zR9aQsA866A5nSFr0ySTEzP7gDCc0,8019 +networkx/algorithms/approximation/vertex_cover.py,sha256=GroE2Vc_Ieq4kUpj-pDQqlCTR-0PGqcZ9T33qV77xZ0,2741 +networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171 +networkx/algorithms/approximation/tests/test_clique.py,sha256=JZ_ja03aVU7vnZ42Joy1ze0vjdcm_CnDhD96Z4W_Dcc,3022 +networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952 +networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=GSyupA_jqSc_pLPSMnZFNcBgZc8-KFWgt6Q7uFegTqg,2024 +networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=Rtdsu-0KjZMS2Qj4fd3nJgHDHxASDrwS907_TyaHUVw,2296 +networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=MCQ1tNiFQrl0-MutM1N_Q6QHYEWCvDQ6cRM_Y7V3dDw,9213 +networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186 +networkx/algorithms/approximation/tests/test_maxcut.py,sha256=R0tx_0mP0vWKX564j4qoiljnG3Mn0XhGAhFRYOZEcHM,2430 +networkx/algorithms/approximation/tests/test_ramsey.py,sha256=2oJFufDcKZNFvRVmt66nLvuHC95c4b8ey-nrANKOluw,1142 +networkx/algorithms/approximation/tests/test_steinertree.py,sha256=g4Wvx6u8HNa8Q5XY-R_KCbY8vcKXMuOz8lC6zAJ7Vv0,3145 +networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=LSAquDfYaz_MxM1Tb4uYM_XNCh8_tJINcUSxFOFrJ_U,30699 +networkx/algorithms/approximation/tests/test_treewidth.py,sha256=1AwP3bgaEG4JrrlG0cbjMcxirlKH4vk-weaP2isdDXM,8949 
+networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942 +networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294 +networkx/algorithms/assortativity/connectivity.py,sha256=eR-PMTFyY8fktn_nUZ2LLblEsoktAMFyGmdN_eXQfVc,4815 +networkx/algorithms/assortativity/correlation.py,sha256=KOOfrFosgyeTOz3zKeqh31o1f1WgssbESCmIWP1v3qc,8529 +networkx/algorithms/assortativity/mixing.py,sha256=VhbJxGlBRa_KrGmxfq9da5mibxnbg5Fl1PODfaWIwPo,9094 +networkx/algorithms/assortativity/neighbor_degree.py,sha256=qe33xcO9SNMNTbb3c9e0q-whYDcVl_O5fi2Pzke2p5s,5243 +networkx/algorithms/assortativity/pairs.py,sha256=hleMyWgHoy8hXV8ZOIqGJ_ChZQ2oVlukYfPvkaT9SQg,3297 +networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/assortativity/tests/base_test.py,sha256=DjNszEwQzYDPXvEIHTyVAExE6WoUEWtR2kgz_ATfgWo,2724 +networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=HQGuZTnGab1yWaKLilXwwIg-GgSR7alMcH-0CcLXszE,5092 +networkx/algorithms/assortativity/tests/test_correlation.py,sha256=qS0Nv7rqKEYDrsrGkOfpm49jXi-D7YKs39dwkcdLxCM,4527 +networkx/algorithms/assortativity/tests/test_mixing.py,sha256=8zS_xTryZ7_MPmsbRgcaeiPfpnwzhqH8TbYS4f_pEbI,6903 +networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=AFlcIF0CoTD2F_j5i1AHeOFJzvyEn7Z4ww2axdT0D3E,3706 +networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008 +networkx/algorithms/bipartite/__init__.py,sha256=P6prxqUpq0T1xikH3DLNggcGxEEf6gu6z8tcwd3Pbq0,3768 +networkx/algorithms/bipartite/basic.py,sha256=VY0cmgU2KHSklcvJ8xvZAZLzOk7jan3HZ2dg0TMuf4Y,8220 +networkx/algorithms/bipartite/centrality.py,sha256=cabaDLe_RxmYKd3Aa93PeRdH7TvlKKQsBMbSp9kaRH4,8412 +networkx/algorithms/bipartite/cluster.py,sha256=W0ZfqKOwz-UazQ6niOkr4S5xlcvsvVSMFHhFV96EmqE,6845 +networkx/algorithms/bipartite/covering.py,sha256=-_fHiWsMuzF8KrmpeOXFWiQctkKq2jiIlbHIN0u_HdM,2091 +networkx/algorithms/bipartite/edgelist.py,sha256=OQ8hTszNynjYVbhVS0ilgobEPvcIIhisivu9B1cTGw8,11198 +networkx/algorithms/bipartite/generators.py,sha256=pnyIACA6E5BXRGea7KvgXJYWzrcAQxRqWwTn2hYmfcc,19961 +networkx/algorithms/bipartite/matching.py,sha256=l7sahM0AhZ0GGmYj0m1xiRGCcd4Dfr-pc07pD6SL6GY,21273 +networkx/algorithms/bipartite/matrix.py,sha256=GJWEfVMwqfet589UNvDaZnJVdvQQYcVFSNqGu80CnUI,6382 +networkx/algorithms/bipartite/projection.py,sha256=XR81J4o7eUiBCMZV3JsqXq4tmE1ZYyVZEdE6t8xvMxY,17304 +networkx/algorithms/bipartite/redundancy.py,sha256=U6oV3JS6hxoMuqSznTr7Wp3iH90pkzXA5kuVWTutYzU,3479 +networkx/algorithms/bipartite/spectral.py,sha256=DqX2CJNvUIdEwblNbMIhCBb3LsEP5Gvk-yRytP0vlg4,1890 +networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291 +networkx/algorithms/bipartite/tests/test_centrality.py,sha256=gIXOB8ZP-uO9r5Cp23Yng_C4o-qCUKM_48GzFrlCWQ0,5901 +networkx/algorithms/bipartite/tests/test_cluster.py,sha256=8aJH5Ac8QbuknAA65w0rXUHwAjqqEImEYUyrt5pboM4,2809 +networkx/algorithms/bipartite/tests/test_covering.py,sha256=SbnZQTZY3jjt9Ncv--Q0tG7ywATk4vem2FPx7rV_Ixg,1229 +networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=1_9UI5pv6qbD696ibnmSzf1rVLmWeYrRohZ_Xazg3Yg,6486 +networkx/algorithms/bipartite/tests/test_generators.py,sha256=GLMThTKIfZ96NwTxIL0P0o0OAESZFfnySRkRjtKhao8,12794 
+networkx/algorithms/bipartite/tests/test_matching.py,sha256=xDP9qjK1xlfu1eVFGAOi6sk_w1AgWvMp5p2vBmz5J1w,11967 +networkx/algorithms/bipartite/tests/test_matrix.py,sha256=EoqQKTMcPPPPUZYTzc-AAtl5F77qT0X3FI3E1tYppxM,2900 +networkx/algorithms/bipartite/tests/test_project.py,sha256=Hx6P2NQII1O9-cF3GgHqfIZxUfyNjUtZ7i5-beAu4mM,14714 +networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=F6z_h713fkLOAEhR_4LXWaRdP1amduCQYiVESGml61A,785 +networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=HZr6gYzQEpMTjnm7IaIne3lN9yr7pkKwYQXh8ljj9SY,2359 +networkx/algorithms/centrality/__init__.py,sha256=SQty4JnRqEKKomu9sE99VcVTouO5A_B0QUUicUIbY60,533 +networkx/algorithms/centrality/betweenness.py,sha256=em6GYnXMzvu1PdX0roN1eAIUTtaw6r8cHDvH6156_o8,14324 +networkx/algorithms/centrality/betweenness_subset.py,sha256=k7RNfHSR-2ZmK3UhhHD0ExibXGxjp8Iaa_BfEPCphIg,9655 +networkx/algorithms/centrality/closeness.py,sha256=2caNHg1O4ZnbmX-GOSBYDbsY2yMvCzGRj0wl1f3Lu10,10201 +networkx/algorithms/centrality/current_flow_betweenness.py,sha256=R_QKBxJuEN8jMPd0b-rbGrKfIowQ1Tk4lfhW-8RlhOM,11766 +networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=1x3rUl2FdtWVdvY8uWGh_yme6Eyy4kamK40jApZqPwI,7976 +networkx/algorithms/centrality/current_flow_closeness.py,sha256=wqSzFa1CRsiJyqr5M0DpJXtfSPyxqqM9cIwpg5lfbao,3316 +networkx/algorithms/centrality/degree_alg.py,sha256=P1wQno5RrAK0jnZm9ZDO3UBcAoNIoygEmfCXURmO7Z4,3239 +networkx/algorithms/centrality/dispersion.py,sha256=WzE9_ECrM7K0QGTqgwTJJ-cvU2c-D41FMlZ8OYNr9Sg,3318 +networkx/algorithms/centrality/eigenvector.py,sha256=4NDbPe7NRS075XW4WwsJnZcuNngYlVxmGKjWT1-H_JU,8150 +networkx/algorithms/centrality/flow_matrix.py,sha256=GoXCdw0Cno58Fgv3wdfGJYQ2FkMiUIUI03486OyvE44,3919 +networkx/algorithms/centrality/group.py,sha256=VY7FcOQegFhCgEZ3mOmAGaAymBEWCntxYB6qriYjNt0,27731 +networkx/algorithms/centrality/harmonic.py,sha256=CbLCDYB54B-YqCGgEBsDLkC03SW-s0KitYhsbIN4J88,2589 +networkx/algorithms/centrality/katz.py,sha256=_FUTkTRHF73MdMkIBIumGndSF24XnoFNjSJMN-5nVOc,10674 +networkx/algorithms/centrality/load.py,sha256=2FAn5AO-KfWlkfTxcMJ0z9XDr2dmFjCIafsLJvaCyiQ,6801 +networkx/algorithms/centrality/percolation.py,sha256=KEs6W66WxXQaWlboYLziVhpVO_bGt4iSP4MTj3t2AA4,4088 +networkx/algorithms/centrality/reaching.py,sha256=JB4NXxR8QBiACGNS3ACJkyFsM1jEHumm9O5EXc5kKXI,6947 +networkx/algorithms/centrality/second_order.py,sha256=v7GhTeIRcRPevAc89MPNjkUp8CnTRfOJhNKQf4AGHlg,4728 +networkx/algorithms/centrality/subgraph_alg.py,sha256=3T8X153DaTttL7NSeZXuo1Vn8nC1rzc-tXgtoiEwdcA,9506 +networkx/algorithms/centrality/trophic.py,sha256=3wjkoPEWEgHXHp1i_t08sq4M4JNTGtc8iybNK_rPliE,4549 +networkx/algorithms/centrality/voterank_alg.py,sha256=OllLOtaPLPoBpbWobUXKS9UrGGKSy7tfFwUdr6IoR5I,3191 +networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=pKoPAP1hnQSgrOxYeW5-LdUiFDANiwTn_NdOdgccbo8,26795 +networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=wW2PkIEQe-DQu2lCnRchlch2Jl2oh9stOESUbRM_gDM,8388 +networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=XWZivyLjxYlF41U4ktUmvULC2PMvxKs2U6BHDXRZVdE,10209 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=Wvu6wi3BPpsJPLyleV0OQhK_c7W7JTOSfGZCeYqzYPs,7204 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839 
+networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=E5LdZVJL2KNbfPeBbAVGOLCoE53ZaWTFqx4GdjL2pbg,1153 +networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=EKduYez3hTUWixAW0NN89l_a7A9j3XhF9ZYCOG4QKls,4106 +networkx/algorithms/centrality/tests/test_dispersion.py,sha256=YB67oQmolVh6PgEvrXApHrOJlK-f1TUGKPymSk6j2WE,1605 +networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=BzZWTPAKHoWo0GnKfG2OCVuAzqK8djsZ44DSCHQxazY,4634 +networkx/algorithms/centrality/tests/test_group.py,sha256=YmWifoTgw2gSS5BnA9G2T_Voauk_WG6v90JrZEt-Kjk,8686 +networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wYP0msmB5hh5OMIxPl9t0G4QSpG3Brxw98Kh9BrRoag,3658 +networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=hI2uNM3_LJhlEbWbiq4iB6L_NsTt_6XCfI6jl9yG6ik,11247 +networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=eOBgwPIyaShzibQ61jEQNlIpEd8-Kh6iauRmmUupyO4,11080 +networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=JRuGdrzHwhvsjCe2YKq7povPVWsanKOAXIia0_-KfCU,2699 +networkx/algorithms/centrality/tests/test_reaching.py,sha256=RxNFfPsMfbYpPLZcg2RWWKMWKqE9MFcN0JvjCnOgpKA,3865 +networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=xqfVYRYPSv7x0AwUFlkoE1_m8xxG60koN-ychM6lrwE,1921 +networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729 +networkx/algorithms/centrality/tests/test_trophic.py,sha256=AzV6rwcTa4b4tcenoKh95o6VF-z7w75l81ZOdhhi6yE,8705 +networkx/algorithms/centrality/tests/test_voterank.py,sha256=7sNbtXv3578jKtCTPY4LTwR0LUIQq1u_qcWL5Lqm1Kw,1592 +networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182 +networkx/algorithms/coloring/equitable_coloring.py,sha256=w82VwGjfpvry3j9h9zpgtcuAGJQNG3FtYUbJl4z2Umk,16571 +networkx/algorithms/coloring/greedy_coloring.py,sha256=BIE4ibWUD9mJtytevadGTSfiKGJA6wPi_iHjWm1SjQ0,19573 +networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/coloring/tests/test_coloring.py,sha256=p_84Boj4XK1BAVwTEvQoO3rUjMjzBoEDA9b5X8m5zeY,20631 +networkx/algorithms/community/__init__.py,sha256=SqTYf-Rsu4-4qOA4s_1pPz-eFx_AAHA01X5qm_LX3TE,1245 +networkx/algorithms/community/asyn_fluid.py,sha256=j2YyD_crV2o3ufiMsiFvItnAedqO4wW-ptG2LlyQFsc,5829 +networkx/algorithms/community/centrality.py,sha256=8L0Cq1upur2Bt6BgEDxqQR1MdRvYoDclWsrL7rUd0nw,6497 +networkx/algorithms/community/community_utils.py,sha256=_jpb_2iem4BoS8tDFKohvgOEl4Fyv0LcwvYJiHpwi5w,867 +networkx/algorithms/community/kclique.py,sha256=Yw4kW2Yn2Ru10RAJ_0xtMyhRIhiol_t9m_hjy_4mbHM,2487 +networkx/algorithms/community/kernighan_lin.py,sha256=AsKOcF07J6-ZYy9nkDDxnAblU43MrlwsqiUw_-hMYRw,4264 +networkx/algorithms/community/label_propagation.py,sha256=QbLnbpZLDTVVEkCFHInz4RzJrcXjaYx-yZgcDRQlwPY,7209 +networkx/algorithms/community/louvain.py,sha256=XHqCYWdWfi3sTIgxfjLoNGZ1hzkMapiFLHY0HWj5wnU,13576 +networkx/algorithms/community/lukes.py,sha256=m3uLqjY7LAONgAVknzp-XtpDW72rcjdERzcaD2mpsWU,8048 +networkx/algorithms/community/modularity_max.py,sha256=XMIkXc1vdgzTnl5w_rvBNWoee6WklKLjru_esf9o1RU,19281 +networkx/algorithms/community/quality.py,sha256=oEkmGVeMYpeVwjWLhVdveIOc-rAVxdyj0Szvy0HFnwE,14610 +networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=YVLh9HVky72n9oywcIuVMGJPjJ97ga5O8E_l3qI8bnU,3046 
+networkx/algorithms/community/tests/test_centrality.py,sha256=L0S-Dz8MKefObM9m0gFfZhxcfaQ4iwDNq-kovC9UEQc,2922 +networkx/algorithms/community/tests/test_kclique.py,sha256=LpHZPexPoLnrMmJXDxYSLr7XbjIRhiEAdZaSXJFa8fI,2407 +networkx/algorithms/community/tests/test_kernighan_lin.py,sha256=s8bK53Y1a87zvlZ1AJE-QJ2vItnbscSOlHQSrMpetGI,2709 +networkx/algorithms/community/tests/test_label_propagation.py,sha256=-SmqnH_9L8pbLW7NkGxg52Qrjm9j98W3zTe0xY8pcoc,5035 +networkx/algorithms/community/tests/test_louvain.py,sha256=rpn6896mZdz3EjBS0awwxch4hsNStSbo3-XKAXK_0Fk,4781 +networkx/algorithms/community/tests/test_lukes.py,sha256=PpFaCFeiUWhcW0k6A0cPjcIH4yF4cFhjivMw3-AoBZw,3951 +networkx/algorithms/community/tests/test_modularity_max.py,sha256=IaWS3VO-QbVTYTF780xpgKP-TUWyDIi8erya4UbbEzg,10373 +networkx/algorithms/community/tests/test_quality.py,sha256=FlL8fW0Gs-w_Y28Bd7tIqWJFImIz0pB7FEubgeB4VdA,5615 +networkx/algorithms/community/tests/test_utils.py,sha256=WLBssBjJR2ihRIVu78022n2O8Qv8xuLlJqz47kWP3SA,670 +networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173 +networkx/algorithms/components/attracting.py,sha256=3HRqyJ3HrCOFVNZn_sdcgixIvhfp5BOsVxKk0CALrAU,2657 +networkx/algorithms/components/biconnected.py,sha256=MIDPAAFM5vsViksdzNpH-bpZI9ezKVilBB-FOzYFjQA,12501 +networkx/algorithms/components/connected.py,sha256=a1z8iYqMOLyRMoRhxqry0xm0t8uW5IFkFp1ahRPnUms,4147 +networkx/algorithms/components/semiconnected.py,sha256=JzUqo-WMBDw8dow8TAOZ8w2ARpTn_e_crKbJB_5vnWI,1588 +networkx/algorithms/components/strongly_connected.py,sha256=1VZbq98LKnECUvj46a92Fr8QNfo9ZATAAFyI7KnoCfw,11201 +networkx/algorithms/components/weakly_connected.py,sha256=ZsBSLEe4elp2qPipBPWFhAuIdXuvWFL_cOb9IgtF2oM,4074 +networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243 +networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036 +networkx/algorithms/components/tests/test_connected.py,sha256=805NWi0g8doZ3WUguSY59ITrPSuE5J-VTj5j7l9xWsc,3675 +networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792 +networkx/algorithms/components/tests/test_strongly_connected.py,sha256=r-H5xAbZiK0k-SGstJPy00xzlA0I9ym5spCGhRJjLvA,6554 +networkx/algorithms/components/tests/test_weakly_connected.py,sha256=yi23wxW2Vw6JOMqaWMEuqNRxnleriuAQrZ5JGWE48Jk,2887 +networkx/algorithms/connectivity/__init__.py,sha256=VuUXTkagxX-tHjgmeYJ3K4Eq_luK6kSpv1nZwiwGFd8,281 +networkx/algorithms/connectivity/connectivity.py,sha256=6FnncJG1-syvQk5_ToG2swxEqD04dVM6MX2O9hDMbEs,29734 +networkx/algorithms/connectivity/cuts.py,sha256=qp1kb9DGg3EDj-i-Z4hJCozgdNBMcZyz9nzkNOOovD8,22640 +networkx/algorithms/connectivity/disjoint_paths.py,sha256=H39ge1ZmxGK4TEYxES1_fLLoqUvgUANf_rgS3PchLIE,14425 +networkx/algorithms/connectivity/edge_augmentation.py,sha256=XP1zZb0Lp2-uySoLHA3iZ5Pqr-qPwOawY3qetrLOm9Q,43788 +networkx/algorithms/connectivity/edge_kcomponents.py,sha256=TM0EnginLyOgYifqcCVWrB-SNB5kg2s1cSO9mc85hlo,20687 +networkx/algorithms/connectivity/kcomponents.py,sha256=nLMMqfL5VwDcmQm_Vd1faoaAOU5JcgaLjPW9dj1nDLQ,8222 +networkx/algorithms/connectivity/kcutsets.py,sha256=454eDEjD6l1rI6pMR711LtGxE-GNpUM46YpqXRxqepU,9330 +networkx/algorithms/connectivity/stoerwagner.py,sha256=uP4AWBMqBZDfSzzoj2Wa989ykO6pNMPVdMDE6aeK7Ww,5340 
+networkx/algorithms/connectivity/utils.py,sha256=mx7_WRUDm-VWVt5PgbnSMH9MbX0pW-adFrS-UNY7U3s,3144 +networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=PLOwCLA2ZyGsmCLeGqM9r8lEIIfYvJ5T-ZXxIafGDYo,15029 +networkx/algorithms/connectivity/tests/test_cuts.py,sha256=aYAluKaswU-HHx6iZnWx-MI-kwNJ7uXXA39Jx79IWiA,10358 +networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=0IqgdjbNpE3ziWREV8vOdjWHF7cx57BKrenqf8CfR3Y,8399 +networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=-26SQv4AW5oJoRc6RymCQDmeVHcTmc5KwcmetDLEXWQ,15522 +networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=CZ26Dy91WOUqhw1X73mqLGX-WHWzBBIeBCgrp6KK4Zo,16453 +networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=ohoSX8GACeszRZdzTiNuWXSFitfU9DzP0hqllS2gvMU,8554 +networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=qzrxmW3-KEnkNvkP-nWtSQj40_pfzUFadDvv2bjGDT4,8488 +networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011 +networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341 +networkx/algorithms/flow/boykovkolmogorov.py,sha256=KkcNNdbeipyuWMJ-UCJP326asD4uVtGsDKiTGYZrLos,13238 +networkx/algorithms/flow/capacityscaling.py,sha256=KeoOIrdZ_Ak_3b3KoEOF-o0LMGGdiceJE8FfamddRxE,14373 +networkx/algorithms/flow/dinitz_alg.py,sha256=u9_fgyD_B8MTHG6ZoExR5HR6G1HNR46uVRWp7c82JRE,7108 +networkx/algorithms/flow/edmondskarp.py,sha256=yidyYmtZNSPTarE1KuU1L53srNPq_VGXU_lIaGaeHcE,7956 +networkx/algorithms/flow/gomory_hu.py,sha256=YZajBDibtu1IX0_P47Bye_wXM9y1vGwiSfzeOO-rqB4,6267 +networkx/algorithms/flow/maxflow.py,sha256=BNqpMcMkqfG-EpnIiGpEmXXgA5GKfdoTM-GgrQTAnHI,22715 +networkx/algorithms/flow/mincost.py,sha256=1xn5I2z66BIBGTwNs_LspRICBTFqf-zdRD_3cqXy788,11968 +networkx/algorithms/flow/networksimplex.py,sha256=M-fkIp_OTaMmXKEwd8WPiYdA1Nhmjj1raZhlgGgC_l8,25089 +networkx/algorithms/flow/preflowpush.py,sha256=x1EUeTLeaTaPK0s1f6CIVxhQ7UBHvLUB37S8wc7WKis,15621 +networkx/algorithms/flow/shortestaugmentingpath.py,sha256=fOTpKT_wW3CHMZbAFkDiCsiYZ2OXlfTR15nxZ9I2QnA,10272 +networkx/algorithms/flow/utils.py,sha256=bugJtMMGob7Yz48xg8POSR_hG-yQd8k-SISiAOI7oWw,5743 +networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623 +networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248 +networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972 +networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471 +networkx/algorithms/flow/tests/test_maxflow.py,sha256=NJK_V40GHZtsM8DR-sLpMeNFHp46K8KcMj-pSYI8zdQ,18425 +networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=w7kDHC3qJeD1vO538jMTlVJRx5_2Fe8HfINeY71_7TU,4504 +networkx/algorithms/flow/tests/test_mincost.py,sha256=fSF6LI098og2G7qA_dlNFirSr6jOoKDeg3Ju0pUnrck,17665 +networkx/algorithms/flow/tests/test_networksimplex.py,sha256=E2iJrte1jS1-ODBQIBx7EbDL-uh4WXB4nv0LME52cu8,11975 +networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132 +networkx/algorithms/isomorphism/__init__.py,sha256=pODZ0ELhrxvY-qfcxL7FQcPPBfc6C9o39BcbFMs5O8E,354 
+networkx/algorithms/isomorphism/ismags.py,sha256=ZmDQpO63by6ElTKvVk4-boy45JdP3UAYcHTSHja_r14,43599 +networkx/algorithms/isomorphism/isomorph.py,sha256=qHL_PU4ewMn7bt0d1qIaE3TEc6ZZCura8STinQ_h2ks,6382 +networkx/algorithms/isomorphism/isomorphvf2.py,sha256=PuPRVjHHtYJD4jeqOLsx-cvyz1tfjiL32_3ufWc1ff8,40556 +networkx/algorithms/isomorphism/matchhelpers.py,sha256=VN4eQjwhjOCHZsIKMziH0yr5yQlceOg2lnCnn8Gl2E0,10936 +networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=N6yS-OSO_bqprlQgyNEGu27log0nG4RFqmQlAZny6zg,10949 +networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=MNKPIHdA1q05AmYv9z_rLWAeW6-_msWiXOLO-UJcbw4,9258 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=jdvaGLziSM2XURVfxbRjMDX84i_8ewpY7di-u67cXBI,7496 +networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442 +networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442 +networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310 +networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602 +networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=NBuHegns9BFxZCrelBg1ZbJ1c21ZPH49doBnHsJxLvM,10616 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=1GZmmqNWk605Qq9h55V_5SfEKPM50Ceq6DSICdh6ufs,1663 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=lCC5KSNMfyEMSA4q1Xa41k2BTfmr2u2n6ImsDcupiC8,11480 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=ocp3pd_JSCLHAp_mtsipu1XqehOgQCMuuhWEeRG5U7g,2456 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=DZy2zAt74jiTAM-jGK5H9aGRn1ZsMgQl9K5UNsu178Y,7346 +networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=NOrZQCy9nJFMeG-2oJGgsSAiq8sqItXhVqsOeH4bRh0,7154 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=qOKeCm46kqdLO02H3wxMm2wEHzFQBdUFDdh_kC0KHwM,6630 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/hits_alg.py,sha256=3xuTVFDQN2VYz8qGVG2TmJ6cQreZm8GY3ZvjyDkAXus,11889 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=ItC0SgX5TR6b8uQP3eOl4fjZVjArLaiaV_F3eroo0VU,17691 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=qpjWeMHkNsepx_Al08wrHZpXYb5NRYgvliRDGEH2TWg,2953 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=QmYxqxmQaisUU3Dht9XY0NkHL4xmbuu7l83Gt4mS8LM,7562 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/contraction.py,sha256=t-RNVWgXjhqbNYUebJC7mZLpUZJAfFU4ju_WJrZ7Y-M,21746 +networkx/algorithms/minors/tests/test_contraction.py,sha256=EjNPMSAR_agCn6jclpx8ojhDg4G9FPVmQfpvgyiprMA,15918 +networkx/algorithms/node_classification/__init__.py,sha256=ZCwCp9_7nCD1d_7pwzXU3F9vT13uKhY7wtmRzrZKYQw,1748 +networkx/algorithms/node_classification/hmn.py,sha256=GFlPFKPQtfb1_SzXtVVnK2Gbepa2vPQdsGG_8j897kU,2603 +networkx/algorithms/node_classification/lgc.py,sha256=QDv7oMECQCz9G7tjwXUHcwL6WxiVpzxRe4IYcYrfP24,2722 +networkx/algorithms/node_classification/utils.py,sha256=oFTTByxlC-VjnwazPBpdwyIzEWEylEtFiDKkTc7NqSE,1023 
+networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/all.py,sha256=kzyYsQHxPZ1JZO91u58kriq_vqNejd8Eaeu4rRsmoEk,6741 +networkx/algorithms/operators/binary.py,sha256=DT_vht6aj62yAlXrK4V9sgmAkMUgitvL397lnHwBM3g,11979 +networkx/algorithms/operators/product.py,sha256=ny5SSemJ5I6Gt7eIl3aDxStsIjq4TPWWx896mR06wpk,13857 +networkx/algorithms/operators/unary.py,sha256=D647RNItgvd05brXLA7VQxEQ1uTsg1PMAzSZshFxkfw,1717 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/test_all.py,sha256=_WolA1dki0JqeDjLSXMbdGnUkxrP7WJKKIi30OEHQYA,7371 +networkx/algorithms/operators/tests/test_binary.py,sha256=_v7Tu9jzkjI5UYMp3SPKKswXIRqvrBZmwOJKh__b7jE,12033 +networkx/algorithms/operators/tests/test_product.py,sha256=EYTajjizRtZUjQk8o3vjPGoyfs4cDyevLyblCO6RsJI,13188 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/astar.py,sha256=Yk0U976Uq4HNsaaC7M7pw1zRyzw8NpptX12WMBYVHrE,6990 +networkx/algorithms/shortest_paths/dense.py,sha256=dxDu2puBcMzLlvxemBK-KSr3LUbyGzErTmXdj1Po-ok,7300 +networkx/algorithms/shortest_paths/generic.py,sha256=5TIjfQnbWdbhD5bS_oZJ9WwrEHFKHTX2K4-KrK9wwFg,20139 +networkx/algorithms/shortest_paths/unweighted.py,sha256=z7Fs94f8R-jeBLH2TAZOQBKEMqQGt_czeI07_1WUUNM,14220 +networkx/algorithms/shortest_paths/weighted.py,sha256=7KA42oSuVrdPY3k9z5yEAMu2xriPv_EobxxlkEYtvO4,80039 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=CWFKGGoqhDMBA6pQP-osmHzmfuf1PxYKp1otgR0UV94,5507 +networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=BNwXCe2wgNPE8o35-shPsFj8l19c_QG6Ye8tkIGphf8,2300 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=5xmZuWeBaIKlzxp4Y1C_ADhNkJfCwUQSczFB-ezdKu8,15463 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=H2j_MaafTzx2U-biPiViuFPOdk0H50s80HlGQaXBaAA,4601 +networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=D4MHcUhgYueBAAUccAmESnlcLAHBsd75kuUCiQ0vfg0,33263 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/test_asteroidal.py,sha256=uMY1UbtYj5pHEwZbCBT5Ep_jVv1uDa7rSXLWVKUkcWk,503 +networkx/algorithms/tests/test_boundary.py,sha256=ebnJRPyYZOjKE5x0PfXXbiEWA9w4mZtL1j19Lh46WtI,6227 +networkx/algorithms/tests/test_bridges.py,sha256=1STJG5IFSe1rnr64BKrnE7Gm__oCAHxOSHBwl-z97rY,2513 +networkx/algorithms/tests/test_chains.py,sha256=akKRVr3itHTDEll3t1QGhjElAWLWgqI6yiX8vQYJ5f0,4086 +networkx/algorithms/tests/test_chordal.py,sha256=tY-lw79emVZ27jb-zNpufDWiObdeZODWm5YbvI-meeo,4458 +networkx/algorithms/tests/test_clique.py,sha256=BT9VStBBVLLT5-mD2HB0E_HiwzcbmoBVISQRhs7VCpc,10519 +networkx/algorithms/tests/test_cluster.py,sha256=AltwLWAblpSLa-24KvNuxYxM2IeVl5p2d-kozA9QJ-0,15595 +networkx/algorithms/tests/test_communicability.py,sha256=dUKeV-abTQqHfNZY4lelu7aBR1fVgNrMCpKrE5V1P9Y,2939 +networkx/algorithms/tests/test_core.py,sha256=l29lU66JqKCmzdrt5ZnuHJ0Sy0Vu_pYGZRiPClMOm1k,6581 +networkx/algorithms/tests/test_covering.py,sha256=FZw0-_mOwDS7pMFH4tKOZ1YJ7te5i7DTS8Ji62flqO8,2417 
+networkx/algorithms/tests/test_cuts.py,sha256=2Ir5xyIG4cTC4Dgg1cceLXaEFiOCJ60ZTDDn33vz0Ns,5377 +networkx/algorithms/tests/test_cycles.py,sha256=NjHzk9QxldNf7NV25F_U4rT-U1B4rdUCrqVnRa5I1Ng,11758 +networkx/algorithms/tests/test_d_separation.py,sha256=wFRwZdvSAi4b9FDfWEIE93Lhc73kv44eyQq4rwrvevg,4312 +networkx/algorithms/tests/test_dag.py,sha256=RGAtEBz_HxpsD3hjXZY9Sh71C3Un1j_j4cb4V7_6Dho,25805 +networkx/algorithms/tests/test_distance_measures.py,sha256=2Z3tTpwpOehI4h9diSHmwPGO7oyG8zxswJkTaxPFAnE,8478 +networkx/algorithms/tests/test_distance_regular.py,sha256=pPZ2CPKo4QLjhxlcJhBQZif6-_2qwfh1kpbrN_mu5tg,2312 +networkx/algorithms/tests/test_dominance.py,sha256=ZeLzdelMFDPBdKnFykUAG565gs5ySUbEhdBMT3HX3hQ,9388 +networkx/algorithms/tests/test_dominating.py,sha256=hyta7ln6BbHaGlpEUla6jVzh2PRuSjvujLSGXrmwZbc,1228 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=Q0GeWkKTyqEaLAJLM7kuyVrcYF_n73MiM13x8o5iJ0Y,10038 +networkx/algorithms/tests/test_graph_hashing.py,sha256=duR9DQLUpRuy9bv0ZKQPt9gy9WxiX_K0-BVMlnF-WHY,23517 +networkx/algorithms/tests/test_graphical.py,sha256=iwaAV-LLxzxdrQFHD7zYGzRdwhKiWIzHSlMypu0BF9w,5370 +networkx/algorithms/tests/test_hierarchy.py,sha256=g3-0pNfzRo-RDW1BsiLXxyi2LwWIJukXx2i4JCpN2fg,941 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=7c322xESYdH5WEA0TsMw4Jcc_-lqfIsj-SjXP6Y0TVc,19442 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=BeuSAX3wqel2mn40cu1FehcvecRGeuCw1plWQFY-oZA,10922 +networkx/algorithms/tests/test_matching.py,sha256=MwQxn_YPJ_TzKabKjLUZfORbtDaXPh8jQtP35RkO6Eo,18836 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=iYLkDGzYAmZ06IcT-0Rtay7UyjJ0A2y7ilXUTjDFg44,6742 +networkx/algorithms/tests/test_mis.py,sha256=F8cf09mvzG3A_omw6wWR1-j9i8WUmpGI9BwGdi2bHes,1875 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=ZGa_uSd6tRqDS775mrPaRLbIr5CDQEMS8qw1frO_pEU,4669 +networkx/algorithms/tests/test_node_classification_deprecations.py,sha256=5UcqqCFIZUiFZ1NsDhTQnMcecbQxFEe6qlzgNGDmPTQ,1286 +networkx/algorithms/tests/test_non_randomness.py,sha256=-8s-fJLYRxVNp7QpaMe5Dxrxi0kvewY78d4ja-nXNBk,782 +networkx/algorithms/tests/test_planar_drawing.py,sha256=FrpNWiGxNzBokpSZHfa8q55UyRn0v7gzwgUmNcMvT7I,8775 +networkx/algorithms/tests/test_planarity.py,sha256=Ts63BD2k38lWA8tDv95YkMFfskHHdZizmroeh5WYEgk,13169 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=MkdZ2w_7i0UPK6PdnStULwmzAt7RAe9xS0_BWxiK05s,1297 +networkx/algorithms/tests/test_regular.py,sha256=zGf7Mmh7XPtwunOoeTfgiICnfsVeCEbMop3NrDgIfqY,2457 +networkx/algorithms/tests/test_richclub.py,sha256=lugmYnRVZz37WWFVaZuVnfuOIZTCddIk1KxNoeNnPYY,2258 +networkx/algorithms/tests/test_similarity.py,sha256=Lrg6NjvE369HDxna5LwsvCPFjYpOx97Q0RjMQ-Iyskc,32195 +networkx/algorithms/tests/test_simple_paths.py,sha256=YT1PSijWs22dxoUI_lfzYOj1ukr--kNEtie-TGZXKF8,24008 +networkx/algorithms/tests/test_smallworld.py,sha256=ReQRXdtXRCRc9YTOATYz_CyOEL5YYiU73GZme_gpjrU,2153 +networkx/algorithms/tests/test_smetric.py,sha256=x2LR9IyimDRC29a0uBnPeBCxptSK90NLN6GQYAH9nRc,426 
+networkx/algorithms/tests/test_sparsifiers.py,sha256=A12V4ljWxvXaSFJ73mHSFK2YNO-k8ax6Me4yEWTsI4s,4043 +networkx/algorithms/tests/test_structuralholes.py,sha256=p2PogSKedBHR1bT6x-tuj8aqV3L3tr842v6Z8QozYRI,5228 +networkx/algorithms/tests/test_summarization.py,sha256=msFYq5KWCMT4sK6qXhn_ZItJwlvaANqEGP_bhOl0atY,21393 +networkx/algorithms/tests/test_swap.py,sha256=SIQRGQv9E8phEzrxQKkx0GP-tyiwFEWnkONF06H6VtU,3067 +networkx/algorithms/tests/test_threshold.py,sha256=n3dSpE3amPa49C4MsAffSpZ259saTU0c-1mMzERrh84,9760 +networkx/algorithms/tests/test_tournament.py,sha256=xxmLb9Lrmjkh9tKmyv2yYJrhB2PHWh-Bq71M-d1NjQo,4158 +networkx/algorithms/tests/test_triads.py,sha256=td8v-_0JiLvV0ZW6ADqE8V5iy5p9wfH-jYJtrA-LJ-Y,8952 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_wiener.py,sha256=NJJbXZ9L5ZeFGQpCpvYVWFNqyX3amkbuDQEBL7wCixw,2080 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/beamsearch.py,sha256=2iRO5_t4ZweZ2Jg9nBshRBOdWkcb8KB-fDgTFyhviRo,3388 +networkx/algorithms/traversal/breadth_first_search.py,sha256=xB5bwg8qRfndDMRrz8lA5XI-kawbNjt_f7KsK8gVgyY,12930 +networkx/algorithms/traversal/depth_first_search.py,sha256=1ladJNGDxeu0aRved6IG1EWU8lcxJJBm1b_JZWJ-2oc,12842 +networkx/algorithms/traversal/edgebfs.py,sha256=7eVnh6dqqpoCAW_dNIACvWgBj1-RytVGBft9V8Qr8eE,6233 +networkx/algorithms/traversal/edgedfs.py,sha256=sqssJqQ3M-xDdU1sPdxMd_VDQcxymYMv-wKnSEZEHOE,5938 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=XutDm5yWOB6lU0p_Wtd6RSSOdd7OfBMkhvQ70wxzb6w,897 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=nm9WzJgBwB_AuPDQD5DKkZc5vpj8Xks08sSf-dL4E9o,4060 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=_pk1X-gJr3eIZGKulPmP2apz6K0AMSrs21cijyRI0vU,5487 +networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wm_FjX3G7hqJfyNmeEaJsRjZI-8Kkv0Nb5jAmQNXzSc,149 +networkx/algorithms/tree/branchings.py,sha256=lrFHGdFFgkEHrFY_D5j9BoDZoZVmxa2rE6KSpYWTVJQ,36255 +networkx/algorithms/tree/coding.py,sha256=RrzQtnGmZilhyXeVVMWYLGmdMMkh0wZHtF5MMTRdRbo,12987 +networkx/algorithms/tree/decomposition.py,sha256=9FoC5jiOM_TbrKs3MlV3NZZ5t8t8a3Qbya88vlM6igM,3034 +networkx/algorithms/tree/mst.py,sha256=hd4GDW3CcV76dv9lZhsS0m5G2Li-sRwGy6zv4U7e1_g,39261 +networkx/algorithms/tree/operations.py,sha256=bAIIsuZ5CJQOFtMzG6ZIEEiE23QGzu1eIY7uqLuu7LA,3499 +networkx/algorithms/tree/recognition.py,sha256=u36v_SVmQ2W77gB55vvsU6ayG12zYwKOGHPwVxvSnRY,7497 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/test_branchings.py,sha256=-jSj7I0dCOxcK3kTnXQRu-NDOWOQH7tzXlOZqkJcRKk,15249 +networkx/algorithms/tree/tests/test_coding.py,sha256=f3A5dvfkWImC6Jp2qkuw2Sz3whOsabnaOfu6Eh9r65I,3954 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_mst.py,sha256=Pgym7C6ZMb5npBbQbIf6aZ4bjfkYaLoHTRx0fJ2GV4I,21428 
+networkx/algorithms/tree/tests/test_operations.py,sha256=0IevbCpr0F6AQBLlxbp5qSO9ENf-y1lg1ZkZxuWpHfc,1124 +networkx/algorithms/tree/tests/test_recognition.py,sha256=1Wz3PHAvAkt2Q_00HUXcfabRY0E63VBwNREAeWoz9N0,4173 +networkx/classes/__init__.py,sha256=AzOWEJxd7rt8jdPvdMmPdKvhbQTksRvSN86TGRS8HnU,335 +networkx/classes/coreviews.py,sha256=i0EEXzpKsW3W62_aP83JrHOxhlaS5K0xZiVE7IBBFCM,15471 +networkx/classes/digraph.py,sha256=YDnFAxhtaMYOyWfipiCu_x62BLlFldgNalDJQ_yBgEs,44427 +networkx/classes/filters.py,sha256=47OFApfkvvohVMoZ2v9sniM6sgv9rka869BDwmbdww4,1715 +networkx/classes/function.py,sha256=jXB672qCM3kNUWjTJG7_Rj7IqXYTRvrLJCqdkWj6Ync,35449 +networkx/classes/graph.py,sha256=whquojGtufS_LgPbPVKtiNUOgDQqzyNTPfIs6gu8HUQ,65984 +networkx/classes/graphviews.py,sha256=p-TcscD--cuB9zn4LdmsfohdUFn3izL30J-88MSb-To,6567 +networkx/classes/multidigraph.py,sha256=GFEH-3FWb1hlNjSeQgSlciLs0zTMt2XWDZAJSnvgCQs,35867 +networkx/classes/multigraph.py,sha256=Mt6njKgMg1gjVC33LMRlbEH1yEuYNOlIwH5fLZfC6K4,45810 +networkx/classes/ordered.py,sha256=RxgDUaJMlO887FsXws4SiPib7hCB6kUCAdCz0vwqRGg,5405 +networkx/classes/reportviews.py,sha256=H9BCIugWdoAsAsQO1F0oDbjNCQIdyI4JltQbqVc8Mb4,45714 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/historical_tests.py,sha256=mMQy3mZ9o5ybkTUvtcl6eFflcCX4Ocntb1L35MOwZ08,16173 +networkx/classes/tests/test_coreviews.py,sha256=KcvLfvPXgIKYJrW97M5I6HgJrbHa3mb60CQc3QlV3Bc,15409 +networkx/classes/tests/test_digraph.py,sha256=3OgbdTjpyaZBi6SIoUdH3p2c6ia0d0UpJmC1qs7R9Ak,12059 +networkx/classes/tests/test_digraph_historical.py,sha256=xb2sylutpDIPXM1stcGW-tsW0xFOPQ-WZ7UbqapHEm0,3689 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=aYAhSFHAhH2L_LCyrfxLEt1aYcItYAEwq9NyzDblzOs,26327 +networkx/classes/tests/test_graph.py,sha256=2PsivQzvN3od05YOtKRsqQe2NXGLibf-npnwaNP_RQ0,29739 +networkx/classes/tests/test_graph_historical.py,sha256=-jf961vQCuQLyly0ju50q9dbzWG5m2OAs9H6IVS670c,273 +networkx/classes/tests/test_graphviews.py,sha256=7PaY2AuqgCb-TS1pvjkf51GZMsTY179XUxLGRNTx-M0,11524 +networkx/classes/tests/test_multidigraph.py,sha256=nyCRNuIURmk3wFKnLfuPIb9BI5Rjkwd4gjmt5kwc8FE,16034 +networkx/classes/tests/test_multigraph.py,sha256=9z0ocdUGW0kq_zL2XQgBrdNecTQwwys6h-5GbaxgGr8,18568 +networkx/classes/tests/test_ordered.py,sha256=QlDQyKzstSOUQWfIMSMl3hTq9e65pWNYhLlH2U0xkRI,1148 +networkx/classes/tests/test_reportviews.py,sha256=ek-FMPYupoFUFIP-Yv3aJXDYmPs4jcYhreZOrI7Rn88,41317 +networkx/classes/tests/test_special.py,sha256=Uq25h522f3ndVE7P8jc4IwghGggLK6vkP-MtxNoVgJY,5760 +networkx/classes/tests/test_subgraphviews.py,sha256=yEhUPLdS7fW7gLuZzEyQu6zGWnBGvS0GbUbYpuDHTHA,13192 +networkx/drawing/__init__.py,sha256=rwVeOR7MCDwBUhXnx5VyiP0YfXkCeT7waocuqwXEPw8,136 +networkx/drawing/layout.py,sha256=U3hFZNtAarnHtNrnUCEnDXw-f71QhGFT7jGvTveTV4w,35778 +networkx/drawing/nx_agraph.py,sha256=SKICRNITffboFQN-m_bo4sxKteiHTdqHBpKTQybf48M,14699 +networkx/drawing/nx_pydot.py,sha256=icNbna-3F2kXQGGtnjO4oKiH368Cc4Dtbozuv-kHsVg,14091 +networkx/drawing/nx_pylab.py,sha256=Hj4Bl4hZgr5lBin2goU4jvvkaqQ_-v4oUNotsu6es6c,48387 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/test_agraph.py,sha256=LM4XfZ1C7MPdNDYgqhy3ImHIafIIp4RgetDVe14khv0,8633 +networkx/drawing/tests/test_layout.py,sha256=fhL7o4pi1FT5t32FsWHMimTCeyOuFC4CcaunDreNsdw,17005 
+networkx/drawing/tests/test_pydot.py,sha256=zxq8cyjl0ANJg63aXv87D1xvsju6wHQGPt94C8AFLB0,6241 +networkx/drawing/tests/test_pylab.py,sha256=06GFkbfc4KAX4Dq9VoNzlamyAWMlutyLQJyuQ-SdQQo,26085 +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/generators/__init__.py,sha256=vgbZl18dH6eXOnKI60TJjxrVRsfg-PSPo16i7OSEMr8,1272 +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=f0276nWepfl7P5nbCmZ6Fcd8zntPbRuZKxoQqweM0C4,5548 +networkx/generators/classic.py,sha256=mvK7lT0Y6LX1ipgwk2guIDj_Th1QQtccrae_qJr5_6o,23801 +networkx/generators/cographs.py,sha256=4ebIRwmgDiffa2ofAVGolkpnxQzN1xfkq0r1crnwpEE,1844 +networkx/generators/community.py,sha256=jI-PGI-mC6mubahXRIwj-qJDz9nRX4VE8vMDNosoTGY,34429 +networkx/generators/degree_seq.py,sha256=B3_2OW9EK-u_UHz9vjChwDNTPZG1TrRLMoeYYwGz-Wo,29817 +networkx/generators/directed.py,sha256=Ukdw9h3taJGO3x0zEAIzMsw0n5B5ZOAk2BqcKl-jrpk,16676 +networkx/generators/duplication.py,sha256=qpX1nx37qtJt2NN9EOJQJqusFJLLNpzJmLJ8aE0mkhM,4959 +networkx/generators/ego.py,sha256=CbIbzM9Dxk8OM48Xw4Qu9AwoL9J9SeEzvc5KWXaomHg,1836 +networkx/generators/expanders.py,sha256=Q-fnJKbIc_XXHaHOk9D4rVLXJKHk2Yg6ZgrMfoEJk88,6191 +networkx/generators/geometric.py,sha256=NvQSapYw2pgEbDlI2FwmfX2gxmwoHzP5HjDQesdKxyk,29482 +networkx/generators/harary_graph.py,sha256=zDhVtwJJkr-UjOY8jGhuvPQ7b_Mjve7XpLNNA4I2cLo,6072 +networkx/generators/internet_as_graphs.py,sha256=MyaBr9iRyTDij94ukTKZJUTFrx3X792VcquDnm-bzqs,14128 +networkx/generators/intersection.py,sha256=n2molGcwS9tE7Lo65clvJqTynymOmtqXrn5jQPKpTbQ,3947 +networkx/generators/interval_graph.py,sha256=5bV5ZL66tsDazbbe2IZY3uZFqLoTAqj39F1HJKvvct4,2186 +networkx/generators/joint_degree_seq.py,sha256=LGV7USEfG-t6zUzfip1xnCAgAVn5qkaLAgi1zHt8-50,24785 +networkx/generators/lattice.py,sha256=qDwNX7eSXsn0YKiV42aF_7sjGLyov0N-uBsMRmTgbRs,13222 +networkx/generators/line.py,sha256=CFqpIqDUhDNQKn4eK43aOAS_bqddRpqf-P8l6RM9MhA,17654 +networkx/generators/mycielski.py,sha256=aWhsBSq5fagkxfRqN45Gswpo64jv9lSu9A_g2mMV2P0,3225 +networkx/generators/nonisomorphic_trees.py,sha256=GZJaC09tck-8vtzxH1px9RJTcl8B5WbWaC9ldqTA_MQ,5180 +networkx/generators/random_clustered.py,sha256=DBWtgWQT4gHX7mb4iZjRhfDMEfcoWGzIynEOBQp2cZ4,4132 +networkx/generators/random_graphs.py,sha256=eGaNxx06z3gJrv64vGcoeyXKBKqjYVXMmd8g29y23nA,44337 +networkx/generators/small.py,sha256=-MKuw9rirnVQP5C2JWM78nLq0onGr2Y2i8xlif0p5J8,30426 +networkx/generators/social.py,sha256=ghaDZVhcuUneZ1PS6mkMERgk8yPGAOGU9fHJgy7lAWo,22759 +networkx/generators/spectral_graph_forge.py,sha256=sZnPxv8jKs43Y0gCaHKqZrC-_BEunYaZqKyzKaWl9zU,4246 +networkx/generators/stochastic.py,sha256=5H1kxP4za36KRkfXF8CGGg_X15GtCfQdx-6gzXdHtok,1840 +networkx/generators/sudoku.py,sha256=sc2oGk0vvSmmSzlyHWCntJ8_fXvh9VomeJ-0v6p8yew,4243 +networkx/generators/trees.py,sha256=ayXZBWb6uUgHWFG5kYkZAtV6yC3vy-5_7gS75J_CtJ4,14146 +networkx/generators/triads.py,sha256=OAVGc07yKJ2d2IAvSF94z8f1KHgFS9lMeC7HoWCn8V0,2184 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=5iBXKn9DZKvxmtpqWwF7LEpV4TRcSK9iRaeBGagjjUw,19550 +networkx/generators/tests/test_cographs.py,sha256=DkiQzP69sjw3QtjWVX2XV0EXoOuEvR42dixPWwuawSE,460 +networkx/generators/tests/test_community.py,sha256=-8AKUr10zQ_KZKoGud00ypxsby7-YNGoO80qibw8h5E,8681 
+networkx/generators/tests/test_degree_seq.py,sha256=xCcfFEHee6AzVdRKHZOZBEHbv03UC8PDYAWM9BON3sc,7106 +networkx/generators/tests/test_directed.py,sha256=fGzUOVnwNR1aOv-9R6caYQeT-xZ_Ea1v6qPFxi19swE,4179 +networkx/generators/tests/test_duplication.py,sha256=IIzcHEfHp0NHsH7GTXSb4E4kgXAlt83q4IMibfx2FBw,1915 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=aYS2zuodq7AeVISebU_O_QIFxIPBgto8J-vEsaqPhy8,2389 +networkx/generators/tests/test_geometric.py,sha256=3ZHjCZJC9DH8jKO5I19h_TisEwVYQC21ADE7LJreclc,11237 +networkx/generators/tests/test_harary_graph.py,sha256=_k00U6jwuGSLu444Cb4q4zRaLV7ufNWNTHJBMOnFmf4,4958 +networkx/generators/tests/test_internet_as_graphs.py,sha256=lR3_KuyETCVxPnRaNeks0sEcNljHmi2ohOYkxaplgmM,7137 +networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=-1yXDZDW-ygmNva9Bu-TsS_SYGLcW1KJplwZHFFYyWM,4278 +networkx/generators/tests/test_joint_degree_seq.py,sha256=fHK-hW_9aGdf13AlnvoZZmNMTik8CXPoJelmhSzzcXM,4272 +networkx/generators/tests/test_lattice.py,sha256=EFhg_eA-q9x2e56FMIT_Jw3ZXqhjW1yt6Iy-EhUIzzU,9292 +networkx/generators/tests/test_line.py,sha256=f_6YjUGctdlw6ZbSZqoFVpBfTWZLKsW4w8TE5RfLnZ4,8471 +networkx/generators/tests/test_mycielski.py,sha256=cAg2J6o_RrbwEdAc0vCuSF6zeS6w1KT4leTM0vkIeoA,822 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=Y_qWyj_qZU9O_DC4BHEVD9xnIEALCmfdmZAYJjTxUYE,2384 +networkx/generators/tests/test_random_clustered.py,sha256=LTfigb1swnYWS59OJoBmNcjFcUjsodnHVOwFxBXl7xg,979 +networkx/generators/tests/test_random_graphs.py,sha256=kA2Qo-DbbnBDN8PVcbE9-y22L7JD4B56DfaNU52myY8,12827 +networkx/generators/tests/test_small.py,sha256=yXMFFqC2IWlW_KSvt77H_JYE1i8P-7kmiB7FgR1_iZQ,7354 +networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=nPupQ2mG2oy22gGCs1dPFg4nZHYObsQZ1bBxTXTsEG0,1822 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_trees.py,sha256=6FIXOXkiYs4_fr7HCq5q8TcQ87ML_73fZNqT8VI2hVA,2901 +networkx/generators/tests/test_triads.py,sha256=mgpHFf0Z34CqtnXgkdf7gK1dC77ppYAqwviXsaU1HVs,332 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/algebraicconnectivity.py,sha256=yRu4HOy1EWdk5jSWTRV1vnSumYvIwsd28ffCyh1BPFA,18281 +networkx/linalg/attrmatrix.py,sha256=Swp4TdcOo7OWvJEdwdSvmZBCeI56SxJQpmc4GSLnmEI,15658 +networkx/linalg/bethehessianmatrix.py,sha256=_Ni1dtnow03zj9J3OENlrqK08_p2e9FOKBOwIiQzIIk,2996 +networkx/linalg/graphmatrix.py,sha256=srUndtkU9U6I6VSFDrm9MgH6qm1SChIJ5RYaY1g1maE,6071 +networkx/linalg/laplacianmatrix.py,sha256=aPJPMP-WT85hy6jZcC6w9m7S2_gOQP2_3Fz3fb_sSQc,13986 +networkx/linalg/modularitymatrix.py,sha256=7hgSPeoMrkDFKOETaMgw9inYJ2T8uSlHeoIib2r3fKA,5119 +networkx/linalg/spectrum.py,sha256=Iek8aRJw_TPVGiyxVyKGCmVLCVOxb3ukvBXp6X1Lhxk,3876 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/test_algebraic_connectivity.py,sha256=I9kbAwLsGvVUt_1bAwHcp4UmqSg_peWv41j37bg5EUo,13344 +networkx/linalg/tests/test_attrmatrix.py,sha256=XD3YuPc5yXKWbhwVSI8YiV_wABWM-rLtwf1uwwWlnI0,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=0r-Do902ywV10TyqTlIJ2Ls3iMqM6sSs2PZbod7kWBM,1327 
+networkx/linalg/tests/test_graphmatrix.py,sha256=0cMwzfK6gX7yGCtwRpVXcUn0RWDQDH-HALM5volWA20,9090 +networkx/linalg/tests/test_laplacian.py,sha256=K8p2upJTJLfNHfAf0B9ohPXBZ4k_2VMpSvIc-jXZ_rM,9934 +networkx/linalg/tests/test_modularity.py,sha256=mfKUvwc3bj6Rud1aG4oK3Eu1qg12o6cB8-pv5ZFicYY,3115 +networkx/linalg/tests/test_spectrum.py,sha256=agP2DsiEIvtkNUkT94mdPtJjwnobnjMTUOwjIQa4giA,2828 +networkx/readwrite/__init__.py,sha256=smim3hE7gh6EUjVvCP2rypK0Lgb30y5LOWfgnl79kq4,2490 +networkx/readwrite/adjlist.py,sha256=U4Fit0feX6snGWKqfLslqcTT6EL638_YeZfxnSWTkFM,7743 +networkx/readwrite/edgelist.py,sha256=cV6lWh70mU_W5uBDl_VywZIugIJaRAu9IhoQ2Rthzjo,14079 +networkx/readwrite/gexf.py,sha256=AEtYSWVBRmsiBuF0C5G8z7YbQ0pQRIbQkK--zKj7IMY,39511 +networkx/readwrite/gml.py,sha256=T1xccYR11GzZBXsNOM-4_jYcQbOjQydxc8Z3ECppygg,30016 +networkx/readwrite/gpickle.py,sha256=hiL8LmPRtOWWwUSaPYLzxEtK_pen-GQff70SYL-XXLo,2985 +networkx/readwrite/graph6.py,sha256=3N50F0-58G4YohISmgV2G9emKDBUpfHcKhomYGYHhNE,11277 +networkx/readwrite/graphml.py,sha256=OtynmKyxpS-1V8stzwuIQrk0K3FYZci5eGbuTglRhHM,39163 +networkx/readwrite/leda.py,sha256=RSM1kW6F7HWkWliXeBTEMA_3rgXmRfX63MGM_d3DHUU,2712 +networkx/readwrite/multiline_adjlist.py,sha256=LEBRqDIUjxqeeYb-OiL64fDCPagmYVUA0uj0PMFJFcY,11201 +networkx/readwrite/nx_shp.py,sha256=RoING1m00mCFQ3w7YFgmxV_uClb8_MbXw2nba8acLB0,12183 +networkx/readwrite/nx_yaml.py,sha256=fEhgNB23RPNN_upmkPsRvZ2aUtQPC3vJegkRSrVKeS8,2174 +networkx/readwrite/p2g.py,sha256=QzjBrvkRR7_BlIM7fzzQeBBbDo0t4mHttJ1SYQg40Wg,2995 +networkx/readwrite/pajek.py,sha256=EDiFLozCQJb19g-D5EXpNBO2a1Ago2iro07Wp0xUkNE,8636 +networkx/readwrite/sparse6.py,sha256=0dGmYXB-_wPnJ_A0SHb0CvkTCoM9vw0ge4-ZjgOlCB4,10193 +networkx/readwrite/text.py,sha256=7h437jkHOxid7-DMl09Zsy1bzigTGVWLsribapfCG_I,6498 +networkx/readwrite/json_graph/__init__.py,sha256=6U_kUzFQRiTn_wtklqdxSJ1O7DLykeS6EPLIc-u0Dk4,724 +networkx/readwrite/json_graph/adjacency.py,sha256=2g2cn9OmwSihhFAJesPdhmU6yIC9Gp6x0reuhKxzjAM,4745 +networkx/readwrite/json_graph/cytoscape.py,sha256=fAT2T4c0IQvAMT9wmAw5eJMjbwJ2T_sAWoMsdLtX61A,8142 +networkx/readwrite/json_graph/jit.py,sha256=xuURaJrdy4gMgwlLIAUiezNM_OCwsalgin8og2qofVc,3082 +networkx/readwrite/json_graph/node_link.py,sha256=1bjjTqnQotI3bee5PxPl1HnhLvB-KCF5zQxcqQ77THE,6111 +networkx/readwrite/json_graph/tree.py,sha256=dXtA1N16m-at8UdZXtZwy2oI05OdmoNpHiglJCWVeuk,6750 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=fIhI53yZlt4_8xSpCLy8-z0K132QTmiVLr49bHfKG5Y,1766 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=dDVWM8IYP4xJPp8Jaqv_p3Bg6TkdbM2aAEEK7GDXcRQ,2548 +networkx/readwrite/json_graph/tests/test_jit.py,sha256=_JpDVaWHwQSPsOCgPRZ7BPBvp8VtVNXOruV7uAz2w78,2068 +networkx/readwrite/json_graph/tests/test_node_link.py,sha256=YrEUySBplVAWbLfU73p4nJNYl7gq6SDkgkx0mm-0lXA,3177 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=xhXn1hsh2mN5AlYpFFod4_vReWWC5n8KnpMXzq82Tpw,1887 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/test_adjlist.py,sha256=dLEv3txnBrHYxajOYAQhA8CA7axiuPw1ECbaHL5p338,9922 +networkx/readwrite/tests/test_edgelist.py,sha256=l_o1-VbHFMIkNwOLWDdekoprzrfMEDf-tYhIKoxTG3c,9997 +networkx/readwrite/tests/test_getattr_nxyaml_removal.py,sha256=OBwlS280q6jFsrM7DY1MFmKLqHQZSlEswH7P0aY28bE,1005 +networkx/readwrite/tests/test_gexf.py,sha256=PLphmfUB8fYGJCt3EM-rVkqXjHJaCF1GNFN7qccyrxM,18928 
+networkx/readwrite/tests/test_gml.py,sha256=stS4rg6N4-YBBYNVOagxSnwL2kXFr2DX-7oQBiE4Mns,20182 +networkx/readwrite/tests/test_gpickle.py,sha256=kr_-moaOpvzk1N5VkuW-biJroHA_kCKH11Va-7Z6jT4,2143 +networkx/readwrite/tests/test_graph6.py,sha256=IjBpfTr-czBLHb8UT_JzvOTBROpnOf5TKKkfCnEeQT8,6069 +networkx/readwrite/tests/test_graphml.py,sha256=vGbgGGZEMtQa4-Cjhxv6lj1GiIjVtZYdARsisdAccQQ,67496 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=mtO2mA_2qeV7q8tQz9jf1fuo13blIhLW0SUKb_4S7j0,1327 +networkx/readwrite/tests/test_pajek.py,sha256=XTsnaCaYjroysCHlTsYwMGGrDR0B1MRwWkA-WXbAXTg,4703 +networkx/readwrite/tests/test_shp.py,sha256=w6MnK1LlVNLTPS5d2ufVynN7B537Vfm9RWxlluLu8nM,9166 +networkx/readwrite/tests/test_sparse6.py,sha256=fLpTG0YgcptNOpUipcCcVlni5i8IyC21kkk3ZeD0XhM,5470 +networkx/readwrite/tests/test_text.py,sha256=oEKc-VO5vk3E5XkACtpybaf1xUIcdAoETDkK5LZFiPM,7933 +networkx/testing/__init__.py,sha256=7r6YEG1MsWE913m_gOxgwLaENn0d8oW58qyYDnCjlQA,75 +networkx/testing/test.py,sha256=50q8FPBi5Uks6rwzu58yIZ8XXttuDtG7TvU5h1m0WXg,934 +networkx/testing/utils.py,sha256=WsN3scG0pYJhueHG97wItgMf8htJqcQTwdNDQRI2CMM,1491 +networkx/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/testing/tests/test_utils.py,sha256=jDmedlD9406YVdccNmRtiM4VEHuzRGRrbiKMB6UsA6Q,4953 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/test_all_random_functions.py,sha256=V1ETv6d-3wbbrinZHGUrjWQ6WOi1G4evQCdWJB87KZQ,8643 +networkx/tests/test_convert.py,sha256=cZJhrpPscLfvDOz4zJqhcq1IUJlJil-JQbiH3__16lU,12752 +networkx/tests/test_convert_numpy.py,sha256=mL7nQ2IezZtdb99IwBQb3iUoI5vP_AjBPOAaVWYu2qY,25240 +networkx/tests/test_convert_pandas.py,sha256=rNZ8UZiZN-9TBNVtFFj2BnY4Qb8lnetvcJndBBn1Nac,12259 +networkx/tests/test_convert_scipy.py,sha256=edYqfKczdlX5HqTNZs1HS8_uhVsbQCN2lzp9IzSxWoU,10880 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 +networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=hq-0vf78aLRkIMOviEW-nowtAfZ3A778McPZaFKmn5w,2663 +networkx/tests/test_relabel.py,sha256=XNRvVrLwRaHJSuXxBidxl5ojcFGs6l4kHwtCNtgLykI,12448 +networkx/utils/__init__.py,sha256=2FD4Wn9_6sbJFpNCB3bGVKcNvvAn7BOT-dDSH_qvLEc,272 +networkx/utils/contextmanagers.py,sha256=7WkfpRXxITV6xlm-Fo2Gc3b4DJNzMNRFIedN3Tzrnlg,1269 +networkx/utils/decorators.py,sha256=DKBd2zd6ndFbgpQzLr5OVs5QP_Ka78s8QEmMDr1gIds,46493 +networkx/utils/heaps.py,sha256=rb-eoEAgSx0t2EjUj09XgzpIQzdneZIT2p6opJw8TLI,10414 +networkx/utils/mapped_queue.py,sha256=Oj1L2DyqhGZ1TFhcHyJq5IK8ZTkvm3VNmh4F1zsYX7M,9138 +networkx/utils/misc.py,sha256=1MOrkvjftO3xuWwz0Zjftu-he7pkwXBbt__nm2ZIBFA,20390 +networkx/utils/random_sequence.py,sha256=ADFgbqUxPF6bcCQ7jJeUWoWSOI5w8zMDfMY-6yHmQW4,4240 +networkx/utils/rcm.py,sha256=Sx3iAwZvrzW-eMoQb2-ZprwmXeGFSMaNwpI6xBOEKvs,4629 +networkx/utils/union_find.py,sha256=PFEk5AyjSq8piU1E1qD4MIoaBWmeBLp1OTwkLeDTmmk,3323 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 +networkx/utils/tests/test_contextmanager.py,sha256=ewa7A464FOmm0uZD-kD4iN_9zEcQNwZJPIin0lE9u_c,310 +networkx/utils/tests/test_decorators.py,sha256=Waol9jmnOXS1FAHgE_6yK6zPUOk8zmOTVDePuknwG8E,13642 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 
+networkx/utils/tests/test_mapped_queue.py,sha256=x4xxZR4CbaSiLyMBahgGt7eBS0BtIx6CYo3sXXM-LgM,6338 +networkx/utils/tests/test_misc.py,sha256=mJ3cXfQmRfWCO9Jn4Jqi1OEo-s5CfruvTi2ZxFFSPbM,9664 +networkx/utils/tests/test_random_sequence.py,sha256=Ou-IeCFybibZuycoin5gUQzzC-iy5yanZFmrqvdGt6Q,925 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 +../../../share/doc/networkx-2.8.5/LICENSE.txt,sha256=qun4CfsaFZLtcun1LfEM-tj-ImuNTSg4X5DdQ0HOcRk,1763 +../../../share/doc/networkx-2.8.5/examples/README.txt,sha256=4fcFf8kOy3-lR9Mt5JabLTrR5CJU-TwR0qykp4WJaPs,185 +../../../share/doc/networkx-2.8.5/examples/3d_drawing/README.txt,sha256=s5-t1C9VR7xuGe6I6LoAHyLZypgxt6nacxqhlFV_cq0,22 +../../../share/doc/networkx-2.8.5/examples/3d_drawing/mayavi2_spring.py,sha256=m3CEGHEYxTwuWe4jObonMM7ZS62hdEBvas-SW1UCvrk,934 +../../../share/doc/networkx-2.8.5/examples/3d_drawing/plot_basic.py,sha256=PWG-R4COK5xRUpO9fzuYp7jIRkPqPINe9ThiSas1PA8,1149 +../../../share/doc/networkx-2.8.5/examples/algorithms/README.txt,sha256=xn-_KUQ8ego4sNw2nrr4axL38uzGmSgO1jiK3kC0_X4,22 +../../../share/doc/networkx-2.8.5/examples/algorithms/WormNet.v3.benchmark.txt,sha256=UvbM0_uQawr_W5rjxhICvH_W8n01FBiX8T-le19ufr8,1346746 +../../../share/doc/networkx-2.8.5/examples/algorithms/hartford_drug.edgelist,sha256=Nwzo8P1bWNq1e_JEodTLFgUCx80bpkg4PhtSd3G8aZk,2335 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_beam_search.py,sha256=SmksU_kiGquXokBB6midxx9IDFSph9SfZK9FKH4Z6O4,4119 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_betweenness_centrality.py,sha256=y4AnvUdMdUe39X-gGIGRohIHPwN4oSaf6Y2te7E5OsY,2122 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_blockmodel.py,sha256=wvdheWrt-KcX2e_zMARdPwOJHlDLKM8H8aCVkIx2gbM,2679 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_circuits.py,sha256=f56Ct7i_s5ImJ71OMAg8BPqcFYQygdEQnTzrAgl7fW8,3496 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_davis_club.py,sha256=YrUUnaWm82MEE4qoGeCfmUWmHqV7vpDBckDSmFn5T8U,1201 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_dedensification.py,sha256=BdXLYidxo-5swuseC8viiIQwNB-lqshQ6EdLU_vQsP4,2250 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_iterated_dynamical_systems.py,sha256=AXlEyF2UPtHosV8rZcmL_KjWOMOoDV-2dm7YzOT54Ro,5996 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_krackhardt_centrality.py,sha256=Ff4MdZyWKlnQ0HZuPinRrlAQlBtW_tOIeXZXn5CBY4w,637 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_parallel_betweenness.py,sha256=AVQaNwga0QPUrr3qr3g2hOupyKsf-jp08Dvw4gLz7M0,2453 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_rcm.py,sha256=nu4JylMqrESBo36J9O6dayk5cQjQ147-UISNzvK6r6E,1039 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_snap.py,sha256=02GOuZxjHHo0h8eO8ErhktIFFw4FFnOx8CVkt8uIaTI,3088 +../../../share/doc/networkx-2.8.5/examples/algorithms/plot_subgraphs.py,sha256=F-3_AjHvqZidxoJfyMIkfmZHkAEoroQrJM9GW7tJeSg,6474 +../../../share/doc/networkx-2.8.5/examples/basic/README.txt,sha256=SZoFiP7cQAXsOc0zCWRT7NsGznY1aiAk-0zD8dXsaPY,12 +../../../share/doc/networkx-2.8.5/examples/basic/plot_properties.py,sha256=NIoZX3qYpbb1R_mfgrYnldFwe1qOWYQgjfZZGhUHovk,1065 +../../../share/doc/networkx-2.8.5/examples/basic/plot_read_write.py,sha256=AN5KVYbINladQ3IEODapTqT-4hRCteonClbhUEvbUYM,525 +../../../share/doc/networkx-2.8.5/examples/basic/plot_simple_graph.py,sha256=1QGvIhSGRNJAi4lLzpE_W1q4OMzn0DpdCJpbEZgqeUw,1240 
+../../../share/doc/networkx-2.8.5/examples/drawing/README.txt,sha256=DoWMcDCC_TTjREqBqc_YMatITP1_2lHp7Rv5IxrM2bA,16 +../../../share/doc/networkx-2.8.5/examples/drawing/chess_masters_WCC.pgn.bz2,sha256=2e-170bYxxtpmUW4mrs4UEnsF89vIfJWJYOk8gYYGew,100224 +../../../share/doc/networkx-2.8.5/examples/drawing/knuth_miles.txt.gz,sha256=e-sV93FBC11m7Rdn-4K_uGoPTNgt3dtkMuyvQ0ovTZU,20317 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_chess_masters.py,sha256=7OZXxlTA8TKyyis7L6yJuqZYNSmzdszmRVNln1BSgrg,4583 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_custom_node_icons.py,sha256=2y_68MamGfIQhpM3YFHSxYqMqzLl4CnhzaqKDKFs4jI,2139 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_degree.py,sha256=US7sGbPQsyVccE3URvirRAHM9t6W_QfWgmkGin2O8Bc,1556 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_directed.py,sha256=Wyzt-aq3seUJVVmiEgPEzXP7YWatFTKDHNc3LvVtwwQ,1108 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_edge_colormap.py,sha256=3cTm9jX3vh635lGtLMmjwR2FJHlxa87WVK0GoU80zxA,441 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_ego_graph.py,sha256=E_LRplVi2knRK9hDuO1z2DvIfvV3uTMp0t4G2kVuDa0,910 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_eigenvalues.py,sha256=cu1VnGCbPgotHgqMzoixOdGNKzZ0rIVn9WjKKweuRhw,544 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_four_grids.py,sha256=P6LJaQLA-9_78LVyHkxdRHlLHj6s-vL21NkOiINjsxk,1054 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_house_with_colors.py,sha256=Bs1z9SqsQ5DMV2a8X8rXU6qpSyUyqvnH-O7y5nBsOks,665 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_knuth_miles.py,sha256=2i4voltKotkkbn_jiiHYaCxAkbJEliMpkQivVL8aKw4,4111 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_labels_and_colors.py,sha256=GtcCrvzGiuyRdeN2_gCH6WCpvtUgUGU2EXqXtMVOkRE,1243 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_multipartite_graph.py,sha256=0X0-mrxVEJD51i16mc5mLTQL0z6ix82NdYQs2ha_ZWs,995 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_node_colormap.py,sha256=Lgc9GuyL9F1sBtcA5pUMnrjdIbPoO3EESyAjLn4e5xg,288 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_rainbow_coloring.py,sha256=pjlo110knte-vQhfryAIQXTlQHBgL8jQBdij9KNZApk,2172 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_random_geometric_graph.py,sha256=HVfm2eKbzs5NcV0p0DxYJmpQ0V_z2ayykrWG3F1vftM,938 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_sampson.py,sha256=FYwBC6iCL6chHnnKCRgQk1WwkL2CcEBs9oq1RWBBCgA,1228 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_selfloops.py,sha256=x-1Ma-TlydzoU7VWahWynKMFACn1OcuR_Uwzk6LaZog,753 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_simple_path.py,sha256=QA26fUWDPmJT_4s1Mo-wG9LWKTyCXPIe-0mRJYXAW5w,252 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_spectral_grid.py,sha256=JhYZQcyrsKHvJfQR_9X4pBaKS7kaM0qvGFzbunqQzq0,1592 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_tsp.py,sha256=g-0NDgIPXQANaqfS3GufbvAoiaS0Y4s6O4TWSjQk5Vc,1301 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_unix_email.py,sha256=tZJBr60SvQSYf1OcrHGi6Oxwmev_skQzUsLUwuGb8wM,1968 +../../../share/doc/networkx-2.8.5/examples/drawing/plot_weighted_graph.py,sha256=HqH3lkhg62XD6ytrhi-nhBCiB6i0tcBv30rwxcQfIqw,1124 +../../../share/doc/networkx-2.8.5/examples/drawing/unix_email.mbox,sha256=i20mxjWonqmAbgbr1qBNIG9BTb3qQt0WQ7YLfHUwu9U,1709 +../../../share/doc/networkx-2.8.5/examples/graph/README.txt,sha256=etRvcBQG30Fb6dIiQZaYHbnl1a6Tz_peks4MqXKwRhg,12 
+../../../share/doc/networkx-2.8.5/examples/graph/plot_degree_sequence.py,sha256=TyzJyRmLHsaXEJ8IivYF416SggsM8lZE-zYxEdkngfo,799 +../../../share/doc/networkx-2.8.5/examples/graph/plot_erdos_renyi.py,sha256=lvwPtraq5q-vuuOyrlZFYABXfbfM47dR89S2k1h9GZ8,841 +../../../share/doc/networkx-2.8.5/examples/graph/plot_expected_degree_sequence.py,sha256=DWIyy0S3eBZksKAdk9dL9unIa1Cfw0rQiTcm8if0Qak,496 +../../../share/doc/networkx-2.8.5/examples/graph/plot_football.py,sha256=Dj0-gyMVUw80vyMIOqA5sFBzddL8t1R7BzIFoD1S1o8,1171 +../../../share/doc/networkx-2.8.5/examples/graph/plot_karate_club.py,sha256=6H2R2SRxZ6ZpCtIC7CUl0fhgdJuorZpo4NOok7ThFYk,494 +../../../share/doc/networkx-2.8.5/examples/graph/plot_morse_trie.py,sha256=w8sFNW1XaVm6Q4XBahxl2IArm3fVyUwTHQM2ZA-AjbU,2965 +../../../share/doc/networkx-2.8.5/examples/graph/plot_napoleon_russian_campaign.py,sha256=WS4ETV1ESBi8tCcPp6pe0uXvFiFht_xNx-hgK1TcTn0,2901 +../../../share/doc/networkx-2.8.5/examples/graph/plot_roget.py,sha256=jHuZLofFw7gTwxzMiNtwewnn3nh4SMeW8SvgabnSnx0,2126 +../../../share/doc/networkx-2.8.5/examples/graph/plot_words.py,sha256=Q-4VlEdCya3ik8kLgp2H4vIk5GP3hmWkH6Y9cLw0LQM,2683 +../../../share/doc/networkx-2.8.5/examples/graph/roget_dat.txt.gz,sha256=XhxtczVtfkcdZxE4P7KWs9ENRy3mvub739cXKdkgBk0,15758 +../../../share/doc/networkx-2.8.5/examples/graph/words_dat.txt.gz,sha256=nuZl5rQHvOTxrOvZjbLg0tIHXvbYGSIB5bwxuXZXcQA,33695 +../../../share/doc/networkx-2.8.5/examples/subclass/README.txt,sha256=9DLXetYO600sLeOY_fFc8Msnli8-qQrVbGB2f5gP20g,18 +../../../share/doc/networkx-2.8.5/examples/subclass/plot_antigraph.py,sha256=yImsNH9uoXJLAe8v37cDr0bR9DtjvQTESXozFEse12w,6023 +../../../share/doc/networkx-2.8.5/examples/subclass/plot_printgraph.py,sha256=-IZlxW9m8nRfqS16AAhWP5u_YGPKq005MjL0diyBcc8,2292 +networkx-2.8.5.dist-info/LICENSE.txt,sha256=qun4CfsaFZLtcun1LfEM-tj-ImuNTSg4X5DdQ0HOcRk,1763 +networkx-2.8.5.dist-info/METADATA,sha256=3ax0xIRcItYCi7YqISxtFJWZuW1p_3v_SIsDy1y2EBk,5010 +networkx-2.8.5.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +networkx-2.8.5.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9 +networkx-2.8.5.dist-info/INSTALLER,sha256=gRgfSaB7mbFTKnL6UjSYEdWrvm1o583nO6nktg_YuCc,12 +networkx-2.8.5.dist-info/RECORD,, diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/WHEEL b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/top_level.txt b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/top_level.txt new file mode 100644 index 0000000..4d07dfe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx-2.8.5.dist-info/top_level.txt @@ -0,0 +1 @@ +networkx diff --git a/myenv/lib/python3.9/site-packages/networkx/__init__.py b/myenv/lib/python3.9/site-packages/networkx/__init__.py new file mode 100644 index 0000000..165b0a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/__init__.py @@ -0,0 +1,90 @@ +""" +NetworkX +======== + +NetworkX is a Python package for the creation, manipulation, and study of the +structure, dynamics, and functions of complex networks. + +See https://networkx.org for complete documentation. 
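+
+A minimal usage sketch (illustrative only; the triangle graph below is an
+assumption for demonstration, not part of the package docstring)::
+
+    import networkx as nx
+
+    G = nx.Graph()
+    G.add_edges_from([(1, 2), (2, 3), (3, 1)])
+    nx.is_connected(G)  # -> True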
+""" + +__version__ = "2.8.5" + + +def __getattr__(name): + """Remove functions and provide informative error messages.""" + if name == "nx_yaml": + raise ImportError( + "\nThe nx_yaml module has been removed from NetworkX.\n" + "Please use the `yaml` package directly for working with yaml data.\n" + "For example, a networkx.Graph `G` can be written to and loaded\n" + "from a yaml file with:\n\n" + " import yaml\n\n" + " with open('path_to_yaml_file', 'w') as fh:\n" + " yaml.dump(G, fh)\n" + " with open('path_to_yaml_file', 'r') as fh:\n" + " G = yaml.load(fh, Loader=yaml.Loader)\n\n" + "Note that yaml.Loader is considered insecure - see the pyyaml\n" + "documentation for further details.\n\n" + "This message will be removed in NetworkX 3.0." + ) + if name == "read_yaml": + raise ImportError( + "\nread_yaml has been removed from NetworkX, please use `yaml`\n" + "directly:\n\n" + " import yaml\n\n" + " with open('path', 'r') as fh:\n" + " yaml.load(fh, Loader=yaml.Loader)\n\n" + "Note that yaml.Loader is considered insecure - see the pyyaml\n" + "documentation for further details.\n\n" + "This message will be removed in NetworkX 3.0." + ) + if name == "write_yaml": + raise ImportError( + "\nwrite_yaml has been removed from NetworkX, please use `yaml`\n" + "directly:\n\n" + " import yaml\n\n" + " with open('path_for_yaml_output', 'w') as fh:\n" + " yaml.dump(G_to_be_yaml, fh)\n\n" + "This message will be removed in NetworkX 3.0." + ) + raise AttributeError(f"module {__name__} has no attribute {name}") + + +# These are imported in order as listed +from networkx.lazy_imports import _lazy_import + +from networkx.exception import * + +from networkx import utils + +from networkx import classes +from networkx.classes import filters +from networkx.classes import * + +from networkx import convert +from networkx.convert import * + +from networkx import convert_matrix +from networkx.convert_matrix import * + +from networkx import relabel +from networkx.relabel import * + +from networkx import generators +from networkx.generators import * + +from networkx import readwrite +from networkx.readwrite import * + +# Need to test with SciPy, when available +from networkx import algorithms +from networkx.algorithms import * + +from networkx import linalg +from networkx.linalg import * + +from networkx.testing.test import run as test + +from networkx import drawing +from networkx.drawing import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/__init__.py new file mode 100644 index 0000000..9fa60b9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/__init__.py @@ -0,0 +1,129 @@ +from networkx.algorithms.assortativity import * +from networkx.algorithms.asteroidal import * +from networkx.algorithms.boundary import * +from networkx.algorithms.bridges import * +from networkx.algorithms.chains import * +from networkx.algorithms.centrality import * +from networkx.algorithms.chordal import * +from networkx.algorithms.cluster import * +from networkx.algorithms.clique import * +from networkx.algorithms.communicability_alg import * +from networkx.algorithms.components import * +from networkx.algorithms.coloring import * +from networkx.algorithms.core import * +from networkx.algorithms.covering import * +from networkx.algorithms.cycles import * +from networkx.algorithms.cuts import * +from networkx.algorithms.d_separation import * +from networkx.algorithms.dag import * +from networkx.algorithms.distance_measures 
import * +from networkx.algorithms.distance_regular import * +from networkx.algorithms.dominance import * +from networkx.algorithms.dominating import * +from networkx.algorithms.efficiency_measures import * +from networkx.algorithms.euler import * +from networkx.algorithms.graphical import * +from networkx.algorithms.hierarchy import * +from networkx.algorithms.hybrid import * +from networkx.algorithms.link_analysis import * +from networkx.algorithms.link_prediction import * +from networkx.algorithms.lowest_common_ancestors import * +from networkx.algorithms.isolate import * +from networkx.algorithms.matching import * +from networkx.algorithms.minors import * +from networkx.algorithms.mis import * +from networkx.algorithms.moral import * +from networkx.algorithms.non_randomness import * +from networkx.algorithms.operators import * +from networkx.algorithms.planarity import * +from networkx.algorithms.planar_drawing import * +from networkx.algorithms.reciprocity import * +from networkx.algorithms.regular import * +from networkx.algorithms.richclub import * +from networkx.algorithms.shortest_paths import * +from networkx.algorithms.similarity import * +from networkx.algorithms.graph_hashing import * +from networkx.algorithms.simple_paths import * +from networkx.algorithms.smallworld import * +from networkx.algorithms.smetric import * +from networkx.algorithms.structuralholes import * +from networkx.algorithms.sparsifiers import * +from networkx.algorithms.summarization import * +from networkx.algorithms.swap import * +from networkx.algorithms.traversal import * +from networkx.algorithms.triads import * +from networkx.algorithms.vitality import * +from networkx.algorithms.voronoi import * +from networkx.algorithms.wiener import * +from networkx.algorithms.polynomials import * + +# Make certain subpackages available to the user as direct imports from +# the `networkx` namespace. +from networkx.algorithms import approximation +from networkx.algorithms import assortativity +from networkx.algorithms import bipartite +from networkx.algorithms import node_classification +from networkx.algorithms import centrality +from networkx.algorithms import chordal +from networkx.algorithms import cluster +from networkx.algorithms import clique +from networkx.algorithms import components +from networkx.algorithms import connectivity +from networkx.algorithms import community +from networkx.algorithms import coloring +from networkx.algorithms import flow +from networkx.algorithms import isomorphism +from networkx.algorithms import link_analysis +from networkx.algorithms import lowest_common_ancestors +from networkx.algorithms import operators +from networkx.algorithms import shortest_paths +from networkx.algorithms import tournament +from networkx.algorithms import traversal +from networkx.algorithms import tree + +# Make certain functions from some of the previous subpackages available +# to the user as direct imports from the `networkx` namespace. 
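+# For example (an illustrative note, not executed at import time): after the
+# re-exports above, both access paths resolve to the same callable:
+#
+#     import networkx as nx
+#     from networkx.algorithms.flow import maximum_flow
+#     assert nx.maximum_flow is maximum_flow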
+from networkx.algorithms.bipartite import complete_bipartite_graph +from networkx.algorithms.bipartite import is_bipartite +from networkx.algorithms.bipartite import project +from networkx.algorithms.bipartite import projected_graph +from networkx.algorithms.connectivity import all_pairs_node_connectivity +from networkx.algorithms.connectivity import all_node_cuts +from networkx.algorithms.connectivity import average_node_connectivity +from networkx.algorithms.connectivity import edge_connectivity +from networkx.algorithms.connectivity import edge_disjoint_paths +from networkx.algorithms.connectivity import k_components +from networkx.algorithms.connectivity import k_edge_components +from networkx.algorithms.connectivity import k_edge_subgraphs +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity import is_k_edge_connected +from networkx.algorithms.connectivity import minimum_edge_cut +from networkx.algorithms.connectivity import minimum_node_cut +from networkx.algorithms.connectivity import node_connectivity +from networkx.algorithms.connectivity import node_disjoint_paths +from networkx.algorithms.connectivity import stoer_wagner +from networkx.algorithms.flow import capacity_scaling +from networkx.algorithms.flow import cost_of_flow +from networkx.algorithms.flow import gomory_hu_tree +from networkx.algorithms.flow import max_flow_min_cost +from networkx.algorithms.flow import maximum_flow +from networkx.algorithms.flow import maximum_flow_value +from networkx.algorithms.flow import min_cost_flow +from networkx.algorithms.flow import min_cost_flow_cost +from networkx.algorithms.flow import minimum_cut +from networkx.algorithms.flow import minimum_cut_value +from networkx.algorithms.flow import network_simplex +from networkx.algorithms.isomorphism import could_be_isomorphic +from networkx.algorithms.isomorphism import fast_could_be_isomorphic +from networkx.algorithms.isomorphism import faster_could_be_isomorphic +from networkx.algorithms.isomorphism import is_isomorphic +from networkx.algorithms.tree.branchings import maximum_branching +from networkx.algorithms.tree.branchings import maximum_spanning_arborescence +from networkx.algorithms.tree.branchings import minimum_branching +from networkx.algorithms.tree.branchings import minimum_spanning_arborescence +from networkx.algorithms.tree.branchings import ArborescenceIterator +from networkx.algorithms.tree.coding import * +from networkx.algorithms.tree.decomposition import * +from networkx.algorithms.tree.mst import * +from networkx.algorithms.tree.operations import * +from networkx.algorithms.tree.recognition import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/__init__.py new file mode 100644 index 0000000..13fc21f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/__init__.py @@ -0,0 +1,24 @@ +"""Approximations of graph properties and Heuristic methods for optimization. + + .. 
warning:: These functions are not imported in the top-level of ``networkx``
+
+    These functions can be accessed using
+    ``networkx.approximation.function_name``
+
+    They can be imported using ``from networkx.algorithms import approximation``
+    or ``from networkx.algorithms.approximation import function_name``
+
+"""
+from networkx.algorithms.approximation.clustering_coefficient import *
+from networkx.algorithms.approximation.clique import *
+from networkx.algorithms.approximation.connectivity import *
+from networkx.algorithms.approximation.distance_measures import *
+from networkx.algorithms.approximation.dominating_set import *
+from networkx.algorithms.approximation.kcomponents import *
+from networkx.algorithms.approximation.matching import *
+from networkx.algorithms.approximation.ramsey import *
+from networkx.algorithms.approximation.steinertree import *
+from networkx.algorithms.approximation.traveling_salesman import *
+from networkx.algorithms.approximation.treewidth import *
+from networkx.algorithms.approximation.vertex_cover import *
+from networkx.algorithms.approximation.maxcut import *
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clique.py
new file mode 100644
index 0000000..7228636
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clique.py
@@ -0,0 +1,233 @@
+"""Functions for computing large cliques and maximum independent sets."""
+import networkx as nx
+from networkx.algorithms.approximation import ramsey
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "clique_removal",
+    "max_clique",
+    "large_clique_size",
+    "maximum_independent_set",
+]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def maximum_independent_set(G):
+    """Returns an approximate maximum independent set.
+
+    Independent set or stable set is a set of vertices in a graph, no two of
+    which are adjacent. That is, it is a set I of vertices such that for every
+    two vertices in I, there is no edge connecting the two. Equivalently, each
+    edge in the graph has at most one endpoint in I. The size of an independent
+    set is the number of vertices it contains [1]_.
+
+    A maximum independent set is a largest independent set for a given graph G
+    and its size is denoted $\\alpha(G)$. The problem of finding such a set is called
+    the maximum independent set problem and is an NP-hard optimization problem.
+    As such, it is unlikely that there exists an efficient algorithm for finding
+    a maximum independent set of a graph.
+
+    The Independent Set algorithm is based on [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    Returns
+    -------
+    iset : Set
+        The apx-maximum independent set
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the graph is directed or is a multigraph.
+
+    Notes
+    -----
+    Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case.
+
+    References
+    ----------
+    .. [1] `Wikipedia: Independent set
+       <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_
+    .. [2] Boppana, R., & Halldórsson, M. M. (1992).
+       Approximating maximum independent sets by excluding subgraphs.
+       BIT Numerical Mathematics, 32(2), 180–196. Springer.
+    """
+    iset, _ = clique_removal(G)
+    return iset
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def max_clique(G):
+    r"""Find the Maximum Clique
+
+    Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set
+    in the worst case.
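+
+    A hedged usage sketch (the 4-node graph is an illustrative assumption,
+    not one of the library's own examples)::
+
+        import networkx as nx
+        from networkx.algorithms import approximation as approx
+
+        G = nx.Graph([(0, 1), (1, 2), (0, 2), (2, 3)])
+        clique = approx.max_clique(G)  # a large clique, e.g. {0, 1, 2}
+        assert len(clique) >= 2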
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + clique : set + The apx-maximum clique of the graph + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + A clique in an undirected graph G = (V, E) is a subset of the vertex set + `C \subseteq V` such that for every two vertices in C there exists an edge + connecting the two. This is equivalent to saying that the subgraph + induced by C is complete (in some cases, the term clique may also refer + to the subgraph). + + A maximum clique is a clique of the largest possible size in a given graph. + The clique number `\omega(G)` of a graph G is the number of + vertices in a maximum clique in G. The intersection number of + G is the smallest number of cliques that together cover all edges of G. + + https://en.wikipedia.org/wiki/Maximum_clique + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + doi:10.1007/BF01994876 + """ + if G is None: + raise ValueError("Expected NetworkX graph!") + + # finding the maximum clique in a graph is equivalent to finding + # the independent set in the complementary graph + cgraph = nx.complement(G) + iset, _ = clique_removal(cgraph) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def clique_removal(G): + r"""Repeatedly remove cliques from the graph. + + Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique + and independent set. Returns the largest independent set found, along + with found maximal cliques. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_ind_cliques : (set, list) tuple + 2-tuple of Maximal Independent Set and list of maximal cliques (sets). + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + graph = G.copy() + c_i, i_i = ramsey.ramsey_R2(graph) + cliques = [c_i] + isets = [i_i] + while graph: + graph.remove_nodes_from(c_i) + c_i, i_i = ramsey.ramsey_R2(graph) + if c_i: + cliques.append(c_i) + if i_i: + isets.append(i_i) + # Determine the largest independent set as measured by cardinality. + maxiset = max(isets, key=len) + return maxiset, cliques + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def large_clique_size(G): + """Find the size of a large clique in a graph. + + A *clique* is a subset of nodes in which each pair of nodes is + adjacent. This function is a heuristic for finding the size of a + large clique in the graph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + k: integer + The size of a large clique in the graph. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + This implementation is from [1]_. Its worst case time complexity is + :math:`O(n d^2)`, where *n* is the number of nodes in the graph and + *d* is the maximum degree. + + This function is a heuristic, which means it may work well in + practice, but there is no rigorous mathematical guarantee on the + ratio between the returned number and the actual largest clique size + in the graph. + + References + ---------- + .. 
[1] Pattabiraman, Bharath, et al. + "Fast Algorithms for the Maximum Clique Problem on Massive Graphs + with Applications to Overlapping Community Detection." + *Internet Mathematics* 11.4-5 (2015): 421--448. + + + See also + -------- + + :func:`networkx.algorithms.approximation.clique.max_clique` + A function that returns an approximate maximum clique with a + guarantee on the approximation ratio. + + :mod:`networkx.algorithms.clique` + Functions for finding the exact maximum clique in a graph. + + """ + degrees = G.degree + + def _clique_heuristic(G, U, size, best_size): + if not U: + return max(best_size, size) + u = max(U, key=degrees) + U.remove(u) + N_prime = {v for v in G[u] if degrees[v] >= best_size} + return _clique_heuristic(G, U & N_prime, size + 1, best_size) + + best_size = 0 + nodes = (u for u in G if degrees[u] >= best_size) + for u in nodes: + neighbors = {v for v in G[u] if degrees[v] >= best_size} + best_size = _clique_heuristic(G, neighbors, 1, best_size) + return best_size diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clustering_coefficient.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clustering_coefficient.py new file mode 100644 index 0000000..d37a754 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/clustering_coefficient.py @@ -0,0 +1,64 @@ +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["average_clustering"] + + +@py_random_state(2) +@not_implemented_for("directed") +def average_clustering(G, trials=1000, seed=None): + r"""Estimates the average clustering coefficient of G. + + The local clustering of each node in `G` is the fraction of triangles + that actually exist over all possible triangles in its neighborhood. + The average clustering coefficient of a graph `G` is the mean of + local clusterings. + + This function finds an approximate average clustering coefficient + for G by repeating `n` times (defined in `trials`) the following + experiment: choose a node at random, choose two of its neighbors + at random, and check if they are connected. The approximate + coefficient is the fraction of triangles found over the number + of trials [1]_. + + Parameters + ---------- + G : NetworkX graph + + trials : integer + Number of trials to perform (default 1000). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + c : float + Approximated average clustering coefficient. + + Examples + -------- + >>> from networkx.algorithms import approximation + >>> G = nx.erdos_renyi_graph(10, 0.2, seed=10) + >>> approximation.average_clustering(G, trials=1000, seed=10) + 0.214 + + References + ---------- + .. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering + coefficient and transitivity. Universität Karlsruhe, Fakultät für + Informatik, 2004. 
+       https://doi.org/10.5445/IR/1000001239
+
+    """
+    n = len(G)
+    triangles = 0
+    nodes = list(G)
+    for i in [int(seed.random() * n) for i in range(trials)]:
+        nbrs = list(G[nodes[i]])
+        if len(nbrs) < 2:
+            continue
+        u, v = seed.sample(nbrs, 2)
+        if u in G[v]:
+            triangles += 1
+    return triangles / trials
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/connectivity.py
new file mode 100644
index 0000000..0634751
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/connectivity.py
@@ -0,0 +1,401 @@
+""" Fast approximation for node connectivity
+"""
+import itertools
+from operator import itemgetter
+
+import networkx as nx
+
+__all__ = [
+    "local_node_connectivity",
+    "node_connectivity",
+    "all_pairs_node_connectivity",
+]
+
+
+def local_node_connectivity(G, source, target, cutoff=None):
+    """Compute node connectivity between source and target.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what we compute in this function.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+
+    G : NetworkX graph
+
+    source : node
+        Starting node for node connectivity
+
+    target : node
+        Ending node for node connectivity
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff. Default value None.
+
+    Returns
+    -------
+    k: integer
+       pairwise node connectivity
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph has node connectivity 4
+    >>> # for each non adjacent node pair
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.local_node_connectivity(G, 0, 5)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    Note that the authors propose a further refinement, losing accuracy and
+    gaining speed, which is not implemented yet.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+       http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if target == source:
+        raise nx.NetworkXError("source and target have to be different nodes.")
+
+    # Maximum possible node independent paths
+    if G.is_directed():
+        possible = min(G.out_degree(source), G.in_degree(target))
+    else:
+        possible = min(G.degree(source), G.degree(target))
+
+    K = 0
+    if not possible:
+        return K
+
+    if cutoff is None:
+        cutoff = float("inf")
+
+    exclude = set()
+    for i in range(min(possible, cutoff)):
+        try:
+            path = _bidirectional_shortest_path(G, source, target, exclude)
+            exclude.update(set(path))
+            K += 1
+        except nx.NetworkXNoPath:
+            break
+
+    return K
+
+
+def node_connectivity(G, s=None, t=None):
+    r"""Returns an approximation for node connectivity for a graph or digraph G.
+
+    Node connectivity is equal to the minimum number of nodes that
+    must be removed to disconnect G or render it trivial. By Menger's theorem,
+    this is equal to the number of node independent paths (paths that
+    share no nodes other than source and target).
+
+    If source and target nodes are provided, this function returns the
+    local node connectivity: the minimum number of nodes that must be
+    removed to break all paths from source to target in G.
+
+    This algorithm is based on a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    Returns
+    -------
+    K : integer
+        Node connectivity of G, or local node connectivity if source
+        and target are provided.
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph is 4-node-connected
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.node_connectivity(G)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    local_node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+       http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t)
+
+    # Global node connectivity
+    if G.is_directed():
+        connected_func = nx.is_weakly_connected
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain(G.predecessors(v), G.successors(v))
+
+    else:
+        connected_func = nx.is_connected
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    if not connected_func(G):
+        return 0
+
+    # Choose a node with minimum degree
+    v, minimum_degree = min(G.degree(), key=itemgetter(1))
+    # Node connectivity is bounded by minimum degree
+    K = minimum_degree
+    # compute local node connectivity with all non-neighbor nodes
+    # and store the minimum
+    for w in set(G) - set(neighbors(v)) - {v}:
+        K = min(K, local_node_connectivity(G, v, w, cutoff=K))
+    # Same for non adjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y not in G[x] and x != y:
+            K = min(K, local_node_connectivity(G, x, y, cutoff=K))
+    return K
+
+
+def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
+    """Compute node connectivity between all pairs of nodes.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what we compute in this function.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    nbunch: container
+        Container of nodes. If provided, node connectivity will be computed
+        only over pairs of nodes in nbunch.
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff in each pair of nodes.
+        Default value None.
+
+    Returns
+    -------
+    K : dictionary
+        Dictionary, keyed by source and target, of pairwise node connectivity
+
+    See Also
+    --------
+    local_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+       http://eclectic.ss.uci.edu/~drwhite/working.pdf
+    """
+    if nbunch is None:
+        nbunch = G
+    else:
+        nbunch = set(nbunch)
+
+    directed = G.is_directed()
+    if directed:
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    all_pairs = {n: {} for n in nbunch}
+
+    for u, v in iter_func(nbunch, 2):
+        k = local_node_connectivity(G, u, v, cutoff=cutoff)
+        all_pairs[u][v] = k
+        if not directed:
+            all_pairs[v][u] = k
+
+    return all_pairs
+
+
+def _bidirectional_shortest_path(G, source, target, exclude):
+    """Returns shortest path between source and target ignoring nodes in the
+    container 'exclude'.
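+
+    A hedged illustration of the exclusion semantics (the 5-node path graph
+    is an assumption for demonstration)::
+
+        G = nx.path_graph(5)  # 0-1-2-3-4
+        _bidirectional_shortest_path(G, 0, 4, set())  # [0, 1, 2, 3, 4]
+        # excluding node 2 removes every 0-4 path, so the call
+        # _bidirectional_shortest_path(G, 0, 4, {2}) raises NetworkXNoPath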
+ + Parameters + ---------- + + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + exclude: container + Container for nodes to exclude from the search for shortest paths + + Returns + ------- + path: list + Shortest path between source and target ignoring nodes in 'exclude' + + Raises + ------ + NetworkXNoPath + If there is no path or if nodes are adjacent and have only one path + between them + + Notes + ----- + This function and its helper are originally from + networkx.algorithms.shortest_paths.unweighted and are modified to + accept the extra parameter 'exclude', which is a container for nodes + already used in other paths that should be ignored. + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, exclude) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target, exclude): + # does BFS from both source and target and meets in the middle + # excludes nodes in the container "exclude" from the search + if source is None or target is None: + raise nx.NetworkXException( + "Bidirectional shortest path called without source or target" + ) + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # predecesssor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + level = 0 + + while forward_fringe and reverse_fringe: + # Make sure that we iterate one step forward and one step backwards + # thus source and target will only trigger "found path" when they are + # adjacent and then they can be safely included in the container 'exclude' + level += 1 + if not level % 2 == 0: + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w in exclude: + continue + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + return pred, succ, w # found path + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w in exclude: + continue + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + return pred, succ, w # found path + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/distance_measures.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/distance_measures.py new file mode 100644 index 0000000..c5666b2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/distance_measures.py @@ -0,0 +1,140 @@ +"""Distance measures approximated metrics.""" + +import networkx as nx +from networkx.utils.decorators import py_random_state + +__all__ = ["diameter"] + + +@py_random_state(1) +def diameter(G, seed=None): + """Returns a lower bound on the 
diameter of the graph G. + + The function computes a lower bound on the diameter (i.e., the maximum eccentricity) + of a directed or undirected graph G. The procedure used varies depending on the graph + being directed or not. + + If G is an `undirected` graph, then the function uses the `2-sweep` algorithm [1]_. + The main idea is to pick the farthest node from a random node and return its eccentricity. + + Otherwise, if G is a `directed` graph, the function uses the `2-dSweep` algorithm [2]_, + The procedure starts by selecting a random source node $s$ from which it performs a + forward and a backward BFS. Let $a_1$ and $a_2$ be the farthest nodes in the forward and + backward cases, respectively. Then, it computes the backward eccentricity of $a_1$ using + a backward BFS and the forward eccentricity of $a_2$ using a forward BFS. + Finally, it returns the best lower bound between the two. + + In both cases, the time complexity is linear with respect to the size of G. + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + d : integer + Lower Bound on the Diameter of G + + Raises + ------ + NetworkXError + If the graph is empty or + If the graph is undirected and not connected or + If the graph is directed and not strongly connected. + + See Also + -------- + networkx.algorithms.distance_measures.diameter + + References + ---------- + .. [1] Magnien, Clémence, Matthieu Latapy, and Michel Habib. + *Fast computation of empirically tight bounds for the diameter of massive graphs.* + Journal of Experimental Algorithmics (JEA), 2009. + https://arxiv.org/pdf/0904.2728.pdf + .. [2] Crescenzi, Pierluigi, Roberto Grossi, Leonardo Lanzi, and Andrea Marino. + *On computing the diameter of real-world directed (weighted) graphs.* + International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012. + https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf + """ + # if G is empty + if not G: + raise nx.NetworkXError("Expected non-empty NetworkX graph!") + # if there's only a node + if G.number_of_nodes() == 1: + return 0 + # if G is directed + if G.is_directed(): + return _two_sweep_directed(G, seed) + # else if G is undirected + return _two_sweep_undirected(G, seed) + + +def _two_sweep_undirected(G, seed): + """Helper function for finding a lower bound on the diameter + for undirected Graphs. + + The idea is to pick the farthest node from a random node + and return its eccentricity. + + ``G`` is a NetworkX undirected graph. + + .. note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # select a random source node + source = seed.choice(list(G)) + # get the distances to the other nodes + distances = nx.shortest_path_length(G, source) + # if some nodes have not been visited, then the graph is not connected + if len(distances) != len(G): + raise nx.NetworkXError("Graph not connected.") + # take a node that is (one of) the farthest nodes from the source + *_, node = distances + # return the eccentricity of the node + return nx.eccentricity(G, node) + + +def _two_sweep_directed(G, seed): + """Helper function for finding a lower bound on the diameter + for directed Graphs. + + It implements 2-dSweep, the directed version of the 2-sweep algorithm. + The algorithm follows the following steps. + 1. Select a source node $s$ at random. + 2. 
Perform a forward BFS from $s$ to select a node $a_1$ at the maximum + distance from the source, and compute $LB_1$, the backward eccentricity of $a_1$. + 3. Perform a backward BFS from $s$ to select a node $a_2$ at the maximum + distance from the source, and compute $LB_2$, the forward eccentricity of $a_2$. + 4. Return the maximum between $LB_1$ and $LB_2$. + + ``G`` is a NetworkX directed graph. + + .. note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # get a new digraph G' with the edges reversed in the opposite direction + G_reversed = G.reverse() + # select a random source node + source = seed.choice(list(G)) + # compute forward distances from source + forward_distances = nx.shortest_path_length(G, source) + # compute backward distances from source + backward_distances = nx.shortest_path_length(G_reversed, source) + # if either the source can't reach every node or not every node + # can reach the source, then the graph is not strongly connected + n = len(G) + if len(forward_distances) != n or len(backward_distances) != n: + raise nx.NetworkXError("DiGraph not strongly connected.") + # take a node a_1 at the maximum distance from the source in G + *_, a_1 = forward_distances + # take a node a_2 at the maximum distance from the source in G_reversed + *_, a_2 = backward_distances + # return the max between the backward eccentricity of a_1 and the forward eccentricity of a_2 + return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/dominating_set.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/dominating_set.py new file mode 100644 index 0000000..7685323 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/dominating_set.py @@ -0,0 +1,123 @@ +"""Functions for finding node and edge dominating sets. + +A `dominating set`_ for an undirected graph *G* with vertex set *V* +and edge set *E* is a subset *D* of *V* such that every vertex not in +*D* is adjacent to at least one member of *D*. An `edge dominating set`_ +is a subset *F* of *E* such that every edge not in *F* is +incident to an endpoint of at least one edge in *F*. + +.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set +.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set + +""" + +from ...utils import not_implemented_for +from ..matching import maximal_matching + +__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"] + + +# TODO Why doesn't this algorithm work for directed graphs? +@not_implemented_for("directed") +def min_weighted_dominating_set(G, weight=None): + r"""Returns a dominating set that approximates the minimum weight node + dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph. + + weight : string + The node attribute storing the weight of an node. If provided, + the node attribute with this key must be a number for each + node. If not provided, each node is assumed to have weight one. + + Returns + ------- + min_weight_dominating_set : set + A set of nodes, the sum of whose weights is no more than `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of + each node in the graph and `w(V^*)` denotes the sum of the + weights of each node in the minimum weight dominating set. + + Notes + ----- + This algorithm computes an approximate minimum weighted dominating + set for the graph `G`. 
The returned solution has weight `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each + node in the graph and `w(V^*)` denotes the sum of the weights of + each node in the minimum weight dominating set for the graph. + + This implementation of the algorithm runs in $O(m)$ time, where $m$ + is the number of edges in the graph. + + References + ---------- + .. [1] Vazirani, Vijay V. + *Approximation Algorithms*. + Springer Science & Business Media, 2001. + + """ + # The unique dominating set for the null graph is the empty set. + if len(G) == 0: + return set() + + # This is the dominating set that will eventually be returned. + dom_set = set() + + def _cost(node_and_neighborhood): + """Returns the cost-effectiveness of greedily choosing the given + node. + + `node_and_neighborhood` is a two-tuple comprising a node and its + closed neighborhood. + + """ + v, neighborhood = node_and_neighborhood + return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set) + + # This is a set of all vertices not already covered by the + # dominating set. + vertices = set(G) + # This is a dictionary mapping each node to the closed neighborhood + # of that node. + neighborhoods = {v: {v} | set(G[v]) for v in G} + + # Continue until all vertices are adjacent to some node in the + # dominating set. + while vertices: + # Find the most cost-effective node to add, along with its + # closed neighborhood. + dom_node, min_set = min(neighborhoods.items(), key=_cost) + # Add the node to the dominating set and reduce the remaining + # set of nodes to cover. + dom_set.add(dom_node) + del neighborhoods[dom_node] + vertices -= min_set + + return dom_set + + +def min_edge_dominating_set(G): + r"""Returns minimum cardinality edge dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_edge_dominating_set : set + Returns a set of dominating edges whose size is no more than 2 * OPT. + + Notes + ----- + The algorithm computes an approximate solution to the edge dominating set + problem. The result is no more than 2 * OPT in terms of size of the set. + Runtime of the algorithm is $O(|E|)$. + """ + if not G: + raise ValueError("Expected non-empty NetworkX graph!") + return maximal_matching(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/kcomponents.py new file mode 100644 index 0000000..239cc0f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/kcomponents.py @@ -0,0 +1,368 @@ +""" Fast approximation for k-component structure +""" +import itertools +from collections import defaultdict +from collections.abc import Mapping +from functools import cached_property + +import networkx as nx +from networkx.algorithms.approximation import local_node_connectivity +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +def k_components(G, min_density=0.95): + r"""Returns the approximate k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. 
`k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + This implementation is based on the fast heuristics to approximate + the `k`-component structure of a graph [1]_. Which, in turn, it is based on + a fast approximation algorithm for finding good lower bounds of the number + of node independent paths between two nodes [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + min_density : Float + Density relaxation threshold. Default value 0.95 + + Returns + ------- + k_components : dict + Dictionary with connectivity level `k` as key and a list of + sets of nodes that form a k-component of level `k` as values. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> from networkx.algorithms import approximation as apxa + >>> G = nx.petersen_graph() + >>> k_components = apxa.k_components(G) + + Notes + ----- + The logic of the approximation algorithm for computing the `k`-component + structure [1]_ is based on repeatedly applying simple and fast algorithms + for `k`-cores and biconnected components in order to narrow down the + number of pairs of nodes over which we have to compute White and Newman's + approximation algorithm for finding node independent paths [2]_. More + formally, this algorithm is based on Whitney's theorem, which states + an inclusion relation among node connectivity, edge connectivity, and + minimum degree for any graph G. This theorem implies that every + `k`-component is nested inside a `k`-edge-component, which in turn, + is contained in a `k`-core. Thus, this algorithm computes node independent + paths among pairs of nodes in each biconnected part of each `k`-core, + and repeats this procedure for each `k` from 3 to the maximal core number + of a node in the input graph. + + Because, in practice, many nodes of the core of level `k` inside a + bicomponent actually are part of a component of level k, the auxiliary + graph needed for the algorithm is likely to be very dense. Thus, we use + a complement graph data structure (see `AntiGraph`) to save memory. + AntiGraph only stores information of the edges that are *not* present + in the actual auxiliary graph. When applying algorithms to this + complement graph data structure, it behaves as if it were the dense + version. + + See also + -------- + k_components + + References + ---------- + .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + https://www.santafe.edu/research/results/working-papers/fast-approximation-algorithms-for-finding-node-ind + + .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. 
+ https://doi.org/10.2307/3088904 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values + k_components = defaultdict(list) + # make a few functions local for speed + node_connectivity = local_node_connectivity + k_core = nx.k_core + core_number = nx.core_number + biconnected_components = nx.biconnected_components + combinations = itertools.combinations + # Exact solution for k = {1,2} + # There is a linear time algorithm for triconnectivity, if we had an + # implementation available we could start from k = 4. + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + for bicomponent in nx.biconnected_components(G): + # avoid considering dyads as bicomponents + bicomp = set(bicomponent) + if len(bicomp) > 2: + k_components[2].append(bicomp) + # There is no k-component of k > maximum core number + # \kappa(G) <= \lambda(G) <= \delta(G) + g_cnumber = core_number(G) + max_core = max(g_cnumber.values()) + for k in range(3, max_core + 1): + C = k_core(G, k, core_number=g_cnumber) + for nodes in biconnected_components(C): + # Build a subgraph SG induced by the nodes that are part of + # each biconnected component of the k-core subgraph C. + if len(nodes) < k: + continue + SG = G.subgraph(nodes) + # Build auxiliary graph + H = _AntiGraph() + H.add_nodes_from(SG.nodes()) + for u, v in combinations(SG, 2): + K = node_connectivity(SG, u, v, cutoff=k) + if k > K: + H.add_edge(u, v) + for h_nodes in biconnected_components(H): + if len(h_nodes) <= k: + continue + SH = H.subgraph(h_nodes) + for Gc in _cliques_heuristic(SG, SH, k, min_density): + for k_nodes in biconnected_components(Gc): + Gk = nx.k_core(SG.subgraph(k_nodes), k) + if len(Gk) <= k: + continue + k_components[k].append(set(Gk)) + return k_components + + +def _cliques_heuristic(G, H, k, min_density): + h_cnumber = nx.core_number(H) + for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)): + cands = {n for n, c in h_cnumber.items() if c == c_value} + # Skip checking for overlap for the highest core value + if i == 0: + overlap = False + else: + overlap = set.intersection( + *[{x for x in H[n] if x not in cands} for n in cands] + ) + if overlap and len(overlap) < k: + SH = H.subgraph(cands | overlap) + else: + SH = H.subgraph(cands) + sh_cnumber = nx.core_number(SH) + SG = nx.k_core(G.subgraph(SH), k) + while not (_same(sh_cnumber) and nx.density(SH) >= min_density): + # This subgraph must be writable => .copy() + SH = H.subgraph(SG).copy() + if len(SH) <= k: + break + sh_cnumber = nx.core_number(SH) + sh_deg = dict(SH.degree()) + min_deg = min(sh_deg.values()) + SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg) + SG = nx.k_core(G.subgraph(SH), k) + else: + yield SG + + +def _same(measure, tol=0): + vals = set(measure.values()) + if (max(vals) - min(vals)) <= tol: + return True + return False + + +class _AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory footprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. In this + case we only use k-core, connected_components, and biconnected_components. 
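+
+    A hedged sketch of the complement semantics (the three-node example is an
+    illustrative assumption)::
+
+        H = _AntiGraph()
+        H.add_nodes_from([0, 1, 2])
+        H.add_edge(0, 1)  # record that edge 0-1 is *absent* from the dense graph
+        list(H[0])        # [2] -- reported neighbors are the complement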
+ """ + + all_edge_dict = {"weight": 1} + + def single_edge_dict(self): + return self.all_edge_dict + + edge_attr_dict_factory = single_edge_dict # type: ignore + + def __getitem__(self, n): + """Returns a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + all_edge_dict = self.all_edge_dict + return { + node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n} + } + + def neighbors(self, n): + """Returns an iterator over all neighbors of node n in the + dense graph. + """ + try: + return iter(set(self._adj) - set(self._adj[n]) - {n}) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + class AntiAtlasView(Mapping): + """An adjacency inner dict for AntiGraph""" + + def __init__(self, graph, node): + self._graph = graph + self._atlas = graph._adj[node] + self._node = node + + def __len__(self): + return len(self._graph) - len(self._atlas) - 1 + + def __iter__(self): + return (n for n in self._graph if n not in self._atlas and n != self._node) + + def __getitem__(self, nbr): + nbrs = set(self._graph._adj) - set(self._atlas) - {self._node} + if nbr in nbrs: + return self._graph.all_edge_dict + raise KeyError(nbr) + + class AntiAdjacencyView(AntiAtlasView): + """An adjacency outer dict for AntiGraph""" + + def __init__(self, graph): + self._graph = graph + self._atlas = graph._adj + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._graph) + + def __getitem__(self, node): + if node not in self._graph: + raise KeyError(node) + return self._graph.AntiAtlasView(self._graph, node) + + @cached_property + def adj(self): + return self.AntiAdjacencyView(self) + + def subgraph(self, nodes): + """This subgraph method returns a full AntiGraph. Not a View""" + nodes = set(nodes) + G = _AntiGraph() + G.add_nodes_from(nodes) + for n in G: + Gnbrs = G.adjlist_inner_dict_factory() + G._adj[n] = Gnbrs + for nbr, d in self._adj[n].items(): + if nbr in G._adj: + Gnbrs[nbr] = d + G._adj[nbr][n] = d + G.graph = self.graph + return G + + class AntiDegreeView(nx.reportviews.DegreeView): + def __iter__(self): + all_nodes = set(self._succ) + for n in self._nodes: + nbrs = all_nodes - set(self._succ[n]) - {n} + yield (n, len(nbrs)) + + def __getitem__(self, n): + nbrs = set(self._succ) - set(self._succ[n]) - {n} + # AntiGraph is a ThinGraph so all edges have weight 1 + return len(nbrs) + (n in nbrs) + + @cached_property + def degree(self): + """Returns an iterator for (node, degree) and degree for single node. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + deg: + Degree of the node, if a single node is passed as argument. + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). 
+ + See Also + -------- + degree + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1])) + [(0, 1), (1, 2)] + + """ + return self.AntiDegreeView(self) + + def adjacency(self): + """Returns an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. + + """ + for n in self._adj: + yield (n, set(self._adj) - set(self._adj[n]) - {n}) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/matching.py new file mode 100644 index 0000000..17a52ed --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/matching.py @@ -0,0 +1,42 @@ +""" +************** +Graph Matching +************** + +Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent +edges; that is, no two edges share a common vertex. + +`Wikipedia: Matching `_ +""" +import networkx as nx + +__all__ = ["min_maximal_matching"] + + +def min_maximal_matching(G): + r"""Returns the minimum maximal matching of G. That is, out of all maximal + matchings of the graph G, the smallest is returned. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_maximal_matching : set + Returns a set of edges such that no two edges share a common endpoint + and every edge not in the set shares some common endpoint in the set. + Cardinality will be 2*OPT in the worst case. + + Notes + ----- + The algorithm computes an approximate solution fo the minimum maximal + cardinality matching problem. The solution is no more than 2 * OPT in size. + Runtime is $O(|E|)$. + + References + ---------- + .. [1] Vazirani, Vijay Approximation Algorithms (2001) + """ + return nx.maximal_matching(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/maxcut.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/maxcut.py new file mode 100644 index 0000000..59dfa63 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/maxcut.py @@ -0,0 +1,111 @@ +import networkx as nx +from networkx.utils.decorators import not_implemented_for, py_random_state + +__all__ = ["randomized_partitioning", "one_exchange"] + + +@not_implemented_for("directed", "multigraph") +@py_random_state(1) +def randomized_partitioning(G, seed=None, p=0.5, weight=None): + """Compute a random partitioning of the graph nodes and its cut value. + + A partitioning is calculated by observing each node + and deciding to add it to the partition with probability `p`, + returning a random cut and its corresponding value (the + sum of weights of edges connecting different partitions). + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + p : scalar + Probability for each node to be part of the first partition. + Should be in [0,1] + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_size : scalar + Value of the minimum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. 
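A short usage sketch of this function (import path as exercised by the tests later in this diff); the assertions restate the contract described above:

import networkx as nx
from networkx.algorithms.approximation import maxcut

G = nx.complete_graph(5)
cut_size, (left, right) = maxcut.randomized_partitioning(G, seed=42, p=0.5)

# Every node ends up on exactly one side of the partition ...
assert left | right == set(G) and left.isdisjoint(right)
# ... and cut_size is the total weight of the edges crossing between the sides.
assert cut_size == nx.algorithms.cut_size(G, left)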
+ """ + cut = {node for node in G.nodes() if seed.random() < p} + cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + partition = (cut, G.nodes - cut) + return cut_size, partition + + +def _swap_node_partition(cut, node): + return cut - {node} if node in cut else cut.union({node}) + + +@not_implemented_for("directed", "multigraph") +@py_random_state(2) +def one_exchange(G, initial_cut=None, seed=None, weight=None): + """Compute a partitioning of the graphs nodes and the corresponding cut value. + + Use a greedy one exchange strategy to find a locally maximal cut + and its value, it works by finding the best node (one that gives + the highest gain to the cut value) to add to the current cut + and repeats this process until no improvement can be made. + + Parameters + ---------- + G : networkx Graph + Graph to find a maximum cut for. + + initial_cut : set + Cut to use as a starting point. If not supplied the algorithm + starts with an empty cut. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_value : scalar + Value of the maximum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a maximum cut. + """ + if initial_cut is None: + initial_cut = set() + cut = set(initial_cut) + current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + while True: + nodes = list(G.nodes()) + # Shuffling the nodes ensures random tie-breaks in the following call to max + seed.shuffle(nodes) + best_node_to_swap = max( + nodes, + key=lambda v: nx.algorithms.cut_size( + G, _swap_node_partition(cut, v), weight=weight + ), + default=None, + ) + potential_cut = _swap_node_partition(cut, best_node_to_swap) + potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight) + + if potential_cut_size > current_cut_size: + cut = potential_cut + current_cut_size = potential_cut_size + else: + break + + partition = (cut, G.nodes - cut) + return current_cut_size, partition diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/ramsey.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/ramsey.py new file mode 100644 index 0000000..1692477 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/ramsey.py @@ -0,0 +1,51 @@ +""" +Ramsey numbers. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = ["ramsey_R2"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def ramsey_R2(G): + r"""Compute the largest clique and largest independent set in `G`. + + This can be used to estimate bounds for the 2-color + Ramsey number `R(2;s,t)` for `G`. + + This is a recursive implementation which could run into trouble + for large recursions. Note that self-loop edges are ignored. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_pair : (set, set) tuple + Maximum clique, Maximum independent set. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. 
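A small usage sketch (illustrative graph; the function is exercised more fully in test_ramsey.py further down this diff):

import networkx as nx
import networkx.algorithms.approximation as apxa

G = nx.complete_graph(4)
clique, indep_set = apxa.ramsey_R2(G)
# In a complete graph the whole node set is a clique, while an independent
# set can contain at most one node.
assert clique == set(G)
assert len(indep_set) == 1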
+ """ + if not G: + return set(), set() + + node = arbitrary_element(G) + nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node) + nnbrs = nx.non_neighbors(G, node) + c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy()) + c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy()) + + c_1.add(node) + i_2.add(node) + # Choose the larger of the two cliques and the larger of the two + # independent sets, according to cardinality. + return max(c_1, c_2, key=len), max(i_1, i_2, key=len) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/steinertree.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/steinertree.py new file mode 100644 index 0000000..496098b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/steinertree.py @@ -0,0 +1,104 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["metric_closure", "steiner_tree"] + + +@not_implemented_for("directed") +def metric_closure(G, weight="weight"): + """Return the metric closure of a graph. + + The metric closure of a graph *G* is the complete graph in which each edge + is weighted by the shortest path distance between the nodes in *G* . + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + NetworkX graph + Metric closure of the graph `G`. + + """ + M = nx.Graph() + + Gnodes = set(G) + + # check for connected graph while processing first node + all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight) + u, (distance, path) = next(all_paths_iter) + if Gnodes - set(distance): + msg = "G is not a connected graph. metric_closure is not defined." + raise nx.NetworkXError(msg) + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + # first node done -- now process the rest + for u, (distance, path) in all_paths_iter: + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + return M + + +@not_implemented_for("directed") +def steiner_tree(G, terminal_nodes, weight="weight"): + """Return an approximation to the minimum Steiner tree of a graph. + + The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` + is a tree within `G` that spans those nodes and has minimum size + (sum of edge weights) among all such trees. + + The minimum Steiner tree can be approximated by computing the minimum + spanning tree of the subgraph of the metric closure of *G* induced by the + terminal nodes, where the metric closure of *G* is the complete graph in + which each edge is weighted by the shortest path distance between the + nodes in *G* . + This algorithm produces a tree whose weight is within a (2 - (2 / t)) + factor of the weight of the optimal Steiner tree where *t* is number of + terminal nodes. + + Parameters + ---------- + G : NetworkX graph + + terminal_nodes : list + A list of terminal nodes for which minimum steiner tree is + to be found. + + Returns + ------- + NetworkX graph + Approximation to the minimum steiner tree of `G` induced by + `terminal_nodes` . + + Notes + ----- + For multigraphs, the edge between two nodes with minimum weight is the + edge put into the Steiner tree. + + + References + ---------- + .. [1] Steiner_tree_problem on Wikipedia. + https://en.wikipedia.org/wiki/Steiner_tree_problem + """ + # H is the subgraph induced by terminal_nodes in the metric closure M of G. 
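+ # The construction below: (1) build the metric closure M of G, (2) restrict
+ # it to the terminal nodes, (3) take a minimum spanning tree of that
+ # subgraph using the 'distance' attribute stored by metric_closure, and
+ # (4) expand each MST edge back into its shortest path in G; the edge
+ # subgraph formed by those paths is the approximate Steiner tree returned.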
+ M = metric_closure(G, weight=weight) + H = M.subgraph(terminal_nodes) + # Use the 'distance' attribute of each edge provided by M. + mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True) + # Create an iterator over each edge in each shortest path; repeats are okay + edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges) + # For multigraph we should add the minimal weight edge keys + if G.is_multigraph(): + edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges + ) + T = G.edge_subgraph(edges) + return T diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py new file mode 100644 index 0000000..5eab5c1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py @@ -0,0 +1,41 @@ +import networkx as nx +from networkx.algorithms.approximation import average_clustering + +# This approximation has to be exact in regular graphs +# with no triangles or with all possible triangles. + + +def test_petersen(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_petersen_seed(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2, seed=1) == nx.average_clustering(G) + + +def test_tetrahedral(): + # Actual coefficient is 1 + G = nx.tetrahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_dodecahedral(): + # Actual coefficient is 0 + G = nx.dodecahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_empty(): + G = nx.empty_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 0 + + +def test_complete(): + G = nx.complete_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 1 + G = nx.complete_graph(7) + assert average_clustering(G, trials=len(G) // 2) == 1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_clique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_clique.py new file mode 100644 index 0000000..ebda285 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_clique.py @@ -0,0 +1,113 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.clique` module.""" + + +import networkx as nx +from networkx.algorithms.approximation import ( + clique_removal, + large_clique_size, + max_clique, + maximum_independent_set, +) + + +def is_independent_set(G, nodes): + """Returns True if and only if `nodes` is a clique in `G`. + + `G` is a NetworkX graph. `nodes` is an iterable of nodes in + `G`. + + """ + return G.subgraph(nodes).number_of_edges() == 0 + + +def is_clique(G, nodes): + """Returns True if and only if `nodes` is an independent set + in `G`. + + `G` is an undirected simple graph. `nodes` is an iterable of + nodes in `G`. 
+ + """ + H = G.subgraph(nodes) + n = len(H) + return H.number_of_edges() == n * (n - 1) // 2 + + +class TestCliqueRemoval: + """Unit tests for the + :func:`~networkx.algorithms.approximation.clique_removal` function. + + """ + + def test_trivial_graph(self): + G = nx.trivial_graph() + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + # In fact, we should only have 1-cliques, that is, singleton nodes. + assert all(len(clique) == 1 for clique in cliques) + + def test_complete_graph(self): + G = nx.complete_graph(10) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + def test_barbell_graph(self): + G = nx.barbell_graph(10, 5) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + +class TestMaxClique: + """Unit tests for the :func:`networkx.algorithms.approximation.max_clique` + function. + + """ + + def test_null_graph(self): + G = nx.null_graph() + assert len(max_clique(G)) == 0 + + def test_complete_graph(self): + graph = nx.complete_graph(30) + # this should return the entire graph + mc = max_clique(graph) + assert 30 == len(mc) + + def test_maximal_by_cardinality(self): + """Tests that the maximal clique is computed according to maximum + cardinality of the sets. + + For more information, see pull request #1531. + + """ + G = nx.complete_graph(5) + G.add_edge(4, 5) + clique = max_clique(G) + assert len(clique) > 1 + + G = nx.lollipop_graph(30, 2) + clique = max_clique(G) + assert len(clique) > 2 + + +def test_large_clique_size(): + G = nx.complete_graph(9) + nx.add_cycle(G, [9, 10, 11]) + G.add_edge(8, 9) + G.add_edge(1, 12) + G.add_node(13) + + assert large_clique_size(G) == 9 + G.remove_node(5) + assert large_clique_size(G) == 8 + G.remove_edge(2, 3) + assert large_clique_size(G) == 7 + + +def test_independent_set(): + # smoke test + G = nx.Graph() + assert len(maximum_independent_set(G)) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py new file mode 100644 index 0000000..887db20 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py @@ -0,0 +1,199 @@ +import pytest + +import networkx as nx +from networkx.algorithms import approximation as approx + + +def test_global_node_connectivity(): + # Figure 1 chapter on Connectivity + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + assert 2 == approx.local_node_connectivity(G, 1, 11) + assert 2 == approx.node_connectivity(G) + assert 2 == approx.node_connectivity(G, 1, 11) + + +def test_white_harary1(): + # Figure 1b white and harary (2001) + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + assert 1 == approx.node_connectivity(G) + + +def 
test_complete_graphs(): + for n in range(5, 25, 5): + G = nx.complete_graph(n) + assert n - 1 == approx.node_connectivity(G) + assert n - 1 == approx.node_connectivity(G, 0, 3) + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + assert 0 == approx.node_connectivity(G) + assert 0 == approx.node_connectivity(G, 0, 3) + + +def test_petersen(): + G = nx.petersen_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +# Approximation fails with tutte graph +# def test_tutte(): +# G = nx.tutte_graph() +# assert_equal(3, approx.node_connectivity(G)) + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +def test_octahedral(): + G = nx.octahedral_graph() + assert 4 == approx.node_connectivity(G) + assert 4 == approx.node_connectivity(G, 0, 5) + + +# Approximation can fail with icosahedral graph depending +# on iteration order. +# def test_icosahedral(): +# G=nx.icosahedral_graph() +# assert_equal(5, approx.node_connectivity(G)) +# assert_equal(5, approx.node_connectivity(G, 0, 5)) + + +def test_only_source(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, s=0) + + +def test_only_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, t=0) + + +def test_missing_source(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 10, 1) + + +def test_missing_target(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 1, 10) + + +def test_source_equals_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.local_node_connectivity, G, 0, 0) + + +def test_directed_node_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges + assert 1 == approx.node_connectivity(G) + assert 1 == approx.node_connectivity(G, 1, 4) + assert 2 == approx.node_connectivity(D) + assert 2 == approx.node_connectivity(D, 1, 4) + + +class TestAllPairsNodeConnectivityApprox: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = approx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = approx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = approx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = approx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = approx.all_pairs_node_connectivity(self.directed_path) + 
for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_cutoff(self): + for G in [self.K10, self.K5, self.K20]: + for mp in [2, 3, 4]: + paths = approx.all_pairs_node_connectivity(G, cutoff=mp) + for source in paths: + for target, K in paths[source].items(): + assert K == mp + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = approx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py new file mode 100644 index 0000000..8125150 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py @@ -0,0 +1,60 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.distance_measures` module. +""" + +import pytest + +import networkx as nx +from networkx.algorithms.approximation import diameter + + +class TestDiameter: + """Unit tests for the approximate diameter function + :func:`~networkx.algorithms.approximation.distance_measures.diameter`. + """ + + def test_null_graph(self): + """Test empty graph.""" + G = nx.null_graph() + with pytest.raises( + nx.NetworkXError, match="Expected non-empty NetworkX graph!" + ): + diameter(G) + + def test_undirected_non_connected(self): + """Test an undirected disconnected graph.""" + graph = nx.path_graph(10) + graph.remove_edge(3, 4) + with pytest.raises(nx.NetworkXError, match="Graph not connected."): + diameter(graph) + + def test_directed_non_strongly_connected(self): + """Test a directed non strongly connected graph.""" + graph = nx.path_graph(10, create_using=nx.DiGraph()) + with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."): + diameter(graph) + + def test_complete_undirected_graph(self): + """Test a complete undirected graph.""" + graph = nx.complete_graph(10) + assert diameter(graph) == 1 + + def test_complete_directed_graph(self): + """Test a complete directed graph.""" + graph = nx.complete_graph(10, create_using=nx.DiGraph()) + assert diameter(graph) == 1 + + def test_undirected_path_graph(self): + """Test an undirected path graph with 10 nodes.""" + graph = nx.path_graph(10) + assert diameter(graph) == 9 + + def test_directed_path_graph(self): + """Test a directed path graph with 10 nodes.""" + graph = nx.path_graph(10).to_directed() + assert diameter(graph) == 9 + + def test_single_node(self): + """Test a graph which contains just a node.""" + graph = nx.Graph() + graph.add_node(1) + assert diameter(graph) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py new file mode 100644 index 0000000..892ce34 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py @@ -0,0 +1,67 @@ +import networkx as nx +from networkx.algorithms.approximation import ( + min_edge_dominating_set, + min_weighted_dominating_set, +) + + +class TestMinWeightDominatingSet: + def test_min_weighted_dominating_set(self): + graph = nx.Graph() + graph.add_edge(1, 2) + graph.add_edge(1, 5) + graph.add_edge(2, 3) + graph.add_edge(2, 5) + graph.add_edge(3, 4) + graph.add_edge(3, 6) + graph.add_edge(5, 6) + + 
vertices = {1, 2, 3, 4, 5, 6} + # due to ties, this might be hard to test tight bounds + dom_set = min_weighted_dominating_set(graph) + for vertex in vertices - dom_set: + neighbors = set(graph.neighbors(vertex)) + assert len(neighbors & dom_set) > 0, "Non dominating set found!" + + def test_star_graph(self): + """Tests that an approximate dominating set for the star graph, + even when the center node does not have the smallest integer + label, gives just the center node. + + For more information, see #1527. + + """ + # Create a star graph in which the center node has the highest + # label instead of the lowest. + G = nx.star_graph(10) + G = nx.relabel_nodes(G, {0: 9, 9: 0}) + assert min_weighted_dominating_set(G) == {9} + + def test_min_edge_dominating_set(self): + graph = nx.path_graph(5) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" + + graph = nx.complete_graph(10) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py new file mode 100644 index 0000000..6b28031 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py @@ -0,0 +1,301 @@ +# Test for approximation to k-components algorithm +import pytest + +import networkx as nx +from networkx.algorithms.approximation import k_components +from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same + + +def build_k_number_dict(k_components): + k_num = {} + for k, comps in sorted(k_components.items()): + for comp in comps: + for node in comp: + k_num[node] = k + return k_num + + +## +# Some nice synthetic graphs +## + + +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = 
nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! + # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +# Helper function + + +def _check_connectivity(G): + result = k_components(G) + for k, components in result.items(): + if k < 3: + continue + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_connectivity(G) + + +def test_example_1(): + G = graph_example_1() + _check_connectivity(G) + + +def test_karate_0(): + G = nx.karate_club_graph() + _check_connectivity(G) + + +def test_karate_1(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + approx_karate_k_num = karate_k_num.copy() + approx_karate_k_num[24] = 2 + approx_karate_k_num[25] = 2 + G = nx.karate_club_graph() + k_comps = k_components(G) + k_num = build_k_number_dict(k_comps) + assert k_num in (karate_k_num, approx_karate_k_num) + + +def test_example_1_detail_3_and_4(): + G = graph_example_1() + result = k_components(G) + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. + assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + # Finally check that the k-components detected have actually node + # connectivity >= k. 
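+ # (i.e. nx.node_connectivity(G.subgraph(component)) must be at least k for
+ # every component reported at level k; levels 1 and 2 are exact by
+ # construction, so only k >= 3 needs to be verified below)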
+ for k, components in result.items(): + if k < 3: + continue + for component in components: + K = nx.node_connectivity(G.subgraph(component)) + assert K >= k + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.4, directed=True) + kc = k_components(G) + + +def test_same(): + equal = {"A": 2, "B": 2, "C": 2} + slightly_different = {"A": 2, "B": 1, "C": 2} + different = {"A": 2, "B": 8, "C": 18} + assert _same(equal) + assert not _same(slightly_different) + assert _same(slightly_different, tol=1) + assert not _same(different) + assert not _same(different, tol=4) + + +class TestAntiGraph: + @classmethod + def setup_class(cls): + cls.Gnp = nx.gnp_random_graph(20, 0.8) + cls.Anp = _AntiGraph(nx.complement(cls.Gnp)) + cls.Gd = nx.davis_southern_women_graph() + cls.Ad = _AntiGraph(nx.complement(cls.Gd)) + cls.Gk = nx.karate_club_graph() + cls.Ak = _AntiGraph(nx.complement(cls.Gk)) + cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)] + + def test_size(self): + for G, A in self.GA: + n = G.order() + s = len(list(G.edges())) + len(list(A.edges())) + assert s == (n * (n - 1)) / 2 + + def test_degree(self): + for G, A in self.GA: + assert sorted(G.degree()) == sorted(A.degree()) + + def test_core_number(self): + for G, A in self.GA: + assert nx.core_number(G) == nx.core_number(A) + + def test_connected_components(self): + for G, A in self.GA: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert comp in gc + + def test_adj(self): + for G, A in self.GA: + for n, nbrs in G.adj.items(): + a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items()) + g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items()) + assert a_adj == g_adj + + def test_adjacency(self): + for G, A in self.GA: + a_adj = list(A.adjacency()) + for n, nbrs in G.adjacency(): + assert (n, set(nbrs)) in a_adj + + def test_neighbors(self): + for G, A in self.GA: + node = list(G.nodes())[0] + assert set(G.neighbors(node)) == set(A.neighbors(node)) + + def test_node_not_in_graph(self): + for G, A in self.GA: + node = "non_existent_node" + pytest.raises(nx.NetworkXError, A.neighbors, node) + pytest.raises(nx.NetworkXError, G.neighbors, node) + + def test_degree_thingraph(self): + for G, A in self.GA: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert G.degree(node) == A.degree(node) + assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree()) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert sum(d for n, d in A.degree()) == sum( + d for n, d in A.degree(weight="weight") + ) + assert sum(d for n, d in G.degree(nodes)) == sum( + d for n, d in A.degree(nodes) + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_matching.py new file mode 100644 index 0000000..f50da3d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_matching.py @@ -0,0 +1,8 @@ +import networkx as nx +import networkx.algorithms.approximation as a + + +def test_min_maximal_matching(): + # smoke test + G = nx.Graph() + assert len(a.min_maximal_matching(G)) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py new file mode 100644 index 0000000..ec75b59 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py @@ -0,0 +1,82 @@ +import random + +import networkx as nx +from networkx.algorithms.approximation import maxcut + + +def _is_valid_cut(G, set1, set2): + union = set1.union(set2) + assert union == set(G.nodes) + assert len(set1) + len(set2) == G.number_of_nodes() + + +def _cut_is_locally_optimal(G, cut_size, set1): + # test if cut can be locally improved + for i, node in enumerate(set1): + cut_size_without_node = nx.algorithms.cut_size( + G, set1 - {node}, weight="weight" + ) + assert cut_size_without_node <= cut_size + + +def test_random_partitioning(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, seed=5) + _is_valid_cut(G, set1, set2) + + +def test_random_partitioning_all_to_one(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, p=1) + _is_valid_cut(G, set1, set2) + assert len(set1) == G.number_of_nodes() + assert len(set2) == 0 + + +def test_one_exchange_basic(): + G = nx.complete_graph(5) + random.seed(5) + for (u, v, w) in G.edges(data=True): + w["weight"] = random.randrange(-100, 100, 1) / 10 + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange( + G, initial_cut, weight="weight", seed=5 + ) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + + +def test_one_exchange_optimal(): + # Greedy one exchange should find the optimal solution for this graph (14) + G = nx.Graph() + G.add_edge(1, 2, weight=3) + G.add_edge(1, 3, weight=3) + G.add_edge(1, 4, weight=3) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=5) + + cut_size, (set1, set2) = maxcut.one_exchange(G, weight="weight", seed=5) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + # check global optimality + assert cut_size == 14 + + +def test_negative_weights(): + G = nx.complete_graph(5) + random.seed(5) + for (u, v, w) in G.edges(data=True): + w["weight"] = -1 * random.random() + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange(G, initial_cut, weight="weight") + + # make sure it is a valid cut + _is_valid_cut(G, set1, set2) + # check local optimality + _cut_is_locally_optimal(G, cut_size, set1) + # test that all nodes are in the same partition + assert len(set1) == len(G.nodes) or len(set2) == len(G.nodes) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py new file mode 100644 index 0000000..856a8ef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py @@ -0,0 +1,31 @@ +import networkx as nx +import networkx.algorithms.approximation as apxa + + +def test_ramsey(): + # this should only find the complete graph + graph = nx.complete_graph(10) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # this trival graph has no cliques. should just find i-sets + graph = nx.trivial_graph() + c, i = apxa.ramsey_R2(graph) + assert c == {0}, "clique not correctly found by ramsey!" + assert i == {0}, "i-set not correctly found by ramsey!" 
+ + graph = nx.barbell_graph(10, 5, nx.Graph()) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # add self-loops and test again + graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)]) + cc, ii = apxa.ramsey_R2(graph) + assert cc == c + assert ii == i diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py new file mode 100644 index 0000000..d58eb66 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py @@ -0,0 +1,83 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation.steinertree import metric_closure, steiner_tree +from networkx.utils import edges_equal + + +class TestSteinerTree: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_edge(1, 2, weight=10) + G.add_edge(2, 3, weight=10) + G.add_edge(3, 4, weight=10) + G.add_edge(4, 5, weight=10) + G.add_edge(5, 6, weight=10) + G.add_edge(2, 7, weight=1) + G.add_edge(7, 5, weight=1) + cls.G = G + cls.term_nodes = [1, 2, 3, 4, 5] + + def test_connected_metric_closure(self): + G = self.G.copy() + G.add_node(100) + pytest.raises(nx.NetworkXError, metric_closure, G) + + def test_metric_closure(self): + M = metric_closure(self.G) + mc = [ + (1, 2, {"distance": 10, "path": [1, 2]}), + (1, 3, {"distance": 20, "path": [1, 2, 3]}), + (1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}), + (1, 5, {"distance": 12, "path": [1, 2, 7, 5]}), + (1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}), + (1, 7, {"distance": 11, "path": [1, 2, 7]}), + (2, 3, {"distance": 10, "path": [2, 3]}), + (2, 4, {"distance": 12, "path": [2, 7, 5, 4]}), + (2, 5, {"distance": 2, "path": [2, 7, 5]}), + (2, 6, {"distance": 12, "path": [2, 7, 5, 6]}), + (2, 7, {"distance": 1, "path": [2, 7]}), + (3, 4, {"distance": 10, "path": [3, 4]}), + (3, 5, {"distance": 12, "path": [3, 2, 7, 5]}), + (3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}), + (3, 7, {"distance": 11, "path": [3, 2, 7]}), + (4, 5, {"distance": 10, "path": [4, 5]}), + (4, 6, {"distance": 20, "path": [4, 5, 6]}), + (4, 7, {"distance": 11, "path": [4, 5, 7]}), + (5, 6, {"distance": 10, "path": [5, 6]}), + (5, 7, {"distance": 1, "path": [5, 7]}), + (6, 7, {"distance": 11, "path": [6, 5, 7]}), + ] + assert edges_equal(list(M.edges(data=True)), mc) + + def test_steiner_tree(self): + S = steiner_tree(self.G, self.term_nodes) + expected_steiner_tree = [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (5, 7, {"weight": 1}), + ] + assert edges_equal(list(S.edges(data=True)), expected_steiner_tree) + + def test_multigraph_steiner_tree(self): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2, 0, {"weight": 1}), + (2, 3, 0, {"weight": 999}), + (2, 3, 1, {"weight": 1}), + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + ) + terminal_nodes = [2, 4, 5] + expected_edges = [ + (2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + T = steiner_tree(G, terminal_nodes) + assert edges_equal(T.edges(data=True, keys=True), expected_edges) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py new file mode 100644 index 0000000..6f9b3b0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py @@ -0,0 +1,963 @@ +"""Unit tests for the traveling_salesman module.""" +import random + +import pytest + +import networkx as nx +import networkx.algorithms.approximation as nx_app + +pairwise = nx.utils.pairwise + + +def test_christofides_hamiltonian(): + random.seed(42) + G = nx.complete_graph(20) + for (u, v) in G.edges(): + G[u][v]["weight"] = random.randint(0, 10) + + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + tree = nx.minimum_spanning_tree(G, weight="weight") + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G, tree))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + +def test_christofides_incomplete_graph(): + G = nx.complete_graph(10) + G.remove_edge(0, 1) + pytest.raises(nx.NetworkXError, nx_app.christofides, G) + + +def test_christofides_ignore_selfloops(): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.christofides(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +# set up graphs for other tests +class TestBase: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + cls.DG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 3), + ("B", "C", 12), + ("B", "D", 16), + ("C", "A", 13), + ("C", "B", 12), + ("C", "D", 4), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG_cycle = ["D", "C", "B", "A", "D"] + cls.DG_cost = 31.0 + + cls.DG2 = nx.DiGraph() + cls.DG2.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 30), + ("B", "C", 2), + ("B", "D", 16), + ("C", "A", 33), + ("C", "B", 32), + ("C", "D", 34), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG2_cycle = ["D", "A", "B", "C", "D"] + cls.DG2_cost = 53.0 + + cls.unweightedUG = nx.complete_graph(5, nx.Graph()) + cls.unweightedDG = nx.complete_graph(5, nx.DiGraph()) + + cls.incompleteUG = nx.Graph() + cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + cls.incompleteDG = nx.DiGraph() + cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + + cls.UG = nx.Graph() + cls.UG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "C", 12), + ("B", "D", 16), + ("C", "D", 4), + } + ) + cls.UG_cycle = ["D", "C", "B", "A", "D"] + cls.UG_cost = 33.0 + + cls.UG2 = nx.Graph() + cls.UG2.add_weighted_edges_from( + { + ("A", "B", 1), + ("A", "C", 15), + ("A", "D", 5), + ("B", "C", 16), + ("B", "D", 8), + ("C", "D", 3), + } + ) + cls.UG2_cycle = ["D", "C", "B", "A", "D"] + cls.UG2_cost = 25.0 + + +def validate_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln + assert cost == exp_cost + + +def validate_symmetric_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln or soln == exp_soln[::-1] + assert cost == exp_cost + + +class TestGreedyTSP(TestBase): + def test_greedy(self): + cycle = nx_app.greedy_tsp(self.DG, source="D") + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0) + + cycle = nx_app.greedy_tsp(self.DG2, source="D") + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, 
cost, ["D", "C", "B", "A", "D"], 78.0) + + cycle = nx_app.greedy_tsp(self.UG, source="D") + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0) + + cycle = nx_app.greedy_tsp(self.UG2, source="D") + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0) + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG) + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG) + + def test_not_weighted_graph(self): + nx_app.greedy_tsp(self.unweightedUG) + nx_app.greedy_tsp(self.unweightedDG) + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + cycle = nx_app.greedy_tsp(G) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.greedy_tsp(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +class TestSimulatedAnnealingTSP(TestBase): + tsp = staticmethod(nx_app.simulated_annealing_tsp) + + def test_simulated_annealing_directed(self): + cycle = self.tsp(self.DG, "greedy", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "B", "A", "C", "D"] + cycle = self.tsp(self.DG, initial_sol, source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "A", "C", "B", "D"] + cycle = self.tsp(self.DG, initial_sol, move="1-0", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + cycle = self.tsp(self.DG2, "greedy", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + cycle = self.tsp(self.DG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + def test_simulated_annealing_undirected(self): + cycle = self.tsp(self.UG, "greedy", source="D", seed=42) + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.UG_cycle, self.UG_cost) + + cycle = self.tsp(self.UG2, "greedy", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + cycle = self.tsp(self.UG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + def test_error_on_input_order_mistake(self): + # see issue #4846 https://github.com/networkx/networkx/issues/4846 + pytest.raises(TypeError, self.tsp, self.UG, weight="weight") + pytest.raises(nx.NetworkXError, self.tsp, self.UG, "weight") + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteUG, "greedy", source=0) + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteDG, "greedy", source=0) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle 
= self.tsp(G, "greedy") + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + def test_not_weighted_graph(self): + self.tsp(self.unweightedUG, "greedy") + self.tsp(self.unweightedDG, "greedy") + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + + cycle = self.tsp(G, "greedy", source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + cycle = self.tsp(G, [1, 2, 1], source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Simulated Annealing Version: + # set number of moves low and alpha high + cycle = self.tsp( + self.DG2, "greedy", source="D", move="1-0", alpha=1, N_inner=1, seed=42 + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG2_cost + + # Try with an incorrect initial guess + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, + initial_sol, + source="D", + move="1-0", + alpha=0.1, + N_inner=1, + max_iterations=1, + seed=42, + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG_cost + + +class TestThresholdAcceptingTSP(TestSimulatedAnnealingTSP): + tsp = staticmethod(nx_app.threshold_accepting_tsp) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Threshold Version: + # set number of moves low and number of iterations low + cycle = self.tsp( + self.DG2, + "greedy", + source="D", + move="1-0", + N_inner=1, + max_iterations=1, + seed=4, + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG2_cost + + # set threshold too low + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42 + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG_cost + + +# Tests for function traveling_salesman_problem +def test_TSP_method(): + G = nx.cycle_graph(9) + G[4][5]["weight"] = 10 + + def my_tsp_method(G, weight): + return nx_app.simulated_annealing_tsp(G, "greedy", weight, source=4, seed=1) + + path = nx_app.traveling_salesman_problem(G, method=my_tsp_method, cycle=False) + print(path) + assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5] + + +def test_TSP_unweighted(): + G = nx.cycle_graph(9) + path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False) + assert path in ([3, 4, 5, 6], [6, 5, 4, 3]) + + cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6]) + assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6]) + + +def test_TSP_weighted(): + G = nx.cycle_graph(9) + G[0][1]["weight"] = 2 + G[1][2]["weight"] = 2 + G[2][3]["weight"] = 2 + G[3][4]["weight"] = 4 + G[4][5]["weight"] = 5 + G[5][6]["weight"] = 4 + G[6][7]["weight"] = 2 + G[7][8]["weight"] = 2 + G[8][0]["weight"] = 2 + tsp = nx_app.traveling_salesman_problem + + # path between 3 and 6 + expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3]) + # cycle between 3 and 6 + expected_cycles = ( + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3], + [6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6], + ) + # path through all nodes + expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5]) + + # Check default method + cycle = tsp(G, nodes=[3, 6], weight="weight") + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", 
cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", cycle=False) + assert tourpath in expected_tourpaths + + # Check all methods + methods = [ + nx_app.christofides, + nx_app.greedy_tsp, + lambda G, wt: nx_app.simulated_annealing_tsp(G, "greedy", weight=wt), + lambda G, wt: nx_app.threshold_accepting_tsp(G, "greedy", weight=wt), + ] + for method in methods: + cycle = tsp(G, nodes=[3, 6], weight="weight", method=method) + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", method=method, cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", method=method, cycle=False) + assert tourpath in expected_tourpaths + + +def test_TSP_incomplete_graph_short_path(): + G = nx.cycle_graph(9) + G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)]) + G[4][5]["weight"] = 5 + + cycle = nx_app.traveling_salesman_problem(G) + print(cycle) + assert len(cycle) == 17 and len(set(cycle)) == 12 + + # make sure that cutting one edge out of complete graph formulation + # cuts out many edges out of the path of the TSP + path = nx_app.traveling_salesman_problem(G, cycle=False) + print(path) + assert len(path) == 13 and len(set(path)) == 12 + + +def test_held_karp_ascent(): + """ + Test the Held-Karp relaxation with the ascent method + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # Adjacency matrix from page 1153 of the 1970 Held and Karp paper + # which have been edited to be directional, but also symmetric + G_array = np.array( + [ + [0, 97, 60, 73, 17, 52], + [97, 0, 41, 52, 90, 30], + [60, 41, 0, 21, 35, 41], + [73, 52, 21, 0, 95, 46], + [17, 90, 35, 95, 0, 81], + [52, 30, 41, 46, 81, 0], + ] + ) + + solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 207.00 + # Check that the z_stars are the same + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_fractional_solution(): + """ + Test the ascent method using a modified version of Figure 2 on page 1140 + in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and + Karp + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and is a complete directed graph with infinite edge weights for the + # edges not listed in the original graph + G_array = np.array( + [ + [0, 100, 100, 100000, 100000, 1], + [100, 0, 100, 100000, 1, 100000], + [100, 100, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 100, 100], + [100000, 1, 100000, 100, 0, 100], + [1, 100000, 100000, 100, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 303.00 + # Check 
that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_ascent_method_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 190.00 + # Check that the z_stars match. + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_method_asymmetric_2(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 45, 39, 92, 29, 31], + [72, 0, 4, 12, 21, 60], + [81, 6, 0, 98, 70, 53], + [49, 71, 59, 0, 98, 94], + [74, 95, 24, 43, 0, 47], + [56, 43, 3, 65, 22, 0], + ] + ) + + solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 144.00 + # Check that the z_stars match. + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_held_karp_ascent_asymmetric_3(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced. + + In this graph their are two different optimal, integral solutions (which + are also the overall atsp solutions) to the Held Karp relaxation. However, + this particular graph has two different tours of optimal value and the + possible solutions in the held_karp_ascent function are not stored in an + ordered data structure. 
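The expected optima asserted in these Held-Karp tests were obtained by brute force; a generic sketch of how such a reference value can be reproduced for a small weight matrix (feasible only for a handful of nodes):

from itertools import permutations

def brute_force_atsp_cost(W):
    # Exact optimal tour cost for a small (possibly asymmetric) weight matrix W,
    # found by enumerating every tour that starts and ends at node 0.
    n = len(W)
    best = float("inf")
    for perm in permutations(range(1, n)):
        tour = (0, *perm, 0)
        cost = sum(W[u][v] for u, v in zip(tour, tour[1:]))
        best = min(best, cost)
    return best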
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 1, 5, 2, 7, 4], + [7, 0, 7, 7, 1, 4], + [4, 7, 0, 9, 2, 1], + [7, 2, 7, 0, 4, 4], + [5, 5, 4, 4, 0, 3], + [3, 9, 1, 3, 4, 0], + ] + ) + + solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)] + + solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + assert round(opt_hk, 2) == 13.00 + # Check that the z_stars are the same + solution1 = nx.DiGraph() + solution1.add_edges_from(solution1_edges) + solution2 = nx.DiGraph() + solution2.add_edges_from(solution2_edges) + assert nx.utils.edges_equal(z_star.edges, solution1.edges) or nx.utils.edges_equal( + z_star.edges, solution2.edges + ) + + +def test_held_karp_ascent_fractional_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 100, 150, 100000, 100000, 1], + [150, 0, 100, 100000, 1, 100000], + [100, 150, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 150, 100], + [100000, 2, 100000, 100, 0, 150], + [2, 100000, 100000, 150, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 5 / 12, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 5 / 12, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 5 / 12, + (3, 5): 5 / 12, + (4, 1): 5 / 6, + (4, 3): 5 / 12, + (4, 5): 5 / 12, + (5, 0): 5 / 6, + (5, 3): 5 / 12, + (5, 4): 5 / 12, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 304.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_spanning_tree_distribution(): + """ + Test that we can create an exponential distribution of spanning trees such + that the probability of each tree is proportional to the product of edge + weights. + + Results of this test have been confirmed with hypothesis testing from the + created distribution. + + This test uses the symmetric, fractional Held Karp solution. 
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + solution_gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of z_star + G = nx.MultiGraph() + for u, v in z_star: + if (u, v) in G.edges or (v, u) in G.edges: + continue + G.add_edge(u, v) + + gamma = tsp.spanning_tree_distribution(G, z_star) + + assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma + + +def test_asadpour_tsp(): + """ + Test the complete asadpour tsp algorithm with the fractional, symmetric + Held Karp solution. This test also uses an incomplete graph as input. + """ + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and the 0 weight edges have a weight of 1. + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + edge_list = [ + (0, 1, 100), + (0, 2, 100), + (0, 5, 1), + (1, 2, 100), + (1, 4, 1), + (2, 3, 1), + (3, 4, 100), + (3, 5, 100), + (4, 5, 100), + (1, 0, 100), + (2, 0, 100), + (5, 0, 1), + (2, 1, 100), + (4, 1, 1), + (3, 2, 1), + (4, 3, 100), + (5, 3, 100), + (5, 4, 100), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edge_list) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 19) + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + # Check that the returned list is a valid tour. Because this is an + # incomplete graph, the conditions are not as strict. We need the tour to + # + # Start and end at the same node + # Pass through every vertex at least once + # Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal + # + # For the second condition it is possible to have the tour pass through the + # same vertex more then. Imagine that the tour on the complete version takes + # an edge not in the original graph. In the output this is substituted with + # the shortest path between those vertices, allowing vertices to appear more + # than once. + # + # However, we are using a fixed random number generator so we know what the + # expected tour is. + expected_tours = [[1, 4, 5, 0, 2, 3, 2, 1], [3, 2, 0, 1, 4, 5, 3]] + + assert tour in expected_tours + + +def test_asadpour_real_world(): + """ + This test uses airline prices between the six largest cities in the US. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + + This test also uses the `source` keyword argument to ensure that the tour + always starts at city 0. 
+ """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_tours = [ + ["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"], + ["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 37, source="JFK") + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + assert tour in expected_tours + + +def test_asadpour_real_world_path(): + """ + This test uses airline prices between the six largest cities in the US. This + time using a path, not a cycle. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_paths = [ + ["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 56) + + path = nx_app.traveling_salesman_problem( + G, weight="weight", cycle=False, method=fixed_asadpour + ) + + assert path in expected_paths + + +def test_asadpour_disconnected_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + disconnected graph. + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.add_node(5) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_incomplete_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + incomplete graph + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.remove_edge(0, 1) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_empty_graph(): + """ + Test the asadpour_atsp function with an empty graph + """ + G = nx.DiGraph() + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +@pytest.mark.slow +def test_asadpour_integral_held_karp(): + """ + This test uses an integral held karp solution and the held karp function + will return a graph rather than a dict, bypassing most of the asadpour + algorithm. 
+ + At first glance, this test probably doesn't look like it ensures that we + skip the rest of the asadpour algorithm, but it does. We are not fixing a + see for the random number generator, so if we sample any spanning trees + the approximation would be different basically every time this test is + executed but it is not since held karp is deterministic and we do not + reach the portion of the code with the dependence on random numbers. + """ + np = pytest.importorskip("numpy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + for _ in range(2): + tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp) + + assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour + + +def test_directed_tsp_impossible(): + """ + Test the asadpour algorithm with a graph without a hamiltonian circuit + """ + pytest.importorskip("numpy") + + # In this graph, once we leave node 0 we cannot return + edges = [ + (0, 1, 10), + (0, 2, 11), + (0, 3, 12), + (1, 2, 4), + (1, 3, 6), + (2, 1, 3), + (2, 3, 2), + (3, 1, 5), + (3, 2, 1), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + + pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py new file mode 100644 index 0000000..37619db --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py @@ -0,0 +1,274 @@ +import itertools + +import networkx as nx +from networkx.algorithms.approximation import ( + treewidth_min_degree, + treewidth_min_fill_in, +) +from networkx.algorithms.approximation.treewidth import ( + MinDegreeHeuristic, + min_fill_in_heuristic, +) + + +def is_tree_decomp(graph, decomp): + """Check if the given tree decomposition is valid.""" + for x in graph.nodes(): + appear_once = False + for bag in decomp.nodes(): + if x in bag: + appear_once = True + break + assert appear_once + + # Check if each connected pair of nodes are at least once together in a bag + for (x, y) in graph.edges(): + appear_together = False + for bag in decomp.nodes(): + if x in bag and y in bag: + appear_together = True + break + assert appear_together + + # Check if the nodes associated with vertex v form a connected subset of T + for v in graph.nodes(): + subset = [] + for bag in decomp.nodes(): + if v in bag: + subset.append(bag) + sub_graph = decomp.subgraph(subset) + assert nx.is_connected(sub_graph) + + +class TestTreewidthMinDegree: + """Unit tests for the min_degree function""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 3) + cls.small_tree.add_edge(4, 3) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 5) + cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(0, 1) # deg(0) = 1 + + cls.deterministic_graph.add_edge(1, 2) # deg(1) = 2 + + cls.deterministic_graph.add_edge(2, 3) + 
cls.deterministic_graph.add_edge(2, 4) # deg(2) = 3 + + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(3, 6) # deg(3) = 4 + + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(4, 6) + cls.deterministic_graph.add_edge(4, 7) # deg(4) = 5 + + cls.deterministic_graph.add_edge(5, 6) + cls.deterministic_graph.add_edge(5, 7) + cls.deterministic_graph.add_edge(5, 8) + cls.deterministic_graph.add_edge(5, 9) # deg(5) = 6 + + cls.deterministic_graph.add_edge(6, 7) + cls.deterministic_graph.add_edge(6, 8) + cls.deterministic_graph.add_edge(6, 9) # deg(6) = 6 + + cls.deterministic_graph.add_edge(7, 8) + cls.deterministic_graph.add_edge(7, 9) # deg(7) = 5 + + cls.deterministic_graph.add_edge(8, 9) # deg(8) = 4 + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_degree(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test small tree + + Test if the computed treewidth of the known self.small_tree is 2. + As we know which value we can expect from our heuristic, values other + than two are regressions + """ + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test heuristic abort condition for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + + deg_heuristic = MinDegreeHeuristic(graph) + node = deg_heuristic.best_node(graph) + if node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_degree(G) + + def test_two_component_graph(self): + """Test empty graph""" + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_degree(G) + assert treewidth == 0 + + def test_heuristic_first_steps(self): + """Test first steps of min_degree heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + deg_heuristic = MinDegreeHeuristic(graph) + elim_node = deg_heuristic.best_node(graph) + print(f"Graph {graph}:") + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = deg_heuristic.best_node(graph) + + # check only the first 5 elements for equality + assert steps[:5] == [0, 1, 2, 3, 4] + + +class TestTreewidthMinFillIn: + """Unit tests for the treewidth_min_fill_in function.""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 2) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 4) + cls.small_tree.add_edge(1, 4) + cls.small_tree.add_edge(2, 4) + cls.small_tree.add_edge(4, 5) + cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = 
nx.Graph() + cls.deterministic_graph.add_edge(1, 2) + cls.deterministic_graph.add_edge(1, 3) + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(2, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(3, 6) + cls.deterministic_graph.add_edge(5, 6) + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_fill_in(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test if the computed treewidth of the known self.small_tree is 2""" + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test if min_fill_in returns None for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + next_node = min_fill_in_heuristic(graph) + if next_node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_fill_in(G) + + def test_two_component_graph(self): + """Test empty graph""" + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 0 + + def test_heuristic_first_steps(self): + """Test first steps of min_fill_in heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + + # check only the first 2 elements for equality + assert steps[:2] == [6, 5] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py new file mode 100644 index 0000000..5cc5a38 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py @@ -0,0 +1,68 @@ +import networkx as nx +from networkx.algorithms.approximation import min_weighted_vertex_cover + + +def is_cover(G, node_cover): + return all({u, v} & node_cover for u, v in G.edges()) + + +class TestMWVC: + """Unit tests for the approximate minimum weighted vertex cover + function, + :func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`. + + """ + + def test_unweighted_directed(self): + # Create a star graph in which half the nodes are directed in + # and half are directed out. 
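+        # Node 0 is an endpoint of every edge, so the singleton cover {0}
+        # should be returned regardless of which way each edge points.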
+ G = nx.DiGraph() + G.add_edges_from((0, v) for v in range(1, 26)) + G.add_edges_from((v, 0) for v in range(26, 51)) + cover = min_weighted_vertex_cover(G) + assert 1 == len(cover) + assert is_cover(G, cover) + + def test_unweighted_undirected(self): + # create a simple star graph + size = 50 + sg = nx.star_graph(size) + cover = min_weighted_vertex_cover(sg) + assert 1 == len(cover) + assert is_cover(sg, cover) + + def test_weighted(self): + wg = nx.Graph() + wg.add_node(0, weight=10) + wg.add_node(1, weight=1) + wg.add_node(2, weight=1) + wg.add_node(3, weight=1) + wg.add_node(4, weight=1) + + wg.add_edge(0, 1) + wg.add_edge(0, 2) + wg.add_edge(0, 3) + wg.add_edge(0, 4) + + wg.add_edge(1, 2) + wg.add_edge(2, 3) + wg.add_edge(3, 4) + wg.add_edge(4, 1) + + cover = min_weighted_vertex_cover(wg, weight="weight") + csum = sum(wg.nodes[node]["weight"] for node in cover) + assert 4 == csum + assert is_cover(wg, cover) + + def test_unweighted_self_loop(self): + slg = nx.Graph() + slg.add_node(0) + slg.add_node(1) + slg.add_node(2) + + slg.add_edge(0, 1) + slg.add_edge(2, 2) + + cover = min_weighted_vertex_cover(slg) + assert 2 == len(cover) + assert is_cover(slg, cover) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/traveling_salesman.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/traveling_salesman.py new file mode 100644 index 0000000..806c8b7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/traveling_salesman.py @@ -0,0 +1,1434 @@ +""" +================================= +Travelling Salesman Problem (TSP) +================================= + +Implementation of approximate algorithms +for solving and approximating the TSP problem. + +Categories of algorithms which are implemented: + +- Christofides (provides a 3/2-approximation of TSP) +- Greedy +- Simulated Annealing (SA) +- Threshold Accepting (TA) +- Asadpour Asymmetric Traveling Salesman Algorithm + +The Travelling Salesman Problem tries to find, given the weight +(distance) between all points where a salesman has to visit, the +route so that: + +- The total distance (cost) which the salesman travels is minimized. +- The salesman returns to the starting point. +- Note that for a complete graph, the salesman visits each point once. + +The function `travelling_salesman_problem` allows for incomplete +graphs by finding all-pairs shortest paths, effectively converting +the problem to a complete graph problem. It calls one of the +approximate methods on that problem and then converts the result +back to the original graph using the previously found shortest paths. + +TSP is an NP-hard problem in combinatorial optimization, +important in operations research and theoretical computer science. + +http://en.wikipedia.org/wiki/Travelling_salesman_problem +""" +import math + +import networkx as nx +from networkx.algorithms.tree.mst import random_spanning_tree +from networkx.utils import not_implemented_for, pairwise, py_random_state + +__all__ = [ + "traveling_salesman_problem", + "christofides", + "asadpour_atsp", + "greedy_tsp", + "simulated_annealing_tsp", + "threshold_accepting_tsp", +] + + +def swap_two_nodes(soln, seed): + """Swap two nodes in `soln` to give a neighbor solution. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. 
(A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + move_one_node + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln[a], soln[b] = soln[b], soln[a] + return soln + + +def move_one_node(soln, seed): + """Move one node to another position to give a neighbor solution. + + The node to move and the position to move to are chosen randomly. + The first and last nodes are left untouched as soln must be a cycle + starting at that node. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + swap_two_nodes + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln.insert(b, soln.pop(a)) + return soln + + +@not_implemented_for("directed") +def christofides(G, weight="weight", tree=None): + """Approximate a solution of the traveling salesman problem + + Compute a 3/2-approximation of the traveling salesman problem + in a complete undirected graph using Christofides [1]_ algorithm. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + tree : NetworkX graph or None (default: None) + A minimum spanning tree of G. Or, if None, the minimum spanning + tree is computed using :func:`networkx.minimum_spanning_tree` + + Returns + ------- + list + List of nodes in `G` along a cycle with a 3/2-approximation of + the minimal Hamiltonian cycle. + + References + ---------- + .. [1] Christofides, Nicos. "Worst-case analysis of a new heuristic for + the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ + Pittsburgh Pa Management Sciences Research Group, 1976. + """ + # Remove selfloops if necessary + loop_nodes = nx.nodes_with_selfloops(G) + try: + node = next(loop_nodes) + except StopIteration: + pass + else: + G = G.copy() + G.remove_edge(node, node) + G.remove_edges_from((n, n) for n in loop_nodes) + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. 
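+    # In a complete graph every node has exactly N = len(G) - 1 neighbors,
+    # so any node with a different adjacency count proves G is incomplete.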
+ if any(len(nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if tree is None: + tree = nx.minimum_spanning_tree(G, weight=weight) + L = G.copy() + L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)]) + MG = nx.MultiGraph() + MG.add_edges_from(tree.edges) + edges = nx.min_weight_matching(L, maxcardinality=True, weight=weight) + MG.add_edges_from(edges) + return _shortcutting(nx.eulerian_circuit(MG)) + + +def _shortcutting(circuit): + """Remove duplicate nodes in the path""" + nodes = [] + for u, v in circuit: + if v in nodes: + continue + if not nodes: + nodes.append(u) + nodes.append(v) + nodes.append(nodes[0]) + return nodes + + +def traveling_salesman_problem(G, weight="weight", nodes=None, cycle=True, method=None): + """Find the shortest path in `G` connecting specified nodes + + This function allows approximate solution to the traveling salesman + problem on networks that are not complete graphs and/or where the + salesman does not need to visit all nodes. + + This function proceeds in two steps. First, it creates a complete + graph using the all-pairs shortest_paths between nodes in `nodes`. + Edge weights in the new graph are the lengths of the paths + between each pair of nodes in the original graph. + Second, an algorithm (default: `christofides` for undirected and + `asadpour_atsp` for directed) is used to approximate the minimal Hamiltonian + cycle on this new graph. The available algorithms are: + + - christofides + - greedy_tsp + - simulated_annealing_tsp + - threshold_accepting_tsp + - asadpour_atsp + + Once the Hamiltonian Cycle is found, this function post-processes to + accommodate the structure of the original graph. If `cycle` is ``False``, + the biggest weight edge is removed to make a Hamiltonian path. + Then each edge on the new complete graph used for that analysis is + replaced by the shortest_path between those nodes on the original graph. + + Parameters + ---------- + G : NetworkX graph + A possibly weighted graph + + nodes : collection of nodes (default=G.nodes) + collection (list, set, etc.) of nodes to visit + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + cycle : bool (default: True) + Indicates whether a cycle should be returned, or a path. + Note: the cycle is the approximate minimal cycle. + The path simply removes the biggest edge in that cycle. + + method : function (default: None) + A function that returns a cycle on all nodes and approximates + the solution to the traveling salesman problem on a complete + graph. The returned cycle is then used to find a corresponding + solution on `G`. `method` should be callable; take inputs + `G`, and `weight`; and return a list of nodes along the cycle. + + Provided options include :func:`christofides`, :func:`greedy_tsp`, + :func:`simulated_annealing_tsp` and :func:`threshold_accepting_tsp`. + + If `method is None`: use :func:`christofides` for undirected `G` and + :func:`threshold_accepting_tsp` for directed `G`. + + To specify parameters for these provided functions, construct lambda + functions that state the specific value. `method` must have 2 inputs. + (See examples). + + Returns + ------- + list + List of nodes in `G` along a path with an approximation of the minimal + path through `nodes`. 
+ + + Raises + ------ + NetworkXError + If `G` is a directed graph it has to be strongly connected or the + complete version cannot be generated. + + Examples + -------- + >>> tsp = nx.approximation.traveling_salesman_problem + >>> G = nx.cycle_graph(9) + >>> G[4][5]["weight"] = 5 # all other weights are 1 + >>> tsp(G, nodes=[3, 6]) + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3] + >>> path = tsp(G, cycle=False) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + Build (curry) your own function to provide parameter values to the methods. + + >>> SA_tsp = nx.approximation.simulated_annealing_tsp + >>> method = lambda G, wt: SA_tsp(G, "greedy", weight=wt, temp=500) + >>> path = tsp(G, cycle=False, method=method) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + """ + if method is None: + if G.is_directed(): + method = asadpour_atsp + else: + method = christofides + if nodes is None: + nodes = list(G.nodes) + + dist = {} + path = {} + for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight): + dist[n] = d + path[n] = p + + if G.is_directed(): + # If the graph is not strongly connected, raise an exception + if not nx.is_strongly_connected(G): + raise nx.NetworkXError("G is not strongly connected") + GG = nx.DiGraph() + else: + GG = nx.Graph() + for u in nodes: + for v in nodes: + if u == v: + continue + GG.add_edge(u, v, weight=dist[u][v]) + best_GG = method(GG, weight) + + if not cycle: + # find and remove the biggest edge + (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]]) + pos = best_GG.index(u) + 1 + while best_GG[pos] != v: + pos = best_GG[pos:].index(u) + 1 + best_GG = best_GG[pos:-1] + best_GG[:pos] + + best_path = [] + for u, v in pairwise(best_GG): + best_path.extend(path[u][v][:-1]) + best_path.append(v) + return best_path + + +@not_implemented_for("undirected") +@py_random_state(2) +def asadpour_atsp(G, weight="weight", seed=None, source=None): + """ + Returns an approximate solution to the traveling salesman problem. + + This approximate solution is one of the best known approximations for the + asymmetric traveling salesman problem developed by Asadpour et al, + [1]_. The algorithm first solves the Held-Karp relaxation to find a lower + bound for the weight of the cycle. Next, it constructs an exponential + distribution of undirected spanning trees where the probability of an + edge being in the tree corresponds to the weight of that edge using a + maximum entropy rounding scheme. Next we sample that distribution + $2 \\lceil \\ln n \\rceil$ times and save the minimum sampled tree once the + direction of the arcs is added back to the edges. Finally, we augment + then short circuit that graph to find the approximate tour for the + salesman. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. The + distance between all paris of nodes should be included and the triangle + inequality should hold. That is, the direct edge between any two nodes + should be the path of least cost. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + source : node label (default=`None`) + If given, return the cycle starting and ending at the given node. 
+ + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman can follow to minimize + the total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete or has less than two nodes, the algorithm raises + an exception. + + NetworkXError + If `source` is not `None` and is not a node in `G`, the algorithm raises + an exception. + + NetworkXNotImplemented + If `G` is an undirected graph. + + References + ---------- + .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi, + An o(log n/log log n)-approximation algorithm for the asymmetric + traveling salesman problem, Operations research, 65 (2017), + pp. 1043–1061 + + Examples + -------- + >>> import networkx as nx + >>> import networkx.algorithms.approximation as approx + >>> G = nx.complete_graph(3, create_using=nx.DiGraph) + >>> nx.set_edge_attributes(G, {(0, 1): 2, (1, 2): 2, (2, 0): 2, (0, 2): 1, (2, 1): 1, (1, 0): 1}, "weight") + >>> tour = approx.asadpour_atsp(G,source=0) + >>> tour + [0, 2, 1, 0] + """ + from math import ceil, exp + from math import log as ln + + # Check that G is a complete graph + N = len(G) - 1 + if N < 2: + raise nx.NetworkXError("G must have at least two nodes") + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G is not a complete DiGraph") + # Check that the source vertex, if given, is in the graph + if source is not None and source not in G.nodes: + raise nx.NetworkXError("Given source node not in G.") + + opt_hk, z_star = held_karp_ascent(G, weight) + + # Test to see if the ascent method found an integer solution or a fractional + # solution. If it is integral then z_star is a nx.Graph, otherwise it is + # a dict + if not isinstance(z_star, dict): + # Here we are using the shortcutting method to go from the list of edges + # returned from eularian_circuit to a list of nodes + return _shortcutting(nx.eulerian_circuit(z_star, source=source)) + + # Create the undirected support of z_star + z_support = nx.MultiGraph() + for u, v in z_star: + if (u, v) not in z_support.edges: + edge_weight = min(G[u][v][weight], G[v][u][weight]) + z_support.add_edge(u, v, **{weight: edge_weight}) + + # Create the exponential distribution of spanning trees + gamma = spanning_tree_distribution(z_support, z_star) + + # Write the lambda values to the edges of z_support + z_support = nx.Graph(z_support) + lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()} + nx.set_edge_attributes(z_support, lambda_dict, "weight") + del gamma, lambda_dict + + # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one + minimum_sampled_tree = None + minimum_sampled_tree_weight = math.inf + for _ in range(2 * ceil(ln(G.number_of_nodes()))): + sampled_tree = random_spanning_tree(z_support, "weight", seed=seed) + sampled_tree_weight = sampled_tree.size(weight) + if sampled_tree_weight < minimum_sampled_tree_weight: + minimum_sampled_tree = sampled_tree.copy() + minimum_sampled_tree_weight = sampled_tree_weight + + # Orient the edges in that tree to keep the cost of the tree the same. 
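+    # Each support edge was given the cheaper of the two arc weights when
+    # z_support was built, so we restore the direction realizing that weight.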
+    t_star = nx.MultiDiGraph()
+    for u, v, d in minimum_sampled_tree.edges(data=weight):
+        if d == G[u][v][weight]:
+            t_star.add_edge(u, v, **{weight: d})
+        else:
+            t_star.add_edge(v, u, **{weight: d})
+
+    # Find the node demands needed to neutralize the flow of t_star in G
+    node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star}
+    nx.set_node_attributes(G, node_demands, "demand")
+
+    # Find the min_cost_flow
+    flow_dict = nx.min_cost_flow(G, "demand")
+
+    # Build the flow into t_star
+    for source, values in flow_dict.items():
+        for target in values:
+            if (source, target) not in t_star.edges and values[target] > 0:
+                # If values[target] > 0 we have to add that many edges
+                for _ in range(values[target]):
+                    t_star.add_edge(source, target)
+
+    # Return the shortcut eulerian circuit
+    circuit = nx.eulerian_circuit(t_star, source=source)
+    return _shortcutting(circuit)
+
+
+def held_karp_ascent(G, weight="weight"):
+    """
+    Minimizes the Held-Karp relaxation of the TSP for `G`
+
+    Solves the Held-Karp relaxation of the input complete digraph and scales
+    the output solution for use in the Asadpour [1]_ ATSP algorithm.
+
+    The Held-Karp relaxation defines the lower bound for solutions to the
+    ATSP, although it may return a fractional solution. This is used in the
+    Asadpour algorithm as an initial solution which is later rounded to an
+    integral tree within the spanning tree polytopes. This function solves
+    the relaxation with the branch and bound method in [2]_.
+
+    Parameters
+    ----------
+    G : nx.DiGraph
+        The graph should be a complete weighted directed graph.
+        The distance between all pairs of nodes should be included.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    Returns
+    -------
+    OPT : float
+        The cost for the optimal solution to the Held-Karp relaxation
+    z : dict or nx.Graph
+        A symmetrized and scaled version of the optimal solution to the
+        Held-Karp relaxation for use in the Asadpour algorithm.
+
+        If an integral solution is found, then that is an optimal solution for
+        the ATSP problem and that is returned instead.
+
+    References
+    ----------
+    .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi,
+       An o(log n/log log n)-approximation algorithm for the asymmetric
+       traveling salesman problem, Operations research, 65 (2017),
+       pp. 1043–1061
+
+    .. [2] M. Held, R. M. Karp, The traveling-salesman problem and minimum
+       spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
+       pp. 1138-1162
+    """
+    import numpy as np
+    import scipy.optimize as optimize
+
+    def k_pi():
+        """
+        Find the set of minimum 1-Arborescences for G at point pi.
+
+        Returns
+        -------
+        Set
+            The set of minimum 1-Arborescences
+        """
+        # Create a copy of G without vertex 1.
+        G_1 = G.copy()
+        minimum_1_arborescences = set()
+        minimum_1_arborescence_weight = math.inf
+
+        # node is node '1' in the Held and Karp paper
+        n = next(G.__iter__())
+        G_1.remove_node(n)
+
+        # Iterate over the spanning arborescences of the graph until we know
+        # that we have found the minimum 1-arborescences. My proposed strategy
+        # is to find the most expensive root to connect to from 'node 1' and
+        # the least expensive one.
We then iterate over arborescences until
+        # the cost of the basic arborescence is the cost of the minimum one
+        # plus the difference between the most and least expensive roots,
+        # that way the cost of connecting 'node 1' will, by definition, not
+        # be minimal
+        min_root = {"node": None, weight: math.inf}
+        max_root = {"node": None, weight: -math.inf}
+        for u, v, d in G.edges(n, data=True):
+            if d[weight] < min_root[weight]:
+                min_root = {"node": v, weight: d[weight]}
+            if d[weight] > max_root[weight]:
+                max_root = {"node": v, weight: d[weight]}
+
+        min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight])
+        min_root[weight] = min_root[weight] + min_in_edge[2][weight]
+        max_root[weight] = max_root[weight] + min_in_edge[2][weight]
+
+        min_arb_weight = math.inf
+        for arb in nx.ArborescenceIterator(G_1):
+            arb_weight = arb.size(weight)
+            if min_arb_weight == math.inf:
+                min_arb_weight = arb_weight
+            elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]:
+                break
+            # We have to pick the root node of the arborescence for the out
+            # edge of the first vertex as that is the only node without an
+            # edge directed into it.
+            for N, deg in arb.in_degree:
+                if deg == 0:
+                    # root found
+                    arb.add_edge(n, N, **{weight: G[n][N][weight]})
+                    arb_weight += G[n][N][weight]
+                    break
+
+            # We can pick the minimum weight in-edge for the vertex with
+            # a cycle. If there are multiple edges with the same minimum
+            # weight, we need to add all of them.
+            #
+            # Delete the edge (N, n) so that we cannot pick it.
+            edge_data = G[N][n]
+            G.remove_edge(N, n)
+            min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2]
+            min_edges = [
+                (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight
+            ]
+            for u, v, d in min_edges:
+                new_arb = arb.copy()
+                new_arb.add_edge(u, v, **{weight: d})
+                new_arb_weight = arb_weight + d
+                # Check the weight of the arborescence: if it is a new
+                # minimum, clear all of the old potential minimum
+                # 1-arborescences and add this as the only one. If its
+                # weight is above the known minimum, do not add it.
+                if new_arb_weight < minimum_1_arborescence_weight:
+                    minimum_1_arborescences.clear()
+                    minimum_1_arborescence_weight = new_arb_weight
+                # We have a 1-arborescence, add it to the set
+                if new_arb_weight == minimum_1_arborescence_weight:
+                    minimum_1_arborescences.add(new_arb)
+            G.add_edge(N, n, **edge_data)
+
+        return minimum_1_arborescences
+
+    def direction_of_ascent():
+        """
+        Find the direction of ascent at point pi.
+
+        See [1]_ for more information.
+
+        Returns
+        -------
+        dict
+            A mapping from the nodes of the graph which represents the
+            direction of ascent.
+
+        References
+        ----------
+        .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum
+           spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
+           pp. 1138-1162
+        """
+        # 1. Set d equal to the zero n-vector.
+        d = {}
+        for n in G:
+            d[n] = 0
+        del n
+        # 2. Find a 1-Arborescence T^k such that k is in K(pi, d).
+        minimum_1_arborescences = k_pi()
+        while True:
+            # Reduce K(pi) to K(pi, d)
+            # Find the arborescence in K(pi) which increases the least in
+            # direction d
+            min_k_d_weight = math.inf
+            min_k_d = None
+            for arborescence in minimum_1_arborescences:
+                weighted_cost = 0
+                for n, deg in arborescence.degree:
+                    weighted_cost += d[n] * (deg - 2)
+                if weighted_cost < min_k_d_weight:
+                    min_k_d_weight = weighted_cost
+                    min_k_d = arborescence
+
+            # 3.
If the sum of d_i * v_{i, k} is greater than zero, terminate
+            if min_k_d_weight > 0:
+                return d, min_k_d
+            # 4. d_i = d_i + v_{i, k}
+            for n, deg in min_k_d.degree:
+                d[n] += deg - 2
+            # Check that we do not need to terminate because the direction
+            # of ascent does not exist. This is done with linear
+            # programming.
+            c = np.full(len(minimum_1_arborescences), -1, dtype=int)
+            a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int)
+            b_eq = np.zeros(len(G) + 1, dtype=int)
+            b_eq[len(G)] = 1
+            for arb_count, arborescence in enumerate(minimum_1_arborescences):
+                n_count = len(G) - 1
+                for n, deg in arborescence.degree:
+                    a_eq[n_count][arb_count] = deg - 2
+                    n_count -= 1
+                a_eq[len(G)][arb_count] = 1
+            program_result = optimize.linprog(c, A_eq=a_eq, b_eq=b_eq)
+            # If this linear program is feasible, then no direction of
+            # ascent exists
+            if program_result.success:
+                # There is no direction of ascent
+                return None, minimum_1_arborescences
+
+            # 5. GO TO 2
+
+    def find_epsilon(k, d):
+        """
+        Given the direction of ascent at pi, find the maximum distance we can
+        go in that direction.
+
+        Parameters
+        ----------
+        k : nx.DiGraph
+            The 1-arborescence with the minimum rate of increase in the
+            direction of ascent
+
+        d : dict
+            The direction of ascent
+
+        Returns
+        -------
+        float
+            The distance we can travel in direction `d`
+        """
+        min_epsilon = math.inf
+        for e_u, e_v, e_w in G.edges(data=weight):
+            if (e_u, e_v) in k.edges:
+                continue
+            # Now, I have found a condition which MUST be true for the edges to
+            # be a valid substitute. The edge in the graph which is the
+            # substitute is the one with the same terminated end. This can be
+            # checked rather simply.
+            #
+            # Find the edge within k which is the substitute. Because k is a
+            # 1-arborescence, we know that there is only one such edge
+            # leading into every vertex.
+            if len(k.in_edges(e_v, data=weight)) > 1:
+                raise Exception
+            sub_u, sub_v, sub_w = next(k.in_edges(e_v, data=weight).__iter__())
+            k.add_edge(e_u, e_v, **{weight: e_w})
+            k.remove_edge(sub_u, sub_v)
+            if (
+                max(d for n, d in k.in_degree()) <= 1
+                and len(G) == k.number_of_edges()
+                and nx.is_weakly_connected(k)
+            ):
+                # Ascent method calculation
+                if d[sub_u] == d[e_u] or sub_w == e_w:
+                    # Revert to the original graph
+                    k.remove_edge(e_u, e_v)
+                    k.add_edge(sub_u, sub_v, **{weight: sub_w})
+                    continue
+                epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u])
+                if 0 < epsilon < min_epsilon:
+                    min_epsilon = epsilon
+            # Revert to the original graph
+            k.remove_edge(e_u, e_v)
+            k.add_edge(sub_u, sub_v, **{weight: sub_w})
+
+        return min_epsilon
+
+    # I have to know that the elements in pi correspond to the correct elements
+    # in the direction of ascent, even if the node labels are not integers.
+    # Thus, I will use dictionaries to make that mapping.
+    pi_dict = {}
+    for n in G:
+        pi_dict[n] = 0
+    del n
+    original_edge_weights = {}
+    for u, v, d in G.edges(data=True):
+        original_edge_weights[(u, v)] = d[weight]
+    dir_ascent, k_d = direction_of_ascent()
+    while dir_ascent is not None:
+        max_distance = find_epsilon(k_d, dir_ascent)
+        for n, v in dir_ascent.items():
+            pi_dict[n] += max_distance * v
+        for u, v, d in G.edges(data=True):
+            d[weight] = original_edge_weights[(u, v)] + pi_dict[u]
+        dir_ascent, k_d = direction_of_ascent()
+    # k_d is no longer an individual 1-arborescence but rather a set of
+    # minimal 1-arborescences at the maximum point of the polytope and should
+    # be reflected as such
+    k_max = k_d
+
+    # Search for a cycle within k_max. If a cycle exists, return it as the
+    # solution
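+    # (In a 1-arborescence, every node having total degree 2 means the edges
+    # trace one closed tour through all of G, i.e. the relaxation is integral.)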
+    for k in k_max:
+        if len([n for n in k if k.degree(n) == 2]) == G.order():
+            # Tour found
+            return k.size(weight), k
+
+    # Write the original edge weights back to G and every member of k_max at
+    # the maximum point. Also average the number of times that edge appears in
+    # the set of minimal 1-arborescences.
+    x_star = {}
+    size_k_max = len(k_max)
+    for u, v, d in G.edges(data=True):
+        edge_count = 0
+        d[weight] = original_edge_weights[(u, v)]
+        for k in k_max:
+            if (u, v) in k.edges():
+                edge_count += 1
+                k[u][v][weight] = original_edge_weights[(u, v)]
+        x_star[(u, v)] = edge_count / size_k_max
+    # Now symmetrize the edges in x_star and scale them according to (5) in
+    # reference [1]
+    z_star = {}
+    scale_factor = (G.order() - 1) / G.order()
+    for u, v in x_star.keys():
+        frequency = x_star[(u, v)] + x_star[(v, u)]
+        if frequency > 0:
+            z_star[(u, v)] = scale_factor * frequency
+    del x_star
+    # Return the optimal weight and the z dict
+    return next(k_max.__iter__()).size(weight), z_star
+
+
+def spanning_tree_distribution(G, z):
+    """
+    Find the Asadpour exponential distribution of spanning trees.
+
+    Solves the Maximum Entropy Convex Program in the Asadpour algorithm [1]_
+    using the approach in section 7 to build an exponential distribution of
+    undirected spanning trees.
+
+    This algorithm ensures that the probability of any edge in a spanning
+    tree is proportional to the sum of the probabilities of the trees
+    containing that edge over the sum of the probabilities of all spanning
+    trees of the graph.
+
+    Parameters
+    ----------
+    G : nx.MultiGraph
+        The undirected support graph for the Held Karp relaxation
+
+    z : dict
+        The output of `held_karp_ascent()`, a scaled version of the Held-Karp
+        solution.
+
+    Returns
+    -------
+    gamma : dict
+        The probability distribution which approximately preserves the marginal
+        probabilities of `z`.
+    """
+    from math import exp
+    from math import log as ln
+
+    def q(e):
+        """
+        The value of q(e), as described in the Asadpour paper, is "the
+        probability that edge e will be included in a spanning tree T that is
+        chosen with probability proportional to exp(gamma(T))", which
+        basically means that it is the total probability of the edge appearing
+        across the whole distribution.
+
+        Parameters
+        ----------
+        e : tuple
+            The `(u, v)` tuple describing the edge we are interested in
+
+        Returns
+        -------
+        float
+            The probability that a spanning tree chosen according to the
+            current values of gamma will include edge `e`.
+        """
+        # Create the Laplacian matrices
+        for u, v, d in G.edges(data=True):
+            d[lambda_key] = exp(gamma[(u, v)])
+        G_Kirchhoff = nx.total_spanning_tree_weight(G, lambda_key)
+        G_e = nx.contracted_edge(G, e, self_loops=False)
+        G_e_Kirchhoff = nx.total_spanning_tree_weight(G_e, lambda_key)
+
+        # Multiply by the weight of the contracted edge since it is not included
+        # in the total weight of the contracted graph.
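+        # By the weighted matrix-tree theorem, this ratio of Kirchhoff values
+        # is the share of the total exp(gamma) spanning tree weight carried by
+        # the trees containing e, which is exactly q(e).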
+        return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff
+
+    # initialize gamma to the zero dict
+    gamma = {}
+    for u, v, _ in G.edges:
+        gamma[(u, v)] = 0
+
+    # set epsilon
+    EPSILON = 0.2
+
+    # pick an edge attribute name that is unlikely to be in the graph
+    lambda_key = "spanning_tree_distribution's secret attribute name for lambda"
+
+    while True:
+        # We need to check that no value of q_e is greater than
+        # (1 + epsilon) * z_e. However, changing one gamma value can increase
+        # the value of a different q_e, so we have to complete the for loop
+        # without changing anything for the condition to be met
+        in_range_count = 0
+        # Search for an edge with q_e > (1 + epsilon) * z_e
+        for u, v in gamma:
+            e = (u, v)
+            q_e = q(e)
+            z_e = z[e]
+            if q_e > (1 + EPSILON) * z_e:
+                delta = ln(
+                    (q_e * (1 - (1 + EPSILON / 2) * z_e))
+                    / ((1 - q_e) * (1 + EPSILON / 2) * z_e)
+                )
+                gamma[e] -= delta
+                # Check that delta had the desired effect
+                new_q_e = q(e)
+                desired_q_e = (1 + EPSILON / 2) * z_e
+                if round(new_q_e, 8) != round(desired_q_e, 8):
+                    raise nx.NetworkXError(
+                        f"Unable to modify probability for edge ({u}, {v})"
+                    )
+            else:
+                in_range_count += 1
+        # Check if the for loop terminated without changing any gamma
+        if in_range_count == len(gamma):
+            break
+
+    # Remove the new edge attributes
+    for _, _, d in G.edges(data=True):
+        if lambda_key in d:
+            del d[lambda_key]
+
+    return gamma
+
+
+def greedy_tsp(G, weight="weight", source=None):
+    """Return a low cost cycle starting at `source` and its cost.
+
+    This approximates a solution to the traveling salesman problem.
+    It finds a cycle over all the nodes that a salesman can visit
+    while minimizing the total distance.
+    It uses a simple greedy algorithm.
+    In essence, this function returns a large cycle given a source point
+    for which the total cost of the cycle is minimized.
+
+    Parameters
+    ----------
+    G : Graph
+        The Graph should be a complete weighted undirected graph.
+        The distance between all pairs of nodes should be included.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    source : node, optional (default: first node in list(G))
+        Starting node. If None, defaults to ``next(iter(G))``
+
+    Returns
+    -------
+    cycle : list of nodes
+        Returns the cycle (list of nodes) that a salesman
+        can follow to minimize total weight of the trip.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is not complete, the algorithm raises an exception.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from({
+    ...     ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3),
+    ...     ("B", "C", 12), ("B", "D", 16), ("C", "A", 13), ("C", "B", 12),
+    ...     ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2)
+    ... })
+    >>> cycle = approx.greedy_tsp(G, source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+
+    Notes
+    -----
+    This implementation of a greedy algorithm is based on the following:
+
+    - The algorithm adds a node to the solution at every iteration.
+    - The algorithm selects a node not already in the cycle whose connection
+      to the previous node adds the least cost to the cycle.
+
+    A greedy algorithm does not always give the best solution.
+ However, it can construct a first feasible solution which can + be passed as a parameter to an iterative improvement algorithm such + as Simulated Annealing, or Threshold Accepting. + + Time complexity: It has a running time $O(|V|^2)$ + """ + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if source is None: + source = nx.utils.arbitrary_element(G) + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + nodeset = set(G) + nodeset.remove(source) + cycle = [source] + next_node = source + while nodeset: + nbrdict = G[next_node] + next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1)) + cycle.append(next_node) + nodeset.remove(next_node) + cycle.append(cycle[0]) + return cycle + + +@py_random_state(9) +def simulated_annealing_tsp( + G, + init_cycle, + weight="weight", + source=None, + temp=100, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.01, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses simulated annealing to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, simulated + annealing perturbs that solution, occasionally accepting changes that make + the solution worse to escape from a locally optimal solution. The chance + of accepting such changes decreases over the iterations to encourage + an optimal result. In summary, the function returns a cycle starting + at `source` for which the total cost is minimized. It also returns the cost. + + The chance of accepting a proposed change is related to a parameter called + the temperature (annealing has a physical analogue of steel hardening + as it cools). As the temperature is reduced, the chance of moves that + increase cost goes down. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + init_cycle : list of all nodes or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + temp : int, optional (default=100) + The algorithm's temperature parameter. It represents the initial + value of temperature + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. 
+ The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.01) + Percentage of temperature decrease in each iteration + of outer loop + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.simulated_annealing_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.simulated_annealing_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Simulated Annealing is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. If not provided, it is + constructed by a simple greedy algorithm. At every iteration, the + algorithm selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of a + neighbor solution. + If $c(x') - c(x) <= 0$ then the neighbor solution becomes the current + solution for the next iteration. Otherwise, the algorithm accepts + the neighbor solution with probability $p = exp - ([c(x') - c(x)] / temp)$. + Otherwise the current solution is retained. + + `temp` is a parameter of the algorithm and represents temperature. + + Time complexity: + For $N_i$ iterations of the inner loop and $N_o$ iterations of the + outer loop, this algorithm has running time $O(N_i * N_o * |V|)$. + + For more information and how the algorithm is inspired see: + http://en.wikipedia.org/wiki/Simulated_annealing + """ + if move == "1-1": + move = swap_two_nodes + elif move == "1-0": + move = move_one_node + if init_cycle == "greedy": + # Construct an initial solution using a greedy algorithm. 
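+        # The greedy tour is only a feasible starting cycle; the annealing
+        # loop below is what improves it.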
+ cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. (return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations and temp > 0: + count += 1 + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= 0: + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + else: + # Accept even a worse solution with probability p. + p = math.exp(-delta / temp) + if p >= seed.random(): + cycle = adj_sol + cost = adj_cost + temp -= temp * alpha + + return best_cycle + + +@py_random_state(9) +def threshold_accepting_tsp( + G, + init_cycle, + weight="weight", + source=None, + threshold=1, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.1, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses threshold accepting methods to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, threshold + accepting methods perturb that solution, accepting any changes that make + the solution no worse than increasing by a threshold amount. Improvements + in cost are accepted, but so are changes leading to small increases in cost. + This allows the solution to leave suboptimal local minima in solution space. + The threshold is decreased slowly as iterations proceed helping to ensure + an optimum. In summary, the function returns a cycle starting at `source` + for which the total cost is minimized. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + init_cycle : list or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + threshold : int, optional (default=1) + The algorithm's threshold parameter. 
It represents the initial + threshold's value + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. + The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.1) + Percentage of threshold decrease when there is at + least one acceptance of a neighbor solution. + If no inner loop moves are accepted the threshold remains unchanged. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.threshold_accepting_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.threshold_accepting_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Threshold Accepting is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. This solution can be + constructed by a simple greedy algorithm. At every iteration, it + selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of + neighbor solution. 
+ If $c(x') - c(x) <= threshold$ then the neighbor solution becomes the current + solution for the next iteration, where the threshold is named threshold. + + In comparison to the Simulated Annealing algorithm, the Threshold + Accepting algorithm does not accept very low quality solutions + (due to the presence of the threshold value). In the case of + Simulated Annealing, even a very low quality solution can + be accepted with probability $p$. + + Time complexity: + It has a running time $O(m * n * |V|)$ where $m$ and $n$ are the number + of times the outer and inner loop run respectively. + + For more information and how algorithm is inspired see: + https://doi.org/10.1016/0021-9991(90)90201-B + + See Also + -------- + simulated_annealing_tsp + + """ + if move == "1-1": + move = swap_two_nodes + elif move == "1-0": + move = move_one_node + if init_cycle == "greedy": + # Construct an initial solution using a greedy algorithm. + cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. (return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle is not all and only nodes.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = list(G.neighbors(source))[0] + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations: + count += 1 + accepted = False + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= threshold: + accepted = True + + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + if accepted: + threshold -= threshold * alpha + + return best_cycle diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/treewidth.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/treewidth.py new file mode 100644 index 0000000..61fb1e7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/treewidth.py @@ -0,0 +1,249 @@ +"""Functions for computing treewidth decomposition. + +Treewidth of an undirected graph is a number associated with the graph. +It can be defined as the size of the largest vertex set (bag) in a tree +decomposition of the graph minus one. + +`Wikipedia: Treewidth `_ + +The notions of treewidth and tree decomposition have gained their +attractiveness partly because many graph and network problems that are +intractable (e.g., NP-hard) on arbitrary graphs become efficiently +solvable (e.g., with a linear time algorithm) when the treewidth of the +input graphs is bounded by a constant [1]_ [2]_. 
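+
+For illustration, a minimal usage sketch of the two decomposition functions
+described below (assuming both are re-exported via
+``networkx.algorithms.approximation``, as usual); each returns a
+``(treewidth_bound, tree_decomposition)`` pair:
+
+>>> import networkx as nx
+>>> from networkx.algorithms import approximation as approx
+>>> G = nx.cycle_graph(5)  # the treewidth of any cycle is 2
+>>> width, decomp = approx.treewidth_min_degree(G)
+>>> width
+2
+>>> approx.treewidth_min_fill_in(G)[0]
+2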
+ +There are two different functions for computing a tree decomposition: +:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`. + +.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth + computations I.Upper bounds". Inf. Comput. 208, 3 (March 2010),259-275. + http://dx.doi.org/10.1016/j.ic.2009.03.008 + +.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information + and Computing Sciences, Utrecht University. + Technical Report UU-CS-2005-018. + http://www.cs.uu.nl + +.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*. + https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf + +""" + +import itertools +import sys +from heapq import heapify, heappop, heappush + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def treewidth_min_degree(G): + """Returns a treewidth decomposition using the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree, i.e., first + the node with the lowest degree is chosen, then the graph is updated + and the corresponding node is removed. Next, a new node with the lowest + degree is chosen, and so on. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + deg_heuristic = MinDegreeHeuristic(G) + return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph)) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def treewidth_min_fill_in(G): + """Returns a treewidth decomposition using the Minimum Fill-in heuristic. + + The heuristic chooses a node from the graph, where the number of edges + added turning the neighbourhood of the chosen node into clique is as + small as possible. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + return treewidth_decomp(G, min_fill_in_heuristic) + + +class MinDegreeHeuristic: + """Implements the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree + (number of neighbours), i.e., first the node with the lowest degree is + chosen, then the graph is updated and the corresponding node is + removed. Next, a new node with the lowest degree is chosen, and so on. 
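+
+    For illustration, a minimal sketch of how this heuristic is driven by
+    :func:`treewidth_decomp` (mirroring :func:`treewidth_min_degree` above)::
+
+        deg_heuristic = MinDegreeHeuristic(G)
+        width, tree = treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph))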
+ """ + + def __init__(self, graph): + self._graph = graph + + # nodes that have to be updated in the heap before each iteration + self._update_nodes = [] + + self._degreeq = [] # a heapq with 2-tuples (degree,node) + + # build heap with initial degrees + for n in graph: + self._degreeq.append((len(graph[n]), n)) + heapify(self._degreeq) + + def best_node(self, graph): + # update nodes in self._update_nodes + for n in self._update_nodes: + # insert changed degrees into degreeq + heappush(self._degreeq, (len(graph[n]), n)) + + # get the next valid (minimum degree) node + while self._degreeq: + (min_degree, elim_node) = heappop(self._degreeq) + if elim_node not in graph or len(graph[elim_node]) != min_degree: + # outdated entry in degreeq + continue + elif min_degree == len(graph) - 1: + # fully connected: abort condition + return None + + # remember to update nodes in the heap before getting the next node + self._update_nodes = graph[elim_node] + return elim_node + + # the heap is empty: abort + return None + + +def min_fill_in_heuristic(graph): + """Implements the Minimum Degree heuristic. + + Returns the node from the graph, where the number of edges added when + turning the neighbourhood of the chosen node into clique is as small as + possible. This algorithm chooses the nodes using the Minimum Fill-In + heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses + additional constant memory.""" + + if len(graph) == 0: + return None + + min_fill_in_node = None + + min_fill_in = sys.maxsize + + # create sorted list of (degree, node) + degree_list = [(len(graph[node]), node) for node in graph] + degree_list.sort() + + # abort condition + min_degree = degree_list[0][0] + if min_degree == len(graph) - 1: + return None + + for (_, node) in degree_list: + num_fill_in = 0 + nbrs = graph[node] + for nbr in nbrs: + # count how many nodes in nbrs current nbr is not connected to + # subtract 1 for the node itself + num_fill_in += len(nbrs - graph[nbr]) - 1 + if num_fill_in >= 2 * min_fill_in: + break + + num_fill_in /= 2 # divide by 2 because of double counting + + if num_fill_in < min_fill_in: # update min-fill-in node + if num_fill_in == 0: + return node + min_fill_in = num_fill_in + min_fill_in_node = node + + return min_fill_in_node + + +def treewidth_decomp(G, heuristic=min_fill_in_heuristic): + """Returns a treewidth decomposition using the passed heuristic. + + Parameters + ---------- + G : NetworkX graph + heuristic : heuristic function + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. 
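+
+    For illustration, a minimal sketch using the default fill-in heuristic:
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms.approximation.treewidth import treewidth_decomp
+    >>> width, decomp = treewidth_decomp(nx.complete_graph(3))
+    >>> width
+    2
+    >>> sorted(len(bag) for bag in decomp)  # a single bag holding all 3 nodes
+    [3]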
+ """ + + # make dict-of-sets structure + graph = {n: set(G[n]) - {n} for n in G} + + # stack containing nodes and neighbors in the order from the heuristic + node_stack = [] + + # get first node from heuristic + elim_node = heuristic(graph) + while elim_node is not None: + # connect all neighbours with each other + nbrs = graph[elim_node] + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + # push node and its current neighbors on stack + node_stack.append((elim_node, nbrs)) + + # remove node from graph + for u in graph[elim_node]: + graph[u].remove(elim_node) + + del graph[elim_node] + elim_node = heuristic(graph) + + # the abort condition is met; put all remaining nodes into one bag + decomp = nx.Graph() + first_bag = frozenset(graph.keys()) + decomp.add_node(first_bag) + + treewidth = len(first_bag) - 1 + + while node_stack: + # get node and its neighbors from the stack + (curr_node, nbrs) = node_stack.pop() + + # find a bag all neighbors are in + old_bag = None + for bag in decomp.nodes: + if nbrs <= bag: + old_bag = bag + break + + if old_bag is None: + # no old_bag was found: just connect to the first_bag + old_bag = first_bag + + # create new node for decomposition + nbrs.add(curr_node) + new_bag = frozenset(nbrs) + + # update treewidth + treewidth = max(treewidth, len(new_bag) - 1) + + # add edge to decomposition (implicitly also adds the new node) + decomp.add_edge(old_bag, new_bag) + + return treewidth, decomp diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/vertex_cover.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/vertex_cover.py new file mode 100644 index 0000000..d117c73 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/approximation/vertex_cover.py @@ -0,0 +1,80 @@ +"""Functions for computing an approximate minimum weight vertex cover. + +A |vertex cover|_ is a subset of nodes such that each edge in the graph +is incident to at least one node in the subset. + +.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover +.. |vertex cover| replace:: *vertex cover* + +""" + +__all__ = ["min_weighted_vertex_cover"] + + +def min_weighted_vertex_cover(G, weight=None): + r"""Returns an approximate minimum weighted vertex cover. + + The set of nodes returned by this function is guaranteed to be a + vertex cover, and the total weight of the set is guaranteed to be at + most twice the total weight of the minimum weight vertex cover. In + other words, + + .. math:: + + w(S) \leq 2 * w(S^*), + + where $S$ is the vertex cover returned by this function, + $S^*$ is the vertex cover of minimum weight out of all vertex + covers of the graph, and $w$ is the function that computes the + sum of the weights of each node in that given set. + + Parameters + ---------- + G : NetworkX graph + + weight : string, optional (default = None) + If None, every node has weight 1. If a string, use this node + attribute as the node weight. A node without this attribute is + assumed to have weight 1. + + Returns + ------- + min_weighted_cover : set + Returns a set of nodes whose weight sum is no more than twice + the weight sum of the minimum weight vertex cover. + + Notes + ----- + For a directed graph, a vertex cover has the same definition: a set + of nodes such that each edge in the graph is incident to at least + one node in the set. Whether the node is the head or tail of the + directed edge is ignored. 
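+
+    For illustration, a minimal unweighted sketch (assuming the function is
+    re-exported via ``networkx.algorithms.approximation``, as usual); the
+    returned set covers every edge:
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.path_graph(4)
+    >>> cover = approx.min_weighted_vertex_cover(G)
+    >>> all(u in cover or v in cover for u, v in G.edges())
+    True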
+ + This is the local-ratio algorithm for computing an approximate + vertex cover. The algorithm greedily reduces the costs over edges, + iteratively building a cover. The worst-case runtime of this + implementation is $O(m \log n)$, where $n$ is the number + of nodes and $m$ the number of edges in the graph. + + References + ---------- + .. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for + approximating the weighted vertex cover problem." + *Annals of Discrete Mathematics*, 25, 27–46 + + + """ + cost = dict(G.nodes(data=weight, default=1)) + # While there are uncovered edges, choose an uncovered and update + # the cost of the remaining edges. + cover = set() + for u, v in G.edges(): + if u in cover or v in cover: + continue + if cost[u] <= cost[v]: + cover.add(u) + cost[v] -= cost[u] + else: + cover.add(v) + cost[u] -= cost[v] + return cover diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/__init__.py new file mode 100644 index 0000000..4d98886 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.assortativity.connectivity import * +from networkx.algorithms.assortativity.correlation import * +from networkx.algorithms.assortativity.mixing import * +from networkx.algorithms.assortativity.neighbor_degree import * +from networkx.algorithms.assortativity.pairs import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/connectivity.py new file mode 100644 index 0000000..ad05418 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/connectivity.py @@ -0,0 +1,139 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["average_degree_connectivity", "k_nearest_neighbors"] + + +def average_degree_connectivity( + G, source="in+out", target="in+out", nodes=None, weight=None +): + r"""Compute the average degree connectivity of graph. + + The average degree connectivity is the average nearest neighbor degree of + nodes with degree k. For weighted graphs, an analogous measure can + be computed using the weighted average neighbors degree defined in + [1]_, for a node `i`, as + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, + `w_{ij}` is the weight of the edge that links `i` and `j`, + and `N(i)` are the neighbors of node `i`. + + Parameters + ---------- + G : NetworkX graph + + source : "in"|"out"|"in+out" (default:"in+out") + Directed graphs only. Use "in"- or "out"-degree for source node. + + target : "in"|"out"|"in+out" (default:"in+out" + Directed graphs only. Use "in"- or "out"-degree for target node. + + nodes : list or iterable (optional) + Compute neighbor connectivity for these nodes. The default is all + nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d : dict + A dictionary keyed by degree k with the value of average connectivity. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', + 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. 
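+
+    For instance, in the weighted example below the two degree-2 nodes (1 and
+    2) each have weighted degree :math:`s_i = 1 + 3 = 4` and neighbor sum
+    :math:`\sum_{j} w_{ij} k_j = 1 \cdot 1 + 3 \cdot 2 = 7`, so the reported
+    value for degree ``k = 2`` is :math:`(7 + 7) / (4 + 4) = 1.75`.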
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[1, 2]["weight"] = 3 + >>> nx.average_degree_connectivity(G) + {1: 2.0, 2: 1.5} + >>> nx.average_degree_connectivity(G, weight="weight") + {1: 2.0, 2: 1.75} + + See Also + -------- + average_neighbor_degree + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + # First, determine the type of neighbors and the type of degree to use. + if G.is_directed(): + if source not in ("in", "out", "in+out"): + raise nx.NetworkXError('source must be one of "in", "out", or "in+out"') + if target not in ("in", "out", "in+out"): + raise nx.NetworkXError('target must be one of "in", "out", or "in+out"') + direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree} + neighbor_funcs = { + "out": G.successors, + "in": G.predecessors, + "in+out": G.neighbors, + } + source_degree = direction[source] + target_degree = direction[target] + neighbors = neighbor_funcs[source] + # `reverse` indicates whether to look at the in-edge when + # computing the weight of an edge. + reverse = source == "in" + else: + if source != "in+out" or target != "in+out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = G.degree + target_degree = G.degree + neighbors = G.neighbors + reverse = False + dsum = defaultdict(int) + dnorm = defaultdict(int) + # Check if `source_nodes` is actually a single node in the graph. + source_nodes = source_degree(nodes) + if nodes in G: + source_nodes = [(nodes, source_degree(nodes))] + for n, k in source_nodes: + nbrdeg = target_degree(neighbors(n)) + if weight is None: + s = sum(d for n, d in nbrdeg) + else: # weight nbr degree by weight of (n,nbr) edge + if reverse: + s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg) + else: + s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg) + dnorm[k] += source_degree(n, weight=weight) + dsum[k] += s + + # normalize + return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()} + + +def k_nearest_neighbors(G, source="in+out", target="in+out", nodes=None, weight=None): + """Compute the average degree connectivity of graph. + + .. deprecated 2.6 + + k_nearest_neighbors function is deprecated and will be removed in v3.0. + Use `average_degree_connectivity` instead. + """ + import warnings + + msg = ( + "k_nearest_neighbors function is deprecated and will be removed in v3.0.\n" + "Use `average_degree_connectivity` instead." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return average_degree_connectivity(G, source, target, nodes, weight) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/correlation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/correlation.py new file mode 100644 index 0000000..19ea04e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/correlation.py @@ -0,0 +1,298 @@ +"""Node assortativity coefficients and correlation measures. 
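+
+For illustration, a minimal sketch of one coefficient defined in this module
+(assuming the usual top-level re-export); the value matches the per-function
+examples below:
+
+>>> import networkx as nx
+>>> G = nx.path_graph(4)
+>>> round(nx.degree_assortativity_coefficient(G), 1)
+-0.5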
+""" +from networkx.algorithms.assortativity.mixing import ( + attribute_mixing_matrix, + degree_mixing_matrix, +) +from networkx.algorithms.assortativity.pairs import node_degree_xy + +__all__ = [ + "degree_pearson_correlation_coefficient", + "degree_assortativity_coefficient", + "attribute_assortativity_coefficient", + "numeric_assortativity_coefficient", +] + + +def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute degree assortativity only for nodes in container. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_assortativity_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + See Also + -------- + attribute_assortativity_coefficient + numeric_assortativity_coefficient + degree_mixing_dict + degree_mixing_matrix + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , where e is the joint + probability distribution (mixing matrix) of the degrees. If G is + directed than the matrix e is the joint probability of the + user-specified degree type for the source and target. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + if nodes is None: + nodes = G.nodes + + degrees = None + + if G.is_directed(): + indeg = ( + {d for _, d in G.in_degree(nodes, weight=weight)} + if "in" in (x, y) + else set() + ) + outdeg = ( + {d for _, d in G.out_degree(nodes, weight=weight)} + if "out" in (x, y) + else set() + ) + degrees = set.union(indeg, outdeg) + else: + degrees = {d for _, d in G.degree(nodes, weight=weight)} + + mapping = {d: i for i, d, in enumerate(degrees)} + M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping) + + return _numeric_ac(M, mapping=mapping) + + +def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + This is the same as degree_assortativity_coefficient but uses the + potentially faster scipy.stats.pearsonr function. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute pearson correlation of degrees only for specified nodes. 
+ The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_pearson_correlation_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + Notes + ----- + This calls scipy.stats.pearsonr. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + import scipy as sp + import scipy.stats # call as sp.stats + + xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + x, y = zip(*xy) + return sp.stats.pearsonr(x, y)[0] + + +def attribute_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key + + nodes: list or iterable (optional) + Compute attribute assortativity for nodes in container. + The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.attribute_assortativity_coefficient(G, "color")) + 1.0 + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)), + where M is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + M = attribute_mixing_matrix(G, attribute, nodes) + return attribute_ac(M) + + +def numeric_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for numerical node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given numeric attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Compute numeric assortativity only for attributes of nodes in + container. The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], size=2) + >>> G.add_nodes_from([2, 3], size=3) + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.numeric_assortativity_coefficient(G, "size")) + 1.0 + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation + coefficient of the specified (scalar valued) attribute across edges. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + """ + if nodes is None: + nodes = G.nodes + vals = {G.nodes[n][attribute] for n in nodes} + mapping = {d: i for i, d, in enumerate(vals)} + M = attribute_mixing_matrix(G, attribute, nodes, mapping) + return _numeric_ac(M, mapping) + + +def attribute_ac(M): + """Compute assortativity for attribute matrix M. + + Parameters + ---------- + M : numpy.ndarray + 2D ndarray representing the attribute mixing matrix. + + Notes + ----- + This computes Eq. (2) in Ref. 
[1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)), + where e is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + if M.sum() != 1.0: + M = M / M.sum() + s = (M @ M).sum() + t = M.trace() + r = (t - s) / (1 - s) + return r + + +def _numeric_ac(M, mapping): + # M is a 2D numpy array + # numeric assortativity coefficient, pearsonr + import numpy as np + + if M.sum() != 1.0: + M = M / M.sum() + x = np.array(list(mapping.keys())) + y = x # x and y have the same support + idx = list(mapping.values()) + a = M.sum(axis=0) + b = M.sum(axis=1) + vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2 + xy = np.outer(x, y) + ab = np.outer(a[idx], b[idx]) + return (xy * (M - ab)).sum() / np.sqrt(vara * varb) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/mixing.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/mixing.py new file mode 100644 index 0000000..f457d75 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/mixing.py @@ -0,0 +1,298 @@ +""" +Mixing matrices for node attributes and degree. +""" +from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy +from networkx.utils import dict_to_numpy_array + +__all__ = [ + "attribute_mixing_matrix", + "attribute_mixing_dict", + "degree_mixing_matrix", + "degree_mixing_dict", + "numeric_mixing_matrix", + "mixing_dict", +] + + +def attribute_mixing_dict(G, attribute, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Unse nodes in container to build the dict. The default is all nodes. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edge(1, 3) + >>> d = nx.attribute_mixing_dict(G, "color") + >>> print(d["red"]["blue"]) + 1 + >>> print(d["blue"]["red"]) # d symmetric for undirected graphs + 1 + + Returns + ------- + d : dictionary + Counts or joint probability of occurrence of attribute pairs. + """ + xy_iter = node_attribute_xy(G, attribute, nodes) + return mixing_dict(xy_iter, normalized=normalized) + + +def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Use only nodes in container to build the matrix. The default is + all nodes. + + mapping : dictionary, optional + Mapping from node attribute to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + Returns + ------- + m: numpy array + Counts or joint probability of occurrence of attribute pairs. + + Notes + ----- + If each node has a unique attribute value, the unnormalized mixing matrix + will be equal to the adjacency matrix. To get a denser mixing matrix, + the rounding can be performed to form groups of nodes with equal values. 
+ For example, the exact height of persons in cm (180.79155222, 163.9080892, + 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163, + 163, 168, 168, ...). + + Definitions of attribute mixing matrix vary on whether the matrix + should include rows for attribute values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> gender = {0: 'male', 1: 'female', 2: 'female'} + >>> nx.set_node_attributes(G, gender, 'gender') + >>> mapping = {'male': 0, 'female': 1} + >>> mix_mat = nx.attribute_mixing_matrix(G, 'gender', mapping=mapping) + >>> # mixing from male nodes to female nodes + >>> mix_mat[mapping['male'], mapping['female']] + 0.25 + """ + d = attribute_mixing_dict(G, attribute, nodes) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for degree. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or joint probability of occurrence of degree pairs. + """ + xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + return mixing_dict(xy_iter, normalized=normalized) + + +def degree_mixing_matrix( + G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None +): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + nodes: list or iterable (optional) + Build the matrix using only nodes in container. + The default is all nodes. + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + mapping : dictionary, optional + Mapping from node degree to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + Returns + ------- + m: numpy array + Counts, or joint probability, of occurrence of node degree. + + Notes + ----- + Definitions of degree mixing matrix vary on whether the matrix + should include rows for degree values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. See examples. 
+ + Examples + -------- + >>> G = nx.star_graph(3) + >>> mix_mat = nx.degree_mixing_matrix(G) + >>> mix_mat[0, 1] # mixing from node degree 1 to node degree 3 + 0.5 + + If you want every possible degree to appear as a row, even if no nodes + have that degree, use `mapping` as follows, + + >>> max_degree = max(deg for n, deg in G.degree) + >>> mapping = {x: x for x in range(max_degree + 1)} # identity mapping + >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping) + >>> mix_mat[3, 1] # mixing from node degree 3 to node degree 1 + 0.5 + """ + d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +def numeric_mixing_matrix(G, attribute, nodes=None, normalized=True, mapping=None): + """Returns numeric mixing matrix for attribute. + + .. deprecated:: 2.6 + + numeric_mixing_matrix is deprecated and will be removed in 3.0. + Use `attribute_mixing_matrix` instead. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Build the matrix only with nodes in container. The default is all nodes. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + mapping : dictionary, optional + Mapping from node attribute to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + Notes + ----- + If each node has a unique attribute value, the unnormalized mixing matrix + will be equal to the adjacency matrix. To get a denser mixing matrix, + the rounding can be performed to form groups of nodes with equal values. + For example, the exact height of persons in cm (180.79155222, 163.9080892, + 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163, + 163, 168, 168, ...). + + Returns + ------- + m: numpy array + Counts, or joint, probability of occurrence of node attribute pairs. + """ + import warnings + + msg = ( + "numeric_mixing_matrix is deprecated and will be removed in v3.0.\n" + "Use `attribute_mixing_matrix` instead." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return attribute_mixing_matrix( + G, attribute, nodes=nodes, normalized=normalized, mapping=mapping + ) + + +def mixing_dict(xy, normalized=False): + """Returns a dictionary representation of mixing matrix. + + Parameters + ---------- + xy : list or container of two-tuples + Pairs of (x,y) items. + + attribute : string + Node attribute key + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or Joint probability of occurrence of values in xy. + """ + d = {} + psum = 0.0 + for x, y in xy: + if x not in d: + d[x] = {} + if y not in d: + d[y] = {} + v = d[x].get(y, 0) + d[x][y] = v + 1 + psum += 1 + + if normalized: + for _, jdict in d.items(): + for j in jdict: + jdict[j] /= psum + return d diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000..74a39b1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,159 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. 
+ + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). 
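+
+    For instance, spelling out the weighted formula above for node 1 of the
+    weighted example: its neighbors 0 and 2 have degrees 1 and 2, the edge
+    (0, 1) has weight 5, and the edge (1, 2) carries no ``weight`` attribute
+    and so counts as 1, giving
+
+    .. math::
+
+        k_{nn,1}^{w} = \frac{5 \cdot 1 + 1 \cdot 2}{5 + 1} = \frac{7}{6} \approx 1.167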
+ """ + if G.is_directed(): + if source == "in": + source_degree = G.in_degree + elif source == "out": + source_degree = G.out_degree + elif source == "in+out": + source_degree = G.degree + else: + raise nx.NetworkXError( + f"source argument {source} must be 'in', 'out' or 'in+out'" + ) + + if target == "in": + target_degree = G.in_degree + elif target == "out": + target_degree = G.out_degree + elif target == "in+out": + target_degree = G.degree + else: + raise nx.NetworkXError( + f"target argument {target} must be 'in', 'out' or 'in+out'" + ) + else: + if source != "out" or target != "out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = target_degree = G.degree + + # precompute target degrees -- should *not* be weighted degree + t_deg = dict(target_degree()) + + # Set up both predecessor and successor neighbor dicts leaving empty if not needed + G_P = G_S = {n: {} for n in G} + if G.is_directed(): + # "in" or "in+out" cases: G_P contains predecessors + if "in" in source: + G_P = G.pred + # "out" or "in+out" cases: G_S contains successors + if "out" in source: + G_S = G.succ + else: + # undirected leave G_P empty but G_S is the adjacency + G_S = G.adj + + # Main loop: Compute average degree of neighbors + avg = {} + for n, deg in source_degree(nodes, weight=weight): + # handle degree zero average + if deg == 0: + avg[n] = 0.0 + continue + + # we sum over both G_P and G_S, but one of the two is usually empty. + if weight is None: + avg[n] = ( + sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n]) + ) / deg + else: + avg[n] = ( + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items()) + + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items()) + ) / deg + return avg diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/pairs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/pairs.py new file mode 100644 index 0000000..4c0be7a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/pairs.py @@ -0,0 +1,114 @@ +"""Generators of x-y pairs of node data.""" +__all__ = ["node_attribute_xy", "node_degree_xy"] + + +def node_attribute_xy(G, attribute, nodes=None): + """Returns iterator of node-attribute pairs for all edges in G. + + Parameters + ---------- + G: NetworkX graph + + attribute: key + The node attribute key. + + nodes: list or iterable (optional) + Use only edges that are incident to specified nodes. + The default is all nodes. + + Returns + ------- + (x, y): 2-tuple + Generates 2-tuple of (attribute, attribute) values. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_node(1, color="red") + >>> G.add_node(2, color="blue") + >>> G.add_edge(1, 2) + >>> list(nx.node_attribute_xy(G, "color")) + [('red', 'blue')] + + Notes + ----- + For undirected graphs each edge is produced twice, once for each edge + representation (u, v) and (v, u), with the exception of self-loop edges + which only appear once. 
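+
+    For illustration of the undirected behaviour described above, each edge is
+    reported once per direction:
+
+    >>> H = nx.Graph([(1, 2)])
+    >>> nx.set_node_attributes(H, {1: "red", 2: "blue"}, "color")
+    >>> sorted(nx.node_attribute_xy(H, "color"))
+    [('blue', 'red'), ('red', 'blue')]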
+ """ + if nodes is None: + nodes = set(G) + else: + nodes = set(nodes) + Gnodes = G.nodes + for u, nbrsdict in G.adjacency(): + if u not in nodes: + continue + uattr = Gnodes[u].get(attribute, None) + if G.is_multigraph(): + for v, keys in nbrsdict.items(): + vattr = Gnodes[v].get(attribute, None) + for _ in keys: + yield (uattr, vattr) + else: + for v in nbrsdict: + vattr = Gnodes[v].get(attribute, None) + yield (uattr, vattr) + + +def node_degree_xy(G, x="out", y="in", weight=None, nodes=None): + """Generate node degree-degree pairs for edges in G. + + Parameters + ---------- + G: NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Use only edges that are adjacency to specified nodes. + The default is all nodes. + + Returns + ------- + (x, y): 2-tuple + Generates 2-tuple of (degree, degree) values. + + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> list(nx.node_degree_xy(G, x="out", y="in")) + [(1, 1)] + >>> list(nx.node_degree_xy(G, x="in", y="out")) + [(0, 0)] + + Notes + ----- + For undirected graphs each edge is produced twice, once for each edge + representation (u, v) and (v, u), with the exception of self-loop edges + which only appear once. + """ + nodes = set(G) if nodes is None else set(nodes) + if G.is_directed(): + direction = {"out": G.out_degree, "in": G.in_degree} + xdeg = direction[x] + ydeg = direction[y] + else: + xdeg = ydeg = G.degree + + for u, degu in xdeg(nodes, weight=weight): + # use G.edges to treat multigraphs correctly + neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) + for _, degv in ydeg(neighbors, weight=weight): + yield degu, degv diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/base_test.py new file mode 100644 index 0000000..73bb32d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/base_test.py @@ -0,0 +1,85 @@ +import networkx as nx + + +class BaseTestAttributeMixing: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_nodes_from([0, 1], fish="one") + G.add_nodes_from([2, 3], fish="two") + G.add_nodes_from([4], fish="red") + G.add_nodes_from([5], fish="blue") + G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.G = G + + D = nx.DiGraph() + D.add_nodes_from([0, 1], fish="one") + D.add_nodes_from([2, 3], fish="two") + D.add_nodes_from([4], fish="red") + D.add_nodes_from([5], fish="blue") + D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.D = D + + M = nx.MultiGraph() + M.add_nodes_from([0, 1], fish="one") + M.add_nodes_from([2, 3], fish="two") + M.add_nodes_from([4], fish="red") + M.add_nodes_from([5], fish="blue") + M.add_edges_from([(0, 1), (0, 1), (2, 3)]) + cls.M = M + + S = nx.Graph() + S.add_nodes_from([0, 1], fish="one") + S.add_nodes_from([2, 3], fish="two") + S.add_nodes_from([4], fish="red") + 
S.add_nodes_from([5], fish="blue") + S.add_edge(0, 0) + S.add_edge(2, 2) + cls.S = S + + +class BaseTestDegreeMixing: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.D = nx.DiGraph() + cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)]) + cls.D2 = nx.DiGraph() + cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)]) + cls.M = nx.MultiGraph() + nx.add_path(cls.M, range(4)) + cls.M.add_edge(0, 1) + cls.S = nx.Graph() + cls.S.add_edges_from([(0, 0), (1, 1)]) + cls.W = nx.Graph() + cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + cls.W.add_edge(0, 2, weight=1) + S1 = nx.star_graph(4) + S2 = nx.star_graph(4) + cls.DS = nx.disjoint_union(S1, S2) + cls.DS.add_edge(4, 5) + + +class BaseTestNumericMixing: + @classmethod + def setup_class(cls): + N = nx.Graph() + N.add_nodes_from([0, 1], margin=-2) + N.add_nodes_from([2, 3], margin=-2) + N.add_nodes_from([4], margin=-3) + N.add_nodes_from([5], margin=-4) + N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.N = N + + F = nx.Graph() + F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + F.add_edge(0, 2, weight=1) + nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin") + cls.F = F + + M = nx.Graph() + M.add_nodes_from([1, 2], margin=-1) + M.add_nodes_from([3], margin=1) + M.add_nodes_from([4], margin=2) + M.add_edges_from([(3, 4), (1, 2), (1, 3)]) + cls.M = M diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py new file mode 100644 index 0000000..c8fae23 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -0,0 +1,145 @@ +from itertools import permutations + +import pytest + +import networkx as nx + + +class TestNeighborConnectivity: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.5} + nd = nx.average_degree_connectivity(D) + assert nd == answer + + answer = {1: 2.0, 2: 1.5} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="weight") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="weight") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, weight="weight", source="in", target="in" + ) + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, source="in", target="out", weight="weight" + ) + assert nd == answer + + def test_weight_keyword(self): + G = nx.path_graph(4) + G[1][2]["other"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="other") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G, weight=None) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="other") + assert nd == 
answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_degree_connectivity(G)[5] + assert nd == 1.8 + nd = nx.average_degree_connectivity(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + nd = nx.k_nearest_neighbors(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_zero_deg(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(1, 4) + c = nx.average_degree_connectivity(G) + assert c == {1: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="in", target="in") + assert c == {0: 0, 1: 0} + c = nx.average_degree_connectivity(G, source="in", target="out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="in", target="in+out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="out", target="out") + assert c == {0: 0, 3: 0} + c = nx.average_degree_connectivity(G, source="out", target="in") + assert c == {0: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="out", target="in+out") + assert c == {0: 0, 3: 1} + + def test_in_out_weight(self): + G = nx.DiGraph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(3, 1, weight=1) + for s, t in permutations(["in", "out", "in+out"], 2): + c = nx.average_degree_connectivity(G, source=s, target=t) + cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight") + assert c == cw + + def test_invalid_source(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, source="bogus") + + def test_invalid_target(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, target="bogus") + + def test_invalid_undirected_graph(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, target="bogus") + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, source="bogus") + + def test_single_node(self): + # TODO Is this really the intended behavior for providing a + # single node as the argument `nodes`? Shouldn't the function + # just return the connectivity value itself? 
+ G = nx.trivial_graph() + conn = nx.average_degree_connectivity(G, nodes=0) + assert conn == {0: 0} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py new file mode 100644 index 0000000..ffba703 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py @@ -0,0 +1,114 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx +from networkx.algorithms.assortativity.correlation import attribute_ac + +from .base_test import ( + BaseTestAttributeMixing, + BaseTestDegreeMixing, + BaseTestNumericMixing, +) + + +class TestDegreeMixingCorrelation(BaseTestDegreeMixing): + def test_degree_assortativity_undirected(self): + r = nx.degree_assortativity_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_assortativity_directed(self): + r = nx.degree_assortativity_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_assortativity_directed2(self): + """Test degree assortativity for a directed graph where the set of + in/out degree does not equal the total degree.""" + r = nx.degree_assortativity_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_assortativity_multigraph(self): + r = nx.degree_assortativity_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_pearson_assortativity_undirected(self): + r = nx.degree_pearson_correlation_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_pearson_assortativity_directed(self): + r = nx.degree_pearson_correlation_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_pearson_assortativity_directed2(self): + """Test degree assortativity with Pearson for a directed graph where + the set of in/out degree does not equal the total degree.""" + r = nx.degree_pearson_correlation_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_pearson_assortativity_multigraph(self): + r = nx.degree_pearson_correlation_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_assortativity_weighted(self): + r = nx.degree_assortativity_coefficient(self.W, weight="weight") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_degree_assortativity_double_star(self): + r = nx.degree_assortativity_coefficient(self.DS) + np.testing.assert_almost_equal(r, -0.9339, decimal=4) + + +class TestAttributeMixingCorrelation(BaseTestAttributeMixing): + def test_attribute_assortativity_undirected(self): + r = nx.attribute_assortativity_coefficient(self.G, "fish") + assert r == 6.0 / 22.0 + + def test_attribute_assortativity_directed(self): + r = nx.attribute_assortativity_coefficient(self.D, "fish") + assert r == 1.0 / 3.0 + + def test_attribute_assortativity_multigraph(self): + r = nx.attribute_assortativity_coefficient(self.M, "fish") + assert r == 1.0 + + def test_attribute_assortativity_coefficient(self): + # from "Mixing patterns in networks" + # fmt: off + a = np.array([[0.258, 0.016, 0.035, 0.013], + [0.012, 0.157, 0.058, 0.019], + [0.013, 0.023, 0.306, 0.035], + [0.005, 0.007, 0.024, 0.016]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.623, 
decimal=3) + + def test_attribute_assortativity_coefficient2(self): + # fmt: off + a = np.array([[0.18, 0.02, 0.01, 0.03], + [0.02, 0.20, 0.03, 0.02], + [0.01, 0.03, 0.16, 0.01], + [0.03, 0.02, 0.01, 0.22]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.68, decimal=2) + + def test_attribute_assortativity(self): + a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]]) + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.029, decimal=3) + + +class TestNumericMixingCorrelation(BaseTestNumericMixing): + def test_numeric_assortativity_negative(self): + r = nx.numeric_assortativity_coefficient(self.N, "margin") + np.testing.assert_almost_equal(r, -0.2903, decimal=4) + + def test_numeric_assortativity_float(self): + r = nx.numeric_assortativity_coefficient(self.F, "margin") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_numeric_assortativity_mixed(self): + r = nx.numeric_assortativity_coefficient(self.M, "margin") + np.testing.assert_almost_equal(r, 0.4340, decimal=4) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py new file mode 100644 index 0000000..cb4ae07 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py @@ -0,0 +1,182 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +from .base_test import ( + BaseTestAttributeMixing, + BaseTestDegreeMixing, + BaseTestNumericMixing, +) + + +class TestDegreeMixingDict(BaseTestDegreeMixing): + def test_degree_mixing_dict_undirected(self): + d = nx.degree_mixing_dict(self.P4) + d_result = {1: {2: 2}, 2: {1: 2, 2: 2}} + assert d == d_result + + def test_degree_mixing_dict_undirected_normalized(self): + d = nx.degree_mixing_dict(self.P4, normalized=True) + d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}} + assert d == d_result + + def test_degree_mixing_dict_directed(self): + d = nx.degree_mixing_dict(self.D) + print(d) + d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}} + assert d == d_result + + def test_degree_mixing_dict_multigraph(self): + d = nx.degree_mixing_dict(self.M) + d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}} + assert d == d_result + + def test_degree_mixing_dict_weighted(self): + d = nx.degree_mixing_dict(self.W, weight="weight") + d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}} + assert d == d_result + + +class TestDegreeMixingMatrix(BaseTestDegreeMixing): + def test_degree_mixing_matrix_undirected(self): + # fmt: off + a_result = np.array([[0, 2], + [2, 2]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.P4, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.P4) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_directed(self): + # fmt: off + a_result = np.array([[0, 0, 2], + [1, 0, 1], + [0, 0, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.D, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.D) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_multigraph(self): + # fmt: off + a_result = np.array([[0, 1, 0], + [1, 0, 3], + [0, 3, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.M, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.M) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def 
test_degree_mixing_matrix_selfloop(self): + # fmt: off + a_result = np.array([[2]]) + # fmt: on + a = nx.degree_mixing_matrix(self.S, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.S) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_weighted(self): + a_result = np.array([[0.0, 1.0], [1.0, 6.0]]) + a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.W, weight="weight") + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_degree_mixing_matrix_mapping(self): + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + mapping = {0.5: 1, 1.5: 0} + a = nx.degree_mixing_matrix( + self.W, weight="weight", normalized=False, mapping=mapping + ) + np.testing.assert_equal(a, a_result) + + +class TestAttributeMixingDict(BaseTestAttributeMixing): + def test_attribute_mixing_dict_undirected(self): + d = nx.attribute_mixing_dict(self.G, "fish") + d_result = { + "one": {"one": 2, "red": 1}, + "two": {"two": 2, "blue": 1}, + "red": {"one": 1}, + "blue": {"two": 1}, + } + assert d == d_result + + def test_attribute_mixing_dict_directed(self): + d = nx.attribute_mixing_dict(self.D, "fish") + d_result = { + "one": {"one": 1, "red": 1}, + "two": {"two": 1, "blue": 1}, + "red": {}, + "blue": {}, + } + assert d == d_result + + def test_attribute_mixing_dict_multigraph(self): + d = nx.attribute_mixing_dict(self.M, "fish") + d_result = {"one": {"one": 4}, "two": {"two": 2}} + assert d == d_result + + +class TestAttributeMixingMatrix(BaseTestAttributeMixing): + def test_attribute_mixing_matrix_undirected(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.G, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_directed(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.D, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_multigraph(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.M, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + +class TestNumericMixingMatrix(BaseTestNumericMixing): + def test_numeric_mixing_matrix_negative(self): + mapping = {-2: 0, -3: 1, -4: 2} + a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) + a = nx.numeric_mixing_matrix( + self.N, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.numeric_mixing_matrix(self.N, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_numeric_mixing_matrix_float(self): + mapping = {0.5: 1, 1.5: 0} + a_result = np.array([[6.0, 1.0], 
[1.0, 0.0]]) + a = nx.numeric_mixing_matrix( + self.F, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.numeric_mixing_matrix(self.F, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py new file mode 100644 index 0000000..27abbb0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py @@ -0,0 +1,104 @@ +import pytest + +import networkx as nx + + +class TestAverageNeighbor: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D) + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "out") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2} + nd = nx.average_neighbor_degree(G, weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + print(D.edges(data=True)) + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "out", "out", weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in", weight="weight") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "in", "out", weight="weight") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in", weight="weight") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight") + assert nd == answer + + def test_degree_k4(self): + G = nx.complete_graph(4) + answer = {0: 3, 1: 3, 2: 3, 3: 3} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in") + assert nd == answer + + def test_degree_k4_nodes(self): + G = nx.complete_graph(4) + answer = {1: 3.0, 2: 3.0} + nd = nx.average_neighbor_degree(G, nodes=[1, 2]) + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_neighbor_degree(G)[5] + assert nd == 1.8 + nd = nx.average_neighbor_degree(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_error_invalid_source_target(self): + G = nx.path_graph(4) + with 
pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") + G = G.to_directed() + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py new file mode 100644 index 0000000..3984292 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py @@ -0,0 +1,87 @@ +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestAttributeMixingXY(BaseTestAttributeMixing): + def test_node_attribute_xy_undirected(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish")) + attrxy_result = sorted( + [ + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ("one", "red"), + ("red", "one"), + ("blue", "two"), + ("two", "blue"), + ] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_undirected_nodes(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"])) + attrxy_result = sorted([]) + assert attrxy == attrxy_result + + def test_node_attribute_xy_directed(self): + attrxy = sorted(nx.node_attribute_xy(self.D, "fish")) + attrxy_result = sorted( + [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_multigraph(self): + attrxy = sorted(nx.node_attribute_xy(self.M, "fish")) + attrxy_result = [ + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ] + assert attrxy == attrxy_result + + def test_node_attribute_xy_selfloop(self): + attrxy = sorted(nx.node_attribute_xy(self.S, "fish")) + attrxy_result = [("one", "one"), ("two", "two")] + assert attrxy == attrxy_result + + +class TestDegreeMixingXY(BaseTestDegreeMixing): + def test_node_degree_xy_undirected(self): + xy = sorted(nx.node_degree_xy(self.P4)) + xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_undirected_nodes(self): + xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1])) + xy_result = sorted([(1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_directed(self): + xy = sorted(nx.node_degree_xy(self.D)) + xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)]) + assert xy == xy_result + + def test_node_degree_xy_multigraph(self): + xy = sorted(nx.node_degree_xy(self.M)) + xy_result = sorted( + [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)] + ) + assert xy == xy_result + + def test_node_degree_xy_selfloop(self): + xy = sorted(nx.node_degree_xy(self.S)) + xy_result = sorted([(2, 2), (2, 2)]) + assert xy == xy_result + + def test_node_degree_xy_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=7) + G.add_edge(2, 3, weight=10) + xy = sorted(nx.node_degree_xy(G, weight="weight")) + xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)]) + assert xy == xy_result diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/asteroidal.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/asteroidal.py new file mode 100644 index 0000000..8dfa933 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/asteroidal.py @@ -0,0 
+1,167 @@ +""" +Algorithms for asteroidal triples and asteroidal numbers in graphs. + +An asteroidal triple in a graph G is a set of three non-adjacent vertices +u, v and w such that there exists a path between any two of them that avoids +the closed neighborhood of the third. More formally, v_j and v_k belong to the same +connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood +of v_i. A graph which does not contain any asteroidal triples is called +an AT-free graph. The class of AT-free graphs is a graph class for which +many NP-complete problems are solvable in polynomial time. Among them are +independent set and coloring. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["is_at_free", "find_asteroidal_triple"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def find_asteroidal_triple(G): + r"""Find an asteroidal triple in the given graph. + + An asteroidal triple is a triple of non-adjacent vertices such that + there exists a path between any two of them which avoids the closed + neighborhood of the third. The function checks all independent triples + of vertices, testing whether each is an asteroidal triple. This is done + with the help of a data structure called a component structure. + A component structure encodes information about which vertices belong to + the same connected component when the closed neighborhood of a given vertex + is removed from the graph. The algorithm used to check is the trivial + one, outlined in [1]_, which has a runtime of + :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the + creation of the component structure. + + Parameters + ---------- + G : NetworkX Graph + The graph in which to search for an asteroidal triple. + + Returns + ------- + list or None + An asteroidal triple is returned as a list of nodes. If no asteroidal + triple exists, i.e. the graph is AT-free, then None is returned. + + Notes + ----- + The component structure and the algorithm are described in [1]_. The current + implementation uses the trivial algorithm for simple graphs. + + References + ---------- + .. [1] Ekkehard Köhler, + "Recognizing Graphs without asteroidal triples", + Journal of Discrete Algorithms 2, pages 439-452, 2004. + https://www.sciencedirect.com/science/article/pii/S157086670400019X + """ + V = set(G.nodes) + + if len(V) < 6: + # An asteroidal triple cannot exist in a graph with fewer than six vertices. + return None + + component_structure = create_component_structure(G) + E_complement = set(nx.complement(G).edges) + + for e in E_complement: + u = e[0] + v = e[1] + u_neighborhood = set(G[u]).union([u]) + v_neighborhood = set(G[v]).union([v]) + union_of_neighborhoods = u_neighborhood.union(v_neighborhood) + for w in V - union_of_neighborhoods: + # Check for each pair of vertices whether they belong to the + # same connected component when the closed neighborhood of the + # third is removed.
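+            # A triple (u, v, w) qualifies exactly when, for each vertex of + # the triple, the other two fall into the same component once that + # vertex's closed neighborhood is deleted; the three equality checks + # below test precisely this against the precomputed structure.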
+ if ( + component_structure[u][v] == component_structure[u][w] + and component_structure[v][u] == component_structure[v][w] + and component_structure[w][u] == component_structure[w][v] + ): + return [u, v, w] + return None + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def is_at_free(G): + """Check if a graph is AT-free. + + This function uses `find_asteroidal_triple` to recognize + an AT-free graph. If no asteroidal triple is found the graph is + AT-free and True is returned. If at least one asteroidal triple is + found the graph is not AT-free and False is returned. + + Parameters + ---------- + G : NetworkX Graph + The graph to check. + + Returns + ------- + bool + True if G is AT-free and False otherwise. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + >>> nx.is_at_free(G) + True + + >>> G = nx.cycle_graph(6) + >>> nx.is_at_free(G) + False + """ + return find_asteroidal_triple(G) is None + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def create_component_structure(G): + r"""Create component structure for G. + + A *component structure* is an :math:`n \times n` array, denoted `c`, where `n` is + the number of vertices, where each row and column corresponds to a vertex. + + .. math:: + c_{uv} = \begin{cases} 0, & \text{if } v \in N[u] \\ k, & \text{if } v \text{ is in component } k \text{ of } G \setminus N[u] \end{cases} + + where `k` is an arbitrary label for each component. The structure is used + to simplify the detection of asteroidal triples. + + Parameters + ---------- + G : NetworkX Graph + Undirected, simple graph. + + Returns + ------- + component_structure : dictionary + A dictionary of dictionaries, keyed by pairs of vertices. + + """ + V = set(G.nodes) + component_structure = {} + for v in V: + label = 0 + closed_neighborhood = set(G[v]).union({v}) + row_dict = {} + for u in closed_neighborhood: + row_dict[u] = 0 + + G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood) + for cc in nx.connected_components(G_reduced): + label += 1 + for u in cc: + row_dict[u] = label + + component_structure[v] = row_dict + + return component_structure diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/__init__.py new file mode 100644 index 0000000..09e5a38 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/__init__.py @@ -0,0 +1,86 @@ +r""" This module provides functions and operations for bipartite +graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U, V` and edges in +`E` that only connect nodes from opposite sets. It is common in the literature +to use a spatial analogy referring to the two node sets as top and bottom nodes. + +The bipartite algorithms are not imported into the networkx namespace +at the top level so the easiest way to use them is with: + +>>> from networkx.algorithms import bipartite + +NetworkX does not have a custom bipartite graph class but the Graph() +or DiGraph() classes can be used to represent bipartite graphs. However, +you have to keep track of which set each node belongs to, and make +sure that there is no edge between nodes of the same set. The convention used +in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to +identify the sets each node belongs to. This convention is not enforced in +the source code of bipartite functions; it is only a recommendation.
+ +For example: + +>>> B = nx.Graph() +>>> # Add nodes with the node attribute "bipartite" +>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0) +>>> B.add_nodes_from(["a", "b", "c"], bipartite=1) +>>> # Add edges only between nodes of opposite node sets +>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + +Many algorithms of the bipartite module of NetworkX require, as an argument, a +container with all the nodes that belong to one set, in addition to the bipartite +graph `B`. The functions in the bipartite package do not check that the node set +is actually correct nor that the input graph is actually bipartite. +If `B` is connected, you can find the two node sets using a two-coloring +algorithm: + +>>> nx.is_connected(B) +True +>>> bottom_nodes, top_nodes = bipartite.sets(B) + +However, if the input graph is not connected, more than one coloring is +possible. This is the reason why we require the user to pass a container +with all nodes of one bipartite node set as an argument to most bipartite +functions. In the face of ambiguity, we refuse the temptation to guess and +raise an :exc:`AmbiguousSolution <networkx.AmbiguousSolution>` +exception if the input graph for +:func:`bipartite.sets <networkx.algorithms.bipartite.basic.sets>` +is disconnected. + +Using the `bipartite` node attribute, you can easily get the two node sets: + +>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0} +>>> bottom_nodes = set(B) - top_nodes + +So you can easily use the bipartite algorithms that require, as an argument, a +container with all nodes that belong to one node set: + +>>> print(round(bipartite.density(B, bottom_nodes), 2)) +0.5 +>>> G = bipartite.projected_graph(B, top_nodes) + +All bipartite graph generators in NetworkX build bipartite graphs with the +`bipartite` node attribute. Thus, you can use the same approach: + +>>> RB = bipartite.random_graph(5, 7, 0.2) +>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0} +>>> RB_bottom = set(RB) - RB_top +>>> list(RB_top) +[0, 1, 2, 3, 4] +>>> list(RB_bottom) +[5, 6, 7, 8, 9, 10, 11] + +For other bipartite graph generators see +:mod:`Generators <networkx.algorithms.bipartite.generators>`. + +""" + +from networkx.algorithms.bipartite.basic import * +from networkx.algorithms.bipartite.centrality import * +from networkx.algorithms.bipartite.cluster import * +from networkx.algorithms.bipartite.covering import * +from networkx.algorithms.bipartite.edgelist import * +from networkx.algorithms.bipartite.matching import * +from networkx.algorithms.bipartite.matrix import * +from networkx.algorithms.bipartite.projection import * +from networkx.algorithms.bipartite.redundancy import * +from networkx.algorithms.bipartite.spectral import * +from networkx.algorithms.bipartite.generators import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/basic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/basic.py new file mode 100644 index 0000000..ac4686a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/basic.py @@ -0,0 +1,315 @@ +""" +========================== +Bipartite Graph Algorithms +========================== +""" +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.exception import AmbiguousSolution + +__all__ = [ + "is_bipartite", + "is_bipartite_node_set", + "color", + "sets", + "density", + "degrees", +] + + +def color(G): + """Returns a two-coloring of the graph. + + Raises an exception if the graph is not bipartite.
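+ + Directed graphs are treated as undirected for the purpose of the + coloring: the traversal follows both in- and out-edges, so edge + direction is ignored.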
+ + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + color : dictionary + A dictionary keyed by node with a 1 or 0 as data for each node color. + + Raises + ------ + NetworkXError + If the graph is not two-colorable. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> c = bipartite.color(G) + >>> print(c) + {0: 1, 1: 0, 2: 1, 3: 0} + + You can use this to set a node attribute indicating the bipartite set: + + >>> nx.set_node_attributes(G, c, "bipartite") + >>> print(G.nodes[0]["bipartite"]) + 1 + >>> print(G.nodes[1]["bipartite"]) + 0 + """ + if G.is_directed(): + import itertools + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + neighbors = G.neighbors + + color = {} + for n in G: # handle disconnected graphs + if n in color or len(G[n]) == 0: # skip isolates + continue + queue = [n] + color[n] = 1 # nodes seen with color (1 or 0) + while queue: + v = queue.pop() + c = 1 - color[v] # opposite color of node v + for w in neighbors(v): + if w in color: + if color[w] == color[v]: + raise nx.NetworkXError("Graph is not bipartite.") + else: + color[w] = c + queue.append(w) + # color isolates with 0 + color.update(dict.fromkeys(nx.isolates(G), 0)) + return color + + +def is_bipartite(G): + """Returns True if graph G is bipartite, False if not. + + Parameters + ---------- + G : NetworkX graph + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> print(bipartite.is_bipartite(G)) + True + + See Also + -------- + color, is_bipartite_node_set + """ + try: + color(G) + return True + except nx.NetworkXError: + return False + + +def is_bipartite_node_set(G, nodes): + """Returns True if nodes and G/nodes are a bipartition of G. + + Parameters + ---------- + G : NetworkX graph + + nodes: list or container + Check if these nodes form one of the bipartite node sets of G. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X = set([1, 3]) + >>> bipartite.is_bipartite_node_set(G, X) + True + + Notes + ----- + An exception is raised if the input nodes are not distinct, because in this + case some bipartite algorithms will yield incorrect results. + For connected graphs the bipartite sets are unique. This function handles + disconnected graphs. + """ + S = set(nodes) + + if len(S) < len(nodes): + # this should maybe just return False? + raise AmbiguousSolution( + "The input node set contains duplicates.\n" + "This may lead to incorrect results when using it in bipartite algorithms.\n" + "Consider using set(nodes) as the input" + ) + + for CC in (G.subgraph(c).copy() for c in connected_components(G)): + X, Y = sets(CC) + if not ( + (X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S)) + ): + return False + return True + + +def sets(G, top_nodes=None): + """Returns bipartite node sets of graph G. + + Raises an exception if the graph is not bipartite or if the input + graph is disconnected and thus more than one valid solution exists. + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + Parameters + ---------- + G : NetworkX graph + + top_nodes : container, optional + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + X : set + Nodes from one side of the bipartite graph.
+ Y : set + Nodes from the other side. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + NetworkXError + Raised if the input graph is not bipartite. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X, Y = bipartite.sets(G) + >>> list(X) + [0, 2] + >>> list(Y) + [1, 3] + + See Also + -------- + color + + """ + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + if top_nodes is not None: + X = set(top_nodes) + Y = set(G) - X + else: + if not is_connected(G): + msg = "Disconnected graph: Ambiguous solution for bipartite sets." + raise nx.AmbiguousSolution(msg) + c = color(G) + X = {n for n, is_top in c.items() if is_top} + Y = {n for n, is_top in c.items() if not is_top} + return (X, Y) + + +def density(B, nodes): + """Returns density of bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. + + Returns + ------- + d : float + The bipartite density + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> X = set([0, 1, 2]) + >>> bipartite.density(G, X) + 1.0 + >>> Y = set([3, 4]) + >>> bipartite.density(G, Y) + 1.0 + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + color + """ + n = len(B) + m = nx.number_of_edges(B) + nb = len(nodes) + nt = n - nb + if m == 0: # includes cases n==0 and n==1 + d = 0.0 + else: + if B.is_directed(): + d = m / (2 * nb * nt) + else: + d = m / (nb * nt) + return d + + +def degrees(B, nodes, weight=None): + """Returns the degrees of the two node sets in the bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + (degX,degY) : tuple of dictionaries + The degrees of the two bipartite sets as dictionaries keyed by node. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> Y = set([3, 4]) + >>> degX, degY = bipartite.degrees(G, Y) + >>> dict(degX) + {0: 2, 1: 2, 2: 2} + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. 
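+ + Continuing the example above, ``dict(degY)`` is ``{3: 3, 4: 3}``: each + node of ``Y`` is adjacent to all three nodes of the opposite set.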
+ + See Also + -------- + color, density + """ + bottom = set(nodes) + top = set(B) - bottom + return (B.degree(top, weight), B.degree(bottom, weight)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/centrality.py new file mode 100644 index 0000000..3eab834 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/centrality.py @@ -0,0 +1,266 @@ +import networkx as nx + +__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"] + + +def degree_centrality(G, nodes): + r"""Compute the degree centrality for nodes in a bipartite network. + + The degree centrality for a node `v` is the fraction of nodes + connected to it. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + centrality : dictionary + Dictionary keyed by node with bipartite degree centrality as the value. + + See Also + -------- + betweenness_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both bipartite node + sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + For unipartite networks, the degree centrality values are + normalized by dividing by the maximum possible degree (which is + `n-1` where `n` is the number of nodes in G). + + In the bipartite case, the maximum possible degree of a node in a + bipartite node set is the number of nodes in the opposite node set + [1]_. The degree centrality for a node `v` in the bipartite + sets `U` with `n` nodes and `V` with `m` nodes is + + .. math:: + + d_{v} = \frac{\deg(v)}{m}, \quad \text{for } v \in U , + + d_{v} = \frac{\deg(v)}{n}, \quad \text{for } v \in V , + + + where `deg(v)` is the degree of node `v`. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + s = 1.0 / len(bottom) + centrality = {n: d * s for n, d in G.degree(top)} + s = 1.0 / len(top) + centrality.update({n: d * s for n, d in G.degree(bottom)}) + return centrality + + +def betweenness_centrality(G, nodes): + r"""Compute betweenness centrality for nodes in a bipartite network. + + Betweenness centrality of a node `v` is the sum of the + fraction of all-pairs shortest paths that pass through `v`. + + Values of betweenness are normalized by the maximum possible + value which for bipartite graphs is limited by the relative size + of the two node sets [1]_. + + Let `n` be the number of nodes in the node set `U` and + `m` be the number of nodes in the node set `V`, then + nodes in `U` are normalized by dividing by + + .. math:: + + \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] , + + where + + .. math:: + + s = (n - 1) \div m , \quad t = (n - 1) \mod m , + + and nodes in `V` are normalized by dividing by + + .. math:: + + \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] , + + where + + .. math:: + + p = (m - 1) \div n , \quad r = (m - 1) \mod n .
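+ + Here :math:`\div` and :math:`\mod` denote the quotient and the + remainder of integer division (``divmod`` in the implementation).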
+ + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + betweenness : dictionary + Dictionary keyed by node with bipartite betweenness centrality + as the value. + + See Also + -------- + degree_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + s, t = divmod(n - 1, m) + bet_max_top = ( + ((m**2) * ((s + 1) ** 2)) + + (m * (s + 1) * (2 * t - s - 1)) + - (t * ((2 * s) - t + 3)) + ) / 2.0 + p, r = divmod(m - 1, n) + bet_max_bot = ( + ((n**2) * ((p + 1) ** 2)) + + (n * (p + 1) * (2 * r - p - 1)) + - (r * ((2 * p) - r + 3)) + ) / 2.0 + betweenness = nx.betweenness_centrality(G, normalized=False, weight=None) + for node in top: + betweenness[node] /= bet_max_top + for node in bottom: + betweenness[node] /= bet_max_bot + return betweenness + + +def closeness_centrality(G, nodes, normalized=True): + r"""Compute the closeness centrality for nodes in a bipartite network. + + The closeness of a node is the distance to all other nodes in the + graph or in the case that the graph is not connected to all other nodes + in the connected component containing that node. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + normalized : bool, optional + If True (default) normalize by connected component size. + + Returns + ------- + closeness : dictionary + Dictionary keyed by node with bipartite closeness centrality + as the value. + + See Also + -------- + betweenness_centrality + degree_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + Closeness centrality is normalized by the minimum distance possible. + In the bipartite case the minimum distance for a node in one bipartite + node set is 1 from all nodes in the other node set and 2 from all + other nodes in its own set [1]_. Thus the closeness centrality + for node `v` in the two bipartite sets `U` with + `n` nodes and `V` with `m` nodes is + + .. math:: + + c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U, + + c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V, + + where `d` is the sum of the distances from `v` to all + other nodes. + + Higher values of closeness indicate higher centrality. + + As in the unipartite case, setting normalized=True causes the + values to normalized further to n-1 / size(G)-1 where n is the + number of nodes in the connected part of graph containing the + node. 
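+ (Concretely, the value is multiplied by ``(len(sp) - 1) / (len(G) - 1)``, + where ``sp`` maps each reachable node to its shortest-path distance, so + ``len(sp)`` is the size of the node's connected component.)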
If the graph is not completely connected, this algorithm + computes the closeness centrality for each connected part + separately. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + closeness = {} + path_length = nx.single_source_shortest_path_length + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + for node in top: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (m + 2 * (n - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + for node in bottom: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (n + 2 * (m - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + return closeness diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/cluster.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/cluster.py new file mode 100644 index 0000000..4b61cb9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/cluster.py @@ -0,0 +1,277 @@ +"""Functions for computing clustering of pairs + +""" + +import itertools + +import networkx as nx + +__all__ = [ + "clustering", + "average_clustering", + "latapy_clustering", + "robins_alexander_clustering", +] + + +def cc_dot(nu, nv): + return len(nu & nv) / len(nu | nv) + + +def cc_max(nu, nv): + return len(nu & nv) / max(len(nu), len(nv)) + + +def cc_min(nu, nv): + return len(nu & nv) / min(len(nu), len(nv)) + + +modes = {"dot": cc_dot, "min": cc_min, "max": cc_max} + + +def latapy_clustering(G, nodes=None, mode="dot"): + r"""Compute a bipartite clustering coefficient for nodes. + + The bipartite clustering coefficient is a measure of local density + of connections defined as [1]_: + + .. math:: + + c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|} + + where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, + and `c_{uv}` is the pairwise clustering coefficient between nodes + `u` and `v`. + + The mode selects the function for `c_{uv}` which can be: + + `dot`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|} + + `min`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{\min(|N(u)|,|N(v)|)} + + `max`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{\max(|N(u)|,|N(v)|)} + + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute bipartite clustering for these nodes. The default + is all nodes in G. + + mode : string + The pairwise bipartite clustering method to be used in the computation. + It must be "dot", "max", or "min". + + Returns + ------- + clustering : dictionary + A dictionary keyed by node with the clustering coefficient value. + + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4)  # path graphs are bipartite + >>> c = bipartite.clustering(G) + >>> c[0] + 0.5 + >>> c = bipartite.clustering(G, mode="min") + >>> c[0] + 1.0 + + See Also + -------- + robins_alexander_clustering + average_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
+ Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + """ + if not nx.algorithms.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph is not bipartite") + + try: + cc_func = modes[mode] + except KeyError as err: + raise nx.NetworkXError( + "Mode for bipartite clustering must be: dot, min or max" + ) from err + + if nodes is None: + nodes = G + ccs = {} + for v in nodes: + cc = 0.0 + nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v} + for u in nbrs2: + cc += cc_func(set(G[u]), set(G[v])) + if cc > 0.0:  # len(nbrs2)>0 + cc /= len(nbrs2) + ccs[v] = cc + return ccs + + +clustering = latapy_clustering + + +def average_clustering(G, nodes=None, mode="dot"): + r"""Compute the average bipartite clustering coefficient. + + A clustering coefficient for the whole graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where `n` is the number of nodes in `G`. + + Similar measures for the two bipartite sets can be defined [1]_ + + .. math:: + + C_X = \frac{1}{|X|}\sum_{v \in X} c_v, + + where `X` is a bipartite set of `G`. + + Parameters + ---------- + G : graph + a bipartite graph + + nodes : list or iterable, optional + A container of nodes to use in computing the average. + The nodes should be either the entire graph (the default) or one of the + bipartite sets. + + mode : string + The pairwise bipartite clustering method. + It must be "dot", "max", or "min". + + Returns + ------- + clustering : float + The average bipartite clustering for the given set of nodes or the + entire graph if no nodes are specified. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.star_graph(3)  # star graphs are bipartite + >>> bipartite.average_clustering(G) + 0.75 + >>> X, Y = bipartite.sets(G) + >>> bipartite.average_clustering(G, X) + 0.0 + >>> bipartite.average_clustering(G, Y) + 1.0 + + See Also + -------- + clustering + + Notes + ----- + The container of nodes passed to this function must contain all of the nodes + in one of the bipartite sets ("top" or "bottom") in order to compute + the correct average bipartite clustering coefficients. + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + """ + if nodes is None: + nodes = G + ccs = latapy_clustering(G, nodes=nodes, mode=mode) + return sum(ccs[v] for v in nodes) / len(nodes) + + +def robins_alexander_clustering(G): + r"""Compute the bipartite clustering of G. + + Robins and Alexander [1]_ defined the bipartite clustering coefficient as + four times the number of four cycles `C_4` divided by the number of + three paths `L_3` in a bipartite graph: + + .. math:: + + CC_4 = \frac{4 C_4}{L_3} + + Parameters + ---------- + G : graph + a bipartite graph + + Returns + ------- + clustering : float + The Robins and Alexander bipartite clustering for the input graph. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.davis_southern_women_graph() + >>> print(round(bipartite.robins_alexander_clustering(G), 3)) + 0.468 + + See Also + -------- + latapy_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking + directors: Network structure and distance in bipartite graphs.
+ Computational & Mathematical Organization Theory 10(1), 69–94. + + """ + if G.order() < 4 or G.size() < 3: + return 0 + L_3 = _threepaths(G) + if L_3 == 0: + return 0 + C_4 = _four_cycles(G) + return (4.0 * C_4) / L_3 + + +def _four_cycles(G): + cycles = 0 + for v in G: + for u, w in itertools.combinations(G[v], 2): + cycles += len((set(G[u]) & set(G[w])) - {v}) + return cycles / 4 + + +def _threepaths(G): + paths = 0 + for v in G: + for u in G[v]: + for w in set(G[u]) - {v}: + paths += len(set(G[w]) - {v, u}) + # Divide by two because we count each three path twice + # one for each possible starting point + return paths / 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/covering.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/covering.py new file mode 100644 index 0000000..2802dbc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/covering.py @@ -0,0 +1,55 @@ +""" Functions related to graph covers.""" + +from networkx.algorithms.bipartite.matching import hopcroft_karp_matching +from networkx.algorithms.covering import min_edge_cover as _min_edge_cover +from networkx.utils import not_implemented_for + +__all__ = ["min_edge_cover"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def min_edge_cover(G, matching_algorithm=None): + """Returns a set of edges which constitutes + the minimum edge cover of the graph. + + The smallest edge cover can be found in polynomial time by finding + a maximum matching and extending it greedily so that all nodes + are covered. + + Parameters + ---------- + G : NetworkX graph + An undirected bipartite graph. + + matching_algorithm : function + A function that returns a maximum cardinality matching in a + given bipartite graph. The function must take one input, the + graph ``G``, and return a dictionary mapping each node to its + mate. If not specified, + :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching` + will be used. Other possibilities include + :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`, + + Returns + ------- + set + A set of the edges in a minimum edge cover of the graph, given as + pairs of nodes. It contains both the edges `(u, v)` and `(v, u)` + for given nodes `u` and `v` among the edges of minimum edge cover. + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + A minimum edge cover is an edge covering of smallest cardinality. + + Due to its implementation, the worst-case running time of this algorithm + is bounded by the worst-case running time of the function + ``matching_algorithm``. + """ + if G.order() == 0: # Special case for the empty graph + return set() + if matching_algorithm is None: + matching_algorithm = hopcroft_karp_matching + return _min_edge_cover(G, matching_algorithm=matching_algorithm) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/edgelist.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/edgelist.py new file mode 100644 index 0000000..5d183f4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/edgelist.py @@ -0,0 +1,357 @@ +""" +******************** +Bipartite Edge Lists +******************** +Read and write NetworkX graphs as bipartite edge lists. + +Format +------ +You can read or write three formats of edge lists with these functions. 
+ +Node pairs with no data:: + + 1 2 + +Python dictionary as data:: + + 1 2 {'weight':7, 'color':'green'} + +Arbitrary data:: + + 1 2 7 green + +For each edge (u, v) the node u is assigned to part 0 and the node v to part 1. +""" +__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"] + +import networkx as nx +from networkx.utils import not_implemented_for, open_file + + +@open_file(1, mode="wb") +def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"): + """Write a bipartite graph as a list of edges. + + Parameters + ---------- + G : Graph + A NetworkX bipartite graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + data : bool or list, optional + If False write no edge data. + If True write a string representation of the edge data dictionary.. + If a list (or other iterable) is provided, write the keys specified + in the list. + encoding: string, optional + Specify which encoding to use when writing file. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> nx.write_edgelist(G, "test.edgelist") + >>> fh = open("test.edgelist", "wb") + >>> nx.write_edgelist(G, fh) + >>> nx.write_edgelist(G, "test.edgelist.gz") + >>> nx.write_edgelist(G, "test.edgelist.gz", data=False) + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7, color="red") + >>> nx.write_edgelist(G, "test.edgelist", data=False) + >>> nx.write_edgelist(G, "test.edgelist", data=["color"]) + >>> nx.write_edgelist(G, "test.edgelist", data=["color", "weight"]) + + See Also + -------- + write_edgelist + generate_edgelist + """ + for line in generate_edgelist(G, delimiter, data): + line += "\n" + path.write(line.encode(encoding)) + + +@not_implemented_for("directed") +def generate_edgelist(G, delimiter=" ", data=True): + """Generate a single line of the bipartite graph G in edge list format. + + Parameters + ---------- + G : NetworkX graph + The graph is assumed to have node attribute `part` set to 0,1 representing + the two graph parts + + delimiter : string, optional + Separator for node labels + + data : bool or list of keys + If False generate no edge data. If True use a dictionary + representation of edge data. If a list of keys use a list of data + values corresponding to the keys. + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> G[1][2]["weight"] = 3 + >>> G[2][3]["capacity"] = 12 + >>> for line in bipartite.generate_edgelist(G, data=False): + ... print(line) + 0 1 + 2 1 + 2 3 + + >>> for line in bipartite.generate_edgelist(G): + ... print(line) + 0 1 {} + 2 1 {'weight': 3} + 2 3 {'capacity': 12} + + >>> for line in bipartite.generate_edgelist(G, data=["weight"]): + ... 
print(line) + 0 1 + 2 1 3 + 2 3 + """ + try: + part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0] + except BaseException as err: + raise AttributeError("Missing node attribute `bipartite`") from err + if data is True or data is False: + for n in part0: + for edge in G.edges(n, data=data): + yield delimiter.join(map(str, edge)) + else: + for n in part0: + for u, v, d in G.edges(n, data=True): + edge = [u, v] + try: + edge.extend(d[k] for k in data) + except KeyError: + pass # missing data for this edge, should warn? + yield delimiter.join(map(str, edge)) + + +def parse_edgelist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True +): + """Parse lines of an edge list representation of a bipartite graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in edgelist format + comments : string, optional + Marker for comment lines + delimiter : string, optional + Separator for node labels + create_using: NetworkX graph container, optional + Use given NetworkX graph for holding nodes or edges. + nodetype : Python type, optional + Convert nodes to this type. + data : bool or list of (label,type) tuples + If False generate no edge data or if True use a dictionary + representation of edge data or a list tuples specifying dictionary + key names and types for edge data. + + Returns + ------- + G: NetworkX Graph + The bipartite graph corresponding to lines + + Examples + -------- + Edgelist with no data: + + >>> from networkx.algorithms import bipartite + >>> lines = ["1 2", "2 3", "3 4"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.nodes(data=True)) + [(1, {'bipartite': 0}), (2, {'bipartite': 0}), (3, {'bipartite': 0}), (4, {'bipartite': 1})] + >>> sorted(G.edges()) + [(1, 2), (2, 3), (3, 4)] + + Edgelist with data in Python dictionary representation: + + >>> lines = ["1 2 {'weight':3}", "2 3 {'weight':27}", "3 4 {'weight':3.0}"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})] + + Edgelist with data in a list: + + >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})] + + See Also + -------- + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + # split line, should have 2 or more + s = line.strip().split(delimiter) + if len(s) < 2: + continue + u = s.pop(0) + v = s.pop(0) + d = s + if nodetype is not None: + try: + u = nodetype(u) + v = nodetype(v) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes {u},{v} " f"to type {nodetype}." + ) from err + + if len(d) == 0 or data is False: + # no data or data type specified + edgedata = {} + elif data is True: + # no edge types specified + try: # try to evaluate as dictionary + edgedata = dict(literal_eval(" ".join(d))) + except BaseException as err: + raise TypeError( + f"Failed to convert edge data ({d})" f"to dictionary." 
+ ) from err + else: + # convert edge data to dictionary with specified keys and type + if len(d) != len(data): + raise IndexError( + f"Edge data {d} and data_keys {data} are not the same length" + ) + edgedata = {} + for (edge_key, edge_type), edge_value in zip(data, d): + try: + edge_value = edge_type(edge_value) + except BaseException as err: + raise TypeError( + f"Failed to convert {edge_key} data " + f"{edge_value} to type {edge_type}." + ) from err + edgedata.update({edge_key: edge_value}) + G.add_node(u, bipartite=0) + G.add_node(v, bipartite=1) + G.add_edge(u, v, **edgedata) + return G + + +@open_file(0, mode="rb") +def read_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + data=True, + edgetype=None, + encoding="utf-8", +): + """Read a bipartite graph from a list of edges. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be uncompressed. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : Graph container, optional, + Use specified container to build graph. The default is networkx.Graph, + an undirected graph. + nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + data : bool or list of (label,type) tuples + Tuples specifying dictionary key names and types for edge data + edgetype : int, float, str, Python type, optional OBSOLETE + Convert edge data from strings to specified type and use as 'weight' + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> bipartite.write_edgelist(G, "test.edgelist") + >>> G = bipartite.read_edgelist("test.edgelist") + + >>> fh = open("test.edgelist", "rb") + >>> G = bipartite.read_edgelist(fh) + >>> fh.close() + + >>> G = bipartite.read_edgelist("test.edgelist", nodetype=int) + + Edgelist with data in a list: + + >>> textline = "1 2 3" + >>> fh = open("test.edgelist", "w") + >>> d = fh.write(textline) + >>> fh.close() + >>> G = bipartite.read_edgelist( + ... "test.edgelist", nodetype=int, data=(("weight", float),) + ... ) + >>> list(G) + [1, 2] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0})] + + See parse_edgelist() for more examples of formatting. + + See Also + -------- + parse_edgelist + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + """ + lines = (line.decode(encoding) for line in path) + return parse_edgelist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=data, + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/generators.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/generators.py new file mode 100644 index 0000000..eddd61b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/generators.py @@ -0,0 +1,595 @@ +""" +Generators and functions for bipartite graphs. 
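+All of the generators below label each node with a ``bipartite`` node
+attribute set to 0 or 1, indicating which of the two node sets it belongs to.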
+""" +import math +import numbers +from functools import reduce + +import networkx as nx +from networkx.utils import nodes_or_number, py_random_state + +__all__ = [ + "configuration_model", + "havel_hakimi_graph", + "reverse_havel_hakimi_graph", + "alternating_havel_hakimi_graph", + "preferential_attachment_graph", + "random_graph", + "gnmk_random_graph", + "complete_bipartite_graph", +] + + +@nodes_or_number([0, 1]) +def complete_bipartite_graph(n1, n2, create_using=None): + """Returns the complete bipartite graph `K_{n_1,n_2}`. + + The graph is composed of two partitions with nodes 0 to (n1 - 1) + in the first and nodes n1 to (n1 + n2 - 1) in the second. + Each node in the first is connected to each node in the second. + + Parameters + ---------- + n1, n2 : integer or iterable container of nodes + If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. + If a container, the elements are the nodes. + create_using : NetworkX graph instance, (default: nx.Graph) + Return graph of this type. + + Notes + ----- + Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are + containers of nodes. If only one of n1 or n2 are integers, that + integer is replaced by `range` of that integer. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.complete_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + n1, top = n1 + n2, bottom = n2 + if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): + bottom = [n1 + i for i in bottom] + G.add_nodes_from(top, bipartite=0) + G.add_nodes_from(bottom, bipartite=1) + if len(G) != len(top) + len(bottom): + raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes") + G.add_edges_from((u, v) for u in top for v in bottom) + G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})" + return G + + +@py_random_state(3) +def configuration_model(aseq, bseq, create_using=None, seed=None): + """Returns a random bipartite graph from two given degree sequences. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in set B by choosing + randomly from the possible free stubs, one in A and one in B. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.configuration_model + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length and sum of each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build lists of degree-repeated vertex numbers + stubs = [[v] * aseq[v] for v in range(0, lena)] + astubs = [x for subseq in stubs for x in subseq] + + stubs = [[v] * bseq[v - lena] for v in range(lena, lena + lenb)] + bstubs = [x for subseq in stubs for x in subseq] + + # shuffle lists + seed.shuffle(astubs) + seed.shuffle(bstubs) + + G.add_edges_from([astubs[i], bstubs[i]] for i in range(suma)) + + G.name = "bipartite_configuration_model" + return G + + +def havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to the highest degree + nodes in set B until all stubs are connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(0, naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + astubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to largest degree nodes in the b set + bstubs.sort() + for target in bstubs[-degree:]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_havel_hakimi_graph" + return G + + +def reverse_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. 
+ + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in the set B by connecting + the highest degree nodes in set A to the lowest degree nodes in + set B until all stubs are connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.reverse_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(0, lena)] + bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)] + astubs.sort() + bstubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to the smallest degree nodes in the b set + for target in bstubs[0:degree]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_reverse_havel_hakimi_graph" + return G + + +def alternating_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using + an alternating Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to alternatively the + highest and the lowest degree nodes in set B until all stubs are + connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.alternating_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(0, naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + while astubs: + astubs.sort() + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + bstubs.sort() + small = bstubs[0 : degree // 2] # add these low degree targets + large = bstubs[(-degree + degree // 2) :] # now high degree targets + stubs = [x for z in zip(large, small) for x in z] # combine, sorry + if len(stubs) < len(small) + len(large): # check for zip truncation + stubs.append(large.pop()) + for target in stubs: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_alternating_havel_hakimi_graph" + return G + + +@py_random_state(3) +def preferential_attachment_graph(aseq, p, create_using=None, seed=None): + """Create a bipartite graph with a preferential attachment model from + a given single degree sequence. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes starting with node len(aseq). + The number of nodes in set B is random. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + p : float + Probability that a new bottom node is added. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + References + ---------- + .. [1] Guillaume, J.L. and Latapy, M., + Bipartite graphs as models of complex networks. + Physica A: Statistical Mechanics and its Applications, + 2006, 371(2), pp.795-813. + .. [2] Jean-Loup Guillaume and Matthieu Latapy, + Bipartite structure of all complex networks, + Inf. Process. Lett. 90, 2004, pg. 215-221 + https://doi.org/10.1016/j.ipl.2004.03.007 + + Notes + ----- + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.preferential_attachment_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + if p > 1: + raise nx.NetworkXError(f"probability {p} > 1") + + naseq = len(aseq) + G = _add_nodes_with_bipartite_label(G, naseq, 0) + vv = [[v] * aseq[v] for v in range(0, naseq)] + while vv: + while vv[0]: + source = vv[0][0] + vv[0].remove(source) + if seed.random() < p or len(G) == naseq: + target = len(G) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + else: + bb = [[b] * G.degree(b) for b in range(naseq, len(G))] + # flatten the list of lists into a list. + bbstubs = reduce(lambda x, y: x + y, bb) + # choose preferentially a bottom node. 
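+                # Each existing bottom node appears in bbstubs once per
+                # incident edge, so a uniform choice from bbstubs picks a
+                # node with probability proportional to its current degree.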
+ target = seed.choice(bbstubs) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + vv.remove(vv[0]) + G.name = "bipartite_preferential_attachment_model" + return G + + +@py_random_state(3) +def random_graph(n, m, p, seed=None, directed=False): + """Returns a bipartite random graph. + + This is a bipartite version of the binomial (Erdős-Rényi) graph. + The graph is composed of two partitions. Set A has nodes 0 to + (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Notes + ----- + The bipartite random graph algorithm chooses each of the n*m (undirected) + or 2*nm (directed) possible edges with probability p. + + This algorithm is $O(n+m)$ where $m$ is the expected number of edges. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.random_graph + + See Also + -------- + gnp_random_graph, configuration_model + + References + ---------- + .. [1] Vladimir Batagelj and Ulrik Brandes, + "Efficient generation of large random networks", + Phys. Rev. E, 71, 036113, 2005. + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"fast_gnp_random_graph({n},{m},{p})" + + if p <= 0: + return G + if p >= 1: + return nx.complete_bipartite_graph(n, m) + + lp = math.log(1.0 - p) + + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(v, n + w) + + if directed: + # use the same algorithm to + # add edges from the "m" to "n" set + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(n + w, v) + + return G + + +@py_random_state(3) +def gnmk_random_graph(n, m, k, seed=None, directed=False): + """Returns a random bipartite graph G_{n,m,k}. + + Produces a bipartite graph chosen randomly out of the set of all graphs + with n top nodes, m bottom nodes, and k edges. + The graph is composed of two sets of nodes. + Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + k : int + The number of edges + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Examples + -------- + from nx.algorithms import bipartite + G = bipartite.gnmk_random_graph(10,20,50) + + See Also + -------- + gnm_random_graph + + Notes + ----- + If k > m * n then a complete bipartite graph is returned. + + This graph is a bipartite version of the `G_{nm}` random graph model. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.gnmk_random_graph + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"bipartite_gnm_random_graph({n},{m},{k})" + if n == 1 or m == 1: + return G + max_edges = n * m # max_edges for bipartite networks + if k >= max_edges: # Maybe we should raise an exception here + return nx.complete_bipartite_graph(n, m, create_using=G) + + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + bottom = list(set(G) - set(top)) + edge_count = 0 + while edge_count < k: + # generate random edge,u,v + u = seed.choice(top) + v = seed.choice(bottom) + if v in G[u]: + continue + else: + G.add_edge(u, v) + edge_count += 1 + return G + + +def _add_nodes_with_bipartite_label(G, lena, lenb): + G.add_nodes_from(range(0, lena + lenb)) + b = dict(zip(range(0, lena), [0] * lena)) + b.update(dict(zip(range(lena, lena + lenb), [1] * lenb))) + nx.set_node_attributes(G, b, "bipartite") + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matching.py new file mode 100644 index 0000000..48a22f9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matching.py @@ -0,0 +1,579 @@ +# This module uses material from the Wikipedia article Hopcroft--Karp algorithm +# , accessed on +# January 3, 2015, which is released under the Creative Commons +# Attribution-Share-Alike License 3.0 +# . That article includes +# pseudocode, which has been translated into the corresponding Python code. +# +# Portions of this module use code from David Eppstein's Python Algorithms and +# Data Structures (PADS) library, which is dedicated to the public domain (for +# proof, see ). +"""Provides functions for computing maximum cardinality matchings and minimum +weight full matchings in a bipartite graph. + +If you don't care about the particular implementation of the maximum matching +algorithm, simply use the :func:`maximum_matching`. If you do care, you can +import one of the named maximum matching algorithms directly. + +For example, to find a maximum matching in the complete bipartite graph with +two vertices on the left and three vertices on the right: + +>>> G = nx.complete_bipartite_graph(2, 3) +>>> left, right = nx.bipartite.sets(G) +>>> list(left) +[0, 1] +>>> list(right) +[2, 3, 4] +>>> nx.bipartite.maximum_matching(G) +{0: 2, 1: 3, 2: 0, 3: 1} + +The dictionary returned by :func:`maximum_matching` includes a mapping for +vertices in both the left and right vertex sets. + +Similarly, :func:`minimum_weight_full_matching` produces, for a complete +weighted bipartite graph, a matching whose cardinality is the cardinality of +the smaller of the two partitions, and for which the sum of the weights of the +edges included in the matching is minimal. + +""" +import collections +import itertools + +import networkx as nx +from networkx.algorithms.bipartite import sets as bipartite_sets +from networkx.algorithms.bipartite.matrix import biadjacency_matrix + +__all__ = [ + "maximum_matching", + "hopcroft_karp_matching", + "eppstein_matching", + "to_vertex_cover", + "minimum_weight_full_matching", +] + +INFINITY = float("inf") + + +def hopcroft_karp_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + A matching is a set of edges that do not share any nodes. A maximum + cardinality matching is a matching with the most edges possible. It + is not always unique. 
Finding a matching in a bipartite graph can be + treated as a networkx flow problem. + + The functions ``hopcroft_karp_matching`` and ``maximum_matching`` + are aliases of the same function. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container of nodes + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with the `Hopcroft--Karp matching algorithm + `_ for + bipartite graphs. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + maximum_matching + hopcroft_karp_matching + eppstein_matching + + References + ---------- + .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for + Maximum Matchings in Bipartite Graphs" In: **SIAM Journal of Computing** + 2.4 (1973), pp. 225--231. . + + """ + # First we define some auxiliary search functions. + # + # If you are a human reading these auxiliary search functions, the "global" + # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined + # below the functions, so that they are initialized close to the initial + # invocation of the search functions. + def breadth_first_search(): + for v in left: + if leftmatches[v] is None: + distances[v] = 0 + queue.append(v) + else: + distances[v] = INFINITY + distances[None] = INFINITY + while queue: + v = queue.popleft() + if distances[v] < distances[None]: + for u in G[v]: + if distances[rightmatches[u]] is INFINITY: + distances[rightmatches[u]] = distances[v] + 1 + queue.append(rightmatches[u]) + return distances[None] is not INFINITY + + def depth_first_search(v): + if v is not None: + for u in G[v]: + if distances[rightmatches[u]] == distances[v] + 1: + if depth_first_search(rightmatches[u]): + rightmatches[u] = v + leftmatches[v] = u + return True + distances[v] = INFINITY + return False + return True + + # Initialize the "global" variables that maintain state during the search. + left, right = bipartite_sets(G, top_nodes) + leftmatches = {v: None for v in left} + rightmatches = {v: None for v in right} + distances = {} + queue = collections.deque() + + # Implementation note: this counter is incremented as pairs are matched but + # it is currently not used elsewhere in the computation. + num_matched_pairs = 0 + while breadth_first_search(): + for v in left: + if leftmatches[v] is None: + if depth_first_search(v): + num_matched_pairs += 1 + + # Strip the entries matched to `None`. + leftmatches = {k: v for k, v in leftmatches.items() if v is not None} + rightmatches = {k: v for k, v in rightmatches.items() if v is not None} + + # At this point, the left matches and the right matches are inverses of one + # another. In other words, + # + # leftmatches == {v, k for k, v in rightmatches.items()} + # + # Finally, we combine both the left matches and right matches. 
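+    # Each matched pair therefore appears twice in the returned dictionary,
+    # once keyed by its left endpoint and once keyed by its right endpoint.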
+ return dict(itertools.chain(leftmatches.items(), rightmatches.items())) + + +def eppstein_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matching`, such that + ``matching[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matching`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with David Eppstein's version of the algorithm + Hopcroft--Karp algorithm (see :func:`hopcroft_karp_matching`), which + originally appeared in the `Python Algorithms and Data Structures library + (PADS) `_. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + + hopcroft_karp_matching + + """ + # Due to its original implementation, a directed graph is needed + # so that the two sets of bipartite nodes can be distinguished + left, right = bipartite_sets(G, top_nodes) + G = nx.DiGraph(G.edges(left)) + # initialize greedy matching (redundant, but faster than full search) + matching = {} + for u in G: + for v in G[u]: + if v not in matching: + matching[v] = u + break + while True: + # structure residual graph into layers + # pred[u] gives the neighbor in the previous layer for u in U + # preds[v] gives a list of neighbors in the previous layer for v in V + # unmatched gives a list of unmatched vertices in final layer of V, + # and is also used as a flag value for pred[u] when u is in the first + # layer + preds = {} + unmatched = [] + pred = {u: unmatched for u in G} + for v in matching: + del pred[matching[v]] + layer = list(pred) + + # repeatedly extend layering structure by another pair of layers + while layer and not unmatched: + newLayer = {} + for u in layer: + for v in G[u]: + if v not in preds: + newLayer.setdefault(v, []).append(u) + layer = [] + for v in newLayer: + preds[v] = newLayer[v] + if v in matching: + layer.append(matching[v]) + pred[matching[v]] = v + else: + unmatched.append(v) + + # did we finish layering without finding any alternating paths? + if not unmatched: + unlayered = {} + for u in G: + # TODO Why is extra inner loop necessary? + for v in G[u]: + if v not in preds: + unlayered[v] = None + # TODO Originally, this function returned a three-tuple: + # + # return (matching, list(pred), list(unlayered)) + # + # For some reason, the documentation for this function + # indicated that the second and third elements of the returned + # three-tuple would be the vertices in the left and right vertex + # sets, respectively, that are also in the maximum independent set. + # However, what I think the author meant was that the second + # element is the list of vertices that were unmatched and the third + # element was the list of vertices that were matched. 
Since that + # seems to be the case, they don't really need to be returned, + # since that information can be inferred from the matching + # dictionary. + + # All the matched nodes must be a key in the dictionary + for key in matching.copy(): + matching[matching[key]] = key + return matching + + # recursively search backward through layers to find alternating paths + # recursion returns true if found path, false otherwise + def recurse(v): + if v in preds: + L = preds.pop(v) + for u in L: + if u in pred: + pu = pred.pop(u) + if pu is unmatched or recurse(pu): + matching[v] = u + return True + return False + + for v in unmatched: + recurse(v) + + +def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets): + """Returns True if and only if the vertex `v` is connected to one of + the target vertices by an alternating path in `G`. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `v` is a vertex in `G`. + + `matched_edges` is a set of edges present in a maximum matching in `G`. + + `unmatched_edges` is a set of edges not present in a maximum + matching in `G`. + + `targets` is a set of vertices. + + """ + + def _alternating_dfs(u, along_matched=True): + """Returns True if and only if `u` is connected to one of the + targets by an alternating path. + + `u` is a vertex in the graph `G`. + + If `along_matched` is True, this step of the depth-first search + will continue only through edges in the given matching. Otherwise, it + will continue only through edges *not* in the given matching. + + """ + visited = set() + # Follow matched edges when depth is even, + # and follow unmatched edges when depth is odd. + initial_depth = 0 if along_matched else 1 + stack = [(u, iter(G[u]), initial_depth)] + while stack: + parent, children, depth = stack[-1] + valid_edges = matched_edges if depth % 2 else unmatched_edges + try: + child = next(children) + if child not in visited: + if (parent, child) in valid_edges or (child, parent) in valid_edges: + if child in targets: + return True + visited.add(child) + stack.append((child, iter(G[child]), depth + 1)) + except StopIteration: + stack.pop() + return False + + # Check for alternating paths starting with edges in the matching, then + # check for alternating paths starting with edges not in the + # matching. + return _alternating_dfs(v, along_matched=True) or _alternating_dfs( + v, along_matched=False + ) + + +def _connected_by_alternating_paths(G, matching, targets): + """Returns the set of vertices that are connected to one of the target + vertices by an alternating path in `G` or are themselves a target. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `matching` is a dictionary representing a maximum matching in `G`, as + returned by, for example, :func:`maximum_matching`. + + `targets` is a set of vertices. + + """ + # Get the set of matched edges and the set of unmatched edges. 
Only include + # one version of each undirected edge (for example, include edge (1, 2) but + # not edge (2, 1)). Using frozensets as an intermediary step we do not + # require nodes to be orderable. + edge_sets = {frozenset((u, v)) for u, v in matching.items()} + matched_edges = {tuple(edge) for edge in edge_sets} + unmatched_edges = { + (u, v) for (u, v) in G.edges() if frozenset((u, v)) not in edge_sets + } + + return { + v + for v in G + if v in targets + or _is_connected_by_alternating_path( + G, v, matched_edges, unmatched_edges, targets + ) + } + + +def to_vertex_cover(G, matching, top_nodes=None): + """Returns the minimum vertex cover corresponding to the given maximum + matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + matching : dictionary + + A dictionary whose keys are vertices in `G` and whose values are the + distinct neighbors comprising the maximum matching for `G`, as returned + by, for example, :func:`maximum_matching`. The dictionary *must* + represent the maximum matching. + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + vertex_cover : :class:`set` + + The minimum vertex cover in `G`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented using the procedure guaranteed by `Konig's + theorem + `_, + which proves an equivalence between a maximum matching and a minimum vertex + cover in bipartite graphs. + + Since a minimum vertex cover is the complement of a maximum independent set + for any graph, one can compute the maximum independent set of a bipartite + graph this way: + + >>> G = nx.complete_bipartite_graph(2, 3) + >>> matching = nx.bipartite.maximum_matching(G) + >>> vertex_cover = nx.bipartite.to_vertex_cover(G, matching) + >>> independent_set = set(G) - vertex_cover + >>> print(list(independent_set)) + [2, 3, 4] + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + """ + # This is a Python implementation of the algorithm described at + # . + L, R = bipartite_sets(G, top_nodes) + # Let U be the set of unmatched vertices in the left vertex set. + unmatched_vertices = set(G) - set(matching) + U = unmatched_vertices & L + # Let Z be the set of vertices that are either in U or are connected to U + # by alternating paths. + Z = _connected_by_alternating_paths(G, matching, U) + # At this point, every edge either has a right endpoint in Z or a left + # endpoint not in Z. This gives us the vertex cover. + return (L - Z) | (R & Z) + + +#: Returns the maximum cardinality matching in the given bipartite graph. +#: +#: This function is simply an alias for :func:`hopcroft_karp_matching`. +maximum_matching = hopcroft_karp_matching + + +def minimum_weight_full_matching(G, top_nodes=None, weight="weight"): + r"""Returns a minimum weight full matching of the bipartite graph `G`. + + Let :math:`G = ((U, V), E)` be a weighted bipartite graph with real weights + :math:`w : E \to \mathbb{R}`. This function then produces a matching + :math:`M \subseteq E` with cardinality + + .. 
math:: + \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert), + + which minimizes the sum of the weights of the edges included in the + matching, :math:`\sum_{e \in M} w(e)`, or raises an error if no such + matching exists. + + When :math:`\lvert U \rvert = \lvert V \rvert`, this is commonly + referred to as a perfect matching; here, since we allow + :math:`\lvert U \rvert` and :math:`\lvert V \rvert` to differ, we + follow Karp [1]_ and refer to the matching as *full*. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. + + weight : string, optional (default='weight') + + The edge data key used to provide each value in the matrix. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + ValueError + Raised if no full matching exists. + + ImportError + Raised if SciPy is not available. + + Notes + ----- + The problem of determining a minimum weight full matching is also known as + the rectangular linear assignment problem. This implementation defers the + calculation of the assignment to SciPy. + + References + ---------- + .. [1] Richard Manning Karp: + An algorithm to Solve the m x n Assignment Problem in Expected Time + O(mn log n). + Networks, 10(2):143–152, 1980. + + """ + import numpy as np + import scipy as sp + import scipy.optimize # call as sp.optimize + + left, right = nx.bipartite.sets(G, top_nodes) + U = list(left) + V = list(right) + # We explicitly create the biadjancency matrix having infinities + # where edges are missing (as opposed to zeros, which is what one would + # get by using toarray on the sparse matrix). + weights_sparse = biadjacency_matrix( + G, row_order=U, column_order=V, weight=weight, format="coo" + ) + weights = np.full(weights_sparse.shape, np.inf) + weights[weights_sparse.row, weights_sparse.col] = weights_sparse.data + left_matches = sp.optimize.linear_sum_assignment(weights) + d = {U[u]: V[v] for u, v in zip(*left_matches)} + # d will contain the matching from edges in left to right; we need to + # add the ones from right to left as well. + d.update({v: u for u, v in d.items()}) + return d diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matrix.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matrix.py new file mode 100644 index 0000000..276d3e4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/matrix.py @@ -0,0 +1,174 @@ +""" +==================== +Biadjacency matrices +==================== +""" +import itertools + +import networkx as nx +from networkx.convert_matrix import _generate_weighted_edges + +__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"] + + +def biadjacency_matrix( + G, row_order, column_order=None, dtype=None, weight="weight", format="csr" +): + r"""Returns the biadjacency matrix of the bipartite graph G. + + Let `G = (U, V, E)` be a bipartite graph with node sets + `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency + matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1` + if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is + not `None` and matches the name of an edge attribute, its value is + used instead of 1. 
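+    For example, for the path graph ``0 - 1 - 2 - 3`` with ``row_order=[0, 2]``
+    and ``column_order=[1, 3]``, the biadjacency matrix is ``[[1, 0], [1, 1]]``.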
+ + Parameters + ---------- + G : graph + A NetworkX graph + + row_order : list of nodes + The rows of the matrix are ordered according to the list of nodes. + + column_order : list, optional + The columns of the matrix are ordered according to the list of nodes. + If column_order is None, then the ordering of columns is arbitrary. + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [2]_ for details. + + Returns + ------- + M : SciPy sparse matrix + Biadjacency matrix representation of the bipartite graph G. + + Notes + ----- + No attempt is made to check that the input graph is bipartite. + + For directed bipartite graphs only successors are considered as neighbors. + To obtain an adjacency matrix with ones (or weight values) for both + predecessors and successors you have to generate two biadjacency matrices + where the rows of one of them are the columns of the other, and then add + one to the transpose of the other. + + See Also + -------- + adjacency_matrix + from_biadjacency_matrix + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + .. [2] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + import scipy.sparse # call as sp.sparse + + nlen = len(row_order) + if nlen == 0: + raise nx.NetworkXError("row_order is empty list") + if len(row_order) != len(set(row_order)): + msg = "Ambiguous ordering: `row_order` contained duplicates." + raise nx.NetworkXError(msg) + if column_order is None: + column_order = list(set(G) - set(row_order)) + mlen = len(column_order) + if len(column_order) != len(set(column_order)): + msg = "Ambiguous ordering: `column_order` contained duplicates." + raise nx.NetworkXError(msg) + + row_index = dict(zip(row_order, itertools.count())) + col_index = dict(zip(column_order, itertools.count())) + + if G.number_of_edges() == 0: + row, col, data = [], [], [] + else: + row, col, data = zip( + *( + (row_index[u], col_index[v], d.get(weight, 1)) + for u, v, d in G.edges(row_order, data=True) + if u in row_index and v in col_index + ) + ) + # TODO: change coo_matrix -> coo_array for NX 3.0 + A = sp.sparse.coo_matrix((data, (row, col)), shape=(nlen, mlen), dtype=dtype) + try: + import warnings + + warnings.warn( + "biadjacency_matrix will return a scipy.sparse array instead of a matrix in NetworkX 3.0", + FutureWarning, + stacklevel=2, + ) + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err + + +def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"): + r"""Creates a new bipartite graph from a biadjacency matrix given as a + SciPy sparse matrix. + + Parameters + ---------- + A: scipy sparse matrix + A biadjacency matrix representation of a graph + + create_using: NetworkX graph + Use specified graph for result. The default is Graph() + + edge_attribute: string + Name of edge attribute to store matrix numeric value. 
The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + The nodes are labeled with the attribute `bipartite` set to an integer + 0 or 1 representing membership in part 0 or part 1 of the bipartite graph. + + If `create_using` is an instance of :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph` and the entries of `A` are of + type :class:`int`, then this function returns a multigraph (of the same + type as `create_using`) with parallel edges. In this case, `edge_attribute` + will be ignored. + + See Also + -------- + biadjacency_matrix + from_numpy_array + + References + ---------- + [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n), bipartite=0) + G.add_nodes_from(range(n, n + m), bipartite=1) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A)) + # If the entries in the adjacency matrix are integers and the graph is a + # multigraph, then create parallel edges, each with weight 1, for each + # entry in the adjacency matrix. Otherwise, create one edge for each + # positive entry in the adjacency matrix and set the weight of that edge to + # be the entry in the matrix. + if A.dtype.kind in ("i", "u") and G.is_multigraph(): + chain = itertools.chain.from_iterable + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/projection.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/projection.py new file mode 100644 index 0000000..8864195 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/projection.py @@ -0,0 +1,538 @@ +"""One-mode (unipartite) projections of bipartite graphs.""" +import networkx as nx +from networkx.exception import NetworkXAlgorithmError +from networkx.utils import not_implemented_for + +__all__ = [ + "project", + "projected_graph", + "weighted_projected_graph", + "collaboration_weighted_projected_graph", + "overlap_weighted_projected_graph", + "generic_weighted_projected_graph", +] + + +def projected_graph(B, nodes, multigraph=False): + r"""Returns the projection of B onto one of its node sets. + + Returns the graph G that is the projection of the bipartite graph B + onto the specified nodes. They retain their attributes and are connected + in G if they have a common neighbor in B. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + multigraph: bool (default=False) + If True return a multigraph where the multiple edges represent multiple + shared neighbors. They edge key in the multigraph is assigned to the + label of the neighbor. + + Returns + ------- + Graph : NetworkX graph or multigraph + A graph that is the projection onto the given nodes. 
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges()) + [(1, 3)] + + If nodes `a`, and `b` are connected through both nodes 1 and 2 then + building a multigraph results in two edges in the projection onto + [`a`, `b`]: + + >>> B = nx.Graph() + >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)]) + >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True) + >>> print([sorted((u, v)) for u, v in G.edges()]) + [['a', 'b'], ['a', 'b']] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + Returns a simple graph that is the projection of the bipartite graph B + onto the set of nodes given in list nodes. If multigraph=True then + a multigraph is returned with an edge for every shared neighbor. + + Directed graphs are allowed as input. The output will also then + be a directed graph with edges if there is a directed path between + the nodes. + + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + """ + if B.is_multigraph(): + raise nx.NetworkXError("not defined for multigraphs") + if B.is_directed(): + directed = True + if multigraph: + G = nx.MultiDiGraph() + else: + G = nx.DiGraph() + else: + directed = False + if multigraph: + G = nx.MultiGraph() + else: + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u} + if multigraph: + for n in nbrs2: + if directed: + links = set(B[u]) & set(B.pred[n]) + else: + links = set(B[u]) & set(B[n]) + for l in links: + if not G.has_edge(u, n, l): + G.add_edge(u, n, key=l) + else: + G.add_edges_from((u, n) for n in nbrs2) + return G + + +@not_implemented_for("multigraph") +def weighted_projected_graph(B, nodes, ratio=False): + r"""Returns a weighted projection of B onto one of its node sets. + + The weighted projected graph is the projection of the bipartite + network B onto the specified nodes with weights representing the + number of shared neighbors or the ratio between actual shared + neighbors and possible shared neighbors if ``ratio is True`` [1]_. + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Distinct nodes to project onto (the "bottom" nodes). + + ratio: Bool (default=False) + If True, edge weight is the ratio between actual shared neighbors + and maximum possible shared neighbors (i.e., the size of the other + node set). If False, edges weight is the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. 
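+        Edge weights are shared-neighbor counts or, when ``ratio=True``,
+        fractions in the interval ``[0, 1]``.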
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.weighted_projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 1})] + >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True) + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 0.5})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite, or that + the input nodes are distinct. However, if the length of the input nodes is + greater than or equal to the nodes in the graph B, an exception is raised. + If the nodes are not distinct but don't raise this error, the output weights + will be incorrect. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + n_top = len(B) - len(nodes) + + if n_top < 1: + raise NetworkXAlgorithmError( + f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n" + "They are either not a valid bipartite partition or contain duplicates" + ) + + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + common = unbrs & vnbrs + if not ratio: + weight = len(common) + else: + weight = len(common) / n_top + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +def collaboration_weighted_projected_graph(B, nodes): + r"""Newman's weighted projection of B onto one of its node sets. + + The collaboration weighted projection is the projection of the + bipartite network B onto the specified nodes with weights assigned + using Newman's collaboration model [1]_: + + .. math:: + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite + graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> B.add_edge(1, 5) + >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) + >>> list(G) + [0, 2, 4, 5] + >>> for edge in sorted(G.edges(data=True)): + ... print(edge) + ... 
+ (0, 2, {'weight': 0.5}) + (0, 5, {'weight': 0.5}) + (2, 4, {'weight': 1.0}) + (2, 5, {'weight': 0.5}) + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, + M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u} + for v in nbrs2: + vnbrs = set(pred[v]) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +def overlap_weighted_projected_graph(B, nodes, jaccard=True): + r"""Overlap weighted projection of B onto one of its node sets. + + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing + the Jaccard index between the neighborhoods of the two nodes in the + original bipartite network [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or if the parameter 'jaccard' is False, the fraction of common + neighbors by minimum of both nodes degree in the original + bipartite graph [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)} + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + jaccard: Bool (default=True) + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) + >>> list(G) + [0, 2, 4] + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + if jaccard: + wt = len(unbrs & vnbrs) / len(unbrs | vnbrs) + else: + wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) + return G + + +@not_implemented_for("multigraph") +def generic_weighted_projected_graph(B, nodes, weight_function=None): + r"""Weighted projection of B with a user-specified weight function. + + The bipartite network B is projected on to the specified nodes + with weights computed by a user-specified function. This function + must accept as a parameter the neighborhood sets of two nodes and + return an integer or a float. + + The nodes retain their attributes and are connected in the resulting graph + if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + weight_function : function + This function must accept as parameters the same input graph + that this function, and two nodes; and return an integer or a float. + The default function computes the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> # Define some custom weight functions + >>> def jaccard(G, u, v): + ... unbrs = set(G[u]) + ... vnbrs = set(G[v]) + ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) + ... + >>> def my_weight(G, u, v, weight="weight"): + ... w = 0 + ... for nbr in set(G[u]) & set(G[v]): + ... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1) + ... return w + ... + >>> # A complete bipartite graph with 4 nodes and 4 edges + >>> B = nx.complete_bipartite_graph(2, 2) + >>> # Add some arbitrary weight to the edges + >>> for i, (u, v) in enumerate(B.edges()): + ... B.edges[u, v]["weight"] = i + 1 + ... + >>> for edge in B.edges(data=True): + ... print(edge) + ... + (0, 2, {'weight': 1}) + (0, 3, {'weight': 2}) + (1, 2, {'weight': 3}) + (1, 3, {'weight': 4}) + >>> # By default, the weight is the number of shared neighbors + >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 2})] + >>> # To specify a custom weight function use the weight_function parameter + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=jaccard + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 1.0})] + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=my_weight + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 10})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. 
+
+
+@not_implemented_for("multigraph")
+def generic_weighted_projected_graph(B, nodes, weight_function=None):
+    r"""Weighted projection of B with a user-specified weight function.
+
+    The bipartite network B is projected on to the specified nodes
+    with weights computed by a user-specified function. This function
+    must accept as parameters the input graph and two nodes, and
+    return an integer or a float.
+
+    The nodes retain their attributes and are connected in the resulting graph
+    if they have an edge to a common node in the original graph.
+
+    Parameters
+    ----------
+    B : NetworkX graph
+        The input graph should be bipartite.
+
+    nodes : list or iterable
+        Nodes to project onto (the "bottom" nodes).
+
+    weight_function : function
+        This function must accept as parameters the same input graph that
+        this function receives, plus two nodes, and must return an integer
+        or a float. The default function computes the number of shared
+        neighbors.
+
+    Returns
+    -------
+    Graph : NetworkX graph
+        A graph that is the projection onto the given nodes.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> # Define some custom weight functions
+    >>> def jaccard(G, u, v):
+    ...     unbrs = set(G[u])
+    ...     vnbrs = set(G[v])
+    ...     return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+    ...
+    >>> def my_weight(G, u, v, weight="weight"):
+    ...     w = 0
+    ...     for nbr in set(G[u]) & set(G[v]):
+    ...         w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1)
+    ...     return w
+    ...
+    >>> # A complete bipartite graph with 4 nodes and 4 edges
+    >>> B = nx.complete_bipartite_graph(2, 2)
+    >>> # Add some arbitrary weight to the edges
+    >>> for i, (u, v) in enumerate(B.edges()):
+    ...     B.edges[u, v]["weight"] = i + 1
+    ...
+    >>> for edge in B.edges(data=True):
+    ...     print(edge)
+    ...
+    (0, 2, {'weight': 1})
+    (0, 3, {'weight': 2})
+    (1, 2, {'weight': 3})
+    (1, 3, {'weight': 4})
+    >>> # By default, the weight is the number of shared neighbors
+    >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 2})]
+    >>> # To specify a custom weight function use the weight_function parameter
+    >>> G = bipartite.generic_weighted_projected_graph(
+    ...     B, [0, 1], weight_function=jaccard
+    ... )
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 1.0})]
+    >>> G = bipartite.generic_weighted_projected_graph(
+    ...     B, [0, 1], weight_function=my_weight
+    ... )
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 10})]
+
+    Notes
+    -----
+    No attempt is made to verify that the input graph B is bipartite.
+    The graph and node properties are (shallow) copied to the projected graph.
+
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
+
+    See Also
+    --------
+    is_bipartite,
+    is_bipartite_node_set,
+    sets,
+    weighted_projected_graph,
+    collaboration_weighted_projected_graph,
+    overlap_weighted_projected_graph,
+    projected_graph
+
+    """
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    if weight_function is None:
+
+        def weight_function(G, u, v):
+            # Notice that we use set(pred[v]) for handling the directed case.
+            return len(set(G[u]) & set(pred[v]))
+
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    for u in nodes:
+        nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u}
+        for v in nbrs2:
+            weight = weight_function(B, u, v)
+            G.add_edge(u, v, weight=weight)
+    return G
+
+
+def project(B, nodes, create_using=None):
+    import warnings
+
+    warnings.warn(
+        (
+            "networkx.project is deprecated and will be removed "
+            "in NetworkX 3.0, use networkx.projected_graph instead."
+        ),
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return projected_graph(B, nodes)
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/redundancy.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/redundancy.py
new file mode 100644
index 0000000..55de063
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/redundancy.py
@@ -0,0 +1,111 @@
+"""Node redundancy for bipartite graphs."""
+from itertools import combinations
+
+from networkx import NetworkXError
+
+__all__ = ["node_redundancy"]
+
+
+def node_redundancy(G, nodes=None):
+    r"""Computes the node redundancy coefficients for the nodes in the bipartite
+    graph `G`.
+
+    The redundancy coefficient of a node `v` is the fraction of pairs of
+    neighbors of `v` that are both linked to other nodes. In a one-mode
+    projection these nodes would be linked together even if `v` were
+    not there.
+
+    More formally, for any vertex `v`, the *redundancy coefficient of `v`* is
+    defined by
+
+    .. math::
+
+        rc(v) = \frac{|\{\{u, w\} \subseteq N(v),
+        \: \exists v' \neq v,\: (v',u) \in E\:
+        \mathrm{and}\: (v',w) \in E\}|}{\frac{|N(v)|(|N(v)|-1)}{2}},
+
+    where `N(v)` is the set of neighbors of `v` in `G`.
+
+    Parameters
+    ----------
+    G : graph
+        A bipartite graph
+
+    nodes : list or iterable (optional)
+        Compute redundancy for these nodes. The default is all nodes in G.
+
+    Returns
+    -------
+    redundancy : dictionary
+        A dictionary keyed by node with the node redundancy value.
+
+    Examples
+    --------
+    Compute the redundancy coefficient of each node in a graph::
+
+        >>> from networkx.algorithms import bipartite
+        >>> G = nx.cycle_graph(4)
+        >>> rc = bipartite.node_redundancy(G)
+        >>> rc[0]
+        1.0
+
+    Compute the average redundancy for the graph::
+
+        >>> from networkx.algorithms import bipartite
+        >>> G = nx.cycle_graph(4)
+        >>> rc = bipartite.node_redundancy(G)
+        >>> sum(rc.values()) / len(G)
+        1.0
+
+    Compute the average redundancy for a set of nodes::
+
+        >>> from networkx.algorithms import bipartite
+        >>> G = nx.cycle_graph(4)
+        >>> rc = bipartite.node_redundancy(G)
+        >>> nodes = [0, 2]
+        >>> sum(rc[n] for n in nodes) / len(nodes)
+        1.0
+
+    Raises
+    ------
+    NetworkXError
+        If any of the nodes in the graph (or in `nodes`, if specified) has
+        (out-)degree less than two (which would result in division by zero,
+        according to the definition of the redundancy coefficient).
+
+    References
+    ----------
+    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
+        Basic notions for the analysis of large two-mode networks.
+        Social Networks 30(1), 31--48.
+ + """ + if nodes is None: + nodes = G + if any(len(G[v]) < 2 for v in nodes): + raise NetworkXError( + "Cannot compute redundancy coefficient for a node" + " that has fewer than two neighbors." + ) + # TODO This can be trivially parallelized. + return {v: _node_redundancy(G, v) for v in nodes} + + +def _node_redundancy(G, v): + """Returns the redundancy of the node `v` in the bipartite graph `G`. + + If `G` is a graph with `n` nodes, the redundancy of a node is the ratio + of the "overlap" of `v` to the maximum possible overlap of `v` + according to its degree. The overlap of `v` is the number of pairs of + neighbors that have mutual neighbors themselves, other than `v`. + + `v` must have at least two neighbors in `G`. + + """ + n = len(G[v]) + # TODO On Python 3, we could just use `G[u].keys() & G[w].keys()` instead + # of instantiating the entire sets. + overlap = sum( + 1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v} + ) + return (2 * overlap) / (n * (n - 1)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/spectral.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/spectral.py new file mode 100644 index 0000000..290893f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/spectral.py @@ -0,0 +1,68 @@ +""" +Spectral bipartivity measure. +""" +import networkx as nx + +__all__ = ["spectral_bipartivity"] + + +def spectral_bipartivity(G, nodes=None, weight="weight"): + """Returns the spectral bipartivity. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or container optional(default is all nodes) + Nodes to return value of spectral bipartivity contribution. + + weight : string or None optional (default = 'weight') + Edge data key to use for edge weights. If None, weights set to 1. + + Returns + ------- + sb : float or dict + A single number if the keyword nodes is not specified, or + a dictionary keyed by node with the spectral bipartivity contribution + of that node as the value. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> bipartite.spectral_bipartivity(G) + 1.0 + + Notes + ----- + This implementation uses Numpy (dense) matrices which are not efficient + for storing large sparse graphs. + + See Also + -------- + color + + References + ---------- + .. [1] E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of + bipartivity in complex networks", PhysRev E 72, 046105 (2005) + """ + import scipy as sp + import scipy.linalg # call as sp.linalg + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist, weight=weight) + expA = sp.linalg.expm(A) + expmA = sp.linalg.expm(-A) + coshA = 0.5 * (expA + expmA) + if nodes is None: + # return single number for entire graph + return coshA.diagonal().sum() / expA.diagonal().sum() + else: + # contribution for individual nodes + index = dict(zip(nodelist, range(len(nodelist)))) + sb = {} + for n in nodes: + i = index[n] + sb[n] = coshA[i, i] / expA[i, i] + return sb diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_basic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_basic.py new file mode 100644 index 0000000..655506b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_basic.py @@ -0,0 +1,125 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteBasic: + def test_is_bipartite(self): + assert bipartite.is_bipartite(nx.path_graph(4)) + assert bipartite.is_bipartite(nx.DiGraph([(1, 0)])) + assert not bipartite.is_bipartite(nx.complete_graph(3)) + + def test_bipartite_color(self): + G = nx.path_graph(4) + c = bipartite.color(G) + assert c == {0: 1, 1: 0, 2: 1, 3: 0} + + def test_not_bipartite_color(self): + with pytest.raises(nx.NetworkXError): + c = bipartite.color(nx.complete_graph(4)) + + def test_bipartite_directed(self): + G = bipartite.random_graph(10, 10, 0.1, directed=True) + assert bipartite.is_bipartite(G) + + def test_bipartite_sets(self): + G = nx.path_graph(4) + X, Y = bipartite.sets(G) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_directed(self): + G = nx.path_graph(4) + D = G.to_directed() + X, Y = bipartite.sets(D) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_given_top_nodes(self): + G = nx.path_graph(4) + top_nodes = [0, 2] + X, Y = bipartite.sets(G, top_nodes) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + G = nx.path_graph(4) + G.add_edges_from([(5, 6), (6, 7)]) + X, Y = bipartite.sets(G) + + def test_is_bipartite_node_set(self): + G = nx.path_graph(4) + + with pytest.raises(nx.AmbiguousSolution): + bipartite.is_bipartite_node_set(G, [1, 1, 2, 3]) + + assert bipartite.is_bipartite_node_set(G, [0, 2]) + assert bipartite.is_bipartite_node_set(G, [1, 3]) + assert not bipartite.is_bipartite_node_set(G, [1, 2]) + G.add_edge(10, 20) + assert bipartite.is_bipartite_node_set(G, [0, 2, 10]) + assert bipartite.is_bipartite_node_set(G, [0, 2, 20]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 10]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 20]) + + def test_bipartite_density(self): + G = nx.path_graph(5) + X, Y = bipartite.sets(G) + density = len(list(G.edges())) / (len(X) * len(Y)) + assert bipartite.density(G, X) == density + D = nx.DiGraph(G.edges()) + assert bipartite.density(D, X) == density / 2.0 + assert bipartite.density(nx.Graph(), {}) == 0.0 + + def test_bipartite_degrees(self): + G = nx.path_graph(5) + X = {1, 3} + Y = {0, 2, 4} + u, d = 
bipartite.degrees(G, Y) + assert dict(u) == {1: 2, 3: 2} + assert dict(d) == {0: 1, 2: 2, 4: 1} + + def test_bipartite_weighted_degrees(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=0.1, other=0.2) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y, weight="weight") + assert dict(u) == {1: 1.1, 3: 2} + assert dict(d) == {0: 0.1, 2: 2, 4: 1} + u, d = bipartite.degrees(G, Y, weight="other") + assert dict(u) == {1: 1.2, 3: 2} + assert dict(d) == {0: 0.2, 2: 2, 4: 1} + + def test_biadjacency_matrix_weight(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + pytest.importorskip("scipy") + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py new file mode 100644 index 0000000..50ac906 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py @@ -0,0 +1,176 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteCentrality: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.K3 = nx.complete_bipartite_graph(3, 3) + cls.C4 = nx.cycle_graph(4) + cls.davis = nx.davis_southern_women_graph() + cls.top_nodes = [ + n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0 + ] + + def test_degree_centrality(self): + d = bipartite.degree_centrality(self.P4, [1, 3]) + answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5} + assert d == answer + d = bipartite.degree_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert d == answer + d = bipartite.degree_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert d == answer + + def test_betweenness_centrality(self): + c = bipartite.betweenness_centrality(self.P4, [1, 3]) + answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0} + assert c == answer + c = bipartite.betweenness_centrality(self.K3, [0, 1, 2]) + answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125} + assert c == answer + c = bipartite.betweenness_centrality(self.C4, [0, 2]) + answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + assert c == answer + + def test_closeness_centrality(self): + c = bipartite.closeness_centrality(self.P4, [1, 3]) + answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3} + assert c == answer + c = bipartite.closeness_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert c == answer + c = bipartite.closeness_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert c == answer + G = nx.Graph() + G.add_node(0) + G.add_node(1) + c = bipartite.closeness_centrality(G, [0]) + assert c 
== {0: 0.0, 1: 0.0} + c = bipartite.closeness_centrality(G, [1]) + assert c == {0: 0.0, 1: 0.0} + + def test_davis_degree_centrality(self): + G = self.davis + deg = bipartite.degree_centrality(G, self.top_nodes) + answer = { + "E8": 0.78, + "E9": 0.67, + "E7": 0.56, + "Nora Fayette": 0.57, + "Evelyn Jefferson": 0.57, + "Theresa Anderson": 0.57, + "E6": 0.44, + "Sylvia Avondale": 0.50, + "Laura Mandeville": 0.50, + "Brenda Rogers": 0.50, + "Katherina Rogers": 0.43, + "E5": 0.44, + "Helen Lloyd": 0.36, + "E3": 0.33, + "Ruth DeSand": 0.29, + "Verne Sanderson": 0.29, + "E12": 0.33, + "Myra Liddel": 0.29, + "E11": 0.22, + "Eleanor Nye": 0.29, + "Frances Anderson": 0.29, + "Pearl Oglethorpe": 0.21, + "E4": 0.22, + "Charlotte McDowd": 0.29, + "E10": 0.28, + "Olivia Carleton": 0.14, + "Flora Price": 0.14, + "E2": 0.17, + "E1": 0.17, + "Dorothy Murchison": 0.14, + "E13": 0.17, + "E14": 0.17, + } + for node, value in answer.items(): + assert value == pytest.approx(deg[node], abs=1e-2) + + def test_davis_betweenness_centrality(self): + G = self.davis + bet = bipartite.betweenness_centrality(G, self.top_nodes) + answer = { + "E8": 0.24, + "E9": 0.23, + "E7": 0.13, + "Nora Fayette": 0.11, + "Evelyn Jefferson": 0.10, + "Theresa Anderson": 0.09, + "E6": 0.07, + "Sylvia Avondale": 0.07, + "Laura Mandeville": 0.05, + "Brenda Rogers": 0.05, + "Katherina Rogers": 0.05, + "E5": 0.04, + "Helen Lloyd": 0.04, + "E3": 0.02, + "Ruth DeSand": 0.02, + "Verne Sanderson": 0.02, + "E12": 0.02, + "Myra Liddel": 0.02, + "E11": 0.02, + "Eleanor Nye": 0.01, + "Frances Anderson": 0.01, + "Pearl Oglethorpe": 0.01, + "E4": 0.01, + "Charlotte McDowd": 0.01, + "E10": 0.01, + "Olivia Carleton": 0.01, + "Flora Price": 0.01, + "E2": 0.00, + "E1": 0.00, + "Dorothy Murchison": 0.00, + "E13": 0.00, + "E14": 0.00, + } + for node, value in answer.items(): + assert value == pytest.approx(bet[node], abs=1e-2) + + def test_davis_closeness_centrality(self): + G = self.davis + clos = bipartite.closeness_centrality(G, self.top_nodes) + answer = { + "E8": 0.85, + "E9": 0.79, + "E7": 0.73, + "Nora Fayette": 0.80, + "Evelyn Jefferson": 0.80, + "Theresa Anderson": 0.80, + "E6": 0.69, + "Sylvia Avondale": 0.77, + "Laura Mandeville": 0.73, + "Brenda Rogers": 0.73, + "Katherina Rogers": 0.73, + "E5": 0.59, + "Helen Lloyd": 0.73, + "E3": 0.56, + "Ruth DeSand": 0.71, + "Verne Sanderson": 0.71, + "E12": 0.56, + "Myra Liddel": 0.69, + "E11": 0.54, + "Eleanor Nye": 0.67, + "Frances Anderson": 0.67, + "Pearl Oglethorpe": 0.67, + "E4": 0.54, + "Charlotte McDowd": 0.60, + "E10": 0.55, + "Olivia Carleton": 0.59, + "Flora Price": 0.59, + "E2": 0.52, + "E1": 0.52, + "Dorothy Murchison": 0.65, + "E13": 0.52, + "E14": 0.52, + } + for node, value in answer.items(): + assert value == pytest.approx(clos[node], abs=1e-2) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py new file mode 100644 index 0000000..e33e6c0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py @@ -0,0 +1,84 @@ +import pytest + +import networkx as nx +import networkx.algorithms.bipartite as bipartite +from networkx.algorithms.bipartite.cluster import cc_dot, cc_max, cc_min + + +def test_pairwise_bipartite_cc_functions(): + # Test functions for different kinds of bipartite clustering coefficients + # between pairs of nodes using 3 example graphs from figure 5 p. 
40 + # Latapy et al (2008) + G1 = nx.Graph([(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7)]) + G2 = nx.Graph([(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (1, 5)]) + G3 = nx.Graph( + [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)] + ) + result = { + 0: [1 / 3.0, 2 / 3.0, 2 / 5.0], + 1: [1 / 2.0, 2 / 3.0, 2 / 3.0], + 2: [2 / 8.0, 2 / 5.0, 2 / 5.0], + } + for i, G in enumerate([G1, G2, G3]): + assert bipartite.is_bipartite(G) + assert cc_dot(set(G[0]), set(G[1])) == result[i][0] + assert cc_min(set(G[0]), set(G[1])) == result[i][1] + assert cc_max(set(G[0]), set(G[1])) == result[i][2] + + +def test_star_graph(): + G = nx.star_graph(3) + # all modes are the same + answer = {0: 0, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="min") == answer + assert bipartite.clustering(G, mode="max") == answer + + +def test_not_bipartite(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.complete_graph(4)) + + +def test_bad_mode(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.path_graph(4), mode="foo") + + +def test_path_graph(): + G = nx.path_graph(4) + answer = {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="max") == answer + answer = {0: 1, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="min") == answer + + +def test_average_path_graph(): + G = nx.path_graph(4) + assert bipartite.average_clustering(G, mode="dot") == 0.5 + assert bipartite.average_clustering(G, mode="max") == 0.5 + assert bipartite.average_clustering(G, mode="min") == 1 + + +def test_ra_clustering_davis(): + G = nx.davis_southern_women_graph() + cc4 = round(bipartite.robins_alexander_clustering(G), 3) + assert cc4 == 0.468 + + +def test_ra_clustering_square(): + G = nx.path_graph(4) + G.add_edge(0, 3) + assert bipartite.robins_alexander_clustering(G) == 1.0 + + +def test_ra_clustering_zero(): + G = nx.Graph() + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_nodes_from(range(4)) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edges_from([(0, 1), (2, 3), (3, 4)]) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edge(1, 2) + assert bipartite.robins_alexander_clustering(G) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_covering.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_covering.py new file mode 100644 index 0000000..2f1b02e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_covering.py @@ -0,0 +1,33 @@ +import networkx as nx +import networkx.algorithms.bipartite as bipartite + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert bipartite.min_edge_cover(G) == set() + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)} + + def test_bipartite_default(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + 
G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py new file mode 100644 index 0000000..9cf0c43 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py @@ -0,0 +1,192 @@ +""" + Unit tests for bipartite edgelists. +""" +import io +import os +import tempfile + +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_nodes_from(["a", "c", "e"], bipartite=0) + cls.G.add_nodes_from(["b", "d", "f"], bipartite=1) + cls.G.add_node("g", bipartite=0) + cls.DG = nx.DiGraph(cls.G) + cls.MG = nx.MultiGraph() + cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)]) + cls.MG.add_node(1, bipartite=0) + cls.MG.add_node(2, bipartite=1) + + def test_read_edgelist_1(self): + s = b"""\ +# comment line +1 2 +# comment line +2 3 +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + def test_read_edgelist_3(self): + s = b"""\ +# comment line +1 2 {'weight':2.0} +# comment line +2 3 {'weight':3.0} +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True) + assert edges_equal( + G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] + ) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n3 2\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n3 2 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n3 2 3.0\n" + + def test_unicode(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + 
G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_latin1_issue(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + pytest.raises( + UnicodeEncodeError, bipartite.write_edgelist, G, fname, encoding="latin-1" + ) + os.close(fd) + os.unlink(fname) + + def test_latin1(self): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname, encoding="latin-1") + H = bipartite.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_edgelist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + H2 = bipartite.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_integers(self): + G = nx.convert_node_labels_to_integers(self.G) + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_multigraph(self): + G = self.MG + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_empty_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + bytesIO = io.BytesIO() + bipartite.write_edgelist(nx.DiGraph(), bytesIO) + + def test_raise_attribute(self): + with pytest.raises(AttributeError): + G = nx.path_graph(4) + bytesIO = io.BytesIO() + bipartite.write_edgelist(G, bytesIO) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_generators.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_generators.py new file mode 100644 index 0000000..5f3b84c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_generators.py @@ -0,0 +1,400 @@ +import numbers + +import pytest + +import networkx as nx + +from ..generators import ( + alternating_havel_hakimi_graph, + complete_bipartite_graph, + configuration_model, + gnmk_random_graph, + havel_hakimi_graph, + preferential_attachment_graph, + random_graph, + reverse_havel_hakimi_graph, +) + +""" +Generators - Bipartite +---------------------- +""" + + +class TestGeneratorsBipartite: + def test_complete_bipartite_graph(self): + G = 
complete_bipartite_graph(0, 0) + assert nx.is_isomorphic(G, nx.null_graph()) + + for i in [1, 5]: + G = complete_bipartite_graph(i, 0) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + G = complete_bipartite_graph(0, i) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + + G = complete_bipartite_graph(2, 2) + assert nx.is_isomorphic(G, nx.cycle_graph(4)) + + G = complete_bipartite_graph(1, 5) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + G = complete_bipartite_graph(5, 1) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + # complete_bipartite_graph(m1,m2) is a connected graph with + # m1+m2 nodes and m1*m2 edges + for m1, m2 in [(5, 11), (7, 3)]: + G = complete_bipartite_graph(m1, m2) + assert nx.number_of_nodes(G) == m1 + m2 + assert nx.number_of_edges(G) == m1 * m2 + + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = complete_bipartite_graph(7, 3) # default to Graph + assert sorted(mG.edges()) == sorted(G.edges()) + assert not mG.is_multigraph() + assert not mG.is_directed() + + # specify nodes rather than number of nodes + for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]: + G = complete_bipartite_graph(n1, n2) + if isinstance(n1, numbers.Integral): + if isinstance(n2, numbers.Integral): + n2 = range(n1, n1 + n2) + n1 = range(n1) + elif isinstance(n2, numbers.Integral): + n2 = range(n2) + edges = {(u, v) for u in n1 for v in n2} + assert edges == set(G.edges) + assert G.size() == len(edges) + + # raise when node sets are not distinct + for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]: + pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2) + + def test_configuration_model(self): + aseq = [] + bseq = [] + G = configuration_model(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = configuration_model(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq) + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2, 2] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = configuration_model(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph() + ) + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + 
configuration_model, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 4 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_reverse_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_alternating_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = 
alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_preferential_attachment(self): + aseq = [3, 2, 1, 1] + G = preferential_attachment_graph(aseq, 0.5) + assert G.is_multigraph() + assert not G.is_directed() + + G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + + def test_random_graph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_random_digraph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9, directed=True) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_gnmk_random_graph(self): + n = 10 + m = 20 + edges = 100 + # set seed because sometimes it is not connected + # which raises an error in bipartite.sets(G) below. 
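+        # (bipartite.sets with no top_nodes infers the bipartition from a
+        # two-coloring, which is only well defined on a connected graph;
+        # a disconnected sample would raise AmbiguousSolution.)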
+ G = gnmk_random_graph(n, m, edges, seed=1234) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) + + def test_gnmk_random_graph_complete(self): + n = 10 + m = 20 + edges = 200 + G = gnmk_random_graph(n, m, edges) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matching.py new file mode 100644 index 0000000..116da84 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matching.py @@ -0,0 +1,326 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module.""" +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms.bipartite.matching import ( + eppstein_matching, + hopcroft_karp_matching, + maximum_matching, + minimum_weight_full_matching, + to_vertex_cover, +) + + +class TestMatching: + """Tests for bipartite matching algorithms.""" + + def setup(self): + """Creates a bipartite graph for use in testing matching algorithms. + + The bipartite graph has a maximum cardinality matching that leaves + vertex 1 and vertex 10 unmatched. The first six numbers are the left + vertices and the next six numbers are the right vertices. + + """ + self.simple_graph = nx.complete_bipartite_graph(2, 3) + self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1} + + edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)] + self.top_nodes = set(range(6)) + self.graph = nx.Graph() + self.graph.add_nodes_from(range(12)) + self.graph.add_edges_from(edges) + + # Example bipartite graph from issue 2127 + G = nx.Graph() + G.add_nodes_from( + [ + (1, "C"), + (1, "B"), + (0, "G"), + (1, "F"), + (1, "E"), + (0, "C"), + (1, "D"), + (1, "I"), + (0, "A"), + (0, "D"), + (0, "F"), + (0, "E"), + (0, "H"), + (1, "G"), + (1, "A"), + (0, "I"), + (0, "B"), + (1, "H"), + ] + ) + G.add_edge((1, "C"), (0, "A")) + G.add_edge((1, "B"), (0, "A")) + G.add_edge((0, "G"), (1, "I")) + G.add_edge((0, "G"), (1, "H")) + G.add_edge((1, "F"), (0, "A")) + G.add_edge((1, "F"), (0, "C")) + G.add_edge((1, "F"), (0, "E")) + G.add_edge((1, "E"), (0, "A")) + G.add_edge((1, "E"), (0, "C")) + G.add_edge((0, "C"), (1, "D")) + G.add_edge((0, "C"), (1, "I")) + G.add_edge((0, "C"), (1, "G")) + G.add_edge((0, "C"), (1, "H")) + G.add_edge((1, "D"), (0, "A")) + G.add_edge((1, "I"), (0, "A")) + G.add_edge((1, "I"), (0, "E")) + G.add_edge((0, "A"), (1, "G")) + G.add_edge((0, "A"), (1, "H")) + G.add_edge((0, "E"), (1, "G")) + G.add_edge((0, "E"), (1, "H")) + self.disconnected_graph = G + + def check_match(self, matching): + """Asserts that the matching is what we expect from the bipartite graph + constructed in the :meth:`setup` fixture. + + """ + # For the sake of brevity, rename `matching` to `M`. + M = matching + matched_vertices = frozenset(itertools.chain(*M.items())) + # Assert that the maximum number of vertices (10) is matched. + assert matched_vertices == frozenset(range(12)) - {1, 10} + # Assert that no vertex appears in two edges, or in other words, that + # the matching (u, v) and (v, u) both appear in the matching + # dictionary. 
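+        # Equivalently, M is an involution on the matched vertices: applying
+        # it twice from any matched vertex u leads back to u.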
+ assert all(u == M[M[u]] for u in range(12) if u in M) + + def check_vertex_cover(self, vertices): + """Asserts that the given set of vertices is the vertex cover we + expected from the bipartite graph constructed in the :meth:`setup` + fixture. + + """ + # By Konig's theorem, the number of edges in a maximum matching equals + # the number of vertices in a minimum vertex cover. + assert len(vertices) == 5 + # Assert that the set is truly a vertex cover. + for (u, v) in self.graph.edges(): + assert u in vertices or v in vertices + # TODO Assert that the vertices are the correct ones. + + def test_eppstein_matching(self): + """Tests that David Eppstein's implementation of the Hopcroft--Karp + algorithm produces a maximum cardinality matching. + + """ + self.check_match(eppstein_matching(self.graph, self.top_nodes)) + + def test_hopcroft_karp_matching(self): + """Tests that the Hopcroft--Karp algorithm produces a maximum + cardinality matching in a bipartite graph. + + """ + self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes)) + + def test_to_vertex_cover(self): + """Test for converting a maximum matching to a minimum vertex cover.""" + matching = maximum_matching(self.graph, self.top_nodes) + vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes) + self.check_vertex_cover(vertex_cover) + + def test_eppstein_matching_simple(self): + match = eppstein_matching(self.simple_graph) + assert match == self.simple_solution + + def test_hopcroft_karp_matching_simple(self): + match = hopcroft_karp_matching(self.simple_graph) + assert match == self.simple_solution + + def test_eppstein_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = eppstein_matching(self.disconnected_graph) + + def test_hopcroft_karp_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = hopcroft_karp_matching(self.disconnected_graph) + + def test_issue_2127(self): + """Test from issue 2127""" + # Build the example DAG + G = nx.DiGraph() + G.add_edge("A", "C") + G.add_edge("A", "B") + G.add_edge("C", "E") + G.add_edge("C", "D") + G.add_edge("E", "G") + G.add_edge("E", "F") + G.add_edge("G", "I") + G.add_edge("G", "H") + + tc = nx.transitive_closure(G) + btc = nx.Graph() + + # Create a bipartite graph based on the transitive closure of G + for v in tc.nodes(): + btc.add_node((0, v)) + btc.add_node((1, v)) + + for u, v in tc.edges(): + btc.add_edge((0, u), (1, v)) + + top_nodes = {n for n in btc if n[0] == 0} + matching = hopcroft_karp_matching(btc, top_nodes) + vertex_cover = to_vertex_cover(btc, matching, top_nodes) + independent_set = set(G) - {v for _, v in vertex_cover} + assert {"B", "D", "F", "I", "H"} == independent_set + + def test_vertex_cover_issue_2384(self): + G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_vertex_cover_issue_3306(self): + G = nx.Graph() + edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)] + G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges]) + + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_unorderable_nodes(self): + a = object() + b = object() + c = object() + d = object() + e = object() + G = nx.Graph([(a, d), (b, d), (b, e), (c, d)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in 
G.edges(): + assert u in vertex_cover or v in vertex_cover + + +def test_eppstein_matching(): + """Test in accordance to issue #1927""" + G = nx.Graph() + G.add_nodes_from(["a", 2, 3, 4], bipartite=0) + G.add_nodes_from([1, "b", "c"], bipartite=1) + G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)]) + matching = eppstein_matching(G) + assert len(matching) == len(maximum_matching(G)) + assert all(x in set(matching.keys()) for x in set(matching.values())) + + +class TestMinimumWeightFullMatching: + @classmethod + def setup_class(cls): + pytest.importorskip("scipy") + + def test_minimum_weight_full_matching_incomplete_graph(self): + B = nx.Graph() + B.add_nodes_from([1, 2], bipartite=0) + B.add_nodes_from([3, 4], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 3, weight=100) + B.add_edge(2, 4, weight=50) + matching = minimum_weight_full_matching(B) + assert matching == {1: 4, 2: 3, 4: 1, 3: 2} + + def test_minimum_weight_full_matching_with_no_full_matching(self): + B = nx.Graph() + B.add_nodes_from([1, 2, 3], bipartite=0) + B.add_nodes_from([4, 5, 6], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 4, weight=100) + B.add_edge(3, 4, weight=50) + B.add_edge(3, 5, weight=50) + B.add_edge(3, 6, weight=50) + with pytest.raises(ValueError): + minimum_weight_full_matching(B) + + def test_minimum_weight_full_matching_square(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=300) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2} + + def test_minimum_weight_full_matching_smaller_left(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_top_nodes_right(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6]) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_right(self): + G = nx.complete_bipartite_graph(4, 3) + G.add_edge(0, 4, weight=400) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=300) + G.add_edge(1, 4, weight=150) + G.add_edge(1, 5, weight=450) + G.add_edge(1, 6, weight=225) + G.add_edge(2, 4, weight=400) + G.add_edge(2, 5, weight=600) + G.add_edge(2, 6, weight=290) + G.add_edge(3, 4, weight=1) + G.add_edge(3, 5, weight=2) + G.add_edge(3, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 
5: 3, 6: 2} + + def test_minimum_weight_full_matching_negative_weights(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, weight=-2) + G.add_edge(0, 3, weight=0.2) + G.add_edge(1, 2, weight=-2) + G.add_edge(1, 3, weight=0.3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} + + def test_minimum_weight_full_matching_different_weight_key(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, mass=2) + G.add_edge(0, 3, mass=0.2) + G.add_edge(1, 2, mass=1) + G.add_edge(1, 3, mass=2) + matching = minimum_weight_full_matching(G, weight="mass") + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py new file mode 100644 index 0000000..393b71e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py @@ -0,0 +1,79 @@ +import pytest + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") +sparse = pytest.importorskip("scipy.sparse") + + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal + + +class TestBiadjacencyMatrix: + def test_biadjacency_matrix_weight(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph(), []) + + def test_empty_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), []) + + def test_duplicate_row(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [1, 1]) + + def test_duplicate_col(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], [1, 1]) + + def test_format_keyword(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], format="foo") + + def test_from_biadjacency_roundtrip(self): + B1 = nx.path_graph(5) + M = bipartite.biadjacency_matrix(B1, [0, 2, 4]) + B2 = bipartite.from_biadjacency_matrix(M) + assert nx.is_isomorphic(B1, B2) + + def test_from_biadjacency_weight(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (1, 3)]) + B = bipartite.from_biadjacency_matrix(M, edge_attribute="weight") + e = [(0, 2, {"weight": 1}), (0, 3, {"weight": 2}), (1, 3, {"weight": 3})] + assert edges_equal(B.edges(data=True), e) + + def test_from_biadjacency_multigraph(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph()) + assert edges_equal(B.edges(), [(0, 
2), (0, 3), (0, 3), (1, 3), (1, 3), (1, 3)]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_project.py new file mode 100644 index 0000000..74f8059 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_project.py @@ -0,0 +1,398 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, nodes_equal + + +class TestBipartiteProject: + def test_path_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + + def test_path_projected_properties_graph(self): + G = nx.path_graph(4) + G.add_node(1, name="one") + G.add_node(2, name="two") + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + assert P.nodes[1]["name"] == G.nodes[1]["name"] + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + assert P.nodes[2]["name"] == G.nodes[2]["name"] + + def test_path_collaboration_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_directed_path_collaboration_projected_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_graph(self): + G = nx.path_graph(4) + + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.weighted_projected_graph(G, [1, 2, 3, 3]) + + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_directed_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_star_projected_graph(self): + G = nx.star_graph(3) + P = bipartite.projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + P = bipartite.weighted_projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), 
[(1, 2), (1, 3), (2, 3)]) + + P = bipartite.projected_graph(G, [0]) + assert nodes_equal(list(P), [0]) + assert edges_equal(list(P.edges()), []) + + def test_project_multigraph(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("a", 2) + G.add_edge("b", 2) + P = bipartite.projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.weighted_projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.projected_graph(G, "ab", multigraph=True) + assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")]) + + def test_project_collaboration(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("b", 2) + G.add_edge("c", 2) + G.add_edge("c", 3) + G.add_edge("c", 4) + G.add_edge("b", 4) + P = bipartite.collaboration_weighted_projected_graph(G, "abc") + assert P["a"]["b"]["weight"] == 1 + assert P["b"]["c"]["weight"] == 2 + + def test_directed_projection(self): + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge("B", 2) + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 1 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B")]) + + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge(2, "B") + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 2 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")]) + + +class TestBipartiteWeightedProjection: + @classmethod + def setup_class(cls): + # Tore Opsahl's example + # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ + cls.G = nx.Graph() + cls.G.add_edge("A", 1) + cls.G.add_edge("A", 2) + cls.G.add_edge("B", 1) + cls.G.add_edge("B", 2) + cls.G.add_edge("B", 3) + cls.G.add_edge("B", 4) + cls.G.add_edge("B", 5) + cls.G.add_edge("C", 1) + cls.G.add_edge("D", 3) + cls.G.add_edge("E", 4) + cls.G.add_edge("E", 5) + cls.G.add_edge("E", 6) + cls.G.add_edge("F", 6) + # Graph based on figure 6 from Newman (2001) + cls.N = nx.Graph() + cls.N.add_edge("A", 1) + cls.N.add_edge("A", 2) + cls.N.add_edge("A", 3) + cls.N.add_edge("B", 1) + cls.N.add_edge("B", 2) + cls.N.add_edge("B", 3) + cls.N.add_edge("C", 1) + cls.N.add_edge("D", 1) + cls.N.add_edge("E", 3) + + def test_project_weighted_shared(self): + edges = [ + ("A", "B", 2), + ("A", "C", 1), + ("B", "C", 1), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3), + ("A", "E", 1), + ("A", "C", 1), + ("A", "D", 1), + ("B", "E", 1), + ("B", "C", 1), + ("B", "D", 1), + ("C", "D", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == 
Panswer[u][v]["weight"] + + def test_project_weighted_newman(self): + edges = [ + ("A", "B", 1.5), + ("A", "C", 0.5), + ("B", "C", 0.5), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 11 / 6.0), + ("A", "E", 1 / 2.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 2.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_ratio(self): + edges = [ + ("A", "B", 2 / 6.0), + ("A", "C", 1 / 6.0), + ("B", "C", 1 / 6.0), + ("B", "D", 1 / 6.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 6.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_overlap(self): + edges = [ + ("A", "B", 2 / 2.0), + ("A", "C", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("B", "E", 2 / 3.0), + ("E", "F", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 1.0), + ("A", "C", 1 / 1.0), + ("A", "D", 1 / 1.0), + ("B", "E", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_jaccard(self): + edges = [ + ("A", "B", 2 / 5.0), + ("A", "C", 1 / 2.0), + ("B", "C", 1 / 5.0), + ("B", "D", 1 / 5.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 1.0), + ] + Panswer = 
nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in P.edges(): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_generic_weighted_projected_graph_simple(self): + def shared(G, u, v): + return len(set(G[u]) & set(G[v])) + + B = nx.path_graph(5) + G = bipartite.generic_weighted_projected_graph( + B, [0, 2, 4], weight_function=shared + ) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(list(G.edges(data=True))), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(list(G.edges(data=True))), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + B = nx.DiGraph() + nx.add_path(B, range(5)) + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})] + ) + + def test_generic_weighted_projected_graph_custom(self): + def jaccard(G, u, v): + unbrs = set(G[u]) + vnbrs = set(G[v]) + return len(unbrs & vnbrs) / len(unbrs | vnbrs) + + def my_weight(G, u, v, weight="weight"): + w = 0 + for nbr in set(G[u]) & set(G[v]): + w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1) + return w + + B = nx.bipartite.complete_bipartite_graph(2, 2) + for i, (u, v) in enumerate(B.edges()): + B.edges[u, v]["weight"] = i + 1 + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=jaccard + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})]) + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=my_weight + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})]) + G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py new file mode 100644 index 0000000..8c04e95 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py @@ -0,0 +1,31 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module. 
+ +""" + +import pytest + +from networkx import NetworkXError, cycle_graph +from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy + + +def test_no_redundant_nodes(): + G = complete_bipartite_graph(2, 2) + rc = node_redundancy(G) + assert all(redundancy == 1 for redundancy in rc.values()) + + +def test_redundant_nodes(): + G = cycle_graph(6) + edge = {0, 3} + G.add_edge(*edge) + redundancy = node_redundancy(G) + for v in edge: + assert redundancy[v] == 2 / 3 + for v in set(G) - edge: + assert redundancy[v] == 1 + + +def test_not_enough_neighbors(): + with pytest.raises(NetworkXError): + G = complete_bipartite_graph(1, 2) + node_redundancy(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py new file mode 100644 index 0000000..0cdc2d6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py @@ -0,0 +1,81 @@ +import pytest + +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.bipartite import spectral_bipartivity as sb + +# Examples from Figure 1 +# E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of +# bipartivity in complex networks", PhysRev E 72, 046105 (2005) + + +class TestSpectralBipartivity: + def test_star_like(self): + # star-like + + G = nx.star_graph(2) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.843, abs=1e-3) + + G = nx.star_graph(3) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.871, abs=1e-3) + + G = nx.star_graph(4) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.890, abs=1e-3) + + def test_k23_like(self): + # K2,3-like + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.769, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.829, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + assert sb(G) == pytest.approx(0.731, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.692, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.597, abs=1e-3) + + def test_single_nodes(self): + + # single nodes + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.85, abs=1e-2) + assert sbn[2] == pytest.approx(0.77, abs=1e-2) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.73, abs=1e-2) + assert sbn[2] == pytest.approx(0.82, abs=1e-2) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/boundary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/boundary.py new file mode 100644 index 0000000..25c1e28 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/boundary.py @@ -0,0 +1,135 @@ +"""Routines to find the boundary of a set of nodes. 
+ +An edge boundary is a set of edges, each of which has exactly one +endpoint in a given set of nodes (or, in the case of directed graphs, +the set of edges whose source node is in the set). + +A node boundary of a set *S* of nodes is the set of (out-)neighbors of +nodes in *S* that are outside *S*. + +""" +from itertools import chain + +__all__ = ["edge_boundary", "node_boundary"] + + +def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None): + """Returns the edge boundary of `nbunch1`. + + The *edge boundary* of a set *S* with respect to a set *T* is the + set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*. + If *T* is not specified, it is assumed to be the set of all nodes + not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose edge boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + keys : bool + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + data : bool or object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + default : object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + Returns + ------- + iterator + An iterator over the edges in the boundary of `nbunch1` with + respect to `nbunch2`. If `keys`, `data`, or `default` + are specified and `G` is a multigraph, then edges are returned + with keys and/or data, as in :meth:`MultiGraph.edges`. + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. + + """ + nset1 = {n for n in nbunch1 if n in G} + # Here we create an iterator over edges incident to nodes in the set + # `nset1`. The `Graph.edges()` method does not provide a guarantee + # on the orientation of the edges, so our algorithm below must + # handle the case in which exactly one orientation, either (u, v) or + # (v, u), appears in this iterable. + if G.is_multigraph(): + edges = G.edges(nset1, data=data, keys=keys, default=default) + else: + edges = G.edges(nset1, data=data, default=default) + # If `nbunch2` is not provided, then it is assumed to be the set + # complement of `nbunch1`. For the sake of efficiency, this is + # implemented by using the `not in` operator, instead of by creating + # an additional set and using the `in` operator. + if nbunch2 is None: + return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1)) + nset2 = set(nbunch2) + return ( + e + for e in edges + if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2) + ) + + +def node_boundary(G, nbunch1, nbunch2=None): + """Returns the node boundary of `nbunch1`. + + The *node boundary* of a set *S* with respect to a set *T* is the + set of nodes *v* in *T* such that for some *u* in *S*, there is an + edge joining *u* to *v*. If *T* is not specified, it is assumed to + be the set of all nodes not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose node boundary will be returned. (This is the set *S* from + the definition above.) 
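As an aside to the `edge_boundary` / `node_boundary` docstrings above, a minimal usage sketch; the path graph and the node set S are illustrative only, not part of the vendored file:

import networkx as nx

G = nx.path_graph(5)                 # edges: 0-1, 1-2, 2-3, 3-4
S = {0, 1}
print(list(nx.edge_boundary(G, S)))  # [(1, 2)] -- the only edge with exactly one endpoint in S
print(nx.node_boundary(G, S))        # {2} -- neighbors of S that lie outside S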
+ + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + Returns + ------- + set + The node boundary of `nbunch1` with respect to `nbunch2`. + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. + + """ + nset1 = {n for n in nbunch1 if n in G} + bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1 + # If `nbunch2` is not specified, it is assumed to be the set + # complement of `nbunch1`. + if nbunch2 is not None: + bdy &= set(nbunch2) + return bdy diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/bridges.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/bridges.py new file mode 100644 index 0000000..eda4a4f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/bridges.py @@ -0,0 +1,199 @@ +"""Bridge-finding algorithms.""" +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bridges", "has_bridges", "local_bridges"] + + +@not_implemented_for("directed") +def bridges(G, root=None): + """Generate all bridges in a graph. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. Equivalently, a bridge is an + edge that does not belong to any cycle. Bridges are also known as cut-edges, + isthmuses, or cut arcs. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be returned. + + Yields + ------ + e : edge + An edge in the graph whose removal disconnects the graph (or + causes the number of connected components to increase). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge: + + >>> G = nx.barbell_graph(10, 0) + >>> list(nx.bridges(G)) + [(9, 10)] + + Notes + ----- + This is an implementation of the algorithm described in [1]_. An edge is a + bridge if and only if it is not contained in any chain. Chains are found + using the :func:`networkx.chain_decomposition` function. + + The algorithm described in [1]_ requires a simple graph. If the provided + graph is a multigraph, we convert it to a simple graph and verify that any + bridges discovered by the chain decomposition algorithm are not multi-edges. + + Ignoring polylogarithmic factors, the worst-case time complexity is the + same as the :func:`networkx.chain_decomposition` function, + $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is + the number of edges. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions + """ + multigraph = G.is_multigraph() + H = nx.Graph(G) if multigraph else G + chains = nx.chain_decomposition(H, root=root) + chain_edges = set(chain.from_iterable(chains)) + for u, v in H.edges(): + if (u, v) not in chain_edges and (v, u) not in chain_edges: + if multigraph and len(G[u][v]) > 1: + continue + yield u, v + + +@not_implemented_for("directed") +def has_bridges(G, root=None): + """Decide whether a graph has any bridges. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be considered. + + Returns + ------- + bool + Whether the graph (or the connected component containing `root`) + has any bridges. + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge:: + + >>> G = nx.barbell_graph(10, 0) + >>> nx.has_bridges(G) + True + + On the other hand, the cycle graph has no bridges:: + + >>> G = nx.cycle_graph(5) + >>> nx.has_bridges(G) + False + + Notes + ----- + This implementation uses the :func:`networkx.bridges` function, so + it shares its worst-case time complexity, $O(m + n)$, ignoring + polylogarithmic factors, where $n$ is the number of nodes in the + graph and $m$ is the number of edges. + + """ + try: + next(bridges(G)) + except StopIteration: + return False + else: + return True + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def local_bridges(G, with_span=True, weight=None): + """Iterate over local bridges of `G` optionally computing the span + + A *local bridge* is an edge whose endpoints have no common neighbors. + That is, the edge is not part of a triangle in the graph. + + The *span* of a *local bridge* is the shortest path length between + the endpoints if the local bridge is removed. + + Parameters + ---------- + G : undirected graph + + with_span : bool + If True, yield a 3-tuple `(u, v, span)` + + weight : function, string or None (default: None) + If function, used to compute edge weights for the span. + If string, the edge data attribute used in calculating span. + If None, all edges have weight 1. + + Yields + ------ + e : edge + The local bridges as an edge 2-tuple of nodes `(u, v)` or + as a 3-tuple `(u, v, span)` when `with_span is True`. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph or multigraph. + + Examples + -------- + A cycle graph has every edge a local bridge with span N-1. 
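A small, self-contained sketch of the three bridge helpers documented in this module; the barbell graph is illustrative:

import networkx as nx

G = nx.barbell_graph(3, 0)             # triangles {0, 1, 2} and {3, 4, 5} joined by the edge (2, 3)
print(list(nx.bridges(G)))             # [(2, 3)] -- removing it disconnects the graph
print(nx.has_bridges(G))               # True
print(list(nx.local_bridges(G, with_span=False)))
# [(2, 3)] -- triangle edges share a neighbor, so only the joining edge is a local bridge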
+ + >>> G = nx.cycle_graph(9) + >>> (0, 8, 8) in set(nx.local_bridges(G)) + True + """ + if with_span is not True: + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + yield u, v + else: + wt = nx.weighted._weight_function(G, weight) + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + enodes = {u, v} + + def hide_edge(n, nbr, d): + if n not in enodes or nbr not in enodes: + return wt(n, nbr, d) + return None + + try: + span = nx.shortest_path_length(G, u, v, weight=hide_edge) + yield u, v, span + except nx.NetworkXNoPath: + yield u, v, float("inf") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000..cf07fe2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,19 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from .current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness.py new file mode 100644 index 0000000..54b7db9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness.py @@ -0,0 +1,436 @@ +"""Betweenness centrality measures.""" +import warnings +from collections import deque +from heapq import heappop, heappush +from itertools import count + +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import py_random_state +from networkx.utils.decorators import not_implemented_for + +__all__ = ["betweenness_centrality", "edge_betweenness_centrality", "edge_betweenness"] + + +@py_random_state(5) +def betweenness_centrality( + G, k=None, normalized=True, weight=None, endpoints=False, seed=None +): + r"""Compute the shortest-path betweenness centrality for nodes. + + Betweenness centrality of a node $v$ is the sum of the + fraction of all-pairs shortest paths that pass through $v$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of + those paths passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$, + $\sigma(s, t|v) = 0$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by `2/((n-1)(n-2))` + for graphs, and `1/((n-1)(n-2))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. 
+ Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + endpoints : bool, optional + If True include the endpoints in the shortest path counts. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + See [4]_ for the original first published version and [2]_ for details on + algorithms for variations and related metrics. + + For approximate betweenness calculations set k=#samples to use + k nodes ("pivots") to estimate the betweenness values. For an estimate + of the number of pivots needed see [3]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + .. [3] Ulrik Brandes and Christian Pich: + Centrality Estimation in Large Networks. + International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007. + https://dx.doi.org/10.1142/S0218127407018403 + .. [4] Linton C. Freeman: + A set of measures of centrality based on betweenness. + Sociometry 40: 35–41, 1977 + https://doi.org/10.2307/3033543 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + if endpoints: + betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s) + else: + betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s) + # rescaling + betweenness = _rescale( + betweenness, + len(G), + normalized=normalized, + directed=G.is_directed(), + k=k, + endpoints=endpoints, + ) + return betweenness + + +@py_random_state(4) +def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None): + r"""Compute betweenness centrality for edges. 
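A short usage sketch covering both the node and the edge variant defined in this module; the path graph and the pivot-sampled call are illustrative:

import networkx as nx

G = nx.path_graph(5)                                 # 0-1-2-3-4
bc = nx.betweenness_centrality(G)                    # the middle node 2 scores highest
ebc = nx.edge_betweenness_centrality(G)              # the central edges (1, 2) and (2, 3) score highest
approx = nx.betweenness_centrality(G, k=3, seed=42)  # Brandes-Pich style estimate from k pivot nodes
print(bc[2], ebc[(1, 2)])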
+ + Betweenness centrality of an edge $e$ is the sum of the + fraction of all-pairs shortest paths that pass through $e$ + + .. math:: + + c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of + those paths passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by $2/(n(n-1))$ + for graphs, and $1/(n(n-1))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + edges : dictionary + Dictionary of edges with betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes, + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + # b[e]=0 for e in G.edges() + betweenness.update(dict.fromkeys(G.edges(), 0.0)) + if k is None: + nodes = G + else: + nodes = seed.sample(G.nodes(), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + betweenness = _accumulate_edges(betweenness, S, P, sigma, s) + # rescaling + for n in G: # remove nodes to only return edges + del betweenness[n] + betweenness = _rescale_e( + betweenness, len(G), normalized=normalized, directed=G.is_directed() + ) + if G.is_multigraph(): + betweenness = _add_edge_keys(G, betweenness, weight=weight) + return betweenness + + +# obsolete name +def edge_betweenness(G, k=None, normalized=True, weight=None, seed=None): + warnings.warn( + "edge_betweeness is replaced by edge_betweenness_centrality", DeprecationWarning + ) + return edge_betweenness_centrality(G, k, normalized, weight, seed) + + +# helpers for betweenness centrality + + +def _single_source_shortest_path_basic(G, s): + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + D[s] = 0 + Q = deque([s]) + while Q: # use BFS to find shortest paths + v = Q.popleft() + S.append(v) + Dv = D[v] + sigmav = sigma[v] + for w in G[v]: + if w not in D: + Q.append(w) + D[w] = Dv + 1 + if D[w] == Dv + 1: # this is a shortest path, count paths + sigma[w] += sigmav + P[w].append(v) # predecessors + return S, P, sigma, D + + +def _single_source_dijkstra_path_basic(G, s, weight): + weight = _weight_function(G, weight) + # modified from Eppstein + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + push = heappush + pop = heappop + seen = {s: 0} + c = count() + Q = [] # use Q as heap with (distance,node id) tuples + push(Q, (0, next(c), s, s)) + while Q: + (dist, _, pred, v) = pop(Q) + if v in D: + continue # already searched this node. 
+ sigma[v] += sigma[pred] # count paths + S.append(v) + D[v] = dist + for w, edgedata in G[v].items(): + vw_dist = dist + weight(v, w, edgedata) + if w not in D and (w not in seen or vw_dist < seen[w]): + seen[w] = vw_dist + push(Q, (vw_dist, next(c), v, w)) + sigma[w] = 0.0 + P[w] = [v] + elif vw_dist == seen[w]: # handle equal paths + sigma[w] += sigma[v] + P[w].append(v) + return S, P, sigma, D + + +def _accumulate_basic(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness, delta + + +def _accumulate_endpoints(betweenness, S, P, sigma, s): + betweenness[s] += len(S) - 1 + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + 1 + return betweenness, delta + + +def _accumulate_edges(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + c = sigma[v] * coeff + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False): + if normalized: + if endpoints: + if n < 2: + scale = None # no normalization + else: + # Scale factor should include endpoint nodes + scale = 1 / (n * (n - 1)) + elif n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False, k=None): + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +@not_implemented_for("graph") +def _add_edge_keys(G, betweenness, weight=None): + r"""Adds the corrected betweenness centrality (BC) values for multigraphs. + + Parameters + ---------- + G : NetworkX graph. + + betweenness : dictionary + Dictionary mapping adjacent node tuples to betweenness centrality values. + + weight : string or function + See `_weight_function` for details. Defaults to `None`. + + Returns + ------- + edges : dictionary + The parameter `betweenness` including edges with keys and their + betweenness centrality values. + + The BC value is divided among edges of equal weight. 
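To illustrate the multigraph correction performed by `_add_edge_keys`, a brief sketch; the three-edge multigraph is made up for this example:

import networkx as nx

M = nx.MultiGraph()
M.add_edge(0, 1)   # key 0
M.add_edge(0, 1)   # key 1 -- parallel edge of equal (unit) weight
M.add_edge(1, 2)   # key 0

ebc = nx.edge_betweenness_centrality(M, normalized=False)
# Results are keyed as (u, v, key), e.g. (0, 1, 0), (0, 1, 1), (1, 2, 0);
# the two parallel (0, 1) edges split that node pair's betweenness credit equally.
print(ebc)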
+ """ + _weight = _weight_function(G, weight) + + edge_bc = dict.fromkeys(G.edges, 0.0) + for u, v in betweenness: + d = G[u][v] + wt = _weight(u, v, d) + keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt] + bc = betweenness[(u, v)] / len(keys) + for k in keys: + edge_bc[(u, v, k)] = bc + + return edge_bc diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000..6b6958f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,282 @@ +"""Betweenness centrality measures for subsets of nodes.""" +import warnings + +from networkx.algorithms.centrality.betweenness import _add_edge_keys +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "betweenness_centrality_source", + "edge_betweenness_centrality_subset", +] + + +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. 
But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +# obsolete name +def betweenness_centrality_source(G, normalized=True, weight=None, sources=None): + msg = "betweenness_centrality_source --> betweenness_centrality_subset" + warnings.warn(msg, DeprecationWarning) + if sources is None: + sources = G.nodes() + targets = list(G) + return betweenness_centrality_subset(G, sources, targets, normalized, weight) + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/closeness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/closeness.py new file mode 100644 index 0000000..4fc2caa --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/closeness.py @@ -0,0 +1,279 @@ +""" +Closeness centrality measures. +""" +import functools + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils.decorators import not_implemented_for + +__all__ = ["closeness_centrality", "incremental_closeness_centrality"] + + +def closeness_centrality(G, u=None, distance=None, wf_improved=True): + r"""Compute closeness centrality for nodes. 
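A brief sketch of the subset-restricted variant defined above; the single-source, single-target split is illustrative:

import networkx as nx

G = nx.path_graph(5)
# Only shortest paths that start in {0} and end in {4} contribute.
sub = nx.betweenness_centrality_subset(G, sources=[0], targets=[4], normalized=False)
print(sub)   # the interior nodes 1, 2 and 3 all lie on the single 0-4 path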
+ + Closeness centrality [1]_ of a node `u` is the reciprocal of the + average shortest path distance to `u` over all `n-1` reachable nodes. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n-1` is the number of nodes reachable from `u`. Notice that the + closeness distance function computes the incoming distance to `u` + for directed graphs. To use outward distance, act on `G.reverse()`. + + Notice that higher values of closeness indicate higher centrality. + + Wasserman and Faust propose an improved formula for graphs with + more than one connected component. The result is "a ratio of the + fraction of actors in the group who are reachable, to the average + distance" from the reachable actors [2]_. You might think this + scale factor is inverted but it is not. As is, nodes from small + components receive a smaller closeness value. Letting `N` denote + the number of nodes in the graph, + + .. math:: + + C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + Parameters + ---------- + G : graph + A NetworkX graph + + u : node, optional + Return only the value for node u + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None` (the default) all edges have a distance of 1. + Absent edge attributes are assigned a distance of 1. Note that no check + is performed to ensure that edges have the provided attribute. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.closeness_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, incremental_closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately scaled by that parts size. + + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + The closeness centrality uses *inward* distance to a node, not outward. + If you want to use outword distances apply the function to `G.reverse()` + + In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the + outward distance rather than the inward distance. If you use a 'distance' + keyword and a DiGraph, your results will change between v2.2 and v2.3. + + References + ---------- + .. [1] Linton C. Freeman: Centrality in networks: I. + Conceptual clarification. Social Networks 1:215-239, 1979. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] pg. 201 of Wasserman, S. and Faust, K., + Social Network Analysis: Methods and Applications, 1994, + Cambridge University Press. 
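Complementing the doctest above, a sketch of the `distance` keyword and the inward-distance convention for directed graphs; the edge costs are illustrative:

import networkx as nx

D = nx.DiGraph()
D.add_edge("a", "b", cost=2.0)
D.add_edge("b", "c", cost=1.0)

# Dijkstra with the 'cost' attribute; distances are measured *into* each node.
inward = nx.closeness_centrality(D, distance="cost")
# For outward closeness, run the same call on the reversed graph.
outward = nx.closeness_centrality(D.reverse(), distance="cost")
print(inward["c"], outward["a"])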
+ """ + if G.is_directed(): + G = G.reverse() # create a reversed graph view + + if distance is not None: + # use Dijkstra's algorithm with specified attribute as edge weight + path_length = functools.partial( + nx.single_source_dijkstra_path_length, weight=distance + ) + else: + path_length = nx.single_source_shortest_path_length + + if u is None: + nodes = G.nodes + else: + nodes = [u] + closeness_dict = {} + for n in nodes: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + if u is not None: + return closeness_dict[u] + return closeness_dict + + +@not_implemented_for("directed") +def incremental_closeness_centrality( + G, edge, prev_cc=None, insertion=True, wf_improved=True +): + r"""Incremental closeness centrality for nodes. + + Compute closeness centrality for nodes using level-based work filtering + as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al. + + Level-based work filtering detects unnecessary updates to the closeness + centrality and filters them out. + + --- + From "Incremental Algorithms for Closeness Centrality": + + Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V + such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)` + Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. + + Where :math:`dG(u, v)` denotes the length of the shortest path between + two vertices u, v in a graph G, cc[s] is the closeness centrality for a + vertex s in V, and cc'[s] is the closeness centrality for a + vertex s in V, with the (u, v) edge added. + --- + + We use Theorem 1 to filter out updates when adding or removing an edge. + When adding an edge (u, v), we compute the shortest path lengths from all + other nodes to u and to v before the node is added. When removing an edge, + we compute the shortest path lengths after the edge is removed. Then we + apply Theorem 1 to use previously computed closeness centrality for nodes + where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for + undirected, unweighted graphs; the distance argument is not supported. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + sum of the shortest path distances from `u` to all `n-1` other nodes. + Since the sum of distances depends on the number of nodes in the + graph, closeness is normalized by the sum of minimum possible + distances `n-1`. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n` is the number of nodes in the graph. + + Notice that higher values of closeness indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge : tuple + The modified edge (u, v) in the graph. + + prev_cc : dictionary + The previous closeness centrality for all nodes in the graph. + + insertion : bool, optional + If True (default) the edge was inserted, otherwise it was deleted from the graph. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. 
+ + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately. + + References + ---------- + .. [1] Freeman, L.C., 1979. Centrality in networks: I. + Conceptual clarification. Social Networks 1, 215--239. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V. Incremental + Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data + http://sariyuce.com/papers/bigdata13.pdf + """ + if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()): + raise NetworkXError("prev_cc and G do not have the same nodes") + + # Unpack edge + (u, v) = edge + path_length = nx.single_source_shortest_path_length + + if insertion: + # For edge insertion, we want shortest paths before the edge is inserted + du = path_length(G, u) + dv = path_length(G, v) + + G.add_edge(u, v) + else: + G.remove_edge(u, v) + + # For edge removal, we want shortest paths after the edge is removed + du = path_length(G, u) + dv = path_length(G, v) + + if prev_cc is None: + return nx.closeness_centrality(G) + + nodes = G.nodes() + closeness_dict = {} + for n in nodes: + if n in du and n in dv and abs(du[n] - dv[n]) <= 1: + closeness_dict[n] = prev_cc[n] + else: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + + # Leave the graph as we found it + if insertion: + G.remove_edge(u, v) + else: + G.add_edge(u, v) + + return closeness_dict diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000..a18b81c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,340 @@ +"""Current-flow betweenness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@py_random_state(7) +@not_implemented_for("directed") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. + + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_. 
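A sketch of the incremental update flow described above (undirected, unweighted graphs only); the inserted edge is illustrative:

import networkx as nx

G = nx.path_graph(6)
baseline = nx.closeness_centrality(G)

# Pass the new edge *before* adding it; the helper applies it internally,
# restores G before returning, and reuses `baseline` wherever Theorem 1 allows.
updated = nx.incremental_closeness_centrality(G, edge=(0, 5), prev_cc=baseline, insertion=True)
G.add_edge(0, 5)   # make the change permanent afterwards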
+ + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. 
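+
+ A short usage sketch up front (illustrative; see the Parameters section
+ below for the available solvers)::
+
+     G = nx.karate_club_graph()
+     cfb = nx.current_flow_betweenness_centrality(G, solver="lu")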
+ + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(n))) + for i in range(n): + betweenness[s] += (i - pos[i]) * row[i] + betweenness[t] += (n - i - 1 - pos[i]) * row[i] + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = float((betweenness[v] - v) * 2.0 / nb) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. 
+ + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of edge tuples with betweenness centrality as the value. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). 
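+
+ A short usage sketch (illustrative; per the Returns section, the keys of
+ the result are edge tuples)::
+
+     G = nx.cycle_graph(6)
+     ecfb = nx.edge_current_flow_betweenness_centrality(G)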
+ """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(1, n + 1))) + for i in range(n): + betweenness[e] += (i + 1 - pos[i]) * row[i] + betweenness[e] += (n - i - pos[i]) * row[i] + betweenness[e] /= nb + return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py new file mode 100644 index 0000000..37e5a78 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py @@ -0,0 +1,224 @@ +"""Current-flow betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import flow_matrix_row +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = [ + "current_flow_betweenness_centrality_subset", + "edge_current_flow_betweenness_centrality_subset", +] + + +@not_implemented_for("directed") +def current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for subsets of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. 
For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + import numpy as np + + from networkx.utils import reverse_cuthill_mckee_ordering + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(n))) + H = nx.relabel_nodes(G, mapping) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[s] += 0.5 * np.abs(row[i] - row[j]) + betweenness[t] += 0.5 * np.abs(row[i] - row[j]) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = betweenness[v] / nb + 1.0 / (2 - n) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +def edge_current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for edges using subsets + of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dict + Dictionary of edge tuples with betweenness centrality as the value. 
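+
+ A usage sketch (illustrative; the source and target lists are arbitrary)::
+
+     G = nx.wheel_graph(6)
+     ecfb = nx.edge_current_flow_betweenness_centrality_subset(
+         G, sources=[0], targets=[2, 3]
+     )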
+ + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(n))) + H = nx.relabel_nodes(G, mapping) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[e] += 0.5 * np.abs(row[i] - row[j]) + betweenness[e] /= nb + return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_closeness.py new file mode 100644 index 0000000..b6b64c8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/current_flow_closeness.py @@ -0,0 +1,96 @@ +"""Current-flow closeness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, +) +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = ["current_flow_closeness_centrality", "information_centrality"] + + +@not_implemented_for("directed") +def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"): + """Compute current-flow closeness centrality for nodes. + + Current-flow closeness centrality is variant of closeness + centrality based on effective resistance between nodes in + a network. This metric is also known as information centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. 
+ + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with current flow closeness centrality as the value. + + See Also + -------- + closeness_centrality + + Notes + ----- + The algorithm is from Brandes [1]_. + + See also [2]_ for the original definition of information centrality. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer, + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] Karen Stephenson and Marvin Zelen: + Rethinking centrality: Methods and examples. + Social Networks 11(1):1-37, 1989. + https://doi.org/10.1016/0378-8733(89)90016-6 + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + n = H.number_of_nodes() + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver + for v in H: + col = C2.get_row(v) + for w in H: + betweenness[v] += col[v] - 2 * col[w] + betweenness[w] += col[v] + for v in H: + betweenness[v] = 1 / (betweenness[v]) + return {ordering[k]: v for k, v in betweenness.items()} + + +information_centrality = current_flow_closeness_centrality diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/degree_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/degree_alg.py new file mode 100644 index 0000000..a7e7b92 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/degree_alg.py @@ -0,0 +1,127 @@ +"""Degree centrality measures.""" +from networkx.utils.decorators import not_implemented_for + +__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"] + + +def degree_centrality(G): + """Compute the degree centrality for nodes. + + The degree centrality for a node v is the fraction of nodes it + is connected to. + + Parameters + ---------- + G : graph + A networkx graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with degree centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.degree()} + return centrality + + +@not_implemented_for("undirected") +def in_degree_centrality(G): + """Compute the in-degree centrality for nodes. 
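+
+ A small usage sketch (illustrative; the DiGraph is arbitrary)::
+
+     D = nx.DiGraph([(0, 1), (0, 2), (2, 1)])
+     nx.in_degree_centrality(D)  # node 1 has in-degree 2 of a possible 2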
+ + The in-degree centrality for a node v is the fraction of nodes its + incoming edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with in-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + See Also + -------- + degree_centrality, out_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.in_degree()} + return centrality + + +@not_implemented_for("undirected") +def out_degree_centrality(G): + """Compute the out-degree centrality for nodes. + + The out-degree centrality for a node v is the fraction of nodes its + outgoing edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with out-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + See Also + -------- + degree_centrality, in_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.out_degree()} + return centrality diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/dispersion.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/dispersion.py new file mode 100644 index 0000000..8005670 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/dispersion.py @@ -0,0 +1,97 @@ +from itertools import combinations + +__all__ = ["dispersion"] + + +def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): + r"""Calculate dispersion between `u` and `v` in `G`. + + A link between two actors (`u` and `v`) has a high dispersion when their + mutual ties (`s` and `t`) are not well connected with each other. + + Parameters + ---------- + G : graph + A NetworkX graph. + u : node, optional + The source for the dispersion score (e.g. ego node of the network). + v : node, optional + The target of the dispersion score if specified. + normalized : bool + If True (default) normalize by the embededness of the nodes (u and v). + + Returns + ------- + nodes : dictionary + If u (v) is specified, returns a dictionary of nodes with dispersion + score for all "target" ("source") nodes. If neither u nor v is + specified, returns a dictionary of dictionaries for all nodes 'u' in the + graph with a dispersion score for each node 'v'. + + Notes + ----- + This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical + usage would be to run dispersion on the ego network $G_u$ if $u$ were + specified. Running :func:`dispersion` with neither $u$ nor $v$ specified + can take some time to complete. + + References + ---------- + .. 
[1] Romantic Partnerships and the Dispersion of Social Ties: + A Network Analysis of Relationship Status on Facebook. + Lars Backstrom, Jon Kleinberg. + https://arxiv.org/pdf/1310.6753v1.pdf + + """ + + def _dispersion(G_u, u, v): + """dispersion for all nodes 'v' in a ego network G_u of node 'u'""" + u_nbrs = set(G_u[u]) + ST = {n for n in G_u[v] if n in u_nbrs} + set_uv = {u, v} + # all possible ties of connections that u and b share + possib = combinations(ST, 2) + total = 0 + for (s, t) in possib: + # neighbors of s that are in G_u, not including u and v + nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv + # s and t are not directly connected + if t not in nbrs_s: + # s and t do not share a connection + if nbrs_s.isdisjoint(G_u[t]): + # tick for disp(u, v) + total += 1 + # neighbors that u and v share + embededness = len(ST) + + dispersion_val = total + if normalized: + dispersion_val = (total + b) ** alpha + if embededness + c != 0: + dispersion_val /= embededness + c + + return dispersion_val + + if u is None: + # v and u are not specified + if v is None: + results = {n: {} for n in G} + for u in G: + for v in G[u]: + results[u][v] = _dispersion(G, u, v) + # u is not specified, but v is + else: + results = dict.fromkeys(G[v], {}) + for u in G[v]: + results[u] = _dispersion(G, v, u) + else: + # u is specified with no target v + if v is None: + results = dict.fromkeys(G[u], {}) + for v in G[u]: + results[v] = _dispersion(G, u, v) + # both u and v are specified + else: + results = _dispersion(G, u, v) + + return results diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/eigenvector.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/eigenvector.py new file mode 100644 index 0000000..f55cedb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/eigenvector.py @@ -0,0 +1,229 @@ +"""Functions for computing eigenvector centrality.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"] + + +@not_implemented_for("multigraph") +def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None): + r"""Compute the eigenvector centrality for the graph `G`. + + Eigenvector centrality computes the centrality for a node based on the + centrality of its neighbors. The eigenvector centrality for node $i$ is + the $i$-th element of the vector $x$ defined by the equation + + .. math:: + + Ax = \lambda x + + where $A$ is the adjacency matrix of the graph `G` with eigenvalue + $\lambda$. By virtue of the Perron–Frobenius theorem, there is a unique + solution $x$, all of whose entries are positive, if $\lambda$ is the + largest eigenvalue of the adjacency matrix $A$ ([2]_). + + Parameters + ---------- + G : graph + A networkx graph + + max_iter : integer, optional (default=100) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional (default=None) + Starting value of eigenvector iteration for each node. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality(G) + >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items()) + [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')] + + Raises + ------ + NetworkXPointlessConcept + If the graph `G` is the null graph. + + NetworkXError + If each value in `nstart` is zero. + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + See Also + -------- + eigenvector_centrality_numpy + pagerank + hits + + Notes + ----- + The measure was introduced by [1]_ and is discussed in [2]_. + + The power iteration method is used to compute the eigenvector and + convergence is **not** guaranteed. Our method stops after ``max_iter`` + iterations or when the change in the computed vector between two + iterations is smaller than an error tolerance of + ``G.number_of_nodes() * tol``. This implementation uses ($A + I$) + rather than the adjacency matrix $A$ because it shifts the spectrum + to enable discerning the correct eigenvector even for networks with + multiple dominant eigenvalues. + + For directed graphs this is "left" eigenvector centrality which corresponds + to the in-edges in the graph. For out-edges eigenvector centrality + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Phillip Bonacich. + "Power and Centrality: A Family of Measures." + *American Journal of Sociology* 92(5):1170–1182, 1986 + + .. [2] Mark E. J. Newman. + *Networks: An Introduction.* + Oxford University Press, USA, 2010, pp. 169. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + # If no initial vector is provided, start with the all-ones vector. + if nstart is None: + nstart = {v: 1 for v in G} + if all(v == 0 for v in nstart.values()): + raise nx.NetworkXError("initial vector cannot have all zero values") + # Normalize the initial vector so that each entry is in [0, 1]. This is + # guaranteed to never have a divide-by-zero error by the previous line. + nstart_sum = sum(nstart.values()) + x = {k: v / nstart_sum for k, v in nstart.items()} + nnodes = G.number_of_nodes() + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = xlast.copy() # Start with xlast times I to iterate with (A+I) + # do the multiplication y^T = x^T A (left eigenvector) + for n in x: + for nbr in G[n]: + w = G[n][nbr].get(weight, 1) if weight else 1 + x[nbr] += xlast[n] * w + # Normalize the vector. The normalization denominator `norm` + # should never be zero by the Perron--Frobenius + # theorem. However, in case it is due to numerical error, we + # assume the norm to be one instead. + norm = math.hypot(*x.values()) or 1 + x = {k: v / norm for k, v in x.items()} + # Check for convergence (in the L_1 norm). + if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol: + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0): + r"""Compute the eigenvector centrality for the graph G. + + Eigenvector centrality computes the centrality for a node based on the + centrality of its neighbors. The eigenvector centrality for node $i$ is + + .. math:: + + Ax = \lambda x + + where $A$ is the adjacency matrix of the graph G with eigenvalue $\lambda$. 
+ By virtue of the Perron–Frobenius theorem, there is a unique and positive + solution if $\lambda$ is the largest eigenvalue associated with the + eigenvector of the adjacency matrix $A$ ([2]_). + + Parameters + ---------- + G : graph + A networkx graph + + weight : None or string, optional (default=None) + The name of the edge attribute used as weight. + If None, all edge weights are considered equal. + In this measure the weight is interpreted as the connection strength. + max_iter : integer, optional (default=100) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality_numpy(G) + >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality]) + ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] + + See Also + -------- + eigenvector_centrality + pagerank + hits + + Notes + ----- + The measure was introduced by [1]_. + + This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to + find the largest eigenvalue/eigenvector pair. + + For directed graphs this is "left" eigenvector centrality which corresponds + to the in-edges in the graph. For out-edges eigenvector centrality + first reverse the graph with ``G.reverse()``. + + Raises + ------ + NetworkXPointlessConcept + If the graph ``G`` is the null graph. + + References + ---------- + .. [1] Phillip Bonacich: + Power and Centrality: A Family of Measures. + American Journal of Sociology 92(5):1170–1182, 1986 + http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf + .. [2] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, pp. 169. + """ + import numpy as np + import scipy as sp + import scipy.sparse.linalg # call as sp.sparse.linalg + + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float) + _, eigenvector = sp.sparse.linalg.eigs( + M.T, k=1, which="LR", maxiter=max_iter, tol=tol + ) + largest = eigenvector.flatten().real + norm = np.sign(largest.sum()) * sp.linalg.norm(largest) + return dict(zip(G, largest / norm)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/flow_matrix.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/flow_matrix.py new file mode 100644 index 0000000..fcd6568 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/flow_matrix.py @@ -0,0 +1,131 @@ +# Helpers for current-flow betweenness and current-flow closness +# Lazy computations for inverse Laplacian and flow-matrix rows. 
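+# A minimal internal-usage sketch (illustrative): flow_matrix_row expects a
+# graph whose nodes are already integers 0..n-1 (the public centrality
+# routines relabel via reverse_cuthill_mckee_ordering first), e.g.:
+#
+#     H = nx.convert_node_labels_to_integers(G)
+#     for row, (u, v) in flow_matrix_row(H, weight=None, solver="lu"):
+#         ...  # one row of the current-flow matrix per edge (u, v)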
+import networkx as nx + + +def flow_matrix_row(G, weight=None, dtype=float, solver="lu"): + # Generate a row of the current-flow matrix + import numpy as np + + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + w = C.w # w is the Laplacian matrix width + # row-by-row flow matrix + for u, v in sorted(sorted((u, v)) for u, v in G.edges()): + B = np.zeros(w, dtype=dtype) + c = G[u][v].get(weight, 1.0) + B[u % w] = c + B[v % w] = -c + # get only the rows needed in the inverse laplacian + # and multiply to get the flow matrix row + row = B @ C.get_rows(u, v) + yield row, (u, v) + + +# Class to compute the inverse laplacian only for specified rows +# Allows computation of the current-flow matrix without storing entire +# inverse laplacian matrix +class InverseLaplacian: + def __init__(self, L, width=None, dtype=None): + global np + import numpy as np + + (n, n) = L.shape + self.dtype = dtype + self.n = n + if width is None: + self.w = self.width(L) + else: + self.w = width + self.C = np.zeros((self.w, n), dtype=dtype) + self.L1 = L[1:, 1:] + self.init_solver(L) + + def init_solver(self, L): + pass + + def solve(self, r): + raise nx.NetworkXError("Implement solver") + + def solve_inverse(self, r): + raise nx.NetworkXError("Implement solver") + + def get_rows(self, r1, r2): + for r in range(r1, r2 + 1): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C + + def get_row(self, r): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C[r % self.w] + + def width(self, L): + m = 0 + for i, row in enumerate(L): + w = 0 + x, y = np.nonzero(row) + if len(y) > 0: + v = y - i + w = v.max() - v.min() + 1 + m = max(w, m) + return m + + +class FullInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + self.IL = np.zeros(L.shape, dtype=self.dtype) + self.IL[1:, 1:] = np.linalg.inv(self.L1.todense()) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s = self.IL @ rhs + return s + + def solve_inverse(self, r): + return self.IL[r, 1:] + + +class SuperLUInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + import scipy as sp + import scipy.sparse.linalg # call as sp.sparse.linalg + + self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc()) + + def solve_inverse(self, r): + rhs = np.zeros(self.n, dtype=self.dtype) + rhs[r] = 1 + return self.lusolve(rhs[1:]) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = self.lusolve(rhs[1:]) + return s + + +class CGInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + global sp + import scipy as sp + import scipy.sparse.linalg # call as sp.sparse.linalg + + ilu = sp.sparse.linalg.spilu(self.L1.tocsc()) + n = self.n - 1 + self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] + return s + + def solve_inverse(self, r): + rhs = np.zeros(self.n, self.dtype) + rhs[r] = 1 + return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/group.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/group.py new file mode 100644 index 0000000..5d4e43a --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/group.py @@ -0,0 +1,779 @@ +"""Group centrality measures.""" +from copy import deepcopy + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _accumulate_endpoints, + _single_source_dijkstra_path_basic, + _single_source_shortest_path_basic, +) +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "group_betweenness_centrality", + "group_closeness_centrality", + "group_degree_centrality", + "group_in_degree_centrality", + "group_out_degree_centrality", + "prominent_group", +] + + +def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): + r"""Compute the group betweenness centrality for a group of nodes. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + C : list or set or list of lists or list of sets + A group or a list of groups containing nodes which belong to G, for which group betweenness + centrality is to be calculated. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` + where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + betweenness : list of floats or float + If C is a single group then return a float. If C is a list with + several groups then return a list of group betweenness centralities. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The initial implementation of the algorithm is mentioned in [2]_. This function uses + an improved algorithm presented in [4]_. + + The number of nodes in the group must be a maximum of n - 2 where `n` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. 
+ http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf + .. [3] Sourav Medya et. al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + + """ + GBC = [] # initialize betweenness + list_of_groups = True + # check weather C contains one or many groups + if any(el in G for el in C): + C = [C] + list_of_groups = False + set_v = {node for group in C for node in group} + if set_v - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.") + + # pre-processing + PB, sigma, D = _group_preprocessing(G, set_v, weight) + + # the algorithm for each group + for group in C: + group = set(group) # set of nodes in group + # initialize the matrices of the sigma and the PB + GBC_group = 0 + sigma_m = deepcopy(sigma) + PB_m = deepcopy(PB) + sigma_m_v = deepcopy(sigma_m) + PB_m_v = deepcopy(PB_m) + for v in group: + GBC_group += PB_m[v][v] + for x in group: + for y in group: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0 + ): + if D[x][v] == D[x][y] + D[y][v]: + dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v] + if D[x][y] == D[x][v] + D[v][y]: + dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y] + if D[v][y] == D[v][x] + D[x][y]: + dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y] + sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy) + PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy + if y != v: + PB_m_v[x][y] -= PB_m[x][v] * dxyv + if x != v: + PB_m_v[x][y] -= PB_m[v][y] * dvxy + sigma_m, sigma_m_v = sigma_m_v, sigma_m + PB_m, PB_m_v = PB_m_v, PB_m + + # endpoints + v, c = len(G), len(group) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. 
+ if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = c * (2 * v - c - 1) + elif nx.is_connected(G): + scale = c * (2 * v - c - 1) + if scale == 0: + for group_node1 in group: + for node in D[group_node1]: + if node != group_node1: + if node in group: + scale += 1 + else: + scale += 2 + GBC_group -= scale + + # normalized + if normalized: + scale = 1 / ((v - c) * (v - c - 1)) + GBC_group *= scale + + # If undirected than count only the undirected edges + elif not G.is_directed(): + GBC_group /= 2 + + GBC.append(GBC_group) + if list_of_groups: + return GBC + return GBC[0] + + +def _group_preprocessing(G, set_v, weight): + sigma = {} + delta = {} + D = {} + betweenness = dict.fromkeys(G, 0) + for s in G: + if weight is None: # use BFS + S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight) + betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s) + for i in delta[s].keys(): # add the paths from s to i and rescale sigma + if s != i: + delta[s][i] += 1 + if weight is not None: + sigma[s][i] = sigma[s][i] / 2 + # building the path betweenness matrix only for nodes that appear in the group + PB = dict.fromkeys(G) + for group_node1 in set_v: + PB[group_node1] = dict.fromkeys(G, 0.0) + for group_node2 in set_v: + if group_node2 not in D[group_node1]: + continue + for node in G: + # if node is connected to the two group nodes than continue + if group_node2 in D[node] and group_node1 in D[node]: + if ( + D[node][group_node2] + == D[node][group_node1] + D[group_node1][group_node2] + ): + PB[group_node1][group_node2] += ( + delta[node][group_node2] + * sigma[node][group_node1] + * sigma[group_node1][group_node2] + / sigma[node][group_node2] + ) + return PB, sigma, D + + +def prominent_group( + G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False +): + r"""Find the prominent group of size $k$ in graph $G$. The prominence of the + group is evaluated by the group betweenness centrality. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int + The number of nodes in the group. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))`` + where ``|V|`` is the number of nodes in G and ``|C|`` is the number of + nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + C : list or set, optional (default=None) + list of nodes which won't be candidates of the prominent group. + + greedy : bool, optional (default=False) + Using a naive greedy algorithm in order to find non-optimal prominent + group. 
For scale free networks the results are negligibly below the optimal + results. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + max_GBC : float + The group betweenness centrality of the prominent group. + + max_group : list + The list of nodes in the prominent group. + + See Also + -------- + betweenness_centrality, group_betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The algorithm is described in [2]_ and is based on techniques mentioned in [4]_. + + The number of nodes in the group must be a maximum of ``n - 2`` where ``n`` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev: + "Finding the Most Prominent Group in Complex Networks" + AI communications 20(4): 287-296, 2007. + https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855 + .. [3] Sourav Medya et. al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + """ + import numpy as np + import pandas as pd + + if C is not None: + C = set(C) + if C - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.") + nodes = list(G.nodes - C) + else: + nodes = list(G.nodes) + DF_tree = nx.Graph() + PB, sigma, D = _group_preprocessing(G, nodes, weight) + betweenness = pd.DataFrame.from_dict(PB) + if C is not None: + for node in C: + # remove from the betweenness all the nodes not part of the group + betweenness.drop(index=node, inplace=True) + betweenness.drop(columns=node, inplace=True) + CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)] + max_GBC = 0 + max_group = [] + DF_tree.add_node( + 1, + CL=CL, + betweenness=betweenness, + GBC=0, + GM=[], + sigma=sigma, + cont=dict(zip(nodes, np.diag(betweenness))), + ) + + # the algorithm + DF_tree.nodes[1]["heu"] = 0 + for i in range(k): + DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]] + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy + ) + + v = len(G) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. 
+ if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = k * (2 * v - k - 1) + elif nx.is_connected(G): + scale = k * (2 * v - k - 1) + if scale == 0: + for group_node1 in max_group: + for node in D[group_node1]: + if node != group_node1: + if node in max_group: + scale += 1 + else: + scale += 2 + max_GBC -= scale + + # normalized + if normalized: + scale = 1 / ((v - k) * (v - k - 1)) + max_GBC *= scale + + # If undirected then count only the undirected edges + elif not G.is_directed(): + max_GBC /= 2 + max_GBC = float("%.2f" % max_GBC) + return max_GBC, max_group + + +def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy): + # stopping condition - if we found a group of size k and with higher GBC then prune + if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC: + return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"] + # stopping condition - if the size of group members equal to k or there are less than + # k - |GM| in the candidate list or the heuristic function plus the GBC is bellow the + # maximal GBC found then prune + if ( + len(DF_tree.nodes[root]["GM"]) == k + or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"]) + or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC + ): + return max_GBC, DF_tree, max_group + + # finding the heuristic of both children + node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy) + + # finding the child with the bigger heuristic + GBC and expand + # that node first if greedy then only expand the plus node + if greedy: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + + elif ( + DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"] + > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"] + ): + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + else: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + return max_GBC, DF_tree, max_group + + +def _heuristic(k, root, DF_tree, D, nodes, greedy): + import numpy as np + + # This helper function add two nodes to DF_tree - one left son and the + # other right son, finds their heuristic, CL, GBC, and GM + node_p = DF_tree.number_of_nodes() + 1 + node_m = DF_tree.number_of_nodes() + 2 + added_node = DF_tree.nodes[root]["CL"][0] + + # adding the plus node + DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_p]["GM"].append(added_node) + DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node] + root_node = DF_tree.nodes[root] + for x in nodes: + for y in nodes: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + root_node["sigma"][x][y] == 0 + or root_node["sigma"][x][added_node] == 0 + or root_node["sigma"][added_node][y] == 0 + ): + if D[x][added_node] == D[x][y] + D[y][added_node]: + dxyv = ( + root_node["sigma"][x][y] + * root_node["sigma"][y][added_node] + / root_node["sigma"][x][added_node] + ) + if D[x][y] == D[x][added_node] + D[added_node][y]: + dxvy = ( + root_node["sigma"][x][added_node] + * root_node["sigma"][added_node][y] + / root_node["sigma"][x][y] + ) + if D[added_node][y] == D[added_node][x] + D[x][y]: + dvxy = ( + 
root_node["sigma"][added_node][x] + * root_node["sigma"][x][y] + / root_node["sigma"][added_node][y] + ) + DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy) + DF_tree.nodes[node_p]["betweenness"][x][y] = ( + root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy + ) + if y != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][x][added_node] * dxyv + ) + if x != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][added_node][y] * dvxy + ) + + DF_tree.nodes[node_p]["CL"] = [ + node + for _, node in sorted( + zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True + ) + if node not in DF_tree.nodes[node_p]["GM"] + ] + DF_tree.nodes[node_p]["cont"] = dict( + zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"])) + ) + DF_tree.nodes[node_p]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_p]["GM"])): + DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][ + DF_tree.nodes[node_p]["CL"][i] + ] + + # adding the minus node - don't insert the first node in the CL to GM + # Insert minus node only if isn't greedy type algorithm + if not greedy: + DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_m]["CL"].pop(0) + DF_tree.nodes[node_m]["cont"].pop(added_node) + DF_tree.nodes[node_m]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_m]["GM"])): + DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][ + DF_tree.nodes[node_m]["CL"][i] + ] + else: + node_m = None + + return node_p, node_m, DF_tree + + +def group_closeness_centrality(G, S, weight=None): + r"""Compute the group closeness centrality for a group of nodes. + + Group closeness centrality of a group of nodes $S$ is a measure + of how close the group is to the other nodes in the graph. + + .. math:: + + c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}} + + d_{S, v} = min_{u \in S} (d_{u, v}) + + where $V$ is the set of nodes, $d_{S, v}$ is the distance of + the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes + in $V$ that are not in $S$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group closeness + centrality is to be calculated. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + Raises + ------ + NodeNotFound + If node(s) in S are not present in G. + + Returns + ------- + closeness : float + Group closeness centrality of the group S. + + See Also + -------- + closeness_centrality + + Notes + ----- + The measure was introduced in [1]_. + The formula implemented here is described in [2]_. + + Higher values of closeness indicate greater centrality. + + It is assumed that 1 / 0 is 0 (required in the case of directed graphs, + or when a shortest path length is 0). + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + For directed graphs, the incoming distance is utilized here. To use the + outward distance, act on `G.reverse()`. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. 
[1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] J. Zhao et. al.: + Measuring and Maximizing Group Closeness Centrality over + Disk Resident Graphs. + WWWConference Proceedings, 2014. 689-694. + https://doi.org/10.1145/2567948.2579356 + """ + if G.is_directed(): + G = G.reverse() # reverse view + closeness = 0 # initialize to 0 + V = set(G) # set of nodes in G + S = set(S) # set of nodes in group S + V_S = V - S # set of nodes in V but not S + shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight) + # accumulation + for v in V_S: + try: + closeness += shortest_path_lengths[v] + except KeyError: # no path exists + closeness += 0 + try: + closeness = len(V_S) / closeness + except ZeroDivisionError: # 1 / 0 assumed as 0 + closeness = 0 + return closeness + + +def group_degree_centrality(G, S): + """Compute the group degree centrality for a group of nodes. + + Group degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group degree + centrality is to be calculated. + + Raises + ------ + NetworkXError + If node(s) in S are not in G. + + Returns + ------- + centrality : float + Group degree centrality of the group S. + + See Also + -------- + degree_centrality + group_in_degree_centrality + group_out_degree_centrality + + Notes + ----- + The measure was introduced in [1]_. + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + """ + centrality = len(set().union(*list(set(G.neighbors(i)) for i in S)) - set(S)) + centrality /= len(G.nodes()) - len(S) + return centrality + + +@not_implemented_for("undirected") +def group_in_degree_centrality(G, S): + """Compute the group in-degree centrality for a group of nodes. + + Group in-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by incoming edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group in-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group in-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_out_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group in-degree centrality, the reverse graph is used. + """ + return group_degree_centrality(G.reverse(), S) + + +@not_implemented_for("undirected") +def group_out_degree_centrality(G, S): + """Compute the group out-degree centrality for a group of nodes. + + Group out-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by outgoing edges. 
+ + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group in-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group out-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_in_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group out-degree centrality, the graph itself is used. + """ + return group_degree_centrality(G, S) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/harmonic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/harmonic.py new file mode 100644 index 0000000..96e995d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/harmonic.py @@ -0,0 +1,79 @@ +"""Functions for computing the harmonic centrality of a graph.""" +from functools import partial + +import networkx as nx + +__all__ = ["harmonic_centrality"] + + +def harmonic_centrality(G, nbunch=None, distance=None, sources=None): + r"""Compute harmonic centrality for nodes. + + Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal + of the shortest path distances from all other nodes to `u` + + .. math:: + + C(u) = \sum_{v \neq u} \frac{1}{d(v, u)} + + where `d(v, u)` is the shortest-path distance between `v` and `u`. + + If `sources` is given as an argument, the returned harmonic centrality + values are calculated as the sum of the reciprocals of the shortest + path distances from the nodes specified in `sources` to `u` instead + of from all nodes to `u`. + + Notice that higher values indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : container (default: all nodes in G) + Container of nodes for which harmonic centrality values are calculated. + + sources : container (default: all nodes in G) + Container of nodes `v` over which reciprocal distances are computed. + Nodes not in `G` are silently ignored. + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with harmonic centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + References + ---------- + .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." + Internet Mathematics 10.3-4 (2014): 222-262. 
+ """ + + nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes) + sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes + + spl = partial(nx.shortest_path_length, G, weight=distance) + centrality = {u: 0 for u in nbunch} + for v in sources: + dist = spl(v) + for u in nbunch.intersection(dist): + d = dist[u] + if d == 0: # handle u == v and edges with 0 weight + continue + centrality[u] += 1 / d + + return centrality diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/katz.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/katz.py new file mode 100644 index 0000000..fd0bb93 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/katz.py @@ -0,0 +1,333 @@ +"""Katz centrality.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["katz_centrality", "katz_centrality_numpy"] + + +@not_implemented_for("multigraph") +def katz_centrality( + G, + alpha=0.1, + beta=1.0, + max_iter=1000, + tol=1.0e-6, + nstart=None, + normalized=True, + weight=None, +): + r"""Compute the Katz centrality for the nodes of the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + alpha : float + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have an value for every node. + + max_iter : integer, optional (default=1000) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of Katz iteration for each node. + + normalized : bool, optional (default=True) + If True normalize the resulting values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. 
+ + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality_numpy + eigenvector_centrality + eigenvector_centrality_numpy + pagerank + hits + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm it uses the power method to find the eigenvector + corresponding to the largest eigenvalue of the adjacency matrix of ``G``. + The parameter ``alpha`` should be strictly less than the inverse of largest + eigenvalue of the adjacency matrix for the algorithm to converge. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest + eigenvalue of the adjacency matrix. + The iteration will stop after ``max_iter`` iterations or an error tolerance of + ``number_of_nodes(G) * tol`` has been reached. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which corresponds + to the in-edges in the graph. For out-edges Katz centrality + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 720. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. + Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + if len(G) == 0: + return {} + + nnodes = G.number_of_nodes() + + if nstart is None: + # choose starting vector with entries of 0 + x = {n: 0 for n in G} + else: + x = nstart + + try: + b = dict.fromkeys(G, float(beta)) + except (TypeError, ValueError, AttributeError) as err: + b = beta + if set(beta) != set(G): + raise nx.NetworkXError( + "beta dictionary " "must have a value for every node" + ) from err + + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = dict.fromkeys(xlast, 0) + # do the multiplication y^T = Alpha * x^T A - Beta + for n in x: + for nbr in G[n]: + x[nbr] += xlast[n] * G[n][nbr].get(weight, 1) + for n in x: + x[n] = alpha * x[n] + b[n] + + # check convergence + error = sum(abs(x[n] - xlast[n]) for n in x) + if error < nnodes * tol: + if normalized: + # normalize vector + try: + s = 1.0 / math.hypot(*x.values()) + # this should never be zero? + except ZeroDivisionError: + s = 1.0 + else: + s = 1 + for n in x: + x[n] *= s + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@not_implemented_for("multigraph") +def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None): + r"""Compute the Katz centrality for the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. 
math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + alpha : float + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar the + dictionary must have an value for every node. + + normalized : bool + If True normalize the resulting values. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality_numpy(G, 1 / phi) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality + eigenvector_centrality_numpy + eigenvector_centrality + pagerank + hits + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses a direct linear solver to solve the above equation. + The parameter ``alpha`` should be strictly less than the inverse of largest + eigenvalue of the adjacency matrix for there to be a solution. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest + eigenvalue of the adjacency matrix. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which corresponds + to the in-edges in the graph. For out-edges Katz centrality + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 173. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. 
+ Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + import numpy as np + + if len(G) == 0: + return {} + try: + nodelist = beta.keys() + if set(nodelist) != set(G): + raise nx.NetworkXError( + "beta dictionary " "must have a value for every node" + ) + b = np.array(list(beta.values()), dtype=float) + except AttributeError: + nodelist = list(G) + try: + b = np.ones((len(nodelist), 1)) * beta + except (TypeError, ValueError, AttributeError) as err: + raise nx.NetworkXError("beta must be a number") from err + + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T + n = A.shape[0] + centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b) + if normalized: + norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) + else: + norm = 1.0 + centrality = dict(zip(nodelist, map(float, centrality / norm))) + return centrality diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/load.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/load.py new file mode 100644 index 0000000..c27c5fd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/load.py @@ -0,0 +1,197 @@ +"""Load centrality.""" +from operator import itemgetter + +import networkx as nx + +__all__ = ["load_centrality", "edge_load_centrality"] + + +def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None): + """Compute load centrality for nodes. + + The load centrality of a node is the fraction of all shortest + paths that pass through that node. + + Parameters + ---------- + G : graph + A networkx graph. + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + cutoff : bool, optional (default=None) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Load centrality is slightly different than betweenness. It was originally + introduced by [2]_. For this load algorithm see [1]_. + + References + ---------- + .. [1] Mark E. J. Newman: + Scientific collaboration networks. II. + Shortest paths, weighted networks, and centrality. + Physical Review E 64, 016132, 2001. + http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132 + .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim + Universal behavior of Load Distribution in Scale-Free Networks. + Physical Review Letters 87(27):1–4, 2001. 
+ https://doi.org/10.1103/PhysRevLett.87.278701 + """ + if v is not None: # only one node + betweenness = 0.0 + for source in G: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + betweenness += ubetween[v] if v in ubetween else 0 + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + betweenness *= 1.0 / ((order - 1) * (order - 2)) + else: + betweenness = {}.fromkeys(G, 0.0) + for source in betweenness: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + for vk in ubetween: + betweenness[vk] += ubetween[vk] + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + scale = 1.0 / ((order - 1) * (order - 2)) + for v in betweenness: + betweenness[v] *= scale + return betweenness # all nodes + + +def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None): + """Node betweenness_centrality helper: + + See betweenness_centrality for what you probably want. + This actually computes "load" and not betweenness. + See https://networkx.lanl.gov/ticket/103 + + This calculates the load of each node for paths from a single source. + (The fraction of number of shortests paths from source that go + through each node.) + + To get the load for a node you need to do all-pairs shortest paths. + + If weight is not None then use Dijkstra for finding shortest paths. + """ + # get the predecessor and path length data + if weight is None: + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + else: + (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight) + + # order the nodes by path length + onodes = [(l, vert) for (vert, l) in length.items()] + onodes.sort() + onodes[:] = [vert for (l, vert) in onodes if l > 0] + + # initialize betweenness + between = {}.fromkeys(length, 1.0) + + while onodes: + v = onodes.pop() + if v in pred: + num_paths = len(pred[v]) # Discount betweenness if more than + for x in pred[v]: # one shortest path. + if x == source: # stop if hit source because all remaining v + break # also have pred[v]==[source] + between[x] += between[v] / num_paths + # remove source + for v in between: + between[v] -= 1 + # rescale to be between 0 and 1 + if normalized: + l = len(between) + if l > 2: + # scale by 1/the number of possible paths + scale = 1 / ((l - 1) * (l - 2)) + for v in between: + between[v] *= scale + return between + + +load_centrality = newman_betweenness_centrality + + +def edge_load_centrality(G, cutoff=False): + """Compute edge load. + + WARNING: This concept of edge load has not been analysed + or discussed outside of NetworkX that we know of. + It is based loosely on load_centrality in the sense that + it counts the number of shortest paths which cross each edge. + This function is for demonstration and testing purposes. + + Parameters + ---------- + G : graph + A networkx graph + + cutoff : bool, optional (default=False) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + A dict keyed by edge 2-tuple to the number of shortest paths + which use that edge. Where more than one path is shortest + the count is divided equally among paths. 
+ """ + betweenness = {} + for u, v in G.edges(): + betweenness[(u, v)] = 0.0 + betweenness[(v, u)] = 0.0 + + for source in G: + ubetween = _edge_betweenness(G, source, cutoff=cutoff) + for e, ubetweenv in ubetween.items(): + betweenness[e] += ubetweenv # cumulative total + return betweenness + + +def _edge_betweenness(G, source, nodes=None, cutoff=False): + """Edge betweenness helper.""" + # get the predecessor data + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + # order the nodes by path length + onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))] + # initialize betweenness, doesn't account for any edge weights + between = {} + for u, v in G.edges(nodes): + between[(u, v)] = 1.0 + between[(v, u)] = 1.0 + + while onodes: # work through all paths + v = onodes.pop() + if v in pred: + # Discount betweenness if more than one shortest path. + num_paths = len(pred[v]) + for w in pred[v]: + if w in pred: + # Discount betweenness, mult path + num_paths = len(pred[w]) + for x in pred[w]: + between[(w, x)] += between[(v, w)] / num_paths + between[(x, w)] += between[(w, v)] / num_paths + return between diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/percolation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/percolation.py new file mode 100644 index 0000000..4d70338 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/percolation.py @@ -0,0 +1,124 @@ +"""Percolation centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = ["percolation_centrality"] + + +def percolation_centrality(G, attribute="percolation", states=None, weight=None): + r"""Compute the percolation centrality for nodes. + + Percolation centrality of a node $v$, at a given time, is defined + as the proportion of ‘percolated paths’ that go through that node. + + This measure quantifies relative impact of nodes based on their + topological connectivity, as well as their percolation states. + + Percolation states of nodes are used to depict network percolation + scenarios (such as during infection transmission in a social network + of individuals, spreading of computer viruses on computer networks, or + transmission of disease over a network of towns) over time. In this + measure usually the percolation state is expressed as a decimal + between 0.0 and 1.0. + + When all nodes are in the same percolated state this measure is + equivalent to betweenness centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + attribute : None or string, optional (default='percolation') + Name of the node attribute to use for percolation state, used + if `states` is None. + + states : None or dict, optional (default=None) + Specify percolation states for the nodes, nodes as keys states + as values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + + Returns + ------- + nodes : dictionary + Dictionary of nodes with percolation centrality as the value. 
+ + See Also + -------- + betweenness_centrality + + Notes + ----- + The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and + Liaquat Hossain [1]_ + Pair dependecies are calculated and accumulated using [2]_ + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain + Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes + during Percolation in Networks + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095 + .. [2] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + """ + percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + + nodes = G + + if states is None: + states = nx.get_node_attributes(nodes, attribute) + + # sum of all percolation states + p_sigma_x_t = 0.0 + for v in states.values(): + p_sigma_x_t += v + + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + # accumulation + percolation = _accumulate_percolation( + percolation, S, P, sigma, s, states, p_sigma_x_t + ) + + n = len(G) + + for v in percolation: + percolation[v] *= 1 / (n - 2) + + return percolation + + +def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + # percolation weight + pw_s_w = states[s] / (p_sigma_x_t - states[w]) + percolation[w] += delta[w] * pw_s_w + return percolation diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/reaching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/reaching.py new file mode 100644 index 0000000..3f3fb48 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/reaching.py @@ -0,0 +1,204 @@ +"""Functions for computing reaching centrality of a node or a graph.""" + +import networkx as nx +from networkx.utils import pairwise + +__all__ = ["global_reaching_centrality", "local_reaching_centrality"] + + +def _average_weight(G, path, weight=None): + """Returns the average weight of an edge in a weighted path. + + Parameters + ---------- + G : graph + A networkx graph. + + path: list + A list of vertices that define the path. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. Then the average weight of an edge + is assumed to be the multiplicative inverse of the length of the path. + Otherwise holds the name of the edge attribute used as weight. + """ + path_length = len(path) - 1 + if path_length <= 0: + return 0 + if weight is None: + return 1 / path_length + total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path)) + return total_weight / path_length + + +def global_reaching_centrality(G, weight=None, normalized=True): + """Returns the global reaching centrality of a directed graph. + + The *global reaching centrality* of a weighted directed graph is the + average over all nodes of the difference between the local reaching + centrality of the node and the greatest local reaching centrality of + any node in the graph [1]_. 
For more information on the local + reaching centrality, see :func:`local_reaching_centrality`. + Informally, the local reaching centrality is the proportion of the + graph that is reachable from the neighbors of the node. + + Parameters + ---------- + G : DiGraph + A networkx DiGraph. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If ``None``, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The global reaching centrality of the graph. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(1, 3) + >>> nx.global_reaching_centrality(G) + 1.0 + >>> G.add_edge(3, 2) + >>> nx.global_reaching_centrality(G) + 0.75 + + See also + -------- + local_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + + # If provided, weights must be interpreted as connection strength + # (so higher weights are more likely to be chosen). However, the + # shortest path algorithms in NetworkX assume the provided "weight" + # is actually a distance (so edges with higher weight are less + # likely to be chosen). Therefore we need to invert the weights when + # computing shortest paths. + # + # If weight is None, we leave it as-is so that the shortest path + # algorithm can use a faster, unweighted algorithm. + if weight is not None: + + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + shortest_paths = nx.shortest_path(G, weight=as_distance) + else: + shortest_paths = nx.shortest_path(G) + + centrality = local_reaching_centrality + # TODO This can be trivially parallelized. + lrc = [ + centrality(G, node, paths=paths, weight=weight, normalized=normalized) + for node, paths in shortest_paths.items() + ] + + max_lrc = max(lrc) + return sum(max_lrc - c for c in lrc) / (len(G) - 1) + + +def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True): + """Returns the local reaching centrality of a node in a directed + graph. + + The *local reaching centrality* of a node in a directed graph is the + proportion of other nodes reachable from that node [1]_. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph. + + v : node + A node in the directed graph `G`. + + paths : dictionary (default=None) + If this is not `None` it must be a dictionary representation + of single-source shortest paths, as computed by, for example, + :func:`networkx.shortest_path` with source node `v`. Use this + keyword argument if you intend to invoke this function many + times but don't want the paths to be recomputed each time. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If `None`, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. 
+ + Returns + ------- + h : float + The local reaching centrality of the node ``v`` in the graph + ``G``. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3)]) + >>> nx.local_reaching_centrality(G, 3) + 0.0 + >>> G.add_edge(3, 2) + >>> nx.local_reaching_centrality(G, 3) + 0.5 + + See also + -------- + global_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if paths is None: + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + if weight is not None: + # Interpret weights as lengths. + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + paths = nx.shortest_path(G, source=v, weight=as_distance) + else: + paths = nx.shortest_path(G, source=v) + # If the graph is unweighted, simply return the proportion of nodes + # reachable from the source node ``v``. + if weight is None and G.is_directed(): + return (len(paths) - 1) / (len(G) - 1) + if normalized and weight is not None: + norm = G.size(weight=weight) / G.size() + else: + norm = 1 + # TODO This can be trivially parallelized. + avgw = (_average_weight(G, path, weight=weight) for path in paths.values()) + sum_avg_weight = sum(avgw) / norm + return sum_avg_weight / (len(G) - 1) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/second_order.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/second_order.py new file mode 100644 index 0000000..3acd69e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/second_order.py @@ -0,0 +1,133 @@ +"""Copyright (c) 2015 – Thomson Licensing, SAS + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +* Neither the name of Thomson Licensing, or Technicolor, nor the names +of its contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+""" + +import networkx as nx +from networkx.utils import not_implemented_for + +# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com) + +__all__ = ["second_order_centrality"] + + +@not_implemented_for("directed") +def second_order_centrality(G): + """Compute the second order centrality for nodes of G. + + The second order centrality of a given node is the standard deviation of + the return times to that node of a perpetual random walk on G: + + Parameters + ---------- + G : graph + A NetworkX connected and undirected graph. + + Returns + ------- + nodes : dictionary + Dictionary keyed by node with second order centrality as the value. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> soc = nx.second_order_centrality(G) + >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0]) # pick first id + 0 + + Raises + ------ + NetworkXException + If the graph G is empty, non connected or has negative weights. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Lower values of second order centrality indicate higher centrality. + + The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_. + + This code implements the analytical version of the algorithm, i.e., + there is no simulation of a random walk process involved. The random walk + is here unbiased (corresponding to eq 6 of the paper [1]_), thus the + centrality values are the standard deviations for random walk return times + on the transformed input graph G (equal in-degree at each nodes by adding + self-loops). + + Complexity of this implementation, made to run locally on a single machine, + is O(n^3), with n the size of G, which makes it viable only for small + graphs. + + References + ---------- + .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan + "Second order centrality: Distributed assessment of nodes criticity in + complex networks", Elsevier Computer Communications 34(5):619-628, 2011. + """ + import numpy as np + + n = len(G) + + if n == 0: + raise nx.NetworkXException("Empty graph.") + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if any(d.get("weight", 0) < 0 for u, v, d in G.edges(data=True)): + raise nx.NetworkXException("Graph has negative edge weights.") + + # balancing G for Metropolis-Hastings random walks + G = nx.DiGraph(G) + in_deg = dict(G.in_degree(weight="weight")) + d_max = max(in_deg.values()) + for i, deg in in_deg.items(): + if deg < d_max: + G.add_edge(i, i, weight=d_max - deg) + + P = nx.to_numpy_array(G) + P /= P.sum(axis=1)[:, np.newaxis] # to transition probability matrix + + def _Qj(P, j): + P = P.copy() + P[:, j] = 0 + return P + + M = np.empty([n, n]) + + for i in range(n): + M[:, i] = np.linalg.solve( + np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0] + ) # eq 3 + + return dict( + zip(G.nodes, [np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1)) for i in range(n)]) + ) # eq 6 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/subgraph_alg.py new file mode 100644 index 0000000..0f52d51 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/subgraph_alg.py @@ -0,0 +1,338 @@ +""" +Subraph centrality and communicability betweenness. 
+""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "subgraph_centrality_exp", + "subgraph_centrality", + "communicability_betweenness_centrality", + "estrada_index", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def subgraph_centrality_exp(G): + r"""Returns the subgraph centrality for each node of G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes:dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm exponentiates the adjacency matrix. + + The subgraph centrality of a node `u` in G can be found using + the matrix exponential of the adjacency matrix of G [1]_, + + .. math:: + + SC(u)=(e^A)_{uu} . + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality_exp(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + """ + # alternative implementation that calculates the matrix exponential + import scipy as sp + import scipy.linalg # call as sp.linalg + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + expA = sp.linalg.expm(A) + # convert diagonal to dictionary keyed by node + sc = dict(zip(nodelist, map(float, expA.diagonal()))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def subgraph_centrality(G): + r"""Returns subgraph centrality for each node in G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality_exp: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm computes eigenvalues and eigenvectors + of the adjacency matrix. + + Subgraph centrality of a node `u` in G can be found using + a spectral decomposition of the adjacency matrix [1]_, + + .. math:: + + SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, + + where `v_j` is an eigenvector of the adjacency matrix `A` of G + corresponding to the eigenvalue `\lambda_j`. + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... 
(2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + w, v = np.linalg.eigh(A) + vsquare = np.array(v) ** 2 + expw = np.exp(w) + xg = vsquare @ expw + # convert vector dictionary keyed by node + sc = dict(zip(nodelist, map(float, xg))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def communicability_betweenness_centrality(G): + r"""Returns subgraph communicability for all pairs of nodes in G. + + Communicability betweenness measure makes use of the number of walks + connecting every pair of nodes as the basis of a betweenness centrality + measure. + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with communicability betweenness as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges, + and `A` denote the adjacency matrix of `G`. + + Let `G(r)=(V,E(r))` be the graph resulting from + removing all edges connected to node `r` but not the node itself. + + The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros + only in row and column `r`. + + The subraph betweenness of a node `r` is [1]_ + + .. math:: + + \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}}, + p\neq q, q\neq r, + + where + `G_{prq}=(e^{A}_{pq} - (e^{A+E(r)})_{pq}` is the number of walks + involving node r, + `G_{pq}=(e^{A})_{pq}` is the number of closed walks starting + at node `p` and ending at node `q`, + and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the + number of terms in the sum. + + The resulting `\omega_{r}` takes values between zero and one. + The lower bound cannot be attained for a connected + graph, and the upper bound is attained in the star graph. + + References + ---------- + .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano, + "Communicability Betweenness in Complex Networks" + Physica A 388 (2009) 764-774. 
+ https://arxiv.org/abs/0905.4102 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> cbc = nx.communicability_betweenness_centrality(G) + >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)]) + ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03'] + """ + import numpy as np + import scipy as sp + import scipy.linalg # call as sp.linalg + + nodelist = list(G) # ordering of nodes in matrix + n = len(nodelist) + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(n))) + cbc = {} + for v in G: + # remove row and col of node v + i = mapping[v] + row = A[i, :].copy() + col = A[:, i].copy() + A[i, :] = 0 + A[:, i] = 0 + B = (expA - sp.linalg.expm(A)) / expA + # sum with row/col of node v and diag set to zero + B[i, :] = 0 + B[:, i] = 0 + B -= np.diag(np.diag(B)) + cbc[v] = B.sum() + # put row and col back + A[i, :] = row + A[:, i] = col + # rescale when more than two nodes + order = len(cbc) + if order > 2: + scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0)) + for v in cbc: + cbc[v] *= scale + return cbc + + +def estrada_index(G): + r"""Returns the Estrada index of a the graph G. + + The Estrada Index is a topological index of folding or 3D "compactness" ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + estrada index: float + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and let + `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}` + be a non-increasing ordering of the eigenvalues of its adjacency + matrix `A`. The Estrada index is ([1]_, [2]_) + + .. math:: + EE(G)=\sum_{j=1}^n e^{\lambda _j}. + + References + ---------- + .. [1] E. Estrada, "Characterization of 3D molecular structure", + Chem. Phys. Lett. 319, 713 (2000). + https://doi.org/10.1016/S0009-2614(00)00158-5 + .. [2] José Antonio de la Peñaa, Ivan Gutman, Juan Rada, + "Estimating the Estrada index", + Linear Algebra and its Applications. 427, 1 (2007). 
+ https://doi.org/10.1016/j.laa.2007.06.020 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> ei = nx.estrada_index(G) + >>> print(f"{ei:0.5}") + 20.55 + """ + return sum(subgraph_centrality(G).values()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py new file mode 100644 index 0000000..4c059cf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py @@ -0,0 +1,780 @@ +import pytest + +import networkx as nx + + +def weighted_G(): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + return G + + +class TestBetweennessCentrality: + def test_K5(self): + """Betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K5_endpoints(self): + """Betweenness centrality: K5 endpoints""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_sample_from_P3(self): + """Betweenness centrality: P3 sample""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1) + # python versions give different results with same seed + b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0} + b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0} + for n in sorted(G): + assert b[n] in (b_approx1[n], b_approx2[n]) + + def test_P3_endpoints(self): + """Betweenness centrality: P3 endpoints""" + G = nx.path_graph(3) + b_answer = {0: 2.0, 1: 3.0, 2: 2.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == 
pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3} + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Betweenness centrality: Krackhardt kite graph normalized""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Betweenness centrality: Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.570, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.041, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.130, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.011, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.165, + "Gillenormand": 0.020, + "Magnon": 0.000, + "MlleGillenormand": 0.048, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.132, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.043, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.005, + "Bahorel": 0.002, + "Bossuet": 0.031, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 
0.000, + "MmeHucheloup": 0.000, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_disconnected_path(self): + """Betweenness centrality: disconnected path""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_disconnected_path_endpoints(self): + """Betweenness centrality: disconnected path endpoints""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7) + + def test_directed_path(self): + """Betweenness centrality: directed path""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_directed_path_normalized(self): + """Betweenness centrality: directed path normalized""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 0.5, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedBetweennessCentrality: + def test_K5(self): + """Weighted betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Weighted betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Weighted betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Weighted betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + + for n in sorted(G): + assert b[n] == 
pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Weighted betweenness centrality: + Krackhardt kite graph normalized + """ + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Weighted betweenness centrality: + Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Weighted betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.454, + "Labarre": 0.000, + "Marguerite": 0.009, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.066, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.114, + "MmeThenardier": 0.046, + "Thenardier": 0.129, + "Cosette": 0.075, + "Javert": 0.193, + "Fauchelevent": 0.026, + "Bamatabois": 0.080, + "Perpetue": 0.000, + "Simplice": 0.001, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.023, + "Boulatruelle": 0.000, + "Eponine": 0.023, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.285, + "Gillenormand": 0.024, + "Magnon": 0.005, + "MlleGillenormand": 0.036, + "MmePontmercy": 0.005, + "MlleVaubois": 0.000, + "LtGillenormand": 0.015, + "Marius": 0.072, + "BaronessT": 0.004, + "Mabeuf": 0.089, + "Enjolras": 0.003, + "Combeferre": 0.000, + "Prouvaire": 0.000, + "Feuilly": 0.004, + "Courfeyrac": 0.001, + "Bahorel": 0.007, + "Bossuet": 0.028, + "Joly": 0.000, + "Grantaire": 0.036, + "MotherPlutarch": 0.000, + "Gueulemer": 0.025, + "Babet": 0.015, + "Claquesous": 0.042, + "Montparnasse": 0.050, + "Toussaint": 0.011, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.002, + "MmeHucheloup": 0.034, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Weighted betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], 
abs=1e-3) + + def test_G(self): + """Weighted betweenness centrality: G""" + G = weighted_G() + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G2(self): + """Weighted betweenness centrality: G2""" + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G3(self): + """Weighted betweenness centrality: G3""" + G = nx.MultiGraph(weighted_G()) + es = list(G.edges(data=True))[::2] # duplicate every other edge + G.add_edges_from(es) + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G4(self): + """Weighted betweenness centrality: G4""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("s", "x", 6), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("x", "y", 3), + ("y", "s", 7), + ("y", "v", 6), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = dict.fromkeys(G.edges(), 1 / 10) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, 
(1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness centrality: weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_graph(self): + """Edge betweenness centrality: normalized weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) + + def test_weighted_multigraph(self): + """Edge betweenness centrality: weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_multigraph(self): + """Edge betweenness centrality: normalized weighted multigraph""" + eList = 
[ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py new file mode 100644 index 0000000..6b66b8a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py @@ -0,0 +1,227 @@ +import pytest + +import networkx as nx + + +class TestSubsetBetweennessCentrality: + def test_K5(self): + """Betweenness Centrality Subset: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Betweenness Centrality Subset: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Betweenness Centrality Subset: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Betweenness Centrality Subset: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Betweenness Centrality Subset: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Betweenness Centrality Subset: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Betweenness Centrality Subset: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0} + b = 
nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Betweenness Centrality Subset: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None) + + expected_b = { + 1: 0, + 2: 1.0 / 10, + 3: 1.0 / 10, + 4: 1.0 / 10, + 5: 1.0 / 10, + 6: 3.0 / 10, + 7: 1.0 / 10, + 8: 4.0 / 10, + 9: 0, + 10: 1.0 / 10, + 11: 1.0 / 10, + 12: 1.0 / 10, + } + + for n in sorted(G): + assert b[n] == pytest.approx(expected_b[n], abs=1e-7) + + +class TestBetweennessCentralitySources: + def test_K5(self): + """Betweenness Centrality Sources: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality_source(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Betweenness Centrality Sources: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality_source(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeSubsetBetweennessCentrality: + def test_K5(self): + """Edge betweenness subset centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Edge betweenness subset centrality: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Edge betweenness subset centrality: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Edge betweenness subset centrality: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Edge betweenness subset centrality: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def 
test_box_and_path(self): + """Edge betweenness subset centrality: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Edge betweenness subset centrality: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = 1.0 + b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5 + b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py new file mode 100644 index 0000000..d274206 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py @@ -0,0 +1,306 @@ +""" +Tests for closeness centrality. +""" +import pytest + +import networkx as nx + + +class TestClosenessCentrality: + @classmethod + def setup_class(cls): + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + + F = nx.florentine_families_graph() + cls.F = F + + cls.LM = nx.les_miserables_graph() + + # Create random undirected, unweighted graph for testing incremental version + cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123) + cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G) + + def test_wf_improved(self): + G = nx.union(self.P4, nx.path_graph([4, 5, 6])) + c = nx.closeness_centrality(G) + cwf = nx.closeness_centrality(G, wf_improved=False) + res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222} + wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667} + for n in G: + assert c[n] == pytest.approx(res[n], abs=1e-3) + assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3) + + def test_digraph(self): + G = nx.path_graph(3, create_using=nx.DiGraph()) + c = nx.closeness_centrality(G) + cr = nx.closeness_centrality(G.reverse()) + d = {0: 0.0, 1: 0.500, 2: 0.667} + dr = {0: 0.667, 1: 0.500, 2: 0.0} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + assert cr[n] == pytest.approx(dr[n], abs=1e-3) + + def test_k5_closeness(self): + c = nx.closeness_centrality(self.K5) + d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000} + for n in sorted(self.K5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_closeness(self): + c = nx.closeness_centrality(self.P3) + d = {0: 0.667, 1: 1.000, 2: 0.667} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_closeness(self): + c = nx.closeness_centrality(self.K) + d = { + 0: 0.529, + 1: 0.529, + 2: 0.500, + 3: 0.600, + 4: 0.500, + 5: 0.643, + 6: 
0.643, + 7: 0.600, + 8: 0.429, + 9: 0.310, + } + for n in sorted(self.K): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_closeness(self): + c = nx.closeness_centrality(self.F) + d = { + "Acciaiuoli": 0.368, + "Albizzi": 0.483, + "Barbadori": 0.4375, + "Bischeri": 0.400, + "Castellani": 0.389, + "Ginori": 0.333, + "Guadagni": 0.467, + "Lamberteschi": 0.326, + "Medici": 0.560, + "Pazzi": 0.286, + "Peruzzi": 0.368, + "Ridolfi": 0.500, + "Salviati": 0.389, + "Strozzi": 0.4375, + "Tornabuoni": 0.483, + } + for n in sorted(self.F): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_closeness(self): + c = nx.closeness_centrality(self.LM) + d = { + "Napoleon": 0.302, + "Myriel": 0.429, + "MlleBaptistine": 0.413, + "MmeMagloire": 0.413, + "CountessDeLo": 0.302, + "Geborand": 0.302, + "Champtercier": 0.302, + "Cravatte": 0.302, + "Count": 0.302, + "OldMan": 0.302, + "Valjean": 0.644, + "Labarre": 0.394, + "Marguerite": 0.413, + "MmeDeR": 0.394, + "Isabeau": 0.394, + "Gervais": 0.394, + "Listolier": 0.341, + "Tholomyes": 0.392, + "Fameuil": 0.341, + "Blacheville": 0.341, + "Favourite": 0.341, + "Dahlia": 0.341, + "Zephine": 0.341, + "Fantine": 0.461, + "MmeThenardier": 0.461, + "Thenardier": 0.517, + "Cosette": 0.478, + "Javert": 0.517, + "Fauchelevent": 0.402, + "Bamatabois": 0.427, + "Perpetue": 0.318, + "Simplice": 0.418, + "Scaufflaire": 0.394, + "Woman1": 0.396, + "Judge": 0.404, + "Champmathieu": 0.404, + "Brevet": 0.404, + "Chenildieu": 0.404, + "Cochepaille": 0.404, + "Pontmercy": 0.373, + "Boulatruelle": 0.342, + "Eponine": 0.396, + "Anzelma": 0.352, + "Woman2": 0.402, + "MotherInnocent": 0.398, + "Gribier": 0.288, + "MmeBurgon": 0.344, + "Jondrette": 0.257, + "Gavroche": 0.514, + "Gillenormand": 0.442, + "Magnon": 0.335, + "MlleGillenormand": 0.442, + "MmePontmercy": 0.315, + "MlleVaubois": 0.308, + "LtGillenormand": 0.365, + "Marius": 0.531, + "BaronessT": 0.352, + "Mabeuf": 0.396, + "Enjolras": 0.481, + "Combeferre": 0.392, + "Prouvaire": 0.357, + "Feuilly": 0.392, + "Courfeyrac": 0.400, + "Bahorel": 0.394, + "Bossuet": 0.475, + "Joly": 0.394, + "Grantaire": 0.358, + "MotherPlutarch": 0.285, + "Gueulemer": 0.463, + "Babet": 0.463, + "Claquesous": 0.452, + "Montparnasse": 0.458, + "Toussaint": 0.402, + "Child1": 0.342, + "Child2": 0.342, + "Brujon": 0.380, + "MmeHucheloup": 0.353, + } + for n in sorted(self.LM): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_closeness(self): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + XG = nx.Graph() + XG.add_weighted_edges_from(edges) + c = nx.closeness_centrality(XG, distance="weight") + d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + # + # Tests for incremental closeness centrality. 
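+    # Editor's note (not part of the vendored NetworkX file): the incremental
+    # variant updates a previous closeness result after a single edge change
+    # instead of recomputing from scratch. A minimal usage sketch, assuming an
+    # undirected graph G and an edge (u, v) not yet in G:
+    #
+    #     prev_cc = nx.closeness_centrality(G)
+    #     cc = nx.incremental_closeness_centrality(G, (u, v), prev_cc, insertion=True)
+    #     G.add_edge(u, v)  # caller keeps G in sync, as test_incremental does below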
+ # + @staticmethod + def pick_add_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = set(g.nodes()) + neighbors = list(g.neighbors(u)) + [u] + possible_nodes.difference_update(neighbors) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + @staticmethod + def pick_remove_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = list(g.neighbors(u)) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + dir_G = nx.gn_graph(n=5) + prev_cc = None + edge = self.pick_add_edge(dir_G) + insert = True + nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert) + + def test_wrong_size_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + prev_cc.pop(0) + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_wrong_nodes_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + num_nodes = len(prev_cc) + prev_cc.pop(0) + prev_cc[num_nodes] = 0.5 + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_zero_centrality(self): + G = nx.path_graph(3) + prev_cc = nx.closeness_centrality(G) + edge = self.pick_remove_edge(G) + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False) + G.remove_edges_from([edge]) + real_cc = nx.closeness_centrality(G) + shared_items = set(test_cc.items()) & set(real_cc.items()) + assert len(shared_items) == len(real_cc) + assert 0 in test_cc.values() + + def test_incremental(self): + # Check that incremental and regular give same output + G = self.undirected_G.copy() + prev_cc = None + for i in range(5): + if i % 2 == 0: + # Remove an edge + insert = False + edge = self.pick_remove_edge(G) + else: + # Add an edge + insert = True + edge = self.pick_add_edge(G) + + # start = timeit.default_timer() + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + # inc_elapsed = (timeit.default_timer() - start) + # print(f"incremental time: {inc_elapsed}") + + if insert: + G.add_edges_from([edge]) + else: + G.remove_edges_from([edge]) + + # start = timeit.default_timer() + real_cc = nx.closeness_centrality(G) + # reg_elapsed = (timeit.default_timer() - start) + # print(f"regular time: {reg_elapsed}") + # Example output: + # incremental time: 0.208 + # regular time: 0.276 + # incremental time: 0.00683 + # regular time: 0.260 + # incremental time: 0.0224 + # regular time: 0.278 + # incremental time: 0.00804 + # regular time: 0.208 + # incremental time: 0.00947 + # regular time: 0.188 + + assert set(test_cc.items()) == set(real_cc.items()) + + prev_cc = test_cc diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py new file mode 100644 index 0000000..e9f5179 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py @@ -0,0 +1,177 @@ +import pytest + +import networkx as nx +from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow + +np = 
pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + b_answer = {0: 0, 1: 2, 2: 2, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_solvers2(self): + """Betweenness centrality: alternate solvers""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestApproximateFlowBetweennessCentrality: + def test_K4_normalized(self): + "Approximate current-flow betweenness centrality: K4 normalized" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_K4(self): + "Approximate current-flow betweenness centrality: K4" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2) + + def test_star(self): + "Approximate current-flow betweenness centrality: star" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, 
normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_grid(self): + "Approximate current-flow betweenness centrality: 2d grid" + G = nx.grid_2d_graph(4, 4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_seed(self): + G = nx.complete_graph(4) + b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=0.1) + + def test_solvers(self): + "Approximate current-flow betweenness centrality: solvers" + G = nx.complete_graph(4) + epsilon = 0.1 + for solver in ["full", "lu", "cg"]: + b = approximate_cfbc( + G, normalized=False, solver=solver, epsilon=0.5 * epsilon + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon) + + +class TestWeightedFlowBetweennessCentrality: + pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=True) + b_answer = dict.fromkeys(G.edges(), 0.25) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4_normalized(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = dict.fromkeys(G.edges(), 0.75) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge flow betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py new file mode 100644 index 0000000..7b1611b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py @@ -0,0 +1,147 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow +from networkx import ( + edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset, +) + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, 
normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight="other" + ) + b_answer = nx.current_flow_betweenness_centrality( + G, normalized=True, weight="other" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +# class TestWeightedFlowBetweennessCentrality(): +# pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None) + # weight is None => same as unweighted network + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, 
abs=1e-7) + + b = edge_current_flow_subset( + G, list(G), list(G), normalized=False, weight="other" + ) + b_answer = edge_current_flow(G, normalized=False, weight="other") + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py new file mode 100644 index 0000000..24a916a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py @@ -0,0 +1,37 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +class TestFlowClosenessCentrality: + def test_K4(self): + """Closeness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Closeness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Closeness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_closeness_centrality(G) + b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedFlowClosenessCentrality: + pass diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py new file mode 100644 index 0000000..591df6a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py @@ -0,0 +1,145 @@ +""" + Unit tests for degree centrality. 
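+
+    Degree centrality of a node is its degree divided by n - 1, the largest
+    degree possible in a simple graph on n nodes. A small illustrative
+    doctest (editor's addition, not part of the vendored file):
+
+    >>> import networkx as nx
+    >>> nx.degree_centrality(nx.path_graph(3))
+    {0: 0.5, 1: 1.0, 2: 0.5}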
+""" + +import pytest + +import networkx as nx + + +class TestDegreeCentrality: + def setup_method(self): + + self.K = nx.krackhardt_kite_graph() + self.P3 = nx.path_graph(3) + self.K5 = nx.complete_graph(5) + + F = nx.Graph() # Florentine families + F.add_edge("Acciaiuoli", "Medici") + F.add_edge("Castellani", "Peruzzi") + F.add_edge("Castellani", "Strozzi") + F.add_edge("Castellani", "Barbadori") + F.add_edge("Medici", "Barbadori") + F.add_edge("Medici", "Ridolfi") + F.add_edge("Medici", "Tornabuoni") + F.add_edge("Medici", "Albizzi") + F.add_edge("Medici", "Salviati") + F.add_edge("Salviati", "Pazzi") + F.add_edge("Peruzzi", "Strozzi") + F.add_edge("Peruzzi", "Bischeri") + F.add_edge("Strozzi", "Ridolfi") + F.add_edge("Strozzi", "Bischeri") + F.add_edge("Ridolfi", "Tornabuoni") + F.add_edge("Tornabuoni", "Guadagni") + F.add_edge("Albizzi", "Ginori") + F.add_edge("Albizzi", "Guadagni") + F.add_edge("Bischeri", "Guadagni") + F.add_edge("Guadagni", "Lamberteschi") + self.F = F + + G = nx.DiGraph() + G.add_edge(0, 5) + G.add_edge(1, 5) + G.add_edge(2, 5) + G.add_edge(3, 5) + G.add_edge(4, 5) + G.add_edge(5, 6) + G.add_edge(5, 7) + G.add_edge(5, 8) + self.G = G + + def test_degree_centrality_1(self): + d = nx.degree_centrality(self.K5) + exact = dict(zip(range(5), [1] * 5)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_2(self): + d = nx.degree_centrality(self.P3) + exact = {0: 0.5, 1: 1, 2: 0.5} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_3(self): + d = nx.degree_centrality(self.K) + exact = { + 0: 0.444, + 1: 0.444, + 2: 0.333, + 3: 0.667, + 4: 0.333, + 5: 0.556, + 6: 0.556, + 7: 0.333, + 8: 0.222, + 9: 0.111, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_degree_centrality_4(self): + d = nx.degree_centrality(self.F) + names = sorted(self.F.nodes()) + dcs = [ + 0.071, + 0.214, + 0.143, + 0.214, + 0.214, + 0.071, + 0.286, + 0.071, + 0.429, + 0.071, + 0.214, + 0.214, + 0.143, + 0.286, + 0.214, + ] + exact = dict(zip(names, dcs)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_indegree_centrality(self): + d = nx.in_degree_centrality(self.G) + exact = { + 0: 0.0, + 1: 0.0, + 2: 0.0, + 3: 0.0, + 4: 0.0, + 5: 0.625, + 6: 0.125, + 7: 0.125, + 8: 0.125, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_outdegree_centrality(self): + d = nx.out_degree_centrality(self.G) + exact = { + 0: 0.125, + 1: 0.125, + 2: 0.125, + 3: 0.125, + 4: 0.125, + 5: 0.375, + 6: 0.0, + 7: 0.0, + 8: 0.0, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_small_graph_centrality(self): + G = nx.empty_graph(create_using=nx.DiGraph) + assert {} == nx.degree_centrality(G) + assert {} == nx.out_degree_centrality(G) + assert {} == nx.in_degree_centrality(G) + + G = nx.empty_graph(1, create_using=nx.DiGraph) + assert {0: 1} == nx.degree_centrality(G) + assert {0: 1} == nx.out_degree_centrality(G) + assert {0: 1} == nx.in_degree_centrality(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py new file mode 100644 index 0000000..2aac0de --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py @@ -0,0 +1,66 @@ +import 
networkx as nx + + +def small_ego_G(): + """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf""" + edges = [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("b", "f"), + ("c", "d"), + ("c", "f"), + ("c", "h"), + ("d", "f"), + ("e", "f"), + ("f", "h"), + ("h", "j"), + ("h", "k"), + ("i", "j"), + ("i", "k"), + ("j", "k"), + ("u", "a"), + ("u", "b"), + ("u", "c"), + ("u", "d"), + ("u", "e"), + ("u", "f"), + ("u", "g"), + ("u", "h"), + ("u", "i"), + ("u", "j"), + ("u", "k"), + ] + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +class TestDispersion: + def test_article(self): + """our algorithm matches article's""" + G = small_ego_G() + disp_uh = nx.dispersion(G, "u", "h", normalized=False) + disp_ub = nx.dispersion(G, "u", "b", normalized=False) + assert disp_uh == 4 + assert disp_ub == 1 + + def test_results_length(self): + """there is a result for every node""" + G = small_ego_G() + disp = nx.dispersion(G) + disp_Gu = nx.dispersion(G, "u") + disp_uv = nx.dispersion(G, "u", "h") + assert len(disp) == len(G) + assert len(disp_Gu) == len(G) - 1 + assert isinstance(disp_uv, float) + + def test_impossible_things(self): + G = nx.karate_club_graph() + disp = nx.dispersion(G) + for u in disp: + for v in disp[u]: + assert disp[u][v] >= 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py new file mode 100644 index 0000000..7a44aff --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -0,0 +1,168 @@ +import math + +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx + + +class TestEigenvectorCentrality: + def test_K5(self): + """Eigenvector centrality: K5""" + G = nx.complete_graph(5) + b = nx.eigenvector_centrality(G) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.eigenvector_centrality(G, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + b = nx.eigenvector_centrality(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_P3_unweighted(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + G = nx.path_graph(3) + nx.eigenvector_centrality(G, max_iter=0) + + +class TestEigenvectorCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.evc = [ + 0.25368793, + 0.19576478, + 
0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + H = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges) + cls.H = G.reverse() + cls.H.evc = [ + 0.25368793, + 0.19576478, + 0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + def test_eigenvector_centrality_weighted(self): + G = self.G + p = nx.eigenvector_centrality(G) + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_weighted_numpy(self): + G = self.G + p = nx.eigenvector_centrality_numpy(G) + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_eigenvector_centrality_unweighted(self): + G = self.H + p = nx.eigenvector_centrality(G) + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_unweighted_numpy(self): + G = self.H + p = nx.eigenvector_centrality_numpy(G) + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestEigenvectorCentralityExceptions: + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.MultiGraph()) + + def test_multigraph_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.MultiGraph()) + + def test_empty(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.Graph()) + + def test_empty_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.Graph()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_group.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_group.py new file mode 100644 index 0000000..3f5559d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_group.py @@ -0,0 +1,278 @@ +""" +Tests for Group Centrality Measures +""" + + +import pytest + +import networkx as nx + + +class TestGroupBetweennessCentrality: + def test_group_betweenness_single_node(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=False + ) + b_answer = 3.0 + assert b == b_answer + + def test_group_betweenness_with_endpoints(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=True + ) + b_answer = 7.0 + assert b == b_answer + + def test_group_betweenness_normalized(self): + """ + Group betweenness centrality for group with more than + 1 node and normalized + """ + G = nx.path_graph(5) + C = [1, 3] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=True, endpoints=False + ) + b_answer = 1.0 + assert b == b_answer + + def test_two_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(7) + C = [[0, 1, 6], [0, 1, 5]] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = [0.0, 3.0] + assert b == b_answer + + def test_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 
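+        (Editor's note: on the 6-cycle with C = [0, 1, 5], every shortest
+        path between the remaining nodes 2, 3 and 4 stays on the 2-3-4 arc,
+        so no counted path crosses the group and its betweenness is 0.)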
+ """ + G = nx.cycle_graph(6) + C = [0, 1, 5] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_disconnected_graph(self): + """ + Group betweenness centrality in a disconnected graph + """ + G = nx.path_graph(5) + G.remove_edge(0, 1) + C = [1] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + C = [1, 2] + b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False) + b_answer = 5.0 + assert b == b_answer + + +class TestProminentGroup: + np = pytest.importorskip("numpy") + pd = pytest.importorskip("pandas") + + def test_prominent_group_single_node(self): + """ + Prominent group for single node + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, endpoints=False) + b_answer, g_answer = 4.0, [2] + assert b == b_answer and g == g_answer + + def test_prominent_group_with_c(self): + """ + Prominent group without some nodes + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, C=[2]) + b_answer, g_answer = 3.0, [1] + assert b == b_answer and g == g_answer + + def test_prominent_group_normalized_endpoints(self): + """ + Prominent group with normalized result, with endpoints + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True) + b_answer, g_answer = 1.7, [2, 5] + assert b == b_answer and g == g_answer + + def test_prominent_group_disconnected_graph(self): + """ + Prominent group of disconnected graph + """ + G = nx.path_graph(6) + G.remove_edge(0, 1) + k = 1 + b, g = nx.prominent_group(G, k, weight=None, normalized=False) + b_answer, g_answer = 4.0, [3] + assert b == b_answer and g == g_answer + + def test_prominent_group_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.prominent_group(nx.path_graph(5), 1, C=[10]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + k = 2 + b, g = nx.prominent_group(G, k, weight="weight", normalized=False) + b_answer, g_answer = 5.0, [1, 2] + assert b == b_answer and g == g_answer + + def test_prominent_group_greedy_algorithm(self): + """ + Group betweenness centrality in a greedy algorithm + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True) + b_answer, g_answer = 1.7, [6, 3] + assert b == b_answer and g == g_answer + + +class TestGroupClosenessCentrality: + def test_group_closeness_single_node(self): + """ + Group closeness centrality for a 
single node group + """ + G = nx.path_graph(5) + c = nx.group_closeness_centrality(G, [1]) + c_answer = nx.closeness_centrality(G, 1) + assert c == c_answer + + def test_group_closeness_disconnected(self): + """ + Group closeness centrality for a disconnected graph + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 0 + assert c == c_answer + + def test_group_closeness_multiple_node(self): + """ + Group closeness centrality for a group with more than + 1 node + """ + G = nx.path_graph(4) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 1 + assert c == c_answer + + def test_group_closeness_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8]) + + +class TestGroupDegreeCentrality: + def test_group_degree_centrality_single_node(self): + """ + Group degree centrality for a single node group + """ + G = nx.path_graph(4) + d = nx.group_degree_centrality(G, [1]) + d_answer = nx.degree_centrality(G)[1] + assert d == d_answer + + def test_group_degree_centrality_multiple_node(self): + """ + Group degree centrality for group with more than + 1 node + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_in_degree_centrality(self): + """ + Group in-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_in_degree_centrality(G, [1, 2]) + d_answer = 0 + assert d == d_answer + + def test_group_out_degree_centrality(self): + """ + Group out-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_out_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_degree_centrality_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NetworkXError + """ + with pytest.raises(nx.NetworkXError): + nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py new file mode 100644 index 0000000..450356e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py @@ -0,0 +1,115 @@ +""" +Tests for harmonic centrality. 
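+
+Harmonic centrality of a node v is the sum of 1 / d(v, u) over all other
+nodes u, with unreachable nodes contributing 0. A small illustrative doctest
+(editor's addition, not part of the vendored file):
+
+>>> import networkx as nx
+>>> nx.harmonic_centrality(nx.path_graph(3))
+{0: 1.5, 1: 2.0, 2: 1.5}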
+""" +import pytest + +import networkx as nx +from networkx.algorithms.centrality import harmonic_centrality + + +class TestClosenessCentrality: + @classmethod + def setup_class(cls): + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph) + + cls.C5 = nx.cycle_graph(5) + + cls.T = nx.balanced_tree(r=2, h=2) + + cls.Gb = nx.DiGraph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)]) + + def test_p3_harmonic(self): + c = harmonic_centrality(self.P3) + d = {0: 1.5, 1: 2, 2: 1.5} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic(self): + c = harmonic_centrality(self.P4) + d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333} + for n in sorted(self.P4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_clique_complete(self): + c = harmonic_centrality(self.K5) + d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C4(self): + c = harmonic_centrality(self.C4) + d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5} + for n in sorted(self.C4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C5(self): + c = harmonic_centrality(self.C5) + d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3, 5: 4} + for n in sorted(self.C5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_bal_tree(self): + c = harmonic_centrality(self.T) + d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333} + for n in sorted(self.T): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_exampleGraph(self): + c = harmonic_centrality(self.Gb) + d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1} + for n in sorted(self.Gb): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_harmonic(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("a", "b", 10), + ("d", "c", 5), + ("a", "c", 1), + ("e", "f", 2), + ("f", "c", 1), + ("a", "f", 3), + ] + ) + c = harmonic_centrality(XG, distance="weight") + d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_empty(self): + G = nx.DiGraph() + c = harmonic_centrality(G, distance="weight") + d = {} + assert c == d + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + c = harmonic_centrality(G, distance="weight") + d = {0: 0} + assert c == d + + def test_cycle_c4_directed(self): + c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2]) + d = {0: 0.833, 1: 0.333} + for n in [0, 1]: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_harmonic_subset(self): + c = harmonic_centrality(self.P3, sources=[0, 1]) + d = {0: 1, 1: 1, 2: 1.5} + for n in self.P3: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic_subset(self): + c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1]) + d = {2: 1.5, 3: 0.8333333} + for n in [2, 3]: + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py new file mode 100644 index 0000000..2511453 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py @@ -0,0 +1,345 @@ +import math + +import pytest + +import networkx as nx + + +class TestKatzCentrality: + 
def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.katz_centrality(G, alpha, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality(G, 0.1, beta=beta) + + def test_bad_beta_numbe(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality(G, 0.1, beta="foo") + + +class TestKatzCentralityNumpy: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + 
def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality_numpy(G, 0.1, beta=beta) + + def test_bad_beta_numbe(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality_numpy(G, 0.1, beta="foo") + + def test_K5_unweighted(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha, weight=None) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3_unweighted(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + +class TestKatzCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.alpha = 0.1 + cls.G.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + H = nx.DiGraph(edges) + cls.H = G.reverse() + cls.H.alpha = 0.1 + cls.H.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + 
p = nx.katz_centrality(G, alpha, weight="weight") + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality(H, alpha, weight="weight") + for (a, b) in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected): + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + super().setup_class() + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality_numpy(G, alpha, weight="weight") + for (a, b) in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality_numpy(H, alpha, weight="weight") + for (a, b) in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzEigenvectorVKatz: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_eigenvector_v_katz_random(self): + G = nx.gnp_random_graph(10, 0.5, seed=1234) + l = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense())) + e = nx.eigenvector_centrality_numpy(G) + k = nx.katz_centrality_numpy(G, 1.0 / l) + for n in G: + assert e[n] == pytest.approx(k[n], abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py new file mode 100644 index 0000000..065b4fd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py @@ -0,0 +1,337 @@ +import pytest + +import networkx as nx + + +class TestLoadCentrality: + @classmethod + def setup_class(cls): + + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + cls.G = G + cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + cls.F = nx.florentine_families_graph() + cls.LM = nx.les_miserables_graph() + cls.D = nx.cycle_graph(3, create_using=nx.DiGraph()) + cls.D.add_edges_from([(3, 0), (4, 3)]) + + def test_not_strongly_connected(self): + b = nx.load_centrality(self.D) + result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000} + for n in sorted(self.D): + assert result[n] == pytest.approx(b[n], abs=1e-3) + assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3) + + def test_weighted_load(self): + b = nx.load_centrality(self.G, weight="weight", normalized=False) + for n in sorted(self.G): + assert b[n] == self.exact_weighted[n] + + def test_k5_load(self): + G = self.K5 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_load(self): 
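+        # Node 1 is interior on the only shortest path between the endpoints
+        # of P3, so it carries all the load (1.0 normalized) and the
+        # endpoints carry none; the scalar form with v=1 returns the same.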
+ G = self.P3 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 1.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + c = nx.load_centrality(G, v=1) + assert c == pytest.approx(1.0, abs=1e-7) + c = nx.load_centrality(G, v=1, normalized=True) + assert c == pytest.approx(1.0, abs=1e-7) + + def test_p2_load(self): + G = nx.path_graph(2) + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G) + d = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G) + d = { + "Acciaiuoli": 0.000, + "Albizzi": 0.211, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.251, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.117, + "Salviati": 0.143, + "Strozzi": 0.106, + "Tornabuoni": 0.090, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_load(self): + G = self.LM + c = nx.load_centrality(G) + d = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.567, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.043, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.128, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.012, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.164, + "Gillenormand": 0.021, + "Magnon": 0.000, + "MlleGillenormand": 0.047, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.133, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.041, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.006, + "Bahorel": 0.002, + "Bossuet": 0.032, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_k5_load(self): + G = self.K5 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_p3_load(self): + G = self.P3 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 2.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def 
test_unnormalized_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G, normalized=False) + d = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G, normalized=False) + + d = { + "Acciaiuoli": 0.000, + "Albizzi": 38.333, + "Barbadori": 17.000, + "Bischeri": 19.000, + "Castellani": 10.000, + "Ginori": 0.000, + "Guadagni": 45.667, + "Lamberteschi": 0.000, + "Medici": 95.000, + "Pazzi": 0.000, + "Peruzzi": 4.000, + "Ridolfi": 21.333, + "Salviati": 26.000, + "Strozzi": 19.333, + "Tornabuoni": 16.333, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_load_betweenness_difference(self): + # Difference Between Load and Betweenness + # --------------------------------------- The smallest graph + # that shows the difference between load and betweenness is + # G=ladder_graph(3) (Graph B below) + + # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong + # Wang: Comment on "Scientific collaboration + # networks. II. Shortest paths, weighted networks, and + # centrality". https://arxiv.org/pdf/physics/0511084 + + # Notice that unlike here, their calculation adds to 1 to the + # betweennes of every node i for every path from i to every + # other node. This is exactly what it should be, based on + # Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t, + # s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore, + # they allow v to be the target node. + + # We follow Brandes 2001, who follows Freeman 1977 that make + # the sum for betweenness of v exclude paths where v is either + # the source or target node. To agree with their numbers, we + # must additionally, remove edge (4,8) from the graph, see AC + # example following (there is a mistake in the figure in their + # paper - personal communication). 
+ + # A = nx.Graph() + # A.add_edges_from([(0,1), (1,2), (1,3), (2,4), + # (3,5), (4,6), (4,7), (4,8), + # (5,8), (6,9), (7,9), (8,9)]) + B = nx.Graph() # ladder_graph(3) + B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + c = nx.load_centrality(B, normalized=False) + d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750} + for n in sorted(B): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_c4_edge_load(self): + G = self.C4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_edge_load(self): + G = self.P4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_k5_edge_load(self): + G = self.K5 + c = nx.edge_load_centrality(G) + d = { + (0, 1): 5.000, + (0, 2): 5.000, + (0, 3): 5.000, + (0, 4): 5.000, + (1, 2): 5.000, + (1, 3): 5.000, + (1, 4): 5.000, + (2, 3): 5.000, + (2, 4): 5.000, + (3, 4): 5.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_tree_edge_load(self): + G = self.T + c = nx.edge_load_centrality(G) + d = { + (0, 1): 24.000, + (0, 2): 24.000, + (1, 3): 12.000, + (1, 4): 12.000, + (2, 5): 12.000, + (2, 6): 12.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py new file mode 100644 index 0000000..e675845 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py @@ -0,0 +1,82 @@ +import pytest + +import networkx as nx + + +def example1a_G(): + G = nx.Graph() + G.add_node(1, percolation=0.1) + G.add_node(2, percolation=0.2) + G.add_node(3, percolation=0.2) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.5) + G.add_node(8, percolation=0.5) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +def example1b_G(): + G = nx.Graph() + G.add_node(1, percolation=0.3) + G.add_node(2, percolation=0.5) + G.add_node(3, percolation=0.5) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.1) + G.add_node(8, percolation=0.1) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +class TestPercolationCentrality: + def test_percolation_example1a(self): + """percolation centrality: example 1a""" + G = example1a_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.625, 6: 0.667} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + def test_percolation_example1b(self): + """percolation centrality: example 1a""" + G = example1b_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.825, 6: 0.4} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + def test_converge_to_betweenness(self): + """percolation centrality: should converge to betweenness + centrality when all nodes are percolated the same""" + # taken from betweenness test test_florentine_families_graph + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + 
"Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + p_states = {k: 1.0 for k, v in b_answer.items()} + p_answer = nx.percolation_centrality(G, states=p_states) + for n in sorted(G): + assert p_answer[n] == pytest.approx(b_answer[n], abs=1e-3) + + p_states = {k: 0.3 for k, v in b_answer.items()} + p_answer = nx.percolation_centrality(G, states=p_states) + for n in sorted(G): + assert p_answer[n] == pytest.approx(b_answer[n], abs=1e-3) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_reaching.py new file mode 100644 index 0000000..86ae8a9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_reaching.py @@ -0,0 +1,109 @@ +"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module.""" +import pytest + +import networkx as nx + + +class TestGlobalReachingCentrality: + """Unit tests for the global reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.global_reaching_centrality(G, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.global_reaching_centrality(G, weight="weight") + + def test_directed_star(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.5 + assert grc(G) == 1 + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight=None) == 0.25 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.375 + + def test_cycle_directed_unweighted(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_undirected_unweighted(self): + G = nx.Graph() + G.add_edge(1, 2) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_directed_weighted(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)]) + assert nx.global_reaching_centrality(G) == 0 + + def test_cycle_undirected_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False) == 0 + + def test_directed_weighted(self): + G = nx.DiGraph() + G.add_edge("A", "B", weight=5) + G.add_edge("B", "C", weight=1) + G.add_edge("B", "D", weight=0.25) + G.add_edge("D", "E", weight=1) + + denom = len(G) - 1 + A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom + B_local = sum([1, 0.25, 0.625]) / denom + C_local = 0 + D_local = sum([1]) / denom + E_local = 0 + + local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local] + max_local = max(local_reach_ctrs) + expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom + grc = nx.global_reaching_centrality + actual = grc(G, normalized=False, weight="weight") + assert expected == pytest.approx(actual, abs=1e-7) + + +class TestLocalReachingCentrality: + 
"""Unit tests for the local reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 0)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.local_reaching_centrality + assert grc(G, 1, weight=None, normalized=False) == 0.75 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=False, weight="weight" + ) + assert centrality == 1.5 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py new file mode 100644 index 0000000..903bbe9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py @@ -0,0 +1,67 @@ +""" +Tests for second order centrality. +""" + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +class TestSecondOrderCentrality: + def test_empty(self): + with pytest.raises(nx.NetworkXException): + G = nx.empty_graph() + nx.second_order_centrality(G) + + def test_non_connected(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph() + G.add_node(0) + G.add_node(1) + nx.second_order_centrality(G) + + def test_non_negative_edge_weights(self): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(2) + G.add_edge(0, 1, weight=-1) + nx.second_order_centrality(G) + + def test_one_node_graph(self): + """Second order centrality: single node""" + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + assert nx.second_order_centrality(G)[0] == 0 + + def test_P3(self): + """Second order centrality: line graph, as defined in paper""" + G = nx.path_graph(3) + b_answer = {0: 3.741, 1: 1.414, 2: 3.741} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + def test_K3(self): + """Second order centrality: complete graph, as defined in paper""" + G = nx.complete_graph(3) + b_answer = {0: 1.414, 1: 1.414, 2: 1.414} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + def test_ring_graph(self): + """Second order centrality: ring graph, as defined in paper""" + G = nx.cycle_graph(5) + b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py new file mode 100644 index 0000000..7109275 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py @@ -0,0 +1,110 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.centrality.subgraph_alg import ( + communicability_betweenness_centrality, + estrada_index, + subgraph_centrality, + 
subgraph_centrality_exp, +) + + +class TestSubgraph: + def test_subgraph_centrality(self): + answer = {0: 1.5430806348152433, 1: 1.5430806348152433} + result = subgraph_centrality(nx.path_graph(2)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 1.6445956054135658, + "Albert": 2.4368257358712189, + "Aric": 2.4368257358712193, + "Dan": 3.1306328496328168, + "Franck": 2.3876142275231915, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = subgraph_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + result1 = subgraph_centrality_exp(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_subgraph_centrality_big_graph(self): + g199 = nx.complete_graph(199) + g200 = nx.complete_graph(200) + + comm199 = nx.subgraph_centrality(g199) + comm199_exp = nx.subgraph_centrality_exp(g199) + + comm200 = nx.subgraph_centrality(g200) + comm200_exp = nx.subgraph_centrality_exp(g200) + + def test_communicability_betweenness_centrality_small(self): + result = communicability_betweenness_centrality(nx.path_graph(2)) + assert result == {0: 0, 1: 0} + + result = communicability_betweenness_centrality(nx.path_graph(1)) + assert result == {0: 0} + + result = communicability_betweenness_centrality(nx.path_graph(0)) + assert result == {} + + answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313} + result = communicability_betweenness_centrality(nx.path_graph(3)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + result = communicability_betweenness_centrality(nx.complete_graph(3)) + for k, v in result.items(): + assert 0.49786143366223296 == pytest.approx(v, abs=1e-7) + + def test_communicability_betweenness_centrality(self): + answer = { + 0: 0.07017447951484615, + 1: 0.71565598701107991, + 2: 0.71565598701107991, + 3: 0.07017447951484615, + } + result = communicability_betweenness_centrality(nx.path_graph(4)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 0.060039074193949521, + "Albert": 0.315470761661372, + "Aric": 0.31547076166137211, + "Dan": 0.68297778678316201, + "Franck": 0.21977926617449497, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = communicability_betweenness_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_estrada_index(self): + answer = 1041.2470334195475 + result = estrada_index(nx.karate_club_graph()) + assert answer == pytest.approx(result, abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_trophic.py new file mode 100644 index 0000000..f1d6813 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_trophic.py @@ -0,0 +1,302 @@ +"""Test trophic levels, trophic differences and trophic coherence +""" +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_trophic_levels(): + """Trivial example""" + G = nx.DiGraph() + G.add_edge("a", "b") + G.add_edge("b", "c") + + d = nx.trophic_levels(G) + assert d == {"a": 1, "b": 2, "c": 3} + + 
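+# The next test walks through Levine's worked example by hand: drop the
+# basal (in-degree-zero) nodes, restrict the transposed adjacency matrix q
+# to the remaining nodes, solve (I - q) y = 1, and add one to get each
+# trophic level; basal nodes are assigned level 1 by convention.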
+def test_trophic_levels_levine(): + """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 83, + 195-207 + """ + S = nx.DiGraph() + S.add_edge(1, 2, weight=1.0) + S.add_edge(1, 3, weight=0.2) + S.add_edge(1, 4, weight=0.8) + S.add_edge(2, 3, weight=0.2) + S.add_edge(2, 5, weight=0.3) + S.add_edge(4, 3, weight=0.6) + S.add_edge(4, 5, weight=0.7) + S.add_edge(5, 4, weight=0.2) + + # save copy for later, test intermediate implementation details first + S2 = S.copy() + + # drop nodes of in-degree zero + z = [nid for nid, d in S.in_degree if d == 0] + for nid in z: + S.remove_node(nid) + + # find adjacency matrix + q = nx.linalg.graphmatrix.adjacency_matrix(S).T + + # fmt: off + expected_q = np.array([ + [0, 0, 0., 0], + [0.2, 0, 0.6, 0], + [0, 0, 0, 0.2], + [0.3, 0, 0.7, 0] + ]) + # fmt: on + assert np.array_equal(q.todense(), expected_q) + + # must be square, size of number of nodes + assert len(q.shape) == 2 + assert q.shape[0] == q.shape[1] + assert q.shape[0] == len(S) + + nn = q.shape[0] + + i = np.eye(nn) + n = np.linalg.inv(i - q) + y = np.asarray(n) @ np.ones(nn) + + expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814]) + assert np.allclose(y, expected_y) + + expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814} + + d = nx.trophic_levels(S2) + + for nid, level in d.items(): + expected_level = expected_d[nid] + assert expected_level == pytest.approx(level, abs=1e-7) + + +def test_trophic_levels_simple(): + matrix_a = np.array([[0, 0], [1, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + assert d[0] == pytest.approx(2, abs=1e-7) + assert d[1] == pytest.approx(1, abs=1e-7) + + +def test_trophic_levels_more_complex(): + # fmt: off + matrix = np.array([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + expected_result = [1, 2, 3, 4] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + # fmt: off + matrix = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + + expected_result = [1, 2, 2.5, 3.25] + print("Calculated result: ", d) + print("Expected Result: ", expected_result) + + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + +def test_trophic_levels_even_more_complex(): + # fmt: off + # Another, bigger matrix + matrix = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # Generated this linear system using pen and paper: + K = np.array([ + [1, 0, -1, 0, 0], + [0, 0.5, 0, -0.5, 0], + [0, 0, 1, 0, 0], + [0, -0.5, 0, 1, -0.5], + [0, 0, 0, 0, 1], + ]) + # fmt: on + result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5)) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + result_2 = nx.trophic_levels(G) + + for ind in range(5): + assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7) + + +def test_trophic_levels_singular_matrix(): + """Should raise an error with graphs with only non-basal nodes""" + matrix = np.identity(4) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." 
+ ) + assert msg in str(e.value) + + +def test_trophic_levels_singular_with_basal(): + """Should fail to compute if there are any parts of the graph which are not + reachable from any basal node (with in-degree zero). + """ + G = nx.DiGraph() + # a has in-degree zero + G.add_edge("a", "b") + + # b is one level above a, c and d + G.add_edge("c", "b") + G.add_edge("d", "b") + + # c and d form a loop, neither are reachable from a + G.add_edge("c", "d") + G.add_edge("d", "c") + + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + # if self-loops are allowed, smaller example: + G = nx.DiGraph() + G.add_edge("a", "b") # a has in-degree zero + G.add_edge("c", "b") # b is one level above a and c + G.add_edge("c", "c") # c has a self-loop + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + +def test_trophic_differences(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7) + assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7) + assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7) + assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7) + + +def test_trophic_incoherence_parameter_no_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 1, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 1] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # no self-loops case + # fmt: off + matrix_d = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + +def test_trophic_incoherence_parameter_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + 
[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(2, abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_voterank.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_voterank.py new file mode 100644 index 0000000..aa653ae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/tests/test_voterank.py @@ -0,0 +1,61 @@ +""" + Unit tests for VoteRank. +""" + + +import networkx as nx + + +class TestVoteRankCentrality: + # Example Graph present in reference paper + def test_voterank_centrality_1(self): + G = nx.Graph() + G.add_edges_from( + [ + (7, 8), + (7, 5), + (7, 9), + (5, 0), + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (1, 6), + (2, 6), + (3, 6), + (4, 6), + ] + ) + assert [0, 7, 6] == nx.voterank(G) + + # Graph unit test + def test_voterank_centrality_2(self): + G = nx.florentine_families_graph() + d = nx.voterank(G, 4) + exact = ["Medici", "Strozzi", "Guadagni", "Castellani"] + assert exact == d + + # DiGraph unit test + def test_voterank_centrality_3(self): + G = nx.gnc_graph(10, seed=7) + d = nx.voterank(G, 4) + exact = [3, 6, 8] + assert exact == d + + # MultiGraph unit test + def test_voterank_centrality_4(self): + G = nx.MultiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 1, 5, 4] + assert exact == nx.voterank(G) + + # MultiDiGraph unit test + def test_voterank_centrality_5(self): + G = nx.MultiDiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 0, 5, 4] + assert exact == nx.voterank(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/trophic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/trophic.py new file mode 100644 index 0000000..b7b1073 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/trophic.py @@ -0,0 +1,159 @@ +"""Trophic levels""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"] + + +@not_implemented_for("undirected") +def trophic_levels(G, weight="weight"): + r"""Compute the trophic levels of nodes. + + The trophic level of a node $i$ is + + .. math:: + + s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j + + where $k^{in}_i$ is the in-degree of i + + .. math:: + + k^{in}_i = \sum_{j} a_{ij} + + and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention. + + These are calculated using the method outlined in Levine [1]_. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + nodes : dict + Dictionary of nodes with trophic level as the value. + + References + ---------- + .. [1] Stephen Levine (1980) J. theor. Biol. 
83, 195-207 + """ + import numpy as np + + # find adjacency matrix + a = nx.adjacency_matrix(G, weight=weight).T.toarray() + + # drop rows/columns where in-degree is zero + rowsum = np.sum(a, axis=1) + p = a[rowsum != 0][:, rowsum != 0] + # normalise so sum of in-degree weights is 1 along each row + p = p / rowsum[rowsum != 0][:, np.newaxis] + + # calculate trophic levels + nn = p.shape[0] + i = np.eye(nn) + try: + n = np.linalg.inv(i - p) + except np.linalg.LinAlgError as err: + # LinAlgError is raised when there is a non-basal node + msg = ( + "Trophic levels are only defined for graphs where every " + + "node has a path from a basal node (basal nodes are nodes " + + "with no incoming edges)." + ) + raise nx.NetworkXError(msg) from err + y = n.sum(axis=1) + 1 + + levels = {} + + # all nodes with in-degree zero have trophic level == 1 + zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0) + for node_id in zero_node_ids: + levels[node_id] = 1 + + # all other nodes have levels as calculated + nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0) + for i, node_id in enumerate(nonzero_node_ids): + levels[node_id] = y[i] + + return levels + + +@not_implemented_for("undirected") +def trophic_differences(G, weight="weight"): + r"""Compute the trophic differences of the edges of a directed graph. + + The trophic difference $x_ij$ for each edge is defined in Johnson et al. + [1]_ as: + + .. math:: + x_ij = s_j - s_i + + Where $s_i$ is the trophic level of node $i$. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + diffs : dict + Dictionary of edges with trophic differences as the value. + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. + Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + levels = trophic_levels(G, weight=weight) + diffs = {} + for u, v in G.edges: + diffs[(u, v)] = levels[v] - levels[u] + return diffs + + +@not_implemented_for("undirected") +def trophic_incoherence_parameter(G, weight="weight", cannibalism=False): + r"""Compute the trophic incoherence parameter of a graph. + + Trophic coherence is defined as the homogeneity of the distribution of + trophic distances: the more similar, the more coherent. This is measured by + the standard deviation of the trophic differences and referred to as the + trophic incoherence parameter $q$ by [1]. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + cannibalism: Boolean + If set to False, self edges are not considered in the calculation + + Returns + ------- + trophic_incoherence_parameter : float + The trophic coherence of a graph + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. 
+ Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + import numpy as np + + if cannibalism: + diffs = trophic_differences(G, weight=weight) + else: + # If no cannibalism, remove self-edges + self_loops = list(nx.selfloop_edges(G)) + if self_loops: + # Make a copy so we do not change G's edges in memory + G_2 = G.copy() + G_2.remove_edges_from(self_loops) + else: + # Avoid copy otherwise + G_2 = G + diffs = trophic_differences(G_2, weight=weight) + return np.std(list(diffs.values())) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/voterank_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/voterank_alg.py new file mode 100644 index 0000000..c28df58 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/centrality/voterank_alg.py @@ -0,0 +1,92 @@ +"""Algorithm to select influential nodes in a graph using VoteRank.""" + +__all__ = ["voterank"] + + +def voterank(G, number_of_nodes=None): + """Select a list of influential nodes in a graph using VoteRank algorithm + + VoteRank [1]_ computes a ranking of the nodes in a graph G based on a + voting scheme. With VoteRank, all nodes vote for each of its in-neighbours + and the node with the highest votes is elected iteratively. The voting + ability of out-neighbors of elected nodes is decreased in subsequent turns. + + Parameters + ---------- + G : graph + A NetworkX graph. + + number_of_nodes : integer, optional + Number of ranked nodes to extract (default all nodes). + + Returns + ------- + voterank : list + Ordered list of computed seeds. + Only nodes with positive number of votes are returned. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)]) + >>> nx.voterank(G) + [0, 1] + + The algorithm can be used both for undirected and directed graphs. + However, the directed version is different in two ways: + (i) nodes only vote for their in-neighbors and + (ii) only the voting ability of elected node and its out-neighbors are updated: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)]) + >>> nx.voterank(G) + [2, 3] + + Notes + ----- + Each edge is treated independently in case of multigraphs. + + References + ---------- + .. [1] Zhang, J.-X. et al. (2016). + Identifying a set of influential spreaders in complex networks. + Sci. Rep. 6, 27823; doi: 10.1038/srep27823. + """ + influential_nodes = [] + vote_rank = {} + if len(G) == 0: + return influential_nodes + if number_of_nodes is None or number_of_nodes > len(G): + number_of_nodes = len(G) + if G.is_directed(): + # For directed graphs compute average out-degree + avgDegree = sum(deg for _, deg in G.out_degree()) / len(G) + else: + # For undirected graphs compute average degree + avgDegree = sum(deg for _, deg in G.degree()) / len(G) + # step 1 - initiate all nodes to (0,1) (score, voting ability) + for n in G.nodes(): + vote_rank[n] = [0, 1] + # Repeat steps 1b to 4 until num_seeds are elected. 
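+    # Each iteration: reset all scores; every node adds its current voting
+    # ability to the score of each neighbor it votes for (its in-neighbors,
+    # in directed graphs); scores of already-elected nodes are zeroed; the
+    # top scorer is elected (stopping early if its score is zero), its own
+    # voting ability is set to zero, and each of its (out-)neighbors loses
+    # 1/avgDegree of voting ability, floored at zero.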
+ for _ in range(number_of_nodes): + # step 1b - reset rank + for n in G.nodes(): + vote_rank[n][0] = 0 + # step 2 - vote + for n, nbr in G.edges(): + # In directed graphs nodes only vote for their in-neighbors + vote_rank[n][0] += vote_rank[nbr][1] + if not G.is_directed(): + vote_rank[nbr][0] += vote_rank[n][1] + for n in influential_nodes: + vote_rank[n][0] = 0 + # step 3 - select top node + n = max(G.nodes, key=lambda x: vote_rank[x][0]) + if vote_rank[n][0] == 0: + return influential_nodes + influential_nodes.append(n) + # weaken the selected node + vote_rank[n] = [0, 0] + # step 4 - update voterank properties + for _, nbr in G.edges(n): + vote_rank[nbr][1] -= 1 / avgDegree + vote_rank[nbr][1] = max(vote_rank[nbr][1], 0) + return influential_nodes diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/chains.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/chains.py new file mode 100644 index 0000000..2018c88 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/chains.py @@ -0,0 +1,167 @@ +"""Functions for finding chains in a graph.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["chain_decomposition"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def chain_decomposition(G, root=None): + """Returns the chain decomposition of a graph. + + The *chain decomposition* of a graph with respect a depth-first + search tree is a set of cycles or paths derived from the set of + fundamental cycles of the tree in the following manner. Consider + each fundamental cycle with respect to the given tree, represented + as a list of edges beginning with the nontree edge oriented away + from the root of the tree. For each fundamental cycle, if it + overlaps with any previous fundamental cycle, just take the initial + non-overlapping segment, which is a path instead of a cycle. Each + cycle or path is called a *chain*. For more information, see [1]_. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the chain + decomposition for the connected component containing this node + will be returned. This node indicates the root of the depth-first + search tree. + + Yields + ------ + chain : list + A list of edges representing a chain. There is no guarantee on + the orientation of the edges in each chain (for example, if a + chain includes the edge joining nodes 1 and 2, the chain may + include either (1, 2) or (2, 1)). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> list(nx.chain_decomposition(G)) + [[(4, 5), (5, 3), (3, 4)]] + + Notes + ----- + The worst-case running time of this implementation is linear in the + number of nodes and number of edges [1]_. + + References + ---------- + .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex- + and 2-edge-connectivity." *Information Processing Letters*, + 113, 241–244. Elsevier. + + """ + + def _dfs_cycle_forest(G, root=None): + """Builds a directed graph composed of cycles from the given graph. + + `G` is an undirected simple graph. `root` is a node in the graph + from which the depth-first search is started. + + This function returns both the depth-first search cycle graph + (as a :class:`~networkx.DiGraph`) and the list of nodes in + depth-first preorder. 
The depth-first search cycle graph is a + directed graph whose edges are the edges of `G` oriented toward + the root if the edge is a tree edge and away from the root if + the edge is a non-tree edge. If `root` is not specified, this + performs a depth-first search on each connected component of `G` + and returns a directed forest instead. + + If `root` is not in the graph, this raises :exc:`KeyError`. + + """ + # Create a directed graph from the depth-first search tree with + # root node `root` in which tree edges are directed toward the + # root and nontree edges are directed away from the root. For + # each node with an incident nontree edge, this creates a + # directed cycle starting with the nontree edge and returning to + # that node. + # + # The `parent` node attribute stores the parent of each node in + # the DFS tree. The `nontree` edge attribute indicates whether + # the edge is a tree edge or a nontree edge. + # + # We also store the order of the nodes found in the depth-first + # search in the `nodes` list. + H = nx.DiGraph() + nodes = [] + for u, v, d in nx.dfs_labeled_edges(G, source=root): + if d == "forward": + # `dfs_labeled_edges()` yields (root, root, 'forward') + # if it is beginning the search on a new connected + # component. + if u == v: + H.add_node(v, parent=None) + nodes.append(v) + else: + H.add_node(v, parent=u) + H.add_edge(v, u, nontree=False) + nodes.append(v) + # `dfs_labeled_edges` considers nontree edges in both + # orientations, so we need to not add the edge if it its + # other orientation has been added. + elif d == "nontree" and v not in H[u]: + H.add_edge(v, u, nontree=True) + else: + # Do nothing on 'reverse' edges; we only care about + # forward and nontree edges. + pass + return H, nodes + + def _build_chain(G, u, v, visited): + """Generate the chain starting from the given nontree edge. + + `G` is a DFS cycle graph as constructed by + :func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge + that begins a chain. `visited` is a set representing the nodes + in `G` that have already been visited. + + This function yields the edges in an initial segment of the + fundamental cycle of `G` starting with the nontree edge (`u`, + `v`) that includes all the edges up until the first node that + appears in `visited`. The tree edges are given by the 'parent' + node attribute. The `visited` set is updated to add each node in + an edge yielded by this function. + + """ + while v not in visited: + yield u, v + visited.add(v) + u, v = v, G.nodes[v]["parent"] + yield u, v + + # Create a directed version of H that has the DFS edges directed + # toward the root and the nontree edges directed away from the root + # (in each connected component). + H, nodes = _dfs_cycle_forest(G, root) + + # Visit the nodes again in DFS order. For each node, and for each + # nontree edge leaving that node, compute the fundamental cycle for + # that nontree edge starting with that edge. If the fundamental + # cycle overlaps with any visited nodes, just take the prefix of the + # cycle up to the point of visited nodes. + # + # We repeat this process for each connected component (implicitly, + # since `nodes` already has a list of the nodes grouped by connected + # component). + visited = set() + for u in nodes: + visited.add(u) + # For each nontree edge going out of node u... + edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d) + for u, v in edges: + # Create the cycle or cycle prefix starting with the + # nontree edge. 
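+            # _build_chain follows 'parent' tree edges from v toward the root
+            # and stops at the first node already in `visited`, so any part of
+            # the fundamental cycle claimed by an earlier chain is left out.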
+ chain = list(_build_chain(H, u, v, visited)) + yield chain diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/chordal.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/chordal.py new file mode 100644 index 0000000..ad17ef7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/chordal.py @@ -0,0 +1,476 @@ +""" +Algorithms for chordal graphs. + +A graph is chordal if every cycle of length at least 4 has a chord +(an edge joining two nodes not adjacent in the cycle). +https://en.wikipedia.org/wiki/Chordal_graph +""" +import sys +import warnings + +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_chordal", + "find_induced_nodes", + "chordal_graph_cliques", + "chordal_graph_treewidth", + "NetworkXTreewidthBoundExceeded", + "complete_to_chordal_graph", +] + + +class NetworkXTreewidthBoundExceeded(nx.NetworkXException): + """Exception raised when a treewidth bound has been provided and it has + been exceeded""" + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def is_chordal(G): + """Checks whether G is a chordal graph. + + A graph is chordal if every cycle of length at least 4 has a chord + (an edge joining two nodes not adjacent in the cycle). + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + chordal : bool + True if G is a chordal graph and False otherwise. + + Raises + ------ + NetworkXNotImplemented + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... ] + >>> G = nx.Graph(e) + >>> nx.is_chordal(G) + True + + Notes + ----- + The routine tries to go through every node following maximum cardinality + search. It returns False when it finds that the separator for any node + is not a clique. Based on the algorithms in [1]_. + + References + ---------- + .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms + to test chordality of graphs, test acyclicity of hypergraphs, and + selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984), + pp. 566–579. + """ + return len(_find_chordality_breaker(G)) == 0 + + +def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): + """Returns the set of induced nodes in the path from s to t. + + Parameters + ---------- + G : graph + A chordal NetworkX graph + s : node + Source node to look for induced nodes + t : node + Destination node to look for induced nodes + treewidth_bound: float + Maximum treewidth acceptable for the graph H. The search + for induced nodes will end as soon as the treewidth_bound is exceeded. + + Returns + ------- + induced_nodes : Set of nodes + The set of induced nodes in the path from s to t in G + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + If the input graph is an instance of one of these classes, a + :exc:`NetworkXError` is raised. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G = nx.generators.classic.path_graph(10) + >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + >>> sorted(induced_nodes) + [1, 2, 3, 4, 5, 6, 7, 8, 9] + + Notes + ----- + G must be a chordal graph and (s,t) an edge that is not in G. + + If a treewidth_bound is provided, the search for induced nodes will end + as soon as the treewidth_bound is exceeded. + + The algorithm is inspired by Algorithm 4 in [1]_. + A formal definition of induced node can also be found on that reference. + + References + ---------- + .. [1] Learning Bounded Treewidth Bayesian Networks. + Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008. + http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf + """ + if not is_chordal(G): + raise nx.NetworkXError("Input graph is not chordal.") + + H = nx.Graph(G) + H.add_edge(s, t) + induced_nodes = set() + triplet = _find_chordality_breaker(H, s, treewidth_bound) + while triplet: + (u, v, w) = triplet + induced_nodes.update(triplet) + for n in triplet: + if n != s: + H.add_edge(s, n) + triplet = _find_chordality_breaker(H, s, treewidth_bound) + if induced_nodes: + # Add t and the second node in the induced path from s to t. + induced_nodes.add(t) + for u in G[s]: + if len(induced_nodes & set(G[u])) == 2: + induced_nodes.add(u) + break + return induced_nodes + + +def chordal_graph_cliques(G): + """Returns the set of maximal cliques of a chordal graph. + + The algorithm breaks the graph in connected components and performs a + maximum cardinality search in each component to get the cliques. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + cliques : A set containing the maximal cliques in G. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... ] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> setlist = nx.chordal_graph_cliques(G) + """ + msg = "This will return a generator in 3.0." + warnings.warn(msg, DeprecationWarning) + return {c for c in _chordal_graph_cliques(G)} + + +def chordal_graph_treewidth(G): + """Returns the treewidth of the chordal graph G. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + treewidth : int + The size of the largest clique in the graph minus one. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... ] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> nx.chordal_graph_treewidth(G) + 3 + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth + """ + if not is_chordal(G): + raise nx.NetworkXError("Input graph is not chordal.") + + max_clique = -1 + for clique in nx.chordal_graph_cliques(G): + max_clique = max(max_clique, len(clique)) + return max_clique - 1 + + +def _is_complete_graph(G): + """Returns True if G is a complete graph.""" + if nx.number_of_selfloops(G) > 0: + raise nx.NetworkXError("Self loop found in _is_complete_graph()") + n = G.number_of_nodes() + if n < 2: + return True + e = G.number_of_edges() + max_edges = (n * (n - 1)) / 2 + return e == max_edges + + +def _find_missing_edge(G): + """Given a non-complete graph G, returns a missing edge.""" + nodes = set(G) + for u in G: + missing = nodes - set(list(G[u].keys()) + [u]) + if missing: + return (u, missing.pop()) + + +def _max_cardinality_node(G, choices, wanna_connect): + """Returns a the node in choices that has more connections in G + to nodes in wanna_connect. + """ + max_number = -1 + for x in choices: + number = len([y for y in G[x] if y in wanna_connect]) + if number > max_number: + max_number = number + max_cardinality_node = x + return max_cardinality_node + + +def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize): + """Given a graph G, starts a max cardinality search + (starting from s if s is given and from an arbitrary node otherwise) + trying to find a non-chordal cycle. + + If it does find one, it returns (u,v,w) where u,v,w are the three + nodes that together with s are involved in the cycle. + """ + if nx.number_of_selfloops(G) > 0: + raise nx.NetworkXError("Input graph is not chordal.") + unnumbered = set(G) + if s is None: + s = arbitrary_element(G) + unnumbered.remove(s) + numbered = {s} + current_treewidth = -1 + while unnumbered: # and current_treewidth <= treewidth_bound: + v = _max_cardinality_node(G, unnumbered, numbered) + unnumbered.remove(v) + numbered.add(v) + clique_wanna_be = set(G[v]) & numbered + sg = G.subgraph(clique_wanna_be) + if _is_complete_graph(sg): + # The graph seems to be chordal by now. We update the treewidth + current_treewidth = max(current_treewidth, len(clique_wanna_be)) + if current_treewidth > treewidth_bound: + raise nx.NetworkXTreewidthBoundExceeded( + f"treewidth_bound exceeded: {current_treewidth}" + ) + else: + # sg is not a clique, + # look for an edge that is not included in sg + (u, w) = _find_missing_edge(sg) + return (u, v, w) + return () + + +def _chordal_graph_cliques(G): + """Returns all maximal cliques of a chordal graph. + + The algorithm breaks the graph in connected components and performs a + maximum cardinality search in each component to get the cliques. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a frozenset of + nodes in `G`. The order of cliques is arbitrary. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... 
] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> cliques = [c for c in _chordal_graph_cliques(G)] + >>> cliques[0] + frozenset({1, 2, 3}) + """ + for C in (G.subgraph(c).copy() for c in connected_components(G)): + if C.number_of_nodes() == 1: + if nx.number_of_selfloops(C) > 0: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(C.nodes()) + else: + unnumbered = set(C.nodes()) + v = arbitrary_element(C) + unnumbered.remove(v) + numbered = {v} + clique_wanna_be = {v} + while unnumbered: + v = _max_cardinality_node(C, unnumbered, numbered) + unnumbered.remove(v) + numbered.add(v) + new_clique_wanna_be = set(C.neighbors(v)) & numbered + sg = C.subgraph(clique_wanna_be) + if _is_complete_graph(sg): + new_clique_wanna_be.add(v) + if not new_clique_wanna_be >= clique_wanna_be: + yield frozenset(clique_wanna_be) + clique_wanna_be = new_clique_wanna_be + else: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(clique_wanna_be) + + +@not_implemented_for("directed") +def complete_to_chordal_graph(G): + """Return a copy of G completed to a chordal graph + + Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is + called chordal if for each cycle with length bigger than 3, there exist + two non-adjacent nodes connected by an edge (called a chord). + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + H : NetworkX graph + The chordal enhancement of G + alpha : Dictionary + The elimination ordering of nodes of G + + Notes + ----- + There are different approaches to calculate the chordal + enhancement of a graph. The algorithm used here is called + MCS-M and gives at least minimal (local) triangulation of graph. Note + that this triangulation is not necessarily a global minimum. + + https://en.wikipedia.org/wiki/Chordal_graph + + References + ---------- + .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004) + Maximum Cardinality Search for Computing Minimal Triangulations of + Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3. + + Examples + -------- + >>> from networkx.algorithms.chordal import complete_to_chordal_graph + >>> G = nx.wheel_graph(10) + >>> H, alpha = complete_to_chordal_graph(G) + """ + H = G.copy() + alpha = {node: 0 for node in H} + if nx.is_chordal(H): + return H, alpha + chords = set() + weight = {node: 0 for node in H.nodes()} + unnumbered_nodes = list(H.nodes()) + for i in range(len(H.nodes()), 0, -1): + # get the node in unnumbered_nodes with the maximum weight + z = max(unnumbered_nodes, key=lambda node: weight[node]) + unnumbered_nodes.remove(z) + alpha[z] = i + update_nodes = [] + for y in unnumbered_nodes: + if G.has_edge(y, z): + update_nodes.append(y) + else: + # y_weight will be bigger than node weights between y and z + y_weight = weight[y] + lower_nodes = [ + node for node in unnumbered_nodes if weight[node] < y_weight + ] + if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z): + update_nodes.append(y) + chords.add((z, y)) + # during calculation of paths the weights should not be updated + for node in update_nodes: + weight[node] += 1 + H.add_edges_from(chords) + return H, alpha diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/clique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/clique.py new file mode 100644 index 0000000..afdaa47 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/clique.py @@ -0,0 +1,776 @@ +"""Functions for finding and manipulating cliques. 
+ +Finding the largest clique in a graph is NP-complete problem, so most of +these algorithms have an exponential running time; for more information, +see the Wikipedia article on the clique problem [1]_. + +.. [1] clique problem:: https://en.wikipedia.org/wiki/Clique_problem + +""" +from collections import defaultdict, deque +from itertools import chain, combinations, islice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "find_cliques", + "find_cliques_recursive", + "make_max_clique_graph", + "make_clique_bipartite", + "graph_clique_number", + "graph_number_of_cliques", + "node_clique_number", + "number_of_cliques", + "cliques_containing_node", + "enumerate_all_cliques", + "max_weight_clique", +] + + +@not_implemented_for("directed") +def enumerate_all_cliques(G): + """Returns all cliques in an undirected graph. + + This function returns an iterator over cliques, each of which is a + list of nodes. The iteration is ordered by cardinality of the + cliques: first all cliques of size one, then all cliques of size + two, etc. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + iterator + An iterator over cliques, each of which is a list of nodes in + `G`. The cliques are ordered according to size. + + Notes + ----- + To obtain a list of all cliques, use + `list(enumerate_all_cliques(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph (for example, when the graph is the complete + graph). This function avoids storing all cliques in memory by only + keeping current candidate node lists in memory during its search. + + The implementation is adapted from the algorithm by Zhang, et + al. (2005) [1]_ to output all cliques discovered. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J., + Langston, M.A., Samatova, N.F., + "Genome-Scale Computational Approaches to Memory-Intensive + Applications in Systems Biology". + *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005 + Conference, pp. 12, 12--18 Nov. 2005. + . + + """ + index = {} + nbrs = {} + for u in G: + index[u] = len(index) + # Neighbors of u that appear after u in the iteration order of G. + nbrs[u] = {v for v in G[u] if v not in index} + + queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G) + # Loop invariants: + # 1. len(base) is nondecreasing. + # 2. (base + cnbrs) is sorted with respect to the iteration order of G. + # 3. cnbrs is a set of common neighbors of nodes in base. + while queue: + base, cnbrs = map(list, queue.popleft()) + yield base + for i, u in enumerate(cnbrs): + # Use generators to reduce memory consumption. + queue.append( + ( + chain(base, [u]), + filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)), + ) + ) + + +@not_implemented_for("directed") +def find_cliques(G, nodes=None): + """Returns all maximal cliques in an undirected graph. + + For each node *n*, a *maximal clique for n* is a largest complete + subgraph containing *n*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is an iterative implementation, so should not + suffer from recursion depth issues. 
+ + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are returned. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + See Also + -------- + find_cliques_recursive + A recursive version of the same algorithm. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques(G))`. However, be aware that in the worst-case, + the length of this list can be exponential in the number of nodes in + the graph. This function avoids storing all cliques in memory by + only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. It + essentially unrolls the recursion used in the references to avoid + issues of recursion stack depth (for a recursive implementation, see + :func:`find_cliques_recursive`). + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand = set(G) + for node in Q: + if node not in cand: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand &= adj[node] + + if not cand: + yield Q[:] + return + + subg = cand.copy() + stack = [] + Q.append(None) + + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + + try: + while True: + if ext_u: + q = ext_u.pop() + cand.remove(q) + Q[-1] = q + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + stack.append((subg, cand, ext_u)) + Q.append(None) + subg = subg_q + cand = cand_q + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + else: + Q.pop() + subg, cand, ext_u = stack.pop() + except IndexError: + pass + + +# TODO Should this also be not implemented for directed graphs? 
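As a quick, illustrative aside (not part of the vendored file): the iterative `find_cliques` above can be exercised on a small hand-built graph; the graph and the expected output below are assumptions made up purely for the example.

import networkx as nx

# A triangle {0, 1, 2} plus a pendant edge (1, 3); the maximal cliques
# are {0, 1, 2} and {1, 3}.
G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)])
print(sorted(sorted(c) for c in nx.find_cliques(G)))   # [[0, 1, 2], [1, 3]]

# Restrict the search to maximal cliques containing node 1 (order of the
# yielded cliques is arbitrary).
print([sorted(c) for c in nx.find_cliques(G, nodes=[1])])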
+def find_cliques_recursive(G, nodes=None): + """Returns all maximal cliques in a graph. + + For each node *v*, a *maximal clique for v* is a largest complete + subgraph containing *v*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is a recursive implementation, so may suffer from + recursion depth issues, but is included for pedagogical reasons. + For a non-recursive implementation, see :func:`find_cliques`. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are yielded. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + See Also + -------- + find_cliques + An iterative version of the same algorithm. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques_recursive(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph. This function avoids storing all cliques in memory + by only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. For a + non-recursive implementation, see :func:`find_cliques`. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. 
Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return iter([]) + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand_init = set(G) + for node in Q: + if node not in cand_init: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand_init &= adj[node] + + if not cand_init: + return iter([Q]) + + subg_init = cand_init.copy() + + def expand(subg, cand): + u = max(subg, key=lambda u: len(cand & adj[u])) + for q in cand - adj[u]: + cand.remove(q) + Q.append(q) + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + yield from expand(subg_q, cand_q) + Q.pop() + + return expand(subg_init, cand_init) + + +def make_max_clique_graph(G, create_using=None): + """Returns the maximal clique graph of the given graph. + + The nodes of the maximal clique graph of `G` are the cliques of + `G` and an edge joins two cliques if the cliques are not disjoint. + + Parameters + ---------- + G : NetworkX graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A graph whose nodes are the cliques of `G` and whose edges + join two cliques if they are not disjoint. + + Notes + ----- + This function behaves like the following code:: + + import networkx as nx + G = nx.make_clique_bipartite(G) + cliques = [v for v in G.nodes() if G.nodes[v]['bipartite'] == 0] + G = nx.bipartite.projected_graph(G, cliques) + G = nx.relabel_nodes(G, {-v: v - 1 for v in G}) + + It should be faster, though, since it skips all the intermediate + steps. + + """ + if create_using is None: + B = G.__class__() + else: + B = nx.empty_graph(0, create_using) + cliques = list(enumerate(set(c) for c in find_cliques(G))) + # Add a numbered node for each clique. + B.add_nodes_from(i for i, c in cliques) + # Join cliques by an edge if they share a node. + clique_pairs = combinations(cliques, 2) + B.add_edges_from((i, j) for (i, c1), (j, c2) in clique_pairs if c1 & c2) + return B + + +def make_clique_bipartite(G, fpos=None, create_using=None, name=None): + """Returns the bipartite clique graph corresponding to `G`. + + In the returned bipartite graph, the "bottom" nodes are the nodes of + `G` and the "top" nodes represent the maximal cliques of `G`. + There is an edge from node *v* to clique *C* in the returned graph + if and only if *v* is an element of *C*. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + fpos : bool + If True or not None, the returned graph will have an + additional attribute, `pos`, a dictionary mapping node to + position in the Euclidean plane. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A bipartite graph whose "bottom" set is the nodes of the graph + `G`, whose "top" set is the cliques of `G`, and whose edges + join nodes of `G` to the cliques that contain them. + + The nodes of the graph `G` have the node attribute + 'bipartite' set to 1 and the nodes representing cliques + have the node attribute 'bipartite' set to 0, as is the + convention for bipartite graphs in NetworkX. 
+ + """ + B = nx.empty_graph(0, create_using) + B.clear() + # The "bottom" nodes in the bipartite graph are the nodes of the + # original graph, G. + B.add_nodes_from(G, bipartite=1) + for i, cl in enumerate(find_cliques(G)): + # The "top" nodes in the bipartite graph are the cliques. These + # nodes get negative numbers as labels. + name = -i - 1 + B.add_node(name, bipartite=0) + B.add_edges_from((v, name) for v in cl) + return B + + +def graph_clique_number(G, cliques=None): + """Returns the clique number of the graph. + + The *clique number* of a graph is the size of the largest clique in + the graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list + A list of cliques, each of which is itself a list of nodes. If + not specified, the list of all cliques will be computed, as by + :func:`find_cliques`. + + Returns + ------- + int + The size of the largest clique in `G`. + + Notes + ----- + You should provide `cliques` if you have already computed the list + of maximal cliques, in order to avoid an exponential time search for + maximal cliques. + + """ + if len(G.nodes) < 1: + return 0 + if cliques is None: + cliques = find_cliques(G) + return max([len(c) for c in cliques] or [1]) + + +def graph_number_of_cliques(G, cliques=None): + """Returns the number of maximal cliques in the graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list + A list of cliques, each of which is itself a list of nodes. If + not specified, the list of all cliques will be computed, as by + :func:`find_cliques`. + + Returns + ------- + int + The number of maximal cliques in `G`. + + Notes + ----- + You should provide `cliques` if you have already computed the list + of maximal cliques, in order to avoid an exponential time search for + maximal cliques. + + """ + if cliques is None: + cliques = list(find_cliques(G)) + return len(cliques) + + +def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False): + """Returns the size of the largest maximal clique containing each given node. + + Returns a single or list depending on input nodes. + An optional list of cliques can be input if already computed. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list, optional (default=None) + A list of cliques, each of which is itself a list of nodes. + If not specified, the list of all cliques will be computed + using :func:`find_cliques`. + + Returns + ------- + int or dict + If `nodes` is a single node, returns the size of the + largest maximal clique in `G` containing that node. + Otherwise return a dict keyed by node to the size + of the largest maximal clique containing that node. + + See Also + -------- + find_cliques + find_cliques yields the maximal cliques of G. + It accepts a `nodes` argument which restricts consideration to + maximal cliques containing all the given `nodes`. + The search for the cliques is optimized for `nodes`. 
+ """ + if cliques is None: + if nodes is not None: + # Use ego_graph to decrease size of graph + # check for single node + if nodes in G: + return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes))) + # handle multiple nodes + return { + n: max(len(c) for c in find_cliques(nx.ego_graph(G, n))) for n in nodes + } + + # nodes is None--find all cliques + cliques = list(find_cliques(G)) + + # single node requested + if nodes in G: + return max(len(c) for c in cliques if nodes in c) + + # multiple nodes requested + # preprocess all nodes (faster than one at a time for even 2 nodes) + size_for_n = defaultdict(int) + for c in cliques: + size_of_c = len(c) + for n in c: + if size_for_n[n] < size_of_c: + size_for_n[n] = size_of_c + if nodes is None: + return size_for_n + return {n: size_for_n[n] for n in nodes} + + +def number_of_cliques(G, nodes=None, cliques=None): + """Returns the number of maximal cliques for each node. + + Returns a single or list depending on input nodes. + Optional list of cliques can be input if already computed. + """ + if cliques is None: + cliques = list(find_cliques(G)) + + if nodes is None: + nodes = list(G.nodes()) # none, get entire graph + + if not isinstance(nodes, list): # check for a list + v = nodes + # assume it is a single value + numcliq = len([1 for c in cliques if v in c]) + else: + numcliq = {} + for v in nodes: + numcliq[v] = len([1 for c in cliques if v in c]) + return numcliq + + +def cliques_containing_node(G, nodes=None, cliques=None): + """Returns a list of cliques containing the given node. + + Returns a single list or list of lists depending on input nodes. + Optional list of cliques can be input if already computed. + """ + if cliques is None: + cliques = list(find_cliques(G)) + + if nodes is None: + nodes = list(G.nodes()) # none, get entire graph + + if not isinstance(nodes, list): # check for a list + v = nodes + # assume it is a single value + vcliques = [c for c in cliques if v in c] + else: + vcliques = {} + for v in nodes: + vcliques[v] = [c for c in cliques if v in c] + return vcliques + + +class MaxWeightClique: + """A class for the maximum weight clique algorithm. + + This class is a helper for the `max_weight_clique` function. The class + should not normally be used directly. + + Parameters + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Attributes + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + node_weights: dict + The weight of each node + incumbent_nodes : list + The nodes of the incumbent clique (the best clique found so far) + incumbent_weight: int + The weight of the incumbent clique + """ + + def __init__(self, G, weight): + self.G = G + self.incumbent_nodes = [] + self.incumbent_weight = 0 + + if weight is None: + self.node_weights = {v: 1 for v in G.nodes()} + else: + for v in G.nodes(): + if weight not in G.nodes[v]: + errmsg = f"Node {v!r} does not have the requested weight field." + raise KeyError(errmsg) + if not isinstance(G.nodes[v][weight], int): + errmsg = f"The {weight!r} field of node {v!r} is not an integer." + raise ValueError(errmsg) + self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()} + + def update_incumbent_if_improved(self, C, C_weight): + """Update the incumbent if the node set C has greater weight. 
+ + C is assumed to be a clique. + """ + if C_weight > self.incumbent_weight: + self.incumbent_nodes = C[:] + self.incumbent_weight = C_weight + + def greedily_find_independent_set(self, P): + """Greedily find an independent set of nodes from a set of + nodes P.""" + independent_set = [] + P = P[:] + while P: + v = P[0] + independent_set.append(v) + P = [w for w in P if v != w and not self.G.has_edge(v, w)] + return independent_set + + def find_branching_nodes(self, P, target): + """Find a set of nodes to branch on.""" + residual_wt = {v: self.node_weights[v] for v in P} + total_wt = 0 + P = P[:] + while P: + independent_set = self.greedily_find_independent_set(P) + min_wt_in_class = min(residual_wt[v] for v in independent_set) + total_wt += min_wt_in_class + if total_wt > target: + break + for v in independent_set: + residual_wt[v] -= min_wt_in_class + P = [v for v in P if residual_wt[v] != 0] + return P + + def expand(self, C, C_weight, P): + """Look for the best clique that contains all the nodes in C and zero or + more of the nodes in P, backtracking if it can be shown that no such + clique has greater weight than the incumbent. + """ + self.update_incumbent_if_improved(C, C_weight) + branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight) + while branching_nodes: + v = branching_nodes.pop() + P.remove(v) + new_C = C + [v] + new_C_weight = C_weight + self.node_weights[v] + new_P = [w for w in P if self.G.has_edge(v, w)] + self.expand(new_C, new_C_weight, new_P) + + def find_max_weight_clique(self): + """Find a maximum weight clique.""" + # Sort nodes in reverse order of degree for speed + nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True) + nodes = [v for v in nodes if self.node_weights[v] > 0] + self.expand([], 0, nodes) + + +@not_implemented_for("directed") +def max_weight_clique(G, weight="weight"): + """Find a maximum weight clique in G. + + A *clique* in a graph is a set of nodes such that every two distinct nodes + are adjacent. The *weight* of a clique is the sum of the weights of its + nodes. A *maximum weight clique* of graph G is a clique C in G such that + no clique in G has weight greater than the weight of C. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Returns + ------- + clique : list + the nodes of a maximum weight clique + weight : int + the weight of a maximum weight clique + + Notes + ----- + The implementation is recursive, and therefore it may run into recursion + depth issues if G contains a clique whose number of nodes is close to the + recursion depth limit. + + At each search node, the algorithm greedily constructs a weighted + independent set cover of part of the graph in order to find a small set of + nodes on which to branch. The algorithm is very similar to the algorithm + of Tavares et al. [1]_, other than the fact that the NetworkX version does + not use bitsets. This style of algorithm for maximum weight clique (and + maximum weight independent set, which is the same problem but on the + complement graph) has a decades-long history. See Algorithm B of Warren + and Hicks [2]_ and the references in that paper. + + References + ---------- + .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um + algoritmo de branch and bound para o problema da clique máxima + ponderada. Proceedings of XLVII SBPO 1 (2015). 
+ + .. [2] Warrent, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound + for the Maximum Weight Independent Set Problem. Technical Report, + Texas A&M University (2016). + """ + + mwc = MaxWeightClique(G, weight) + mwc.find_max_weight_clique() + return mwc.incumbent_nodes, mwc.incumbent_weight diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/cluster.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/cluster.py new file mode 100644 index 0000000..1421fef --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/cluster.py @@ -0,0 +1,568 @@ +"""Algorithms to characterize the number of triangles in a graph.""" + +from collections import Counter +from itertools import chain, combinations + +from networkx.utils import not_implemented_for + +__all__ = [ + "triangles", + "average_clustering", + "clustering", + "transitivity", + "square_clustering", + "generalized_degree", +] + + +@not_implemented_for("directed") +def triangles(G, nodes=None): + """Compute the number of triangles. + + Finds the number of triangles that include a node as one vertex. + + Parameters + ---------- + G : graph + A networkx graph + nodes : container of nodes, optional (default= all nodes in G) + Compute triangles for nodes in this container. + + Returns + ------- + out : dictionary + Number of triangles keyed by node label. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.triangles(G, 0)) + 6 + >>> print(nx.triangles(G)) + {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} + >>> print(list(nx.triangles(G, (0, 1)).values())) + [6, 6] + + Notes + ----- + When computing triangles for the entire graph each triangle is counted + three times, once at each node. Self loops are ignored. + + """ + # If `nodes` represents a single node in the graph, return only its number + # of triangles. + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[2] // 2 + # Otherwise, `nodes` represents an iterable of nodes, so return a + # dictionary mapping node to number of triangles. + return {v: t // 2 for v, d, t, _ in _triangles_and_degree_iter(G, nodes)} + + +@not_implemented_for("multigraph") +def _triangles_and_degree_iter(G, nodes=None): + """Return an iterator of (node, degree, triangles, generalized degree). + + This double counts triangles so you may want to divide by 2. + See degree(), triangles() and generalized_degree() for definitions + and details. + + """ + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + for v, v_nbrs in nodes_nbrs: + vs = set(v_nbrs) - {v} + gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs) + ntriangles = sum(k * val for k, val in gen_degree.items()) + yield (v, len(vs), ntriangles, gen_degree) + + +@not_implemented_for("multigraph") +def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of (node, degree, weighted_triangles). + + Used for weighted clustering. + Note: this returns the geometric average weight of edges in the triangle. + Also, each triangle is counted twice (each direction). + So you may want to divide by 2. 
+ + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, nbrs in nodes_nbrs: + inbrs = set(nbrs) - {i} + weighted_triangles = 0 + seen = set() + for j in inbrs: + seen.add(j) + # This avoids counting twice -- we double at the end. + jnbrs = set(G[j]) - seen + # Only compute the edge weight once, before the inner inner + # loop. + wij = wt(i, j) + weighted_triangles += sum( + np.cbrt([(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]) + ) + yield (i, len(inbrs), 2 * weighted_triangles) + + +@not_implemented_for("multigraph") +def _directed_triangles_and_degree_iter(G, nodes=None): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_triangles). + + Used for directed clustering. + Note that unlike `_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. + + """ + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in chain(ipreds, isuccs): + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + 1 + for k in chain( + (ipreds & jpreds), + (ipreds & jsuccs), + (isuccs & jpreds), + (isuccs & jsuccs), + ) + ) + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@not_implemented_for("multigraph") +def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_weighted_triangles). + + Used for directed weighted clustering. + Note that unlike `_weighted_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. 
+ + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in ipreds: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + for j in isuccs: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +def average_clustering(G, nodes=None, weight=None, count_zeros=True): + r"""Compute the average clustering coefficient for the graph G. + + The clustering coefficient for the graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where :math:`n` is the number of nodes in `G`. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute average clustering for nodes in this container. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + count_zeros : bool + If False include only the nodes with nonzero clustering in the average. + + Returns + ------- + avg : float + Average clustering + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.average_clustering(G)) + 1.0 + + Notes + ----- + This is a space saving routine; it might be faster + to use the clustering function to get a list and then take the average. + + Self loops are ignored. + + References + ---------- + .. [1] Generalizations of the clustering coefficient to weighted + complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, + K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). + http://jponnela.com/web_documents/a9.pdf + .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated + nodes and leafs on clustering measures for small-world networks. + https://arxiv.org/abs/0802.2512 + """ + c = clustering(G, nodes, weight=weight).values() + if not count_zeros: + c = [v for v in c if abs(v) > 0] + return sum(c) / len(c) + + +def clustering(G, nodes=None, weight=None): + r"""Compute the clustering coefficient for nodes. + + For unweighted graphs, the clustering of a node :math:`u` + is the fraction of possible triangles through that node that exist, + + .. 
math:: + + c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)}, + + where :math:`T(u)` is the number of triangles through node :math:`u` and + :math:`deg(u)` is the degree of :math:`u`. + + For weighted graphs, there are several ways to define clustering [1]_. + the one used here is defined + as the geometric average of the subgraph edge weights [2]_, + + .. math:: + + c_u = \frac{1}{deg(u)(deg(u)-1))} + \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}. + + The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight + in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`. + + The value of :math:`c_u` is assigned to 0 if :math:`deg(u) < 2`. + + Additionally, this weighted definition has been generalized to support negative edge weights [3]_. + + For directed graphs, the clustering is similarly defined as the fraction + of all possible directed triangles or geometric average of the subgraph + edge weights for unweighted and weighted directed graph respectively [4]_. + + .. math:: + + c_u = \frac{2}{deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u)} + T(u), + + where :math:`T(u)` is the number of directed triangles through node + :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of + :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of + :math:`u`. + + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute clustering for nodes in this container. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + out : float, or dictionary + Clustering coefficient at specified nodes + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.clustering(G, 0)) + 1.0 + >>> print(nx.clustering(G)) + {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} + + Notes + ----- + Self loops are ignored. + + References + ---------- + .. [1] Generalizations of the clustering coefficient to weighted + complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, + K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). + http://jponnela.com/web_documents/a9.pdf + .. [2] Intensity and coherence of motifs in weighted complex + networks by J. P. Onnela, J. Saramäki, J. Kertész, and K. Kaski, + Physical Review E, 71(6), 065103 (2005). + .. [3] Generalization of Clustering Coefficients to Signed Correlation Networks + by G. Costantini and M. Perugini, PloS one, 9(2), e88669 (2014). + .. [4] Clustering in complex directed networks by G. Fagiolo, + Physical Review E, 76(2), 026107 (2007). + """ + if G.is_directed(): + if weight is not None: + td_iter = _directed_weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + td_iter = _directed_triangles_and_degree_iter(G, nodes) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + # The formula 2*T/(d*(d-1)) from docs is t/(d*(d-1)) here b/c t==2*T + if weight is not None: + td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter} + else: + td_iter = _triangles_and_degree_iter(G, nodes) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in td_iter} + if nodes in G: + # Return the value of the sole entry in the dictionary. 
+ return clusterc[nodes] + return clusterc + + +def transitivity(G): + r"""Compute graph transitivity, the fraction of all possible triangles + present in G. + + Possible triangles are identified by the number of "triads" + (two edges with a shared vertex). + + The transitivity is + + .. math:: + + T = 3\frac{\#triangles}{\#triads}. + + Parameters + ---------- + G : graph + + Returns + ------- + out : float + Transitivity + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.transitivity(G)) + 1.0 + """ + triangles_contri = [ + (t, d * (d - 1)) for v, d, t, _ in _triangles_and_degree_iter(G) + ] + # If the graph is empty + if len(triangles_contri) == 0: + return 0 + triangles, contri = map(sum, zip(*triangles_contri)) + return 0 if triangles == 0 else triangles / contri + + +def square_clustering(G, nodes=None): + r"""Compute the squares clustering coefficient for nodes. + + For each node return the fraction of possible squares that exist at + the node [1]_ + + .. math:: + C_4(v) = \frac{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, + + where :math:`q_v(u,w)` are the number of common neighbors of :math:`u` and + :math:`w` other than :math:`v` (ie squares), and :math:`a_v(u,w) = (k_u - + (1+q_v(u,w)+\theta_{uv})) + (k_w - (1+q_v(u,w)+\theta_{uw}))`, where + :math:`\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0 + otherwise. [2]_ + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute clustering for nodes in this container. + + Returns + ------- + c4 : dictionary + A dictionary keyed by node with the square clustering coefficient value. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.square_clustering(G, 0)) + 1.0 + >>> print(nx.square_clustering(G)) + {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} + + Notes + ----- + While :math:`C_3(v)` (triangle clustering) gives the probability that + two neighbors of node v are connected with each other, :math:`C_4(v)` is + the probability that two neighbors of node v share a common + neighbor different from v. This algorithm can be applied to both + bipartite and unipartite networks. + + References + ---------- + .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 + Cycles and clustering in bipartite networks. + Physical Review E (72) 056127. + .. [2] Zhang, Peng et al. Clustering Coefficient and Community Structure of + Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875. + https://arxiv.org/abs/0710.0117v1 + """ + if nodes is None: + node_iter = G + else: + node_iter = G.nbunch_iter(nodes) + clustering = {} + for v in node_iter: + clustering[v] = 0 + potential = 0 + for u, w in combinations(G[v], 2): + squares = len((set(G[u]) & set(G[w])) - {v}) + clustering[v] += squares + degm = squares + 1 + if w in G[u]: + degm += 1 + potential += (len(G[u]) - degm) + (len(G[w]) - degm) + squares + if potential > 0: + clustering[v] /= potential + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clustering[nodes] + return clustering + + +@not_implemented_for("directed") +def generalized_degree(G, nodes=None): + r"""Compute the generalized degree for nodes. + + For each node, the generalized degree shows how many edges of given + triangle multiplicity the node is connected to. The triangle multiplicity + of an edge is the number of triangles an edge participates in. 
The + generalized degree of node :math:`i` can be written as a vector + :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, k_i^{(N-2)})` where + :math:`k_i^{(j)}` is the number of edges attached to node :math:`i` that + participate in :math:`j` triangles. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute the generalized degree for nodes in this container. + + Returns + ------- + out : Counter, or dictionary of Counters + Generalized degree of specified nodes. The Counter is keyed by edge + triangle multiplicity. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.generalized_degree(G, 0)) + Counter({3: 4}) + >>> print(nx.generalized_degree(G)) + {0: Counter({3: 4}), 1: Counter({3: 4}), 2: Counter({3: 4}), 3: Counter({3: 4}), 4: Counter({3: 4})} + + To recover the number of triangles attached to a node: + + >>> k1 = nx.generalized_degree(G, 0) + >>> sum([k * v for k, v in k1.items()]) / 2 == nx.triangles(G, 0) + True + + Notes + ----- + In a network of N nodes, the highest triangle multiplicty an edge can have + is N-2. + + The return value does not include a `zero` entry if no edges of a + particular triangle multiplicity are present. + + The number of triangles node :math:`i` is attached to can be recovered from + the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, + k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc +(N-2)k_i^{(N-2)})/2`. + + References + ---------- + .. [1] Networks with arbitrary edge multiplicities by V. Zlatić, + D. Garlaschelli and G. Caldarelli, EPL (Europhysics Letters), + Volume 97, Number 2 (2012). + https://iopscience.iop.org/article/10.1209/0295-5075/97/28005 + """ + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[3] + return {v: gd for v, d, t, gd in _triangles_and_degree_iter(G, nodes)} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/__init__.py new file mode 100644 index 0000000..39381d9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.coloring.greedy_coloring import * +from networkx.algorithms.coloring.equitable_coloring import equitable_color + +__all__ = ["greedy_color", "equitable_color"] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/equitable_coloring.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/equitable_coloring.py new file mode 100644 index 0000000..3072a0a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/equitable_coloring.py @@ -0,0 +1,515 @@ +""" +Equitable coloring of graphs with bounded degree. +""" + +from collections import defaultdict + +import networkx as nx + +__all__ = ["equitable_color"] + + +def is_coloring(G, coloring): + """Determine if the coloring is a valid coloring for the graph G.""" + # Verify that the coloring is valid. + for (s, d) in G.edges: + if coloring[s] == coloring[d]: + return False + return True + + +def is_equitable(G, coloring, num_colors=None): + """Determines if the coloring is valid and equitable for the graph G.""" + + if not is_coloring(G, coloring): + return False + + # Verify whether it is equitable. 
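+    # "Equitable" here means the color classes differ in size by at most one,
+    # i.e. each class holds either floor(n/k) or ceil(n/k) of the n nodes.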
+ color_set_size = defaultdict(int) + for color in coloring.values(): + color_set_size[color] += 1 + + if num_colors is not None: + for color in range(num_colors): + if color not in color_set_size: + # These colors do not have any vertices attached to them. + color_set_size[color] = 0 + + # If there are more than 2 distinct values, the coloring cannot be equitable + all_set_sizes = set(color_set_size.values()) + if len(all_set_sizes) == 0 and num_colors is None: # Was an empty graph + return True + elif len(all_set_sizes) == 1: + return True + elif len(all_set_sizes) == 2: + a, b = list(all_set_sizes) + return abs(a - b) <= 1 + else: # len(all_set_sizes) > 2: + return False + + +def make_C_from_F(F): + C = defaultdict(lambda: []) + for node, color in F.items(): + C[color].append(node) + + return C + + +def make_N_from_L_C(L, C): + nodes = L.keys() + colors = C.keys() + return { + (node, color): sum(1 for v in L[node] if v in C[color]) + for node in nodes + for color in colors + } + + +def make_H_from_C_N(C, N): + return { + (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) + for c1 in C.keys() + for c2 in C.keys() + } + + +def change_color(u, X, Y, N, H, F, C, L): + """Change the color of 'u' from X to Y and update N, H, F, C.""" + assert F[u] == X and X != Y + + # Change the class of 'u' from X to Y + F[u] = Y + + for k in C.keys(): + # 'u' witnesses an edge from k -> Y instead of from k -> X now. + if N[u, k] == 0: + H[(X, k)] -= 1 + H[(Y, k)] += 1 + + for v in L[u]: + # 'v' has lost a neighbor in X and gained one in Y + N[(v, X)] -= 1 + N[(v, Y)] += 1 + + if N[(v, X)] == 0: + # 'v' witnesses F[v] -> X + H[(F[v], X)] += 1 + + if N[(v, Y)] == 1: + # 'v' no longer witnesses F[v] -> Y + H[(F[v], Y)] -= 1 + + C[X].remove(u) + C[Y].append(u) + + +def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L): + """Move witness along a path from src_color to dst_color.""" + X = src_color + while X != dst_color: + Y = T_cal[X] + # Move _any_ witness from X to Y = T_cal[X] + w = next(x for x in C[X] if N[(x, Y)] == 0) + change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L) + X = Y + + +def pad_graph(G, num_colors): + """Add a disconnected complete clique K_p such that the number of nodes in + the graph becomes a multiple of `num_colors`. + + Assumes that the graph's nodes are labelled using integers. + + Returns the number of nodes with each color. + """ + + n_ = len(G) + r = num_colors - 1 + + # Ensure that the number of nodes in G is a multiple of (r + 1) + s = n_ // (r + 1) + if n_ != s * (r + 1): + p = (r + 1) - n_ % (r + 1) + s += 1 + + # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p] + K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)}) + G.add_edges_from(K.edges) + + return s + + +def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None): + """Procedure P as described in the paper.""" + + if excluded_colors is None: + excluded_colors = set() + + A_cal = set() + T_cal = {} + R_cal = [] + + # BFS to determine A_cal, i.e. colors reachable from V- + reachable = [V_minus] + marked = set(reachable) + idx = 0 + + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + A_cal.add(pop) + R_cal.append(pop) + + # TODO: Checking whether a color has been visited can be made faster by + # using a look-up table instead of testing for membership in a set by a + # logarithmic factor. 
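+        # H[(k, pop)] > 0 means some node of color k has no neighbor colored
+        # `pop`, so a witness could be recolored from k to `pop`; those colors
+        # form the next BFS layer below.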
+ next_layer = [] + for k in C.keys(): + if ( + H[(k, pop)] > 0 + and k not in A_cal + and k not in excluded_colors + and k not in marked + ): + next_layer.append(k) + + for dst in next_layer: + # Record that `dst` can reach `pop` + T_cal[dst] = pop + + marked.update(next_layer) + reachable.extend(next_layer) + + # Variables for the algorithm + b = len(C) - len(A_cal) + + if V_plus in A_cal: + # Easy case: V+ is in A_cal + # Move one node from V+ to V- using T_cal to find the parents. + move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L) + else: + # If there is a solo edge, we can resolve the situation by + # moving witnesses from B to A, making G[A] equitable and then + # recursively balancing G[B - w] with a different V_minus and + # but the same V_plus. + + A_0 = set() + A_cal_0 = set() + num_terminal_sets_found = 0 + made_equitable = False + + for W_1 in R_cal[::-1]: + + for v in C[W_1]: + X = None + + for U in C.keys(): + if N[(v, U)] == 0 and U in A_cal and U != W_1: + X = U + + # v does not witness an edge in H[A_cal] + if X is None: + continue + + for U in C.keys(): + # Note: Departing from the paper here. + if N[(v, U)] >= 1 and U not in A_cal: + X_prime = U + w = v + + try: + # Finding the solo neighbor of w in X_prime + y = next( + node + for node in L[w] + if F[node] == X_prime and N[(node, W_1)] == 1 + ) + except StopIteration: + pass + else: + W = W_1 + + # Move w from W to X, now X has one extra node. + change_color(w, W, X, N=N, H=H, F=F, C=C, L=L) + + # Move witness from X to V_minus, making the coloring + # equitable. + move_witnesses( + src_color=X, + dst_color=V_minus, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal, + L=L, + ) + + # Move y from X_prime to W, making W the correct size. + change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L) + + # Then call the procedure on G[B - y] + procedure_P( + V_minus=X_prime, + V_plus=V_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors.union(A_cal), + ) + made_equitable = True + break + + if made_equitable: + break + else: + # No node in W_1 was found such that + # it had a solo-neighbor. + A_cal_0.add(W_1) + A_0.update(C[W_1]) + num_terminal_sets_found += 1 + + if num_terminal_sets_found == b: + # Otherwise, construct the maximal independent set and find + # a pair of z_1, z_2 as in Case II. + + # BFS to determine B_cal': the set of colors reachable from V+ + B_cal_prime = set() + T_cal_prime = {} + + reachable = [V_plus] + marked = set(reachable) + idx = 0 + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + B_cal_prime.add(pop) + + # No need to check for excluded_colors here because + # they only exclude colors from A_cal + next_layer = [ + k + for k in C.keys() + if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked + ] + + for dst in next_layer: + T_cal_prime[pop] = dst + + marked.update(next_layer) + reachable.extend(next_layer) + + # Construct the independent set of G[B'] + I_set = set() + I_covered = set() + W_covering = {} + + B_prime = [node for k in B_cal_prime for node in C[k]] + + # Add the nodes in V_plus to I first. 
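+                    # (Greedy construction of a maximal independent set over
+                    # G[B']: each chosen z covers itself and its neighbours;
+                    # W_covering remembers, for each w, the chosen node that
+                    # sees w as a solo neighbour, so a z_1/z_2 collision can
+                    # be detected.)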
+ for z in C[V_plus] + B_prime: + if z in I_covered or F[z] not in B_cal_prime: + continue + + I_set.add(z) + I_covered.add(z) + I_covered.update([nbr for nbr in L[z]]) + + for w in L[z]: + if F[w] in A_cal_0 and N[(z, F[w])] == 1: + if w not in W_covering: + W_covering[w] = z + else: + # Found z1, z2 which have the same solo + # neighbor in some W + z_1 = W_covering[w] + # z_2 = z + + Z = F[z_1] + W = F[w] + + # shift nodes along W, V- + move_witnesses( + W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L + ) + + # shift nodes along V+ to Z + move_witnesses( + V_plus, + Z, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal_prime, + L=L, + ) + + # change color of z_1 to W + change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L) + + # change color of w to some color in B_cal + W_plus = next( + k + for k in C.keys() + if N[(w, k)] == 0 and k not in A_cal + ) + change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L) + + # recurse with G[B \cup W*] + excluded_colors.update( + [ + k + for k in C.keys() + if k != W and k not in B_cal_prime + ] + ) + procedure_P( + V_minus=W, + V_plus=W_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors, + ) + + made_equitable = True + break + + if made_equitable: + break + else: + assert False, ( + "Must find a w which is the solo neighbor " + "of two vertices in B_cal_prime." + ) + + if made_equitable: + break + + +def equitable_color(G, num_colors): + """Provides equitable (r + 1)-coloring for nodes of G in O(r * n^2) time + if deg(G) <= r. The algorithm is described in [1]_. + + Attempts to color a graph using r colors, where no neighbors of a node + can have same color as the node itself and the number of nodes with each + color differ by at most 1. + + Parameters + ---------- + G : networkX graph + The nodes of this graph will be colored. + + num_colors : number of colors to use + This number must be at least one more than the maximum degree of nodes + in the graph. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> d = nx.coloring.equitable_color(G, num_colors=3) + >>> nx.algorithms.coloring.equitable_coloring.is_equitable(G, d) + True + + Raises + ------ + NetworkXAlgorithmError + If the maximum degree of the graph ``G`` is greater than + ``num_colors``. + + References + ---------- + .. [1] Kierstead, H. A., Kostochka, A. V., Mydlarz, M., & Szemerédi, E. + (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2), + 217-224. + """ + + # Map nodes to integers for simplicity later. + nodes_to_int = {} + int_to_nodes = {} + + for idx, node in enumerate(G.nodes): + nodes_to_int[node] = idx + int_to_nodes[idx] = node + + G = nx.relabel_nodes(G, nodes_to_int, copy=True) + + # Basic graph statistics and sanity check. + if len(G.nodes) > 0: + r_ = max(G.degree(node) for node in G.nodes) + else: + r_ = 0 + + if r_ >= num_colors: + raise nx.NetworkXAlgorithmError( + f"Graph has maximum degree {r_}, needs " + f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring." + ) + + # Ensure that the number of nodes in G is a multiple of (r + 1) + pad_graph(G, num_colors) + + # Starting the algorithm. + # L = {node: list(G.neighbors(node)) for node in G.nodes} + L_ = {node: [] for node in G.nodes} + + # Arbitrary equitable allocation of colors to nodes. + F = {node: idx % num_colors for idx, node in enumerate(G.nodes)} + + C = make_C_from_F(F) + + # The neighborhood is empty initially. 
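+    # (L_ is deliberately empty at this point; the loop below re-adds the
+    # edges one at a time so that N and H remain consistent after every
+    # insertion.)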
+ N = make_N_from_L_C(L_, C) + + # Currently all nodes witness all edges. + H = make_H_from_C_N(C, N) + + # Start of algorithm. + edges_seen = set() + + for u in sorted(G.nodes): + for v in sorted(G.neighbors(u)): + + # Do not double count edges if (v, u) has already been seen. + if (v, u) in edges_seen: + continue + + edges_seen.add((u, v)) + + L_[u].append(v) + L_[v].append(u) + + N[(u, F[v])] += 1 + N[(v, F[u])] += 1 + + if F[u] != F[v]: + # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]? + if N[(u, F[v])] == 1: + H[F[u], F[v]] -= 1 # u cannot witness an edge between F[u], F[v] + + if N[(v, F[u])] == 1: + H[F[v], F[u]] -= 1 # v cannot witness an edge between F[v], F[u] + + if N[(u, F[u])] != 0: + # Find the first color where 'u' does not have any neighbors. + Y = next(k for k in C.keys() if N[(u, k)] == 0) + X = F[u] + change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_) + + # Procedure P + procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_) + + return {int_to_nodes[x]: F[x] for x in int_to_nodes} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/greedy_coloring.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/greedy_coloring.py new file mode 100644 index 0000000..329746c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/greedy_coloring.py @@ -0,0 +1,550 @@ +""" +Greedy graph coloring using various strategies. +""" +import itertools +from collections import defaultdict, deque + +import networkx as nx +from networkx.utils import arbitrary_element, py_random_state + +__all__ = [ + "greedy_color", + "strategy_connected_sequential", + "strategy_connected_sequential_bfs", + "strategy_connected_sequential_dfs", + "strategy_independent_set", + "strategy_largest_first", + "strategy_random_sequential", + "strategy_saturation_largest_first", + "strategy_smallest_last", +] + + +def strategy_largest_first(G, colors): + """Returns a list of the nodes of ``G`` in decreasing order by + degree. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return sorted(G, key=G.degree, reverse=True) + + +@py_random_state(2) +def strategy_random_sequential(G, colors, seed=None): + """Returns a random permutation of the nodes of ``G`` as a list. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + nodes = list(G) + seed.shuffle(nodes) + return nodes + + +def strategy_smallest_last(G, colors): + """Returns a deque of the nodes of ``G``, "smallest" last. + + Specifically, the degrees of each node are tracked in a bucket queue. + From this, the node of minimum degree is repeatedly popped from the + graph, updating its neighbors' degrees. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + This implementation of the strategy runs in $O(n + m)$ time + (ignoring polylogarithmic factors), where $n$ is the number of nodes + and $m$ is the number of edges. + + This strategy is related to :func:`strategy_independent_set`: if we + interpret each node removed as an independent set of size one, then + this strategy chooses an independent set of size one instead of a + maximal independent set. + + """ + H = G.copy() + result = deque() + + # Build initial degree list (i.e. 
the bucket queue data structure) + degrees = defaultdict(set) # set(), for fast random-access removals + lbound = float("inf") + for node, d in H.degree(): + degrees[d].add(node) + lbound = min(lbound, d) # Lower bound on min-degree. + + def find_min_degree(): + # Save time by starting the iterator at `lbound`, not 0. + # The value that we find will be our new `lbound`, which we set later. + return next(d for d in itertools.count(lbound) if d in degrees) + + for _ in G: + # Pop a min-degree node and add it to the list. + min_degree = find_min_degree() + u = degrees[min_degree].pop() + if not degrees[min_degree]: # Clean up the degree list. + del degrees[min_degree] + result.appendleft(u) + + # Update degrees of removed node's neighbors. + for v in H[u]: + degree = H.degree(v) + degrees[degree].remove(v) + if not degrees[degree]: # Clean up the degree list. + del degrees[degree] + degrees[degree - 1].add(v) + + # Finally, remove the node. + H.remove_node(u) + lbound = min_degree - 1 # Subtract 1 in case of tied neighbors. + + return result + + +def _maximal_independent_set(G): + """Returns a maximal independent set of nodes in ``G`` by repeatedly + choosing an independent node of minimum degree (with respect to the + subgraph of unchosen nodes). + + """ + result = set() + remaining = set(G) + while remaining: + G = G.subgraph(remaining) + v = min(remaining, key=G.degree) + result.add(v) + remaining -= set(G[v]) | {v} + return result + + +def strategy_independent_set(G, colors): + """Uses a greedy independent set removal strategy to determine the + colors. + + This function updates ``colors`` **in-place** and return ``None``, + unlike the other strategy functions in this module. + + This algorithm repeatedly finds and removes a maximal independent + set, assigning each node in the set an unused color. + + ``G`` is a NetworkX graph. + + This strategy is related to :func:`strategy_smallest_last`: in that + strategy, an independent set of size one is chosen at each step + instead of a maximal independent set. + + """ + remaining_nodes = set(G) + while len(remaining_nodes) > 0: + nodes = _maximal_independent_set(G.subgraph(remaining_nodes)) + remaining_nodes -= nodes + yield from nodes + + +def strategy_connected_sequential_bfs(G, colors): + """Returns an iterable over nodes in ``G`` in the order given by a + breadth-first traversal. + + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return strategy_connected_sequential(G, colors, "bfs") + + +def strategy_connected_sequential_dfs(G, colors): + """Returns an iterable over nodes in ``G`` in the order given by a + depth-first traversal. + + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return strategy_connected_sequential(G, colors, "dfs") + + +def strategy_connected_sequential(G, colors, traversal="bfs"): + """Returns an iterable over nodes in ``G`` in the order given by a + breadth-first or depth-first traversal. + + ``traversal`` must be one of the strings ``'dfs'`` or ``'bfs'``, + representing depth-first traversal or breadth-first traversal, + respectively. + + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. 
``colors`` is ignored. + + """ + if traversal == "bfs": + traverse = nx.bfs_edges + elif traversal == "dfs": + traverse = nx.dfs_edges + else: + raise nx.NetworkXError( + "Please specify one of the strings 'bfs' or" + " 'dfs' for connected sequential ordering" + ) + for component in nx.connected_components(G): + source = arbitrary_element(component) + # Yield the source node, then all the nodes in the specified + # traversal order. + yield source + for (_, end) in traverse(G.subgraph(component), source): + yield end + + +def strategy_saturation_largest_first(G, colors): + """Iterates over all the nodes of ``G`` in "saturation order" (also + known as "DSATUR"). + + ``G`` is a NetworkX graph. ``colors`` is a dictionary mapping nodes of + ``G`` to colors, for those nodes that have already been colored. + + """ + distinct_colors = {v: set() for v in G} + for i in range(len(G)): + # On the first time through, simply choose the node of highest degree. + if i == 0: + node = max(G, key=G.degree) + yield node + # Add the color 0 to the distinct colors set for each + # neighbors of that node. + for v in G[node]: + distinct_colors[v].add(0) + else: + # Compute the maximum saturation and the set of nodes that + # achieve that saturation. + saturation = { + v: len(c) for v, c in distinct_colors.items() if v not in colors + } + # Yield the node with the highest saturation, and break ties by + # degree. + node = max(saturation, key=lambda v: (saturation[v], G.degree(v))) + yield node + # Update the distinct color sets for the neighbors. + color = colors[node] + for v in G[node]: + distinct_colors[v].add(color) + + +#: Dictionary mapping name of a strategy as a string to the strategy function. +STRATEGIES = { + "largest_first": strategy_largest_first, + "random_sequential": strategy_random_sequential, + "smallest_last": strategy_smallest_last, + "independent_set": strategy_independent_set, + "connected_sequential_bfs": strategy_connected_sequential_bfs, + "connected_sequential_dfs": strategy_connected_sequential_dfs, + "connected_sequential": strategy_connected_sequential, + "saturation_largest_first": strategy_saturation_largest_first, + "DSATUR": strategy_saturation_largest_first, +} + + +def greedy_color(G, strategy="largest_first", interchange=False): + """Color a graph using various strategies of greedy graph coloring. + + Attempts to color a graph using as few colors as possible, where no + neighbours of a node can have same color as the node itself. The + given strategy determines the order in which nodes are colored. + + The strategies are described in [1]_, and smallest-last is based on + [2]_. + + Parameters + ---------- + G : NetworkX graph + + strategy : string or function(G, colors) + A function (or a string representing a function) that provides + the coloring strategy, by returning nodes in the ordering they + should be colored. ``G`` is the graph, and ``colors`` is a + dictionary of the currently assigned colors, keyed by nodes. The + function must return an iterable over all the nodes in ``G``. + + If the strategy function is an iterator generator (that is, a + function with ``yield`` statements), keep in mind that the + ``colors`` dictionary will be updated after each ``yield``, since + this function chooses colors greedily. + + If ``strategy`` is a string, it must be one of the following, + each of which represents one of the built-in strategy functions. 
+ + * ``'largest_first'`` + * ``'random_sequential'`` + * ``'smallest_last'`` + * ``'independent_set'`` + * ``'connected_sequential_bfs'`` + * ``'connected_sequential_dfs'`` + * ``'connected_sequential'`` (alias for the previous strategy) + * ``'saturation_largest_first'`` + * ``'DSATUR'`` (alias for the previous strategy) + + interchange: bool + Will use the color interchange algorithm described by [3]_ if set + to ``True``. + + Note that ``saturation_largest_first`` and ``independent_set`` + do not work with interchange. Furthermore, if you use + interchange with your own strategy function, you cannot rely + on the values in the ``colors`` argument. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> d = nx.coloring.greedy_color(G, strategy="largest_first") + >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}] + True + + Raises + ------ + NetworkXPointlessConcept + If ``strategy`` is ``saturation_largest_first`` or + ``independent_set`` and ``interchange`` is ``True``. + + References + ---------- + .. [1] Adrian Kosowski, and Krzysztof Manuszewski, + Classical Coloring of Graphs, Graph Colorings, 2-19, 2004. + ISBN 0-8218-3458-4. + .. [2] David W. Matula, and Leland L. Beck, "Smallest-last + ordering and clustering and graph coloring algorithms." *J. ACM* 30, + 3 (July 1983), 417–427. + .. [3] Maciej M. Sysło, Narsingh Deo, Janusz S. Kowalik, + Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983. + ISBN 0-486-45353-7. + + """ + if len(G) == 0: + return {} + # Determine the strategy provided by the caller. + strategy = STRATEGIES.get(strategy, strategy) + if not callable(strategy): + raise nx.NetworkXError( + "strategy must be callable or a valid string. " f"{strategy} not valid." + ) + # Perform some validation on the arguments before executing any + # strategy functions. + if interchange: + if strategy is strategy_independent_set: + msg = "interchange cannot be used with independent_set" + raise nx.NetworkXPointlessConcept(msg) + if strategy is strategy_saturation_largest_first: + msg = "interchange cannot be used with" " saturation_largest_first" + raise nx.NetworkXPointlessConcept(msg) + colors = {} + nodes = strategy(G, colors) + if interchange: + return _greedy_coloring_with_interchange(G, nodes) + for u in nodes: + # Set to keep track of colors of neighbours + neighbour_colors = {colors[v] for v in G[u] if v in colors} + # Find the first unused color. + for color in itertools.count(): + if color not in neighbour_colors: + break + # Assign the new color to the current node. 
+ colors[u] = color + return colors + + +# Tools for coloring with interchanges +class _Node: + __slots__ = ["node_id", "color", "adj_list", "adj_color"] + + def __init__(self, node_id, n): + self.node_id = node_id + self.color = -1 + self.adj_list = None + self.adj_color = [None for _ in range(n)] + + def __repr__(self): + return ( + f"Node_id: {self.node_id}, Color: {self.color}, " + f"Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})" + ) + + def assign_color(self, adj_entry, color): + adj_entry.col_prev = None + adj_entry.col_next = self.adj_color[color] + self.adj_color[color] = adj_entry + if adj_entry.col_next is not None: + adj_entry.col_next.col_prev = adj_entry + + def clear_color(self, adj_entry, color): + if adj_entry.col_prev is None: + self.adj_color[color] = adj_entry.col_next + else: + adj_entry.col_prev.col_next = adj_entry.col_next + if adj_entry.col_next is not None: + adj_entry.col_next.col_prev = adj_entry.col_prev + + def iter_neighbors(self): + adj_node = self.adj_list + while adj_node is not None: + yield adj_node + adj_node = adj_node.next + + def iter_neighbors_color(self, color): + adj_color_node = self.adj_color[color] + while adj_color_node is not None: + yield adj_color_node.node_id + adj_color_node = adj_color_node.col_next + + +class _AdjEntry: + __slots__ = ["node_id", "next", "mate", "col_next", "col_prev"] + + def __init__(self, node_id): + self.node_id = node_id + self.next = None + self.mate = None + self.col_next = None + self.col_prev = None + + def __repr__(self): + col_next = None if self.col_next is None else self.col_next.node_id + col_prev = None if self.col_prev is None else self.col_prev.node_id + return ( + f"Node_id: {self.node_id}, Next: ({self.next}), " + f"Mate: ({self.mate.node_id}), " + f"col_next: ({col_next}), col_prev: ({col_prev})" + ) + + +def _greedy_coloring_with_interchange(G, nodes): + """Return a coloring for `orginal_graph` using interchange approach + + This procedure is an adaption of the algorithm described by [1]_, + and is an implementation of coloring with interchange. Please be + advised, that the datastructures used are rather complex because + they are optimized to minimize the time spent identifying + subcomponents of the graph, which are possible candidates for color + interchange. + + Parameters + ---------- + G : NetworkX graph + The graph to be colored + + nodes : list + nodes ordered using the strategy of choice + + Returns + ------- + dict : + A dictionary keyed by node to a color value + + References + ---------- + .. [1] Maciej M. Syslo, Narsingh Deo, Janusz S. Kowalik, + Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983. + ISBN 0-486-45353-7. 
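+
+    Examples
+    --------
+    Not called directly; :func:`greedy_color` dispatches to this helper
+    when ``interchange=True``. An illustrative sketch, mirroring the
+    ``greedy_color`` docstring example:
+
+    >>> G = nx.cycle_graph(4)
+    >>> d = nx.coloring.greedy_color(G, strategy="largest_first", interchange=True)
+    >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}]
+    True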
+ """ + n = len(G) + + graph = {node: _Node(node, n) for node in G} + + for (node1, node2) in G.edges(): + adj_entry1 = _AdjEntry(node2) + adj_entry2 = _AdjEntry(node1) + adj_entry1.mate = adj_entry2 + adj_entry2.mate = adj_entry1 + node1_head = graph[node1].adj_list + adj_entry1.next = node1_head + graph[node1].adj_list = adj_entry1 + node2_head = graph[node2].adj_list + adj_entry2.next = node2_head + graph[node2].adj_list = adj_entry2 + + k = 0 + for node in nodes: + # Find the smallest possible, unused color + neighbors = graph[node].iter_neighbors() + col_used = {graph[adj_node.node_id].color for adj_node in neighbors} + col_used.discard(-1) + k1 = next(itertools.dropwhile(lambda x: x in col_used, itertools.count())) + + # k1 is now the lowest available color + if k1 > k: + connected = True + visited = set() + col1 = -1 + col2 = -1 + while connected and col1 < k: + col1 += 1 + neighbor_cols = graph[node].iter_neighbors_color(col1) + col1_adj = [it for it in neighbor_cols] + + col2 = col1 + while connected and col2 < k: + col2 += 1 + visited = set(col1_adj) + frontier = list(col1_adj) + i = 0 + while i < len(frontier): + search_node = frontier[i] + i += 1 + col_opp = col2 if graph[search_node].color == col1 else col1 + neighbor_cols = graph[search_node].iter_neighbors_color(col_opp) + + for neighbor in neighbor_cols: + if neighbor not in visited: + visited.add(neighbor) + frontier.append(neighbor) + + # Search if node is not adj to any col2 vertex + connected = ( + len( + visited.intersection(graph[node].iter_neighbors_color(col2)) + ) + > 0 + ) + + # If connected is false then we can swap !!! + if not connected: + # Update all the nodes in the component + for search_node in visited: + graph[search_node].color = ( + col2 if graph[search_node].color == col1 else col1 + ) + col2_adj = graph[search_node].adj_color[col2] + graph[search_node].adj_color[col2] = graph[search_node].adj_color[ + col1 + ] + graph[search_node].adj_color[col1] = col2_adj + + # Update all the neighboring nodes + for search_node in visited: + col = graph[search_node].color + col_opp = col1 if col == col2 else col2 + for adj_node in graph[search_node].iter_neighbors(): + if graph[adj_node.node_id].color != col_opp: + # Direct reference to entry + adj_mate = adj_node.mate + graph[adj_node.node_id].clear_color(adj_mate, col_opp) + graph[adj_node.node_id].assign_color(adj_mate, col) + k1 = col1 + + # We can color this node color k1 + graph[node].color = k1 + k = max(k1, k) + + # Update the neighbors of this node + for adj_node in graph[node].iter_neighbors(): + adj_mate = adj_node.mate + graph[adj_node.node_id].assign_color(adj_mate, k1) + + return {node.node_id: node.color for node in graph.values()} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/tests/test_coloring.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/tests/test_coloring.py new file mode 100644 index 0000000..cc422e3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/coloring/tests/test_coloring.py @@ -0,0 +1,793 @@ +"""Greedy coloring test suite. 
+ +""" + +import pytest + +import networkx as nx + +is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring +is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable + + +ALL_STRATEGIES = [ + "largest_first", + "random_sequential", + "smallest_last", + "independent_set", + "connected_sequential_bfs", + "connected_sequential_dfs", + "connected_sequential", + "saturation_largest_first", + "DSATUR", +] + +# List of strategies where interchange=True results in an error +INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"] + + +class TestColoring: + def test_basic_cases(self): + def check_basic_case(graph_func, n_nodes, strategy, interchange): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + assert verify_length(coloring, n_nodes) + assert verify_coloring(graph, coloring) + + for graph_func, n_nodes in BASIC_TEST_CASES.items(): + for interchange in [True, False]: + for strategy in ALL_STRATEGIES: + check_basic_case(graph_func, n_nodes, strategy, False) + if strategy not in INTERCHANGE_INVALID: + check_basic_case(graph_func, n_nodes, strategy, True) + + def test_special_cases(self): + def check_special_case(strategy, graph_func, interchange, colors): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + if not hasattr(colors, "__len__"): + colors = [colors] + assert any(verify_length(coloring, n_colors) for n_colors in colors) + assert verify_coloring(graph, coloring) + + for strategy, arglist in SPECIAL_TEST_CASES.items(): + for args in arglist: + check_special_case(strategy, args[0], args[1], args[2]) + + def test_interchange_invalid(self): + graph = one_node_graph() + for strategy in INTERCHANGE_INVALID: + pytest.raises( + nx.NetworkXPointlessConcept, + nx.coloring.greedy_color, + graph, + strategy=strategy, + interchange=True, + ) + + def test_bad_inputs(self): + graph = one_node_graph() + pytest.raises( + nx.NetworkXError, + nx.coloring.greedy_color, + graph, + strategy="invalid strategy", + ) + + def test_strategy_as_function(self): + graph = lf_shc() + colors_1 = nx.coloring.greedy_color(graph, "largest_first") + colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first) + assert colors_1 == colors_2 + + def test_seed_argument(self): + graph = lf_shc() + rs = nx.coloring.strategy_random_sequential + c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1)) + for u, v in graph.edges: + assert c1[u] != c1[v] + + def test_is_coloring(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_coloring(G, coloring) + + coloring[0] = 1 + assert not is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_is_equitable(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_equitable(G, coloring) + + G.add_edges_from([(2, 3), (2, 4), (2, 5)]) + coloring[3] = 1 + coloring[4] = 1 + coloring[5] = 1 + assert is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_num_colors(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3)]) + pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2) + + def test_equitable_color(self): + G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_empty(self): + G = 
nx.empty_graph() + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_large(self): + G = nx.fast_gnp_random_graph(100, 0.1, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring, num_colors=max_degree(G) + 1) + + def test_case_V_plus_not_in_A_cal(self): + # Hand crafted case to avoid the easy case. + L = { + 0: [2, 5], + 1: [3, 4], + 2: [0, 8], + 3: [1, 7], + 4: [1, 6], + 5: [0, 6], + 6: [4, 5], + 7: [3], + 8: [2], + } + + F = { + # Color 0 + 0: 0, + 1: 0, + # Color 1 + 2: 1, + 3: 1, + 4: 1, + 5: 1, + # Color 2 + 6: 2, + 7: 2, + 8: 2, + } + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_cast_no_solo(self): + L = { + 0: [8, 9], + 1: [10, 11], + 2: [8], + 3: [9], + 4: [10, 11], + 5: [8], + 6: [9], + 7: [10, 11], + 8: [0, 2, 5], + 9: [0, 3, 6], + 10: [1, 4, 7], + 11: [1, 4, 7], + } + + F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1} + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_hard_prob(self): + # Tests for two levels of recursion. + num_colors, s = 5, 5 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 10), + (0, 11), + (0, 12), + (0, 23), + (10, 4), + (10, 9), + (10, 20), + (11, 4), + (11, 8), + (11, 16), + (12, 9), + (12, 22), + (12, 23), + (23, 7), + (1, 17), + (1, 18), + (1, 19), + (1, 24), + (17, 5), + (17, 13), + (17, 22), + (18, 5), + (19, 5), + (19, 6), + (19, 8), + (24, 7), + (24, 16), + (2, 4), + (2, 13), + (2, 14), + (2, 15), + (4, 6), + (13, 5), + (13, 21), + (14, 6), + (14, 15), + (15, 6), + (15, 21), + (3, 16), + (3, 20), + (3, 21), + (3, 22), + (16, 8), + (20, 8), + (21, 9), + (22, 7), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_hardest_prob(self): + # Tests for two levels of recursion. 
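+        # (As in test_hard_prob, the F built below forces V- = 0 and
+        # V+ = num_colors - 1.)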
+ num_colors, s = 10, 4 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 19), + (0, 24), + (0, 29), + (0, 30), + (0, 35), + (19, 3), + (19, 7), + (19, 9), + (19, 15), + (19, 21), + (19, 24), + (19, 30), + (19, 38), + (24, 5), + (24, 11), + (24, 13), + (24, 20), + (24, 30), + (24, 37), + (24, 38), + (29, 6), + (29, 10), + (29, 13), + (29, 15), + (29, 16), + (29, 17), + (29, 20), + (29, 26), + (30, 6), + (30, 10), + (30, 15), + (30, 22), + (30, 23), + (30, 39), + (35, 6), + (35, 9), + (35, 14), + (35, 18), + (35, 22), + (35, 23), + (35, 25), + (35, 27), + (1, 20), + (1, 26), + (1, 31), + (1, 34), + (1, 38), + (20, 4), + (20, 8), + (20, 14), + (20, 18), + (20, 28), + (20, 33), + (26, 7), + (26, 10), + (26, 14), + (26, 18), + (26, 21), + (26, 32), + (26, 39), + (31, 5), + (31, 8), + (31, 13), + (31, 16), + (31, 17), + (31, 21), + (31, 25), + (31, 27), + (34, 7), + (34, 8), + (34, 13), + (34, 18), + (34, 22), + (34, 23), + (34, 25), + (34, 27), + (38, 4), + (38, 9), + (38, 12), + (38, 14), + (38, 21), + (38, 27), + (2, 3), + (2, 18), + (2, 21), + (2, 28), + (2, 32), + (2, 33), + (2, 36), + (2, 37), + (2, 39), + (3, 5), + (3, 9), + (3, 13), + (3, 22), + (3, 23), + (3, 25), + (3, 27), + (18, 6), + (18, 11), + (18, 15), + (18, 39), + (21, 4), + (21, 10), + (21, 14), + (21, 36), + (28, 6), + (28, 10), + (28, 14), + (28, 16), + (28, 17), + (28, 25), + (28, 27), + (32, 5), + (32, 10), + (32, 12), + (32, 16), + (32, 17), + (32, 22), + (32, 23), + (33, 7), + (33, 10), + (33, 12), + (33, 16), + (33, 17), + (33, 25), + (33, 27), + (36, 5), + (36, 8), + (36, 15), + (36, 16), + (36, 17), + (36, 25), + (36, 27), + (37, 5), + (37, 11), + (37, 15), + (37, 16), + (37, 17), + (37, 22), + (37, 23), + (39, 7), + (39, 8), + (39, 15), + (39, 22), + (39, 23), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + +# ############################ Utility functions ############################ +def verify_coloring(graph, coloring): + for node in graph.nodes(): + if node not in coloring: + return False + + color = coloring[node] + for neighbor in graph.neighbors(node): + if coloring[neighbor] == color: + return False + + return True + + +def verify_length(coloring, expected): + coloring = dict_to_sets(coloring) + return len(coloring) == expected + + +def dict_to_sets(colors): + if len(colors) == 0: + return [] + + k = max(colors.values()) + 1 + sets = [set() for _ in range(k)] + + for (node, color) in colors.items(): + sets[color].add(node) + + return sets + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + return graph + + +def rs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def slf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 
6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def slf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 4), + (2, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 8), + (7, 8), + ] + ) + return graph + + +def lf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)]) + return graph + + +def lf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 7), + (1, 6), + (1, 3), + (1, 4), + (7, 2), + (2, 6), + (2, 3), + (2, 5), + (5, 3), + (5, 4), + (4, 3), + ] + ) + return graph + + +def sl_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def sl_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 4), + (2, 8), + (8, 4), + (8, 6), + (8, 7), + (7, 5), + (7, 6), + (3, 4), + (4, 6), + (6, 5), + (5, 3), + ] + ) + return graph + + +def gis_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def gis_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)]) + return graph + + +def cs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)]) + return graph + + +def rsi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 5), + (1, 6), + (1, 7), + (2, 3), + (2, 8), + (2, 9), + (3, 4), + (3, 8), + (3, 9), + (4, 5), + (4, 6), + (4, 7), + (5, 6), + ] + ) + return graph + + +def sli_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 6), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + ) + return graph + + +def sli_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 7), + (2, 8), + (2, 9), + (3, 6), + (3, 7), + (3, 9), + (4, 5), + (4, 6), + (4, 8), + (4, 9), + (5, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 9), + (7, 8), + (8, 9), + ] + ) + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify the number of expected colors. +BASIC_TEST_CASES = { + empty_graph: 0, + one_node_graph: 1, + two_node_graph: 2, + disconnected: 2, + three_node_clique: 3, +} + + +# -------------------------------------------------------------------------- +# Special test cases. 
Each strategy has a list of tuples of the form +# (graph function, interchange, valid # of colors) +SPECIAL_TEST_CASES = { + "random_sequential": [ + (rs_shc, False, (2, 3)), + (rs_shc, True, 2), + (rsi_shc, True, (3, 4)), + ], + "saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)], + "largest_first": [ + (lf_shc, False, (2, 3)), + (lf_hc, False, 4), + (lf_shc, True, 2), + (lf_hc, True, 3), + (lfi_shc, True, (3, 4)), + (lfi_hc, True, 4), + ], + "smallest_last": [ + (sl_shc, False, (3, 4)), + (sl_hc, False, 5), + (sl_shc, True, 3), + (sl_hc, True, 4), + (sli_shc, True, (3, 4)), + (sli_hc, True, 5), + ], + "independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)], + "connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)], + "connected_sequential_dfs": [(cs_shc, False, (3, 4))], +} + + +# -------------------------------------------------------------------------- +# Helper functions to test +# (graph function, interchange, valid # of colors) + + +def check_state(L, N, H, F, C): + s = len(C[0]) + num_colors = len(C.keys()) + + assert all(u in L[v] for u in L.keys() for v in L[u]) + assert all(F[u] != F[v] for u in L.keys() for v in L[u]) + assert all(len(L[u]) < num_colors for u in L.keys()) + assert all(len(C[x]) == s for x in C) + assert all(H[(c1, c2)] >= 0 for c1 in C.keys() for c2 in C.keys()) + assert all(N[(u, F[u])] == 0 for u in F.keys()) + + +def max_degree(G): + """Get the maximum degree of any node in G.""" + return max(G.degree(node) for node in G.nodes) if len(G.nodes) > 0 else 0 + + +def make_params_from_graph(G, F): + """Returns {N, L, H, C} from the given graph.""" + num_nodes = len(G) + L = {u: [] for u in range(num_nodes)} + for (u, v) in G.edges: + L[u].append(v) + L[v].append(u) + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + return {"N": N, "F": F, "C": C, "H": H, "L": L} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/communicability_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/communicability_alg.py new file mode 100644 index 0000000..ba4b4ab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/communicability_alg.py @@ -0,0 +1,161 @@ +""" +Communicability. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["communicability", "communicability_exp"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def communicability(G): + r"""Returns communicability between all pairs of nodes in G. + + The communicability between pairs of nodes in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability_exp: + Communicability between all pairs of nodes in G using spectral + decomposition. + communicability_betweenness_centrality: + Communicability betweeness centrality for each node in G. + + Notes + ----- + This algorithm uses a spectral decomposition of the adjacency matrix. + Let G=(V,E) be a simple undirected graph. 
Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes `u` and `v` based on the graph spectrum + is [1]_ + + .. math:: + C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}}, + + where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal + eigenvector of the adjacency matrix associated with the eigenvalue + `\lambda_{j}`. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). + https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability(G) + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + w, vec = np.linalg.eigh(A) + expw = np.exp(w) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + # computing communicabilities + for u in G: + c[u] = {} + for v in G: + s = 0 + p = mapping[u] + q = mapping[v] + for j in range(len(nodelist)): + s += vec[:, j][p] * vec[:, j][q] * expw[j] + c[u][v] = float(s) + return c + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def communicability_exp(G): + r"""Returns communicability between all pairs of nodes in G. + + Communicability between pair of node (u,v) of node in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability: + Communicability between pairs of nodes in G. + communicability_betweenness_centrality: + Communicability betweeness centrality for each node in G. + + Notes + ----- + This algorithm uses matrix exponentiation of the adjacency matrix. + + Let G=(V,E) be a simple undirected graph. Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes u and v is [1]_, + + .. math:: + C(u,v) = (e^A)_{uv}, + + where `A` is the adjacency matrix of G. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). + https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability_exp(G) + """ + import scipy as sp + import scipy.linalg # call as sp.linalg + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + # communicability matrix + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + for u in G: + c[u] = {} + for v in G: + c[u][v] = float(expA[mapping[u], mapping[v]]) + return c diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/__init__.py new file mode 100644 index 0000000..9aea405 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/__init__.py @@ -0,0 +1,26 @@ +"""Functions for computing and measuring community structure. 
+
+The functions in this module are not imported into the top-level
+:mod:`networkx` namespace. You can access these functions by importing
+the :mod:`networkx.algorithms.community` module, then accessing the
+functions as attributes of ``community``. For example::
+
+    >>> from networkx.algorithms import community
+    >>> G = nx.barbell_graph(5, 1)
+    >>> communities_generator = community.girvan_newman(G)
+    >>> top_level_communities = next(communities_generator)
+    >>> next_level_communities = next(communities_generator)
+    >>> sorted(map(sorted, next_level_communities))
+    [[0, 1, 2, 3, 4], [5], [6, 7, 8, 9, 10]]
+
+"""
+from networkx.algorithms.community.asyn_fluid import *
+from networkx.algorithms.community.centrality import *
+from networkx.algorithms.community.kclique import *
+from networkx.algorithms.community.kernighan_lin import *
+from networkx.algorithms.community.label_propagation import *
+from networkx.algorithms.community.lukes import *
+from networkx.algorithms.community.modularity_max import *
+from networkx.algorithms.community.quality import *
+from networkx.algorithms.community.community_utils import *
+from networkx.algorithms.community.louvain import *
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/asyn_fluid.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/asyn_fluid.py
new file mode 100644
index 0000000..3d723be
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/asyn_fluid.py
@@ -0,0 +1,147 @@
+"""Asynchronous Fluid Communities algorithm for community detection."""
+
+from collections import Counter
+
+from networkx.algorithms.components import is_connected
+from networkx.exception import NetworkXError
+from networkx.utils import groups, not_implemented_for, py_random_state
+
+__all__ = ["asyn_fluidc"]
+
+
+@py_random_state(3)
+@not_implemented_for("directed", "multigraph")
+def asyn_fluidc(G, k, max_iter=100, seed=None):
+    """Returns communities in `G` as detected by the Fluid Communities algorithm.
+
+    The asynchronous fluid communities algorithm is described in
+    [1]_. The algorithm is based on the simple idea of fluids interacting
+    in an environment, expanding and pushing each other. Its initialization is
+    random, so the communities found may vary across executions.
+
+    The algorithm proceeds as follows. First, each of the initial k communities
+    is initialized in a random vertex in the graph. Then the algorithm iterates
+    over all vertices in a random order, updating the community of each vertex
+    based on its own community and the communities of its neighbours. This
+    process is repeated until convergence.
+    At all times, each community has a total density of 1, which is equally
+    distributed among the vertices it contains. If a vertex changes
+    community, the densities of the affected communities are adjusted
+    immediately. When a complete iteration over all vertices is done, such that
+    no vertex changes the community it belongs to, the algorithm has converged
+    and returns.
+
+    This is the original version of the algorithm described in [1]_.
+    Unfortunately, it does not support weighted graphs yet.
+
+    Parameters
+    ----------
+    G : Graph
+
+    k : integer
+        The number of communities to be found.
+
+    max_iter : integer
+        The maximum number of iterations allowed. By default 100.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+ + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + k variable is not an optional argument. + + References + ---------- + .. [1] Parés F., Garcia-Gasulla D. et al. "Fluid Communities: A + Competitive and Highly Scalable Community Detection Algorithm". + [https://arxiv.org/pdf/1703.09307.pdf]. + """ + # Initial checks + if not isinstance(k, int): + raise NetworkXError("k must be an integer.") + if not k > 0: + raise NetworkXError("k must be greater than 0.") + if not is_connected(G): + raise NetworkXError("Fluid Communities require connected Graphs.") + if len(G) < k: + raise NetworkXError("k cannot be bigger than the number of nodes.") + # Initialization + max_density = 1.0 + vertices = list(G) + seed.shuffle(vertices) + communities = {n: i for i, n in enumerate(vertices[:k])} + density = {} + com_to_numvertices = {} + for vertex in communities.keys(): + com_to_numvertices[communities[vertex]] = 1 + density[communities[vertex]] = max_density + # Set up control variables and start iterating + iter_count = 0 + cont = True + while cont: + cont = False + iter_count += 1 + # Loop over all vertices in graph in a random order + vertices = list(G) + seed.shuffle(vertices) + for vertex in vertices: + # Updating rule + com_counter = Counter() + # Take into account self vertex community + try: + com_counter.update({communities[vertex]: density[communities[vertex]]}) + except KeyError: + pass + # Gather neighbour vertex communities + for v in G[vertex]: + try: + com_counter.update({communities[v]: density[communities[v]]}) + except KeyError: + continue + # Check which is the community with highest density + new_com = -1 + if len(com_counter.keys()) > 0: + max_freq = max(com_counter.values()) + best_communities = [ + com + for com, freq in com_counter.items() + if (max_freq - freq) < 0.0001 + ] + # If actual vertex com in best communities, it is preserved + try: + if communities[vertex] in best_communities: + new_com = communities[vertex] + except KeyError: + pass + # If vertex community changes... + if new_com == -1: + # Set flag of non-convergence + cont = True + # Randomly chose a new community from candidates + new_com = seed.choice(best_communities) + # Update previous community status + try: + com_to_numvertices[communities[vertex]] -= 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + except KeyError: + pass + # Update new community status + communities[vertex] = new_com + com_to_numvertices[communities[vertex]] += 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + # If maximum iterations reached --> output actual results + if iter_count > max_iter: + break + # Return results by grouping communities as list of vertices + return iter(groups(communities).values()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/centrality.py new file mode 100644 index 0000000..926cd9a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/centrality.py @@ -0,0 +1,170 @@ +"""Functions for computing communities based on centrality notions.""" + +import networkx as nx + +__all__ = ["girvan_newman"] + + +def girvan_newman(G, most_valuable_edge=None): + """Finds communities in a graph using the Girvan–Newman method. 
+ + Parameters + ---------- + G : NetworkX graph + + most_valuable_edge : function + Function that takes a graph as input and outputs an edge. The + edge returned by this function will be recomputed and removed at + each iteration of the algorithm. + + If not specified, the edge with the highest + :func:`networkx.edge_betweenness_centrality` will be used. + + Returns + ------- + iterator + Iterator over tuples of sets of nodes in `G`. Each set of node + is a community, each tuple is a sequence of communities at a + particular level of the algorithm. + + Examples + -------- + To get the first pair of communities:: + + >>> G = nx.path_graph(10) + >>> comp = girvan_newman(G) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To get only the first *k* tuples of communities, use + :func:`itertools.islice`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 2 + >>> comp = girvan_newman(G) + >>> for communities in itertools.islice(comp, k): + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + + To stop getting tuples of communities once the number of communities + is greater than *k*, use :func:`itertools.takewhile`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 4 + >>> comp = girvan_newman(G) + >>> limited = itertools.takewhile(lambda c: len(c) <= k, comp) + >>> for communities in limited: + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5], [6, 7]) + + To just choose an edge to remove based on the weight:: + + >>> from operator import itemgetter + >>> G = nx.path_graph(10) + >>> edges = G.edges() + >>> nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, "weight") + >>> def heaviest(G): + ... u, v, w = max(G.edges(data="weight"), key=itemgetter(2)) + ... return (u, v) + ... + >>> comp = girvan_newman(G, most_valuable_edge=heaviest) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4, 5, 6, 7, 8], [9]) + + To utilize edge weights when choosing an edge with, for example, the + highest betweenness centrality:: + + >>> from networkx import edge_betweenness_centrality as betweenness + >>> def most_central_edge(G): + ... centrality = betweenness(G, weight="weight") + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = girvan_newman(G, most_valuable_edge=most_central_edge) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To specify a different ranking algorithm for edges, use the + `most_valuable_edge` keyword argument:: + + >>> from networkx import edge_betweenness_centrality + >>> from random import random + >>> def most_central_edge(G): + ... centrality = edge_betweenness_centrality(G) + ... max_cent = max(centrality.values()) + ... # Scale the centrality values so they are between 0 and 1, + ... # and add some random noise. + ... centrality = {e: c / max_cent for e, c in centrality.items()} + ... # Add some random noise. + ... centrality = {e: c + random() for e, c in centrality.items()} + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = girvan_newman(G, most_valuable_edge=most_central_edge) + + Notes + ----- + The Girvan–Newman algorithm detects communities by progressively + removing edges from the original graph. 
The algorithm removes the + "most valuable" edge, traditionally the edge with the highest + betweenness centrality, at each step. As the graph breaks down into + pieces, the tightly knit community structure is exposed and the + result can be depicted as a dendrogram. + + """ + # If the graph is already empty, simply return its connected + # components. + if G.number_of_edges() == 0: + yield tuple(nx.connected_components(G)) + return + # If no function is provided for computing the most valuable edge, + # use the edge betweenness centrality. + if most_valuable_edge is None: + + def most_valuable_edge(G): + """Returns the edge with the highest betweenness centrality + in the graph `G`. + + """ + # We have guaranteed that the graph is non-empty, so this + # dictionary will never be empty. + betweenness = nx.edge_betweenness_centrality(G) + return max(betweenness, key=betweenness.get) + + # The copy of G here must include the edge weight data. + g = G.copy().to_undirected() + # Self-loops must be removed because their removal has no effect on + # the connected components of the graph. + g.remove_edges_from(nx.selfloop_edges(g)) + while g.number_of_edges() > 0: + yield _without_most_central_edges(g, most_valuable_edge) + + +def _without_most_central_edges(G, most_valuable_edge): + """Returns the connected components of the graph that results from + repeatedly removing the most "valuable" edge in the graph. + + `G` must be a non-empty graph. This function modifies the graph `G` + in-place; that is, it removes edges on the graph `G`. + + `most_valuable_edge` is a function that takes the graph `G` as input + (or a subgraph with one or more edges of `G` removed) and returns an + edge. That edge will be removed and this process will be repeated + until the number of connected components in the graph increases. + + """ + original_num_components = nx.number_connected_components(G) + num_new_components = original_num_components + while num_new_components <= original_num_components: + edge = most_valuable_edge(G) + G.remove_edge(*edge) + new_components = tuple(nx.connected_components(G)) + num_new_components = len(new_components) + return new_components diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/community_utils.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/community_utils.py new file mode 100644 index 0000000..f06fcf4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/community_utils.py @@ -0,0 +1,27 @@ +"""Helper functions for community-finding algorithms.""" + +__all__ = ["is_partition"] + + +def is_partition(G, communities): + """Returns *True* if `communities` is a partition of the nodes of `G`. + + A partition of a universe set is a family of pairwise disjoint sets + whose union is the entire universe set. + + Parameters + ---------- + G : NetworkX graph. + + communities : list or iterable of sets of nodes + If not a list, the iterable is converted internally to a list. + If it is an iterator it is exhausted. 
+ + """ + # Alternate implementation: + # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G) + if not isinstance(communities, list): + communities = list(communities) + nodes = {n for c in communities for n in c if n in G} + + return len(G) == len(nodes) == sum(len(c) for c in communities) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kclique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kclique.py new file mode 100644 index 0000000..804b1c9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kclique.py @@ -0,0 +1,79 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["k_clique_communities"] + + +def k_clique_communities(G, k, cliques=None): + """Find k-clique communities in graph using the percolation method. + + A k-clique community is the union of all cliques of size k that + can be reached through adjacent (sharing k-1 nodes) k-cliques. + + Parameters + ---------- + G : NetworkX graph + + k : int + Size of smallest clique + + cliques: list or generator + Precomputed cliques (use networkx.find_cliques(G)) + + Returns + ------- + Yields sets of nodes, one for each k-clique community. + + Examples + -------- + >>> from networkx.algorithms.community import k_clique_communities + >>> G = nx.complete_graph(5) + >>> K5 = nx.convert_node_labels_to_integers(G, first_label=2) + >>> G.add_edges_from(K5.edges()) + >>> c = list(k_clique_communities(G, 4)) + >>> sorted(list(c[0])) + [0, 1, 2, 3, 4, 5, 6] + >>> list(k_clique_communities(G, 6)) + [] + + References + ---------- + .. [1] Gergely Palla, Imre Derényi, Illés Farkas1, and Tamás Vicsek, + Uncovering the overlapping community structure of complex networks + in nature and society Nature 435, 814-818, 2005, + doi:10.1038/nature03607 + """ + if k < 2: + raise nx.NetworkXError(f"k={k}, k must be greater than 1.") + if cliques is None: + cliques = nx.find_cliques(G) + cliques = [frozenset(c) for c in cliques if len(c) >= k] + + # First index which nodes are in which cliques + membership_dict = defaultdict(list) + for clique in cliques: + for node in clique: + membership_dict[node].append(clique) + + # For each clique, see which adjacent cliques percolate + perc_graph = nx.Graph() + perc_graph.add_nodes_from(cliques) + for clique in cliques: + for adj_clique in _get_adjacent_cliques(clique, membership_dict): + if len(clique.intersection(adj_clique)) >= (k - 1): + perc_graph.add_edge(clique, adj_clique) + + # Connected components of clique graph with perc edges + # are the percolated cliques + for component in nx.connected_components(perc_graph): + yield (frozenset.union(*component)) + + +def _get_adjacent_cliques(clique, membership_dict): + adjacent_cliques = set() + for n in clique: + for adj_clique in membership_dict[n]: + if clique != adj_clique: + adjacent_cliques.add(adj_clique) + return adjacent_cliques diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kernighan_lin.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kernighan_lin.py new file mode 100644 index 0000000..2ae22e3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/kernighan_lin.py @@ -0,0 +1,137 @@ +"""Functions for computing the Kernighan–Lin bipartition algorithm.""" + +from itertools import count + +import networkx as nx +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils import BinaryHeap, not_implemented_for, py_random_state + 
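A quick sanity check of is_partition from the community_utils module added above; this is an illustrative sketch, not part of the patch. A family of sets is a partition only if the blocks are pairwise disjoint and together cover every node of the graph.

import networkx as nx
from networkx.algorithms.community.community_utils import is_partition

G = nx.path_graph(4)
print(is_partition(G, [{0, 1}, {2, 3}]))     # True: disjoint blocks covering every node
print(is_partition(G, [{0, 1}, {1, 2, 3}]))  # False: node 1 appears in two blocks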
+__all__ = ["kernighan_lin_bisection"] + + +def _kernighan_lin_sweep(edges, side): + """ + This is a modified form of Kernighan-Lin, which moves single nodes at a + time, alternating between sides to keep the bisection balanced. We keep + two min-heaps of swap costs to make optimal-next-move selection fast. + """ + costs0, costs1 = costs = BinaryHeap(), BinaryHeap() + for u, side_u, edges_u in zip(count(), side, edges): + cost_u = sum(w if side[v] else -w for v, w in edges_u) + costs[side_u].insert(u, cost_u if side_u else -cost_u) + + def _update_costs(costs_x, x): + for y, w in edges[x]: + costs_y = costs[side[y]] + cost_y = costs_y.get(y) + if cost_y is not None: + cost_y += 2 * (-w if costs_x is costs_y else w) + costs_y.insert(y, cost_y, True) + + i = 0 + totcost = 0 + while costs0 and costs1: + u, cost_u = costs0.pop() + _update_costs(costs0, u) + v, cost_v = costs1.pop() + _update_costs(costs1, v) + totcost += cost_u + cost_v + i += 1 + yield totcost, i, (u, v) + + +@py_random_state(4) +@not_implemented_for("directed") +def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None): + """Partition a graph into two blocks using the Kernighan–Lin + algorithm. + + This algorithm partitions a network into two sets by iteratively + swapping pairs of nodes to reduce the edge cut between the two sets. The + pairs are chosen according to a modified form of Kernighan-Lin, which + moves node individually, alternating between sides to keep the bisection + balanced. + + Parameters + ---------- + G : graph + + partition : tuple + Pair of iterables containing an initial partition. If not + specified, a random balanced partition is used. + + max_iter : int + Maximum number of times to attempt swaps to find an + improvemement before giving up. + + weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Only used if partition is None + + Returns + ------- + partition : tuple + A pair of sets of nodes representing the bipartition. + + Raises + ------ + NetworkXError + If partition is not a valid partition of the nodes of the graph. + + References + ---------- + .. [1] Kernighan, B. W.; Lin, Shen (1970). + "An efficient heuristic procedure for partitioning graphs." + *Bell Systems Technical Journal* 49: 291--307. + Oxford University Press 2011. 
+ + """ + n = len(G) + labels = list(G) + seed.shuffle(labels) + index = {v: i for i, v in enumerate(labels)} + + if partition is None: + side = [0] * (n // 2) + [1] * ((n + 1) // 2) + else: + try: + A, B = partition + except (TypeError, ValueError) as err: + raise nx.NetworkXError("partition must be two sets") from err + if not is_partition(G, (A, B)): + raise nx.NetworkXError("partition invalid") + side = [0] * n + for a in A: + side[index[a]] = 1 + + if G.is_multigraph(): + edges = [ + [ + (index[u], sum(e.get(weight, 1) for e in d.values())) + for u, d in G[v].items() + ] + for v in labels + ] + else: + edges = [ + [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels + ] + + for i in range(max_iter): + costs = list(_kernighan_lin_sweep(edges, side)) + min_cost, min_i, _ = min(costs) + if min_cost >= 0: + break + + for _, _, (u, v) in costs[:min_i]: + side[u] = 1 + side[v] = 0 + + A = {u for u, s in zip(labels, side) if s == 0} + B = {u for u, s in zip(labels, side) if s == 1} + return A, B diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/label_propagation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/label_propagation.py new file mode 100644 index 0000000..09c07c2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/label_propagation.py @@ -0,0 +1,209 @@ +""" +Label propagation community detection algorithms. +""" +from collections import Counter, defaultdict + +import networkx as nx +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = ["label_propagation_communities", "asyn_lpa_communities"] + + +@py_random_state(2) +def asyn_lpa_communities(G, weight=None, seed=None): + """Returns communities in `G` as detected by asynchronous label + propagation. + + The asynchronous label propagation algorithm is described in + [1]_. The algorithm is probabilistic and the found communities may + vary on different executions. + + The algorithm proceeds as follows. After initializing each node with + a unique label, the algorithm repeatedly sets the label of a node to + be the label that appears most frequently among that nodes + neighbors. The algorithm halts when each node has the label that + appears most frequently among its neighbors. The algorithm is + asynchronous because each node is updated without waiting for + updates on the remaining nodes. + + This generalized version of the algorithm in [1]_ accepts edge + weights. + + Parameters + ---------- + G : Graph + + weight : string + The edge attribute representing the weight of an edge. + If None, each edge is assumed to have weight one. In this + algorithm, the weight of an edge is used in determining the + frequency with which a label appears among the neighbors of a + node: a higher weight means the label appears more often. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge weight attributes must be numerical. + + References + ---------- + .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near + linear time algorithm to detect community structures in large-scale + networks." Physical Review E 76.3 (2007): 036106. 
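A minimal usage sketch for the kernighan_lin_bisection function defined above (illustrative only, not part of the patch). The exact split can vary with the seed, but on a barbell graph the two cliques are the natural bisection.

import networkx as nx
from networkx.algorithms.community.kernighan_lin import kernighan_lin_bisection

G = nx.barbell_graph(5, 0)            # two 5-cliques joined by a single edge
A, B = kernighan_lin_bisection(G, seed=1)
print(sorted(A), sorted(B))           # typically the two cliques, [0..4] and [5..9]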
+ """ + + labels = {n: i for i, n in enumerate(G)} + cont = True + + while cont: + cont = False + nodes = list(G) + seed.shuffle(nodes) + + for node in nodes: + + if not G[node]: + continue + + # Get label frequencies among adjacent nodes. + # Depending on the order they are processed in, + # some nodes will be in iteration t and others in t-1, + # making the algorithm asynchronous. + if weight is None: + # initialising a Counter from an iterator of labels is + # faster for getting unweighted label frequencies + label_freq = Counter(map(labels.get, G[node])) + else: + # updating a defaultdict is substantially faster + # for getting weighted label frequencies + label_freq = defaultdict(float) + for _, v, wt in G.edges(node, data=weight, default=1): + label_freq[labels[v]] += wt + + # Get the labels that appear with maximum frequency. + max_freq = max(label_freq.values()) + best_labels = [ + label for label, freq in label_freq.items() if freq == max_freq + ] + + # If the node does not have one of the maximum frequency labels, + # randomly choose one of them and update the node's label. + # Continue the iteration as long as at least one node + # doesn't have a maximum frequency label. + if labels[node] not in best_labels: + labels[node] = seed.choice(best_labels) + cont = True + + yield from groups(labels).values() + + +@not_implemented_for("directed") +def label_propagation_communities(G): + """Generates community sets determined by label propagation + + Finds communities in `G` using a semi-synchronous label propagation + method [1]_. This method combines the advantages of both the synchronous + and asynchronous models. Not implemented for directed graphs. + + Parameters + ---------- + G : graph + An undirected NetworkX graph. + + Returns + ------- + communities : iterable + A dict_values object that contains a set of nodes for each community. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed + + References + ---------- + .. [1] Cordasco, G., & Gargano, L. (2010, December). Community detection + via semi-synchronous label propagation algorithms. In Business + Applications of Social Network Analysis (BASNA), 2010 IEEE International + Workshop on (pp. 1-8). IEEE. + """ + coloring = _color_network(G) + # Create a unique label for each node in the graph + labeling = {v: k for k, v in enumerate(G)} + while not _labeling_complete(labeling, G): + # Update the labels of every node with the same color. + for color, nodes in coloring.items(): + for n in nodes: + _update_label(n, labeling, G) + + clusters = defaultdict(set) + for node, label in labeling.items(): + clusters[label].add(node) + return clusters.values() + + +def _color_network(G): + """Colors the network so that neighboring nodes all have distinct colors. + + Returns a dict keyed by color to a set of nodes with that color. + """ + coloring = dict() # color => set(node) + colors = nx.coloring.greedy_color(G) + for node, color in colors.items(): + if color in coloring: + coloring[color].add(node) + else: + coloring[color] = {node} + return coloring + + +def _labeling_complete(labeling, G): + """Determines whether or not LPA is done. + + Label propagation is complete when all nodes have a label that is + in the set of highest frequency labels amongst its neighbors. + + Nodes with no neighbors are considered complete. 
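For reference, a short usage sketch of the two label propagation entry points defined above (illustrative only, not part of the patch). Both methods are non-deterministic, so outputs are hedged.

import networkx as nx
from networkx.algorithms.community.label_propagation import (
    asyn_lpa_communities,
    label_propagation_communities,
)

G = nx.barbell_graph(4, 0)            # two 4-cliques joined by a single edge
print([sorted(c) for c in label_propagation_communities(G)])
print([sorted(c) for c in asyn_lpa_communities(G, seed=42)])
# Both typically recover the two cliques, though label propagation is not deterministic.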
+ """ + return all( + labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0 + ) + + +def _most_frequent_labels(node, labeling, G): + """Returns a set of all labels with maximum frequency in `labeling`. + + Input `labeling` should be a dict keyed by node to labels. + """ + if not G[node]: + # Nodes with no neighbors are themselves a community and are labeled + # accordingly, hence the immediate if statement. + return {labeling[node]} + + # Compute the frequencies of all neighbours of node + freqs = Counter(labeling[q] for q in G[node]) + max_freq = max(freqs.values()) + return {label for label, freq in freqs.items() if freq == max_freq} + + +def _update_label(node, labeling, G): + """Updates the label of a node using the Prec-Max tie breaking algorithm + + The algorithm is explained in: 'Community Detection via Semi-Synchronous + Label Propagation Algorithms' Cordasco and Gargano, 2011 + """ + high_labels = _most_frequent_labels(node, labeling, G) + if len(high_labels) == 1: + labeling[node] = high_labels.pop() + elif len(high_labels) > 1: + # Prec-Max + if labeling[node] not in high_labels: + labeling[node] = max(high_labels) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/louvain.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/louvain.py new file mode 100644 index 0000000..e3ad78b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/louvain.py @@ -0,0 +1,350 @@ +"""Function for detecting communities based on Louvain Community Detection +Algorithm""" + +from collections import defaultdict, deque + +import networkx as nx +from networkx.algorithms.community import modularity +from networkx.utils import py_random_state + +__all__ = ["louvain_communities", "louvain_partitions"] + + +@py_random_state("seed") +def louvain_communities( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + r"""Find the best partition of a graph using the Louvain Community Detection + Algorithm. + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The algorithm works in 2 steps. On the first step it assigns every node to be + in its own community and then for each node it tries to find the maximum positive + modularity gain by moving each node to all of its neighbor communities. If no positive + gain is achieved the node remains in its original community. + + The modularity gain obtained by moving an isolated node $i$ into a community $C$ can + easily be calculated by the following formula (combining [1]_ [2]_ and some algebra): + + .. math:: + \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2} + + where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links + from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$, + $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$ + is the resolution parameter. + + For the directed case the modularity gain can be computed using this formula according to [3]_ + + .. math:: + \Delta Q = \frac{k_{i,in}}{m} + - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2} + + where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and + $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident + to nodes in $C$. 
+ + The first phase continues until no individual move can improve the modularity. + + The second phase consists in building a new network whose nodes are now the communities + found in the first phase. To do so, the weights of the links between the new nodes are given by + the sum of the weight of the links between nodes in the corresponding two communities. Once this + phase is complete it is possible to reapply the first phase creating bigger communities with + increased modularity. + + The above two phases are executed until no modularity gain is achieved (or is less than + the `threshold`). + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + Examples + -------- + >>> import networkx as nx + >>> import networkx.algorithms.community as nx_comm + >>> G = nx.petersen_graph() + >>> nx_comm.louvain_communities(G, seed=123) + [{0, 4, 5, 7, 9}, {1, 2, 3, 6, 8}] + + Notes + ----- + The order in which the nodes are considered can affect the final output. In the algorithm + the ordering happens using a random shuffle. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008). https://doi.org/10.1088/1742-5468/2008/10/P10008 + .. [2] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing + well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z + .. [3] Nicolas Dugué, Anthony Perez. Directed Louvain : maximizing modularity in directed networks. + [Research Report] Université d’Orléans. 2015. hal-01231784. https://hal.archives-ouvertes.fr/hal-01231784 + + See Also + -------- + louvain_partitions + """ + + d = louvain_partitions(G, weight, resolution, threshold, seed) + q = deque(d, maxlen=1) + return q.pop() + + +@py_random_state("seed") +def louvain_partitions( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + """Yields partitions for each level of the Louvain Community Detection Algorithm + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The partitions at each level (step of the algorithm) form a dendogram of communities. + A dendrogram is a diagram representing a tree and each level represents + a partition of the G graph. The top level contains the smallest communities + and as you traverse to the bottom of the tree the communities get bigger + and the overal modularity increases making the partition better. + + Each level is generated by executing the two phases of the Louvain Community + Detection Algorithm. 
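A minimal usage sketch of louvain_communities and louvain_partitions from this module (illustrative only, not part of the patch); the resulting partition depends on the seed.

import networkx as nx
from networkx.algorithms.community.louvain import louvain_communities, louvain_partitions

G = nx.karate_club_graph()
print(len(louvain_communities(G, seed=123)))   # often 4 communities for this graph
for level in louvain_partitions(G, seed=123):
    print(len(level))                          # fewer, larger communities at each level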
+ + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008) + + See Also + -------- + louvain_communities + """ + + partition = [{u} for u in G.nodes()] + mod = modularity(G, partition, resolution=resolution, weight=weight) + is_directed = G.is_directed() + if G.is_multigraph(): + graph = _convert_multigraph(G, weight, is_directed) + else: + graph = G.__class__() + graph.add_nodes_from(G) + graph.add_weighted_edges_from(G.edges(data=weight, default=1)) + + m = graph.size(weight="weight") + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + improvement = True + while improvement: + yield partition + new_mod = modularity( + graph, inner_partition, resolution=resolution, weight="weight" + ) + if new_mod - mod <= threshold: + return + mod = new_mod + graph = _gen_graph(graph, inner_partition) + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + + +def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): + """Calculate one level of the Louvain partitions tree + + Parameters + ---------- + G : NetworkX Graph/DiGraph + The graph from which to detect communities + m : number + The size of the graph `G`. + partition : list of sets of nodes + A valid partition of the graph `G` + resolution : positive number + The resolution parameter for computing the modularity of a partition + is_directed : bool + True if `G` is a directed graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + """ + node2com = {u: i for i, u in enumerate(G.nodes())} + inner_partition = [{u} for u in G.nodes()] + if is_directed: + in_degrees = dict(G.in_degree(weight="weight")) + out_degrees = dict(G.out_degree(weight="weight")) + Stot_in = [deg for deg in in_degrees.values()] + Stot_out = [deg for deg in out_degrees.values()] + # Calculate weights for both in and out neighbours + nbrs = {} + for u in G: + nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + nbrs[u][n] += wt + else: + degrees = dict(G.degree(weight="weight")) + Stot = [deg for deg in degrees.values()] + nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G} + rand_nodes = list(G.nodes) + seed.shuffle(rand_nodes) + nb_moves = 1 + improvement = False + while nb_moves > 0: + nb_moves = 0 + for u in rand_nodes: + best_mod = 0 + best_com = node2com[u] + weights2com = _neighbor_weights(nbrs[u], node2com) + if is_directed: + in_degree = in_degrees[u] + out_degree = out_degrees[u] + Stot_in[best_com] -= in_degree + Stot_out[best_com] -= out_degree + remove_cost = ( + -weights2com[best_com] / m + + resolution + * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com]) + / m**2 + ) + else: + degree = degrees[u] + Stot[best_com] -= degree + remove_cost = -weights2com[best_com] / m + resolution * ( + Stot[best_com] * degree + ) / (2 * m**2) + for nbr_com, wt in weights2com.items(): + if is_directed: + gain = ( + remove_cost + + wt / m + - resolution + * ( + out_degree * Stot_in[nbr_com] + + in_degree * Stot_out[nbr_com] + ) + / m**2 + ) + else: + gain = ( + remove_cost + + wt / m + - resolution * (Stot[nbr_com] * degree) / (2 * m**2) + ) + if gain > best_mod: + best_mod = gain + best_com = nbr_com + if is_directed: + Stot_in[best_com] += in_degree + Stot_out[best_com] += out_degree + else: + Stot[best_com] += degree + if best_com != node2com[u]: + com = G.nodes[u].get("nodes", {u}) + partition[node2com[u]].difference_update(com) + inner_partition[node2com[u]].remove(u) + partition[best_com].update(com) + inner_partition[best_com].add(u) + improvement = True + nb_moves += 1 + node2com[u] = best_com + partition = list(filter(len, partition)) + inner_partition = list(filter(len, inner_partition)) + return partition, inner_partition, improvement + + +def _neighbor_weights(nbrs, node2com): + """Calculate weights between node and its neighbor communities. + + Parameters + ---------- + nbrs : dictionary + Dictionary with nodes' neighbours as keys and their edge weight as value. + node2com : dictionary + Dictionary with all graph's nodes as keys and their community index as value. 
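To make the bookkeeping concrete, here is a stand-alone rendition of that weight aggregation on hypothetical data (illustrative only; the names nbrs and node2com mirror the parameters described above).

from collections import defaultdict

nbrs = {"b": 2.0, "c": 1.0, "d": 3.0}    # neighbour -> edge weight for some node "a"
node2com = {"b": 0, "c": 0, "d": 1}      # neighbour -> community index

weights = defaultdict(float)
for nbr, wt in nbrs.items():
    weights[node2com[nbr]] += wt
print(dict(weights))                     # {0: 3.0, 1: 3.0}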
+ + """ + weights = defaultdict(float) + for nbr, wt in nbrs.items(): + weights[node2com[nbr]] += wt + return weights + + +def _gen_graph(G, partition): + """Generate a new graph based on the partitions of a given graph""" + H = G.__class__() + node2com = {} + for i, part in enumerate(partition): + nodes = set() + for node in part: + node2com[node] = i + nodes.update(G.nodes[node].get("nodes", {node})) + H.add_node(i, nodes=nodes) + + for node1, node2, wt in G.edges(data=True): + wt = wt["weight"] + com1 = node2com[node1] + com2 = node2com[node2] + temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"] + H.add_edge(com1, com2, **{"weight": wt + temp}) + return H + + +def _convert_multigraph(G, weight, is_directed): + """Convert a Multigraph to normal Graph""" + if is_directed: + H = nx.DiGraph() + else: + H = nx.Graph() + H.add_nodes_from(G) + for u, v, wt in G.edges(data=weight, default=1): + if H.has_edge(u, v): + H[u][v]["weight"] += wt + else: + H.add_edge(u, v, weight=wt) + return H diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/lukes.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/lukes.py new file mode 100644 index 0000000..b34077a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/lukes.py @@ -0,0 +1,227 @@ +"""Lukes Algorithm for exact optimal weighted tree partitioning.""" + +from copy import deepcopy +from functools import lru_cache +from random import choice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["lukes_partitioning"] + +D_EDGE_W = "weight" +D_EDGE_VALUE = 1.0 +D_NODE_W = "weight" +D_NODE_VALUE = 1 +PKEY = "partitions" +CLUSTER_EVAL_CACHE_SIZE = 2048 + + +def _split_n_from(n, min_size_of_first_part): + # splits j in two parts of which the first is at least + # the second argument + assert n >= min_size_of_first_part + for p1 in range(min_size_of_first_part, n + 1): + yield p1, n - p1 + + +def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None): + + """Optimal partitioning of a weighted tree using the Lukes algorithm. + + This algorithm partitions a connected, acyclic graph featuring integer + node weights and float edge weights. The resulting clusters are such + that the total weight of the nodes in each cluster does not exceed + max_size and that the weight of the edges that are cut by the partition + is minimum. The algorithm is based on LUKES[1]. + + Parameters + ---------- + G : graph + + max_size : int + Maximum weight a partition can have in terms of sum of + node_weight for all nodes in the partition + + edge_weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + node_weight : key + Node data key to use as weight. If None, the weights are all + set to one. The data must be int. + + Returns + ------- + partition : list + A list of sets of nodes representing the clusters of the + partition. + + Raises + ------ + NotATree + If G is not a tree. + TypeError + If any of the values of node_weight is not int. + + References + ---------- + .. Lukes, J. A. (1974). + "Efficient Algorithm for the Partitioning of Trees." + IBM Journal of Research and Development, 18(3), 217–224. 
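A small usage sketch for lukes_partitioning (illustrative only, not part of the patch). With the default unit node weights, a path of six nodes and max_size=2 splits into three adjacent pairs.

import networkx as nx
from networkx.algorithms.community.lukes import lukes_partitioning

T = nx.path_graph(6)                     # a path is a tree; default node weight is 1
parts = lukes_partitioning(T, max_size=2)
print(parts)                             # e.g. [{0, 1}, {2, 3}, {4, 5}]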
+ + """ + # First sanity check and tree preparation + if not nx.is_tree(G): + raise nx.NotATree("lukes_partitioning works only on trees") + else: + if nx.is_directed(G): + root = [n for n, d in G.in_degree() if d == 0] + assert len(root) == 1 + root = root[0] + t_G = deepcopy(G) + else: + root = choice(list(G.nodes)) + # this has the desirable side effect of not inheriting attributes + t_G = nx.dfs_tree(G, root) + + # Since we do not want to screw up the original graph, + # if we have a blank attribute, we make a deepcopy + if edge_weight is None or node_weight is None: + safe_G = deepcopy(G) + if edge_weight is None: + nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) + edge_weight = D_EDGE_W + if node_weight is None: + nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) + node_weight = D_NODE_W + else: + safe_G = G + + # Second sanity check + # The values of node_weight MUST BE int. + # I cannot see any room for duck typing without incurring serious + # danger of subtle bugs. + all_n_attr = nx.get_node_attributes(safe_G, node_weight).values() + for x in all_n_attr: + if not isinstance(x, int): + raise TypeError( + "lukes_partitioning needs integer " + f"values for node_weight ({node_weight})" + ) + + # SUBROUTINES ----------------------- + # these functions are defined here for two reasons: + # - brevity: we can leverage global "safe_G" + # - caching: signatures are hashable + + @not_implemented_for("undirected") + # this is intended to be called only on t_G + def _leaves(gr): + for x in gr.nodes: + if not nx.descendants(gr, x): + yield x + + @not_implemented_for("undirected") + def _a_parent_of_leaves_only(gr): + tleaves = set(_leaves(gr)) + for n in set(gr.nodes) - tleaves: + if all([x in tleaves for x in nx.descendants(gr, n)]): + return n + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _value_of_cluster(cluster): + valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster] + return sum(safe_G.edges[e][edge_weight] for e in valid_edges) + + def _value_of_partition(partition): + return sum(_value_of_cluster(frozenset(c)) for c in partition) + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _weight_of_cluster(cluster): + return sum(safe_G.nodes[n][node_weight] for n in cluster) + + def _pivot(partition, node): + ccx = [c for c in partition if node in c] + assert len(ccx) == 1 + return ccx[0] + + def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weigth): + + ccx = _pivot(partition_1, x) + cci = _pivot(partition_2, i) + merged_xi = ccx.union(cci) + + # We first check if we can do the merge. 
+ # If so, we do the actual calculations, otherwise we concatenate + if _weight_of_cluster(frozenset(merged_xi)) <= ref_weigth: + cp1 = list(filter(lambda x: x != ccx, partition_1)) + cp2 = list(filter(lambda x: x != cci, partition_2)) + + option_2 = [merged_xi] + cp1 + cp2 + return option_2, _value_of_partition(option_2) + else: + option_1 = partition_1 + partition_2 + return option_1, _value_of_partition(option_1) + + # INITIALIZATION ----------------------- + leaves = set(_leaves(t_G)) + for lv in leaves: + t_G.nodes[lv][PKEY] = dict() + slot = safe_G.nodes[lv][node_weight] + t_G.nodes[lv][PKEY][slot] = [{lv}] + t_G.nodes[lv][PKEY][0] = [{lv}] + + for inner in [x for x in t_G.nodes if x not in leaves]: + t_G.nodes[inner][PKEY] = dict() + slot = safe_G.nodes[inner][node_weight] + t_G.nodes[inner][PKEY][slot] = [{inner}] + + # CORE ALGORITHM ----------------------- + while True: + x_node = _a_parent_of_leaves_only(t_G) + weight_of_x = safe_G.nodes[x_node][node_weight] + best_value = 0 + best_partition = None + bp_buffer = dict() + x_descendants = nx.descendants(t_G, x_node) + for i_node in x_descendants: + for j in range(weight_of_x, max_size + 1): + for a, b in _split_n_from(j, weight_of_x): + if ( + a not in t_G.nodes[x_node][PKEY].keys() + or b not in t_G.nodes[i_node][PKEY].keys() + ): + # it's not possible to form this particular weight sum + continue + + part1 = t_G.nodes[x_node][PKEY][a] + part2 = t_G.nodes[i_node][PKEY][b] + part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j) + + if j not in bp_buffer.keys() or bp_buffer[j][1] < value: + # we annotate in the buffer the best partition for j + bp_buffer[j] = part, value + + # we also keep track of the overall best partition + if best_value <= value: + best_value = value + best_partition = part + + # as illustrated in Lukes, once we finished a child, we can + # discharge the partitions we found into the graph + # (the key phrase is make all x == x') + # so that they are used by the subsequent children + for w, (best_part_for_vl, vl) in bp_buffer.items(): + t_G.nodes[x_node][PKEY][w] = best_part_for_vl + bp_buffer.clear() + + # the absolute best partition for this node + # across all weights has to be stored at 0 + t_G.nodes[x_node][PKEY][0] = best_partition + t_G.remove_nodes_from(x_descendants) + + if x_node == root: + # the 0-labeled partition of root + # is the optimal one for the whole tree + return t_G.nodes[root][PKEY][0] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/modularity_max.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/modularity_max.py new file mode 100644 index 0000000..67a4961 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/modularity_max.py @@ -0,0 +1,474 @@ +"""Functions for detecting communities based on modularity.""" + +from collections import defaultdict + +import networkx as nx +from networkx.algorithms.community.quality import modularity +from networkx.utils import not_implemented_for +from networkx.utils.mapped_queue import MappedQueue + +__all__ = [ + "greedy_modularity_communities", + "naive_greedy_modularity_communities", + "_naive_greedy_modularity_communities", +] + + +def _greedy_modularity_communities_generator(G, weight=None, resolution=1): + r"""Yield community partitions of G and the modularity change at each step. 
+ + This function performs Clauset-Newman-Moore greedy modularity maximization [2]_ + At each step of the process it yields the change in modularity that will occur in + the next step followed by yielding the new community partition after that step. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until one community contains all nodes (the partition has one set). + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Yields + ------ + Alternating yield statements produce the following two objects: + + communities: dict_values + A dict_values of frozensets of nodes, one for each community. + This represents a partition of the nodes of the graph into communities. + The first yield is the partition with each node in its own community. + + dq: float + The change in modularity when merging the next two communities + that leads to the largest modularity. + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + directed = G.is_directed() + N = G.number_of_nodes() + + # Count edges (or the sum of edge-weights for weighted graphs) + m = G.size(weight) + q0 = 1 / m + + # Calculate degrees (notation from the papers) + # a : the fraction of (weighted) out-degree for each node + # b : the fraction of (weighted) in-degree for each node + if directed: + a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)} + b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)} + else: + a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)} + + # this preliminary step collects the edge weights for each node pair + # It handles multigraph and digraph and works fine for graph. 
+ dq_dict = defaultdict(lambda: defaultdict(float)) + for u, v, wt in G.edges(data=weight, default=1): + if u == v: + continue + dq_dict[u][v] += wt + dq_dict[v][u] += wt + + # now scale and subtract the expected edge-weights term + for u, nbrdict in dq_dict.items(): + for v, wt in nbrdict.items(): + dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v]) + + # Use -dq to get a max_heap instead of a min_heap + # dq_heap holds a heap for each node's neighbors + dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G} + # H -> all_dq_heap holds a heap with the best items for each node + H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0]) + + # Initialize single-node communities + communities = {n: frozenset([n]) for n in G} + yield communities.values() + + # Merge the two communities that lead to the largest modularity + while len(H) > 1: + # Find best merge + # Remove from heap of row maxes + # Ties will be broken by choosing the pair with lowest min community id + try: + negdq, u, v = H.pop() + except IndexError: + break + dq = -negdq + yield dq + # Remove best merge from row u heap + dq_heap[u].pop() + # Push new row max onto H + if len(dq_heap[u]) > 0: + H.push(dq_heap[u].heap[0]) + # If this element was also at the root of row v, we need to remove the + # duplicate entry from H + if dq_heap[v].heap[0] == (v, u): + H.remove((v, u)) + # Remove best merge from row v heap + dq_heap[v].remove((v, u)) + # Push new row max onto H + if len(dq_heap[v]) > 0: + H.push(dq_heap[v].heap[0]) + else: + # Duplicate wasn't in H, just remove from row v heap + dq_heap[v].remove((v, u)) + + # Perform merge + communities[v] = frozenset(communities[u] | communities[v]) + del communities[u] + + # Get neighbor communities connected to the merged communities + u_nbrs = set(dq_dict[u]) + v_nbrs = set(dq_dict[v]) + all_nbrs = (u_nbrs | v_nbrs) - {u, v} + both_nbrs = u_nbrs & v_nbrs + # Update dq for merge of u into v + for w in all_nbrs: + # Calculate new dq value + if w in both_nbrs: + dq_vw = dq_dict[v][w] + dq_dict[u][w] + elif w in v_nbrs: + dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u]) + else: # w in u_nbrs + dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v]) + # Update rows v and w + for row, col in [(v, w), (w, v)]: + dq_heap_row = dq_heap[row] + # Update dict for v,w only (u is removed below) + dq_dict[row][col] = dq_vw + # Save old max of per-row heap + if len(dq_heap_row) > 0: + d_oldmax = dq_heap_row.heap[0] + else: + d_oldmax = None + # Add/update heaps + d = (row, col) + d_negdq = -dq_vw + # Save old value for finding heap index + if w in v_nbrs: + # Update existing element in per-row heap + dq_heap_row.update(d, d, priority=d_negdq) + else: + # We're creating a new nonzero element, add to heap + dq_heap_row.push(d, priority=d_negdq) + # Update heap of row maxes if necessary + if d_oldmax is None: + # No entries previously in this row, push new max + H.push(d, priority=d_negdq) + else: + # We've updated an entry in this row, has the max changed? 
+ row_max = dq_heap_row.heap[0] + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + H.update(d_oldmax, row_max) + + # Remove row/col u from dq_dict matrix + for w in dq_dict[u]: + # Remove from dict + dq_old = dq_dict[w][u] + del dq_dict[w][u] + # Remove from heaps if we haven't already + if w != v: + # Remove both row and column + for row, col in [(w, u), (u, w)]: + dq_heap_row = dq_heap[row] + # Check if replaced dq is row max + d_old = (row, col) + if dq_heap_row.heap[0] == d_old: + # Update per-row heap and heap of row maxes + dq_heap_row.remove(d_old) + H.remove(d_old) + # Update row max + if len(dq_heap_row) > 0: + H.push(dq_heap_row.heap[0]) + else: + # Only update per-row heap + dq_heap_row.remove(d_old) + + del dq_dict[u] + # Mark row u as deleted, but keep placeholder + dq_heap[u] = MappedQueue() + # Merge u into v and update a + a[v] += a[u] + a[u] = 0 + if directed: + b[v] += b[u] + b[u] = 0 + + yield communities.values() + + +def greedy_modularity_communities( + G, weight=None, resolution=1, cutoff=1, best_n=None, n_communities=None +): + r"""Find communities in G using greedy modularity maximization. + + This function uses Clauset-Newman-Moore greedy modularity maximization [2]_ + to find the community partition with the largest modularity. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until no futher increase in modularity is possible (a maximum). + Two keyword arguments adjust the stopping condition. `cutoff` is a lower + limit on the number of communities so you can stop the process before + reaching a maximum (used to save computation time). `best_n` is an upper + limit on the number of communities so you can make the process continue + until at most n communities remain even if the maximum modularity occurs + for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float, optional (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + cutoff : int, optional (default=1) + A minimum number of communities below which the merging process stops. + The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + best_n : int or None, optional (default=None) + A maximum number of communities above which the merging process will + not stop. This forces community merging to continue after modularity + starts to decrease until `best_n` communities remain. + If ``None``, don't force it to continue beyond a maximum. + + n_communities : int or None, optional (default=None) + + .. deprecated:: 3.0 + The `n_communities` parameter is deprecated - use `cutoff` and/or + `best_n` to set bounds on the desired number of communities instead. + + A minimum number of communities below which the merging process stops. 
+ The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + Raises + ------ + ValueError : If the `cutoff` or `best_n` value is not in the range + ``[1, G.number_of_nodes()]``, or if `best_n` < `cutoff`. + Also raised if `cutoff` is used with the deprecated `n_communities` + parameter. + + Returns + ------- + communities: list + A list of frozensets of nodes, one for each community. + Sorted by length with largest communities first. + + Examples + -------- + >>> from networkx.algorithms.community import greedy_modularity_communities + >>> G = nx.karate_club_graph() + >>> c = greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + if (cutoff < 1) or (cutoff > G.number_of_nodes()): + raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.") + if best_n is not None: + if (best_n < 1) or (best_n > G.number_of_nodes()): + raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.") + if best_n < cutoff: + raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}") + if best_n == 1: + return [set(G)] + else: + best_n = G.number_of_nodes() + if n_communities is not None: + import warnings + + warnings.warn( + "kwarg ``n_communities`` in greedy_modularity_communities is deprecated" + "and will be removed in version 3.0. Use ``cutoff`` instead.", + DeprecationWarning, + ) + if cutoff == 1: + cutoff = n_communities + else: + raise ValueError(f"Can not set both n_communities and cutoff.") + + # retrieve generator object to construct output + community_gen = _greedy_modularity_communities_generator( + G, weight=weight, resolution=resolution + ) + + # construct the first best community + communities = next(community_gen) + + # continue merging communities until one of the breaking criteria is satisfied + while len(communities) > cutoff: + try: + dq = next(community_gen) + # StopIteration occurs when communities are the connected components + except StopIteration: + communities = sorted(communities, key=len, reverse=True) + # if best_n requires more merging, merge big sets for highest modularity + while len(communities) > best_n: + comm1, comm2, *rest = communities + communities = [comm1 ^ comm2] + communities.extend(rest) + return communities + + # keep going unless max_mod is reached or best_n says to merge more + if dq < 0 and len(communities) <= best_n: + break + communities = next(community_gen) + + return sorted(communities, key=len, reverse=True) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def naive_greedy_modularity_communities(G, resolution=1, weight=None): + r"""Find communities in G using greedy modularity maximization. + + This implementation is O(n^4), much slower than alternatives, but it is + provided as an easy-to-understand reference implementation. 
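Before the reference implementation, a short sketch of the cutoff and best_n stopping controls of greedy_modularity_communities defined above (illustrative only, not part of the patch); community counts are indicative.

import networkx as nx
from networkx.algorithms.community.modularity_max import greedy_modularity_communities

G = nx.karate_club_graph()
early = greedy_modularity_communities(G, cutoff=5)            # stop once 5 communities remain
exact = greedy_modularity_communities(G, cutoff=3, best_n=3)  # force exactly 3 communities
print(len(early), len(exact))                                 # e.g. 5 3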
+ + Greedy modularity maximization begins with each node in its own community + and joins the pair of communities that most increases modularity until no + such pair exists. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + list + A list of sets of nodes, one for each community. + Sorted by length with largest communities first. + + Examples + -------- + >>> from networkx.algorithms.community import \ + ... naive_greedy_modularity_communities + >>> G = nx.karate_club_graph() + >>> c = naive_greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + greedy_modularity_communities + modularity + """ + # First create one community for each node + communities = list(frozenset([u]) for u in G.nodes()) + # Track merges + merges = [] + # Greedily merge communities until no improvement is possible + old_modularity = None + new_modularity = modularity(G, communities, resolution=resolution, weight=weight) + while old_modularity is None or new_modularity > old_modularity: + # Save modularity for comparison + old_modularity = new_modularity + # Find best pair to merge + trial_communities = list(communities) + to_merge = None + for i, u in enumerate(communities): + for j, v in enumerate(communities): + # Skip i==j and empty communities + if j <= i or len(u) == 0 or len(v) == 0: + continue + # Merge communities u and v + trial_communities[j] = u | v + trial_communities[i] = frozenset([]) + trial_modularity = modularity( + G, trial_communities, resolution=resolution, weight=weight + ) + if trial_modularity >= new_modularity: + # Check if strictly better or tie + if trial_modularity > new_modularity: + # Found new best, save modularity and group indexes + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): + # Break ties by choosing pair with lowest min id + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + # Un-merge + trial_communities[i] = u + trial_communities[j] = v + if to_merge is not None: + # If the best merge improves modularity, use it + merges.append(to_merge) + i, j, dq = to_merge + u, v = communities[i], communities[j] + communities[j] = u | v + communities[i] = frozenset([]) + # Remove empty communities and sort + return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) + + +# old name +_naive_greedy_modularity_communities = naive_greedy_modularity_communities diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/quality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/quality.py new file mode 100644 index 0000000..7de8059 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/quality.py @@ -0,0 +1,443 @@ +"""Functions for measuring the quality of a partition (into 
+communities). + +""" + +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils import not_implemented_for +from networkx.utils.decorators import argmap + +__all__ = ["coverage", "modularity", "performance", "partition_quality"] + + +class NotAPartition(NetworkXError): + """Raised if a given collection is not a partition.""" + + def __init__(self, G, collection): + msg = f"{G} is not a valid partition of the graph {collection}" + super().__init__(msg) + + +def _require_partition(G, partition): + """Decorator to check that a valid partition is input to a function + + Raises :exc:`networkx.NetworkXError` if the partition is not valid. + + This decorator should be used on functions whose first two arguments + are a graph and a partition of the nodes of that graph (in that + order):: + + >>> @require_partition + ... def foo(G, partition): + ... print("partition is valid!") + ... + >>> G = nx.complete_graph(5) + >>> partition = [{0, 1}, {2, 3}, {4}] + >>> foo(G, partition) + partition is valid! + >>> partition = [{0}, {2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + >>> partition = [{0, 1}, {1, 2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + + """ + if is_partition(G, partition): + return G, partition + raise nx.NetworkXError("`partition` is not a valid partition of the nodes of G") + + +require_partition = argmap(_require_partition, (0, 1)) + + +def intra_community_edges(G, partition): + """Returns the number of intra-community edges for a partition of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The "intra-community edges" are those edges joining a pair of nodes + in the same block of the partition. + + """ + return sum(G.subgraph(block).size() for block in partition) + + +def inter_community_edges(G, partition): + """Returns the number of inter-community edges for a partition of `G`. + according to the given + partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The *inter-community edges* are those edges joining a pair of nodes + in different blocks of the partition. + + Implementation note: this function creates an intermediate graph + that may require the same amount of memory as that of `G`. + + """ + # Alternate implementation that does not require constructing a new + # graph object (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in G.edges() if aff[u] != aff[v]) + # + MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph + return nx.quotient_graph(G, partition, create_using=MG).size() + + +def inter_community_non_edges(G, partition): + """Returns the number of inter-community non-edges according to the + given partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. 
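A quick sketch using the intra_community_edges and inter_community_edges helpers defined above (illustrative only, not part of the patch). On a barbell graph split at the bridge, all triangle edges are intra-community and only the bridge is inter-community.

import networkx as nx
from networkx.algorithms.community.quality import (
    inter_community_edges,
    intra_community_edges,
)

G = nx.barbell_graph(3, 0)                  # two triangles joined by one edge
partition = [{0, 1, 2}, {3, 4, 5}]
print(intra_community_edges(G, partition))  # 6: the three edges inside each triangle
print(inter_community_edges(G, partition))  # 1: the single bridge edge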
+ + A *non-edge* is a pair of nodes (undirected if `G` is undirected) + that are not adjacent in `G`. The *inter-community non-edges* are + those non-edges on a pair of nodes in different blocks of the + partition. + + Implementation note: this function creates two intermediate graphs, + which may require up to twice the amount of memory as required to + store `G`. + + """ + # Alternate implementation that does not require constructing two + # new graph objects (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v]) + # + return inter_community_edges(nx.complement(G), partition) + + +@not_implemented_for("multigraph") +@require_partition +def performance(G, partition): + """Returns the performance of a partition. + + .. deprecated:: 2.6 + Use `partition_quality` instead. + + The *performance* of a partition is the number of + intra-community edges plus inter-community non-edges divided by the total + number of potential edges. + + Parameters + ---------- + G : NetworkX graph + A simple graph (directed or undirected). + + partition : sequence + Partition of the nodes of `G`, represented as a sequence of + sets of nodes. Each block of the partition represents a + community. + + Returns + ------- + float + The performance of the partition, as defined above. + + Raises + ------ + NetworkXError + If `partition` is not a valid partition of the nodes of `G`. + + References + ---------- + .. [1] Santo Fortunato. + "Community Detection in Graphs". + *Physical Reports*, Volume 486, Issue 3--5 pp. 75--174 + + + """ + # Compute the number of intra-community edges and inter-community + # edges. + intra_edges = intra_community_edges(G, partition) + inter_edges = inter_community_non_edges(G, partition) + # Compute the number of edges in the complete graph (directed or + # undirected, as it depends on `G`) on `n` nodes. + # + # (If `G` is an undirected graph, we divide by two since we have + # double-counted each potential edge. We use integer division since + # `total_pairs` is guaranteed to be even.) + n = len(G) + total_pairs = n * (n - 1) + if not G.is_directed(): + total_pairs //= 2 + return (intra_edges + inter_edges) / total_pairs + + +@require_partition +def coverage(G, partition): + """Returns the coverage of a partition. + + .. deprecated:: 2.6 + Use `partition_quality` instead. + + The *coverage* of a partition is the ratio of the number of + intra-community edges to the total number of edges in the graph. + + Parameters + ---------- + G : NetworkX graph + + partition : sequence + Partition of the nodes of `G`, represented as a sequence of + sets of nodes. Each block of the partition represents a + community. + + Returns + ------- + float + The coverage of the partition, as defined above. + + Raises + ------ + NetworkXError + If `partition` is not a valid partition of the nodes of `G`. + + Notes + ----- + If `G` is a multigraph, the multiplicity of edges is counted. + + References + ---------- + .. [1] Santo Fortunato. + "Community Detection in Graphs". + *Physical Reports*, Volume 486, Issue 3--5 pp. 75--174 + + + """ + intra_edges = intra_community_edges(G, partition) + total_edges = G.number_of_edges() + return intra_edges / total_edges + + +def modularity(G, communities, weight="weight", resolution=1): + r"""Returns the modularity of the given partition of the graph. + + Modularity is defined in [1]_ as + + .. 
math::
+        Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \gamma\frac{k_ik_j}{2m}\right)
+            \delta(c_i,c_j)
+
+    where $m$ is the number of edges, $A$ is the adjacency matrix of `G`,
+    $k_i$ is the degree of $i$, $\gamma$ is the resolution parameter,
+    and $\delta(c_i, c_j)$ is 1 if $i$ and $j$ are in the same community else 0.
+
+    According to [2]_ (and verified by some algebra) this can be reduced to
+
+    .. math::
+        Q = \sum_{c=1}^{n}
+            \left[ \frac{L_c}{m} - \gamma\left( \frac{k_c}{2m} \right) ^2 \right]
+
+    where the sum iterates over all communities $c$, $m$ is the number of edges,
+    $L_c$ is the number of intra-community links for community $c$,
+    $k_c$ is the sum of degrees of the nodes in community $c$,
+    and $\gamma$ is the resolution parameter.
+
+    The resolution parameter sets an arbitrary tradeoff between intra-group
+    edges and inter-group edges. More complex grouping patterns can be
+    discovered by analyzing the same network with multiple values of gamma
+    and then combining the results [3]_. That said, it is very common to
+    simply use gamma=1. More on the choice of gamma is in [4]_.
+
+    The second formula is the one actually used in the calculation of
+    modularity. For directed graphs the second formula replaces $k_c$ with
+    $k^{in}_c k^{out}_c$.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    communities : list or iterable of set of nodes
+        These node sets must represent a partition of G's nodes.
+
+    weight : string or None, optional (default="weight")
+        The edge attribute that holds the numerical value used
+        as a weight. If None or an edge does not have that attribute,
+        then that edge has weight 1.
+
+    resolution : float (default=1)
+        If resolution is less than 1, modularity favors larger communities.
+        Greater than 1 favors smaller communities.
+
+    Returns
+    -------
+    Q : float
+        The modularity of the partition.
+
+    Raises
+    ------
+    NotAPartition
+        If `communities` is not a partition of the nodes of `G`.
+
+    Examples
+    --------
+    >>> import networkx.algorithms.community as nx_comm
+    >>> G = nx.barbell_graph(3, 0)
+    >>> nx_comm.modularity(G, [{0, 1, 2}, {3, 4, 5}])
+    0.35714285714285715
+    >>> nx_comm.modularity(G, nx_comm.label_propagation_communities(G))
+    0.35714285714285715
+
+    References
+    ----------
+    .. [1] M. E. J. Newman "Networks: An Introduction", page 224.
+       Oxford University Press, 2011.
+    .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore.
+       "Finding community structure in very large networks."
+       Phys. Rev. E 70.6 (2004).
+    .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community Detection"
+       Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110
+    .. [4] M. E. J. Newman, "Equivalence between modularity optimization and
+       maximum likelihood methods for community detection"
+       Phys. Rev. E 94, 052315, 2016.
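As a quick sanity check of the reduced formula above, the sketch below recomputes Q by hand for the unweighted, undirected barbell graph from the Examples section (gamma=1) and compares it with the library result. Editor's illustration only:

    import networkx as nx
    import networkx.algorithms.community as nx_comm

    G = nx.barbell_graph(3, 0)
    communities = [{0, 1, 2}, {3, 4, 5}]
    m = G.number_of_edges()

    # Q = sum over communities c of  L_c / m - (k_c / (2 m)) ** 2
    Q = sum(
        sum(1 for _, v in G.edges(c) if v in c) / m        # L_c / m
        - (sum(d for _, d in G.degree(c)) / (2 * m)) ** 2  # (k_c / 2m)^2
        for c in communities
    )
    assert abs(Q - nx_comm.modularity(G, communities)) < 1e-12  # 0.35714...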
https://doi.org/10.1103/PhysRevE.94.052315 + + """ + if not isinstance(communities, list): + communities = list(communities) + if not is_partition(G, communities): + raise NotAPartition(G, communities) + + directed = G.is_directed() + if directed: + out_degree = dict(G.out_degree(weight=weight)) + in_degree = dict(G.in_degree(weight=weight)) + m = sum(out_degree.values()) + norm = 1 / m**2 + else: + out_degree = in_degree = dict(G.degree(weight=weight)) + deg_sum = sum(out_degree.values()) + m = deg_sum / 2 + norm = 1 / deg_sum**2 + + def community_contribution(community): + comm = set(community) + L_c = sum(wt for u, v, wt in G.edges(comm, data=weight, default=1) if v in comm) + + out_degree_sum = sum(out_degree[u] for u in comm) + in_degree_sum = sum(in_degree[u] for u in comm) if directed else out_degree_sum + + return L_c / m - resolution * out_degree_sum * in_degree_sum * norm + + return sum(map(community_contribution, communities)) + + +@require_partition +def partition_quality(G, partition): + """Returns the coverage and performance of a partition of G. + + The *coverage* of a partition is the ratio of the number of + intra-community edges to the total number of edges in the graph. + + The *performance* of a partition is the number of + intra-community edges plus inter-community non-edges divided by the total + number of potential edges. + + This algorithm has complexity $O(C^2 + L)$ where C is the number of communities and L is the number of links. + + Parameters + ---------- + G : NetworkX graph + + partition : sequence + Partition of the nodes of `G`, represented as a sequence of + sets of nodes (blocks). Each block of the partition represents a + community. + + Returns + ------- + (float, float) + The (coverage, performance) tuple of the partition, as defined above. + + Raises + ------ + NetworkXError + If `partition` is not a valid partition of the nodes of `G`. + + Notes + ----- + If `G` is a multigraph; + - for coverage, the multiplicity of edges is counted + - for performance, the result is -1 (total number of possible edges is not defined) + + References + ---------- + .. [1] Santo Fortunato. + "Community Detection in Graphs". + *Physical Reports*, Volume 486, Issue 3--5 pp. 
75--174 + + """ + + node_community = {} + for i, community in enumerate(partition): + for node in community: + node_community[node] = i + + # `performance` is not defined for multigraphs + if not G.is_multigraph(): + # Iterate over the communities, quadratic, to calculate `possible_inter_community_edges` + possible_inter_community_edges = sum( + len(p1) * len(p2) for p1, p2 in combinations(partition, 2) + ) + + if G.is_directed(): + possible_inter_community_edges *= 2 + else: + possible_inter_community_edges = 0 + + # Compute the number of edges in the complete graph -- `n` nodes, + # directed or undirected, depending on `G` + n = len(G) + total_pairs = n * (n - 1) + if not G.is_directed(): + total_pairs //= 2 + + intra_community_edges = 0 + inter_community_non_edges = possible_inter_community_edges + + # Iterate over the links to count `intra_community_edges` and `inter_community_non_edges` + for e in G.edges(): + if node_community[e[0]] == node_community[e[1]]: + intra_community_edges += 1 + else: + inter_community_non_edges -= 1 + + coverage = intra_community_edges / len(G.edges) + + if G.is_multigraph(): + performance = -1.0 + else: + performance = (intra_community_edges + inter_community_non_edges) / total_pairs + + return coverage, performance diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py new file mode 100644 index 0000000..f87e367 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py @@ -0,0 +1,128 @@ +import pytest + +from networkx import Graph, NetworkXError +from networkx.algorithms.community.asyn_fluid import asyn_fluidc + + +def test_exceptions(): + test = Graph() + test.add_node("a") + pytest.raises(NetworkXError, asyn_fluidc, test, "hi") + pytest.raises(NetworkXError, asyn_fluidc, test, -1) + pytest.raises(NetworkXError, asyn_fluidc, test, 3) + test.add_node("b") + pytest.raises(NetworkXError, asyn_fluidc, test, 1) + + +def test_single_node(): + test = Graph() + + test.add_node("a") + + # ground truth + ground_truth = {frozenset(["a"])} + + communities = asyn_fluidc(test, 1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_nodes(): + test = Graph() + + test.add_edge("a", "b") + + # ground truth + ground_truth = {frozenset(["a"]), frozenset(["b"])} + + communities = asyn_fluidc(test, 2) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_clique_communities(): + test = Graph() + + # c1 + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "c") + + # connection + test.add_edge("c", "d") + + # c2 + test.add_edge("d", "e") + test.add_edge("d", "f") + test.add_edge("f", "e") + + # ground truth + ground_truth = {frozenset(["a", "c", "b"]), frozenset(["e", "d", "f"])} + + communities = asyn_fluidc(test, 2, seed=7) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_five_clique_ring(): + test = Graph() + + # c1 + test.add_edge("1a", "1b") + test.add_edge("1a", "1c") + test.add_edge("1a", "1d") + test.add_edge("1b", "1c") + test.add_edge("1b", "1d") + test.add_edge("1c", "1d") + + # c2 + test.add_edge("2a", "2b") + 
test.add_edge("2a", "2c") + test.add_edge("2a", "2d") + test.add_edge("2b", "2c") + test.add_edge("2b", "2d") + test.add_edge("2c", "2d") + + # c3 + test.add_edge("3a", "3b") + test.add_edge("3a", "3c") + test.add_edge("3a", "3d") + test.add_edge("3b", "3c") + test.add_edge("3b", "3d") + test.add_edge("3c", "3d") + + # c4 + test.add_edge("4a", "4b") + test.add_edge("4a", "4c") + test.add_edge("4a", "4d") + test.add_edge("4b", "4c") + test.add_edge("4b", "4d") + test.add_edge("4c", "4d") + + # c5 + test.add_edge("5a", "5b") + test.add_edge("5a", "5c") + test.add_edge("5a", "5d") + test.add_edge("5b", "5c") + test.add_edge("5b", "5d") + test.add_edge("5c", "5d") + + # connections + test.add_edge("1a", "2c") + test.add_edge("2a", "3c") + test.add_edge("3a", "4c") + test.add_edge("4a", "5c") + test.add_edge("5a", "1c") + + # ground truth + ground_truth = { + frozenset(["1a", "1b", "1c", "1d"]), + frozenset(["2a", "2b", "2c", "2d"]), + frozenset(["3a", "3b", "3c", "3d"]), + frozenset(["4a", "4b", "4c", "4d"]), + frozenset(["5a", "5b", "5c", "5d"]), + } + + communities = asyn_fluidc(test, 5, seed=9) + result = {frozenset(c) for c in communities} + assert result == ground_truth diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_centrality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_centrality.py new file mode 100644 index 0000000..43b6d2b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_centrality.py @@ -0,0 +1,85 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.centrality` +module. + +""" +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.community import girvan_newman + + +def set_of_sets(iterable): + return set(map(frozenset, iterable)) + + +def validate_communities(result, expected): + assert set_of_sets(result) == set_of_sets(expected) + + +def validate_possible_communities(result, *expected): + assert any(set_of_sets(result) == set_of_sets(p) for p in expected) + + +class TestGirvanNewman: + """Unit tests for the + :func:`networkx.algorithms.community.centrality.girvan_newman` + function. + + """ + + def test_no_edges(self): + G = nx.empty_graph(3) + communities = list(girvan_newman(G)) + assert len(communities) == 1 + validate_communities(communities[0], [{0}, {1}, {2}]) + + def test_undirected(self): + # Start with the graph .-.-.-. + G = nx.path_graph(4) + communities = list(girvan_newman(G)) + assert len(communities) == 3 + # After one removal, we get the graph .-. .-. + validate_communities(communities[0], [{0, 1}, {2, 3}]) + # After the next, we get the graph .-. . ., but there are two + # symmetric possible versions. + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + # After the last removal, we always get the empty graph. 
+ validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_directed(self): + G = nx.DiGraph(nx.path_graph(4)) + communities = list(girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_selfloops(self): + G = nx.path_graph(4) + G.add_edge(0, 0) + G.add_edge(2, 2) + communities = list(girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_most_valuable_edge(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 3), (1, 2, 2), (2, 3, 1)]) + # Let the most valuable edge be the one with the highest weight. + + def heaviest(G): + return max(G.edges(data="weight"), key=itemgetter(2))[:2] + + communities = list(girvan_newman(G, heaviest)) + assert len(communities) == 3 + validate_communities(communities[0], [{0}, {1, 2, 3}]) + validate_communities(communities[1], [{0}, {1}, {2, 3}]) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kclique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kclique.py new file mode 100644 index 0000000..ffac175 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kclique.py @@ -0,0 +1,92 @@ +from itertools import combinations + +import pytest + +import networkx as nx +from networkx.algorithms.community import k_clique_communities + + +def test_overlapping_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(2, 7), 2)) # Add another five clique + c = list(k_clique_communities(G, 4)) + assert c == [frozenset(range(7))] + c = set(k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(2, 7))} + + +def test_isolated_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(0, 5), 2)) # Add a five clique + G.add_edges_from(combinations(range(5, 10), 2)) # Add another five clique + c = set(k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(5, 10))} + + +class TestZacharyKarateClub: + def setup(self): + self.G = nx.karate_club_graph() + + def _check_communities(self, k, expected): + communities = set(k_clique_communities(self.G, k)) + assert communities == expected + + def test_k2(self): + # clique percolation with k=2 is just connected components + expected = {frozenset(self.G)} + self._check_communities(2, expected) + + def test_k3(self): + comm1 = [ + 0, + 1, + 2, + 3, + 7, + 8, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + ] + comm2 = [0, 4, 5, 6, 10, 16] + comm3 = [24, 25, 31] + expected = {frozenset(comm1), frozenset(comm2), frozenset(comm3)} + self._check_communities(3, expected) + + def test_k4(self): + expected = { + frozenset([0, 1, 2, 3, 7, 13]), + frozenset([8, 32, 30, 33]), + frozenset([32, 33, 29, 23]), + } + self._check_communities(4, expected) + + def test_k5(self): + expected = {frozenset([0, 1, 2, 3, 7, 13])} + self._check_communities(5, expected) + + def test_k6(self): + expected = set() + self._check_communities(6, expected) + + +def 
test_bad_k(): + with pytest.raises(nx.NetworkXError): + list(k_clique_communities(nx.Graph(), 1)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py new file mode 100644 index 0000000..3dc5554 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py @@ -0,0 +1,91 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.kernighan_lin` +module. +""" +from itertools import permutations + +import pytest + +import networkx as nx +from networkx.algorithms.community import kernighan_lin_bisection + + +def assert_partition_equal(x, y): + assert set(map(frozenset, x)) == set(map(frozenset, y)) + + +def test_partition(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_partition_argument(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_partition_argument_non_integer_nodes(): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + partition = ({"A", "B"}, {"C", "D"}) + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_seed_argument(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G, seed=1) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_non_disjoint_partition(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1, 2}, {2, 3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_too_many_blocks(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1}, {2}, {3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_multigraph(): + G = nx.cycle_graph(4) + M = nx.MultiGraph(G.edges()) + M.add_edges_from(G.edges()) + M.remove_edge(1, 2) + for labels in permutations(range(4)): + mapping = dict(zip(M, labels)) + A, B = kernighan_lin_bisection(nx.relabel_nodes(M, mapping), seed=0) + assert_partition_equal( + [A, B], [{mapping[0], mapping[1]}, {mapping[2], mapping[3]}] + ) + + +def test_max_iter_argument(): + G = nx.Graph( + [ + ("A", "B", {"weight": 1}), + ("A", "C", {"weight": 2}), + ("A", "D", {"weight": 3}), + ("A", "E", {"weight": 2}), + ("A", "F", {"weight": 4}), + ("B", "C", {"weight": 1}), + ("B", "D", {"weight": 4}), + ("B", "E", {"weight": 2}), + ("B", "F", {"weight": 1}), + ("C", "D", {"weight": 3}), + ("C", "E", {"weight": 2}), + ("C", "F", {"weight": 1}), + ("D", "E", {"weight": 4}), + ("D", "F", {"weight": 3}), + ("E", "F", {"weight": 2}), + ] + ) + partition = ({"A", "B", "C"}, {"D", "E", "F"}) + C = kernighan_lin_bisection(G, partition, max_iter=1) + assert_partition_equal(C, ({"A", "F", "C"}, {"D", "E", "B"})) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_label_propagation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_label_propagation.py new file mode 100644 index 0000000..44e4489 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_label_propagation.py @@ -0,0 +1,163 @@ +from itertools import chain, combinations + +import pytest + +import networkx as nx +from networkx.algorithms.community import ( + asyn_lpa_communities, + label_propagation_communities, +) + + +def test_directed_not_supported(): + with 
pytest.raises(nx.NetworkXNotImplemented): + # not supported for directed graphs + test = nx.DiGraph() + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "d") + result = label_propagation_communities(test) + + +def test_iterator_vs_iterable(): + G = nx.empty_graph("a") + assert list(label_propagation_communities(G)) == [{"a"}] + for community in label_propagation_communities(G): + assert community == {"a"} + pytest.raises(TypeError, next, label_propagation_communities(G)) + + +def test_one_node(): + test = nx.Graph() + test.add_node("a") + + # The expected communities are: + ground_truth = {frozenset(["a"])} + + communities = label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_unconnected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "c") + test.add_edge("a", "d") + test.add_edge("d", "c") + # community 2 + test.add_edge("b", "e") + test.add_edge("e", "f") + test.add_edge("f", "b") + + # The expected communities are: + ground_truth = {frozenset(["a", "c", "d"]), frozenset(["b", "e", "f"])} + + communities = label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_connected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "b") + test.add_edge("c", "a") + test.add_edge("c", "b") + test.add_edge("d", "a") + test.add_edge("d", "b") + test.add_edge("d", "c") + test.add_edge("e", "a") + test.add_edge("e", "b") + test.add_edge("e", "c") + test.add_edge("e", "d") + # community 2 + test.add_edge("1", "2") + test.add_edge("3", "1") + test.add_edge("3", "2") + test.add_edge("4", "1") + test.add_edge("4", "2") + test.add_edge("4", "3") + test.add_edge("5", "1") + test.add_edge("5", "2") + test.add_edge("5", "3") + test.add_edge("5", "4") + # edge between community 1 and 2 + test.add_edge("a", "1") + # community 3 + test.add_edge("x", "y") + # community 4 with only a single node + test.add_node("z") + + # The expected communities are: + ground_truth1 = { + frozenset(["a", "b", "c", "d", "e"]), + frozenset(["1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth2 = { + frozenset(["a", "b", "c", "d", "e", "1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth = (ground_truth1, ground_truth2) + + communities = label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result in ground_truth + + +def test_termination(): + # ensure termination of asyn_lpa_communities in two cases + # that led to an endless loop in a previous version + test1 = nx.karate_club_graph() + test2 = nx.caveman_graph(2, 10) + test2.add_edges_from([(0, 20), (20, 10)]) + asyn_lpa_communities(test1) + asyn_lpa_communities(test2) + + +class TestAsynLpaCommunities: + def _check_communities(self, G, expected): + """Checks that the communities computed from the given graph ``G`` + using the :func:`~networkx.asyn_lpa_communities` function match + the set of nodes given in ``expected``. + + ``expected`` must be a :class:`set` of :class:`frozenset` + instances, each element of which is a node in the graph. 
+ + """ + communities = asyn_lpa_communities(G) + result = {frozenset(c) for c in communities} + assert result == expected + + def test_null_graph(self): + G = nx.null_graph() + ground_truth = set() + self._check_communities(G, ground_truth) + + def test_single_node(self): + G = nx.empty_graph(1) + ground_truth = {frozenset([0])} + self._check_communities(G, ground_truth) + + def test_simple_communities(self): + # This graph is the disjoint union of two triangles. + G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + self._check_communities(G, ground_truth) + + def test_seed_argument(self): + G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + communities = asyn_lpa_communities(G, seed=1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + def test_several_communities(self): + # This graph is the disjoint union of five triangles. + ground_truth = {frozenset(range(3 * i, 3 * (i + 1))) for i in range(5)} + edges = chain.from_iterable(combinations(c, 2) for c in ground_truth) + G = nx.Graph(edges) + self._check_communities(G, ground_truth) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_louvain.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_louvain.py new file mode 100644 index 0000000..dc285b8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_louvain.py @@ -0,0 +1,181 @@ +import networkx as nx +from networkx.algorithms.community import ( + is_partition, + louvain_communities, + modularity, + partition_quality, +) + + +def test_modularity_increase(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = [{u} for u in G.nodes()] + mod = modularity(G, partition) + partition = louvain_communities(G) + + assert modularity(G, partition) > mod + + +def test_valid_partition(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = G.to_directed() + partition = louvain_communities(G) + partition2 = louvain_communities(H) + + assert is_partition(G, partition) + assert is_partition(H, partition2) + + +def test_partition(): + G = nx.karate_club_graph() + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition = louvain_communities(G, seed=2, weight=None) + + assert part == partition + + +def test_directed_partition(): + """ + Test 2 cases that were looping infinitely + from issues #5175 and #5704 + """ + G = nx.DiGraph() + H = nx.DiGraph() + G.add_nodes_from(range(10)) + H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + H_edges = [ + (1, 2), + (1, 6), + (1, 9), + (2, 3), + (2, 4), + (2, 5), + (3, 4), + (4, 3), + (4, 5), + (5, 4), + (6, 7), + (6, 8), + (9, 10), + (9, 11), + (10, 11), + (11, 10), + ] + G.add_edges_from(G_edges) + H.add_edges_from(H_edges) + + G_expected_partition = [{0, 1, 2}, {3, 4}, {5}, {6}, {8, 7}, {9, 10}] + G_partition = louvain_communities(G, seed=123, weight=None) + + H_expected_partition = [{2, 3, 4, 5}, {8, 1, 6, 7}, {9, 10, 11}] + H_partition = louvain_communities(H, seed=123, weight=None) + + assert G_partition == G_expected_partition + assert H_partition == 
H_expected_partition + + +def test_none_weight_param(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" + ) + + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition1 = louvain_communities(G, weight=None, seed=2) + partition2 = louvain_communities(G, weight="foo", seed=2) + partition3 = louvain_communities(G, weight="weight", seed=2) + + assert part == partition1 + assert part != partition2 + assert part != partition3 + assert partition2 != partition3 + + +def test_quality(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = nx.gn_graph(200, seed=1234) + I = nx.MultiGraph(G) + J = nx.MultiDiGraph(H) + + partition = louvain_communities(G) + partition2 = louvain_communities(H) + partition3 = louvain_communities(I) + partition4 = louvain_communities(J) + + quality = partition_quality(G, partition)[0] + quality2 = partition_quality(H, partition2)[0] + quality3 = partition_quality(I, partition3)[0] + quality4 = partition_quality(J, partition4)[0] + + assert quality >= 0.65 + assert quality2 >= 0.65 + assert quality3 >= 0.65 + assert quality4 >= 0.65 + + +def test_multigraph(): + G = nx.karate_club_graph() + H = nx.MultiGraph(G) + G.add_edge(0, 1, weight=10) + H.add_edge(0, 1, weight=9) + G.add_edge(0, 9, foo=20) + H.add_edge(0, 9, foo=20) + + partition1 = louvain_communities(G, seed=1234) + partition2 = louvain_communities(H, seed=1234) + partition3 = louvain_communities(H, weight="foo", seed=1234) + + assert partition1 == partition2 != partition3 + + +def test_resolution(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + + partition1 = louvain_communities(G, resolution=0.5, seed=12) + partition2 = louvain_communities(G, seed=12) + partition3 = louvain_communities(G, resolution=2, seed=12) + + assert len(partition1) <= len(partition2) <= len(partition3) + + +def test_threshold(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition1 = louvain_communities(G, threshold=0.3, seed=2) + partition2 = louvain_communities(G, seed=2) + mod1 = modularity(G, partition1) + mod2 = modularity(G, partition2) + + assert mod1 < mod2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_lukes.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_lukes.py new file mode 100644 index 0000000..80e2de3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_lukes.py @@ -0,0 +1,154 @@ +from itertools import product + +import pytest + +import networkx as nx +from networkx.algorithms.community import lukes_partitioning + +EWL = "e_weight" +NWL = "n_weight" + + +# first test from the Lukes original paper +def paper_1_case(float_edge_wt=False, explicit_node_wt=True, directed=False): + + # problem-specific constants + limit = 3 + + # configuration + if float_edge_wt: + shift = 0.001 + else: + shift = 0 + + if directed: + example_1 = nx.DiGraph() + else: + example_1 = nx.Graph() + + # graph creation + example_1.add_edge(1, 2, **{EWL: 3 + shift}) + example_1.add_edge(1, 4, **{EWL: 2 + shift}) + example_1.add_edge(2, 3, **{EWL: 4 + shift}) + example_1.add_edge(2, 5, **{EWL: 6 + shift}) + + # node weights + if explicit_node_wt: + nx.set_node_attributes(example_1, 1, NWL) + wtu = NWL 
+ else: + wtu = None + + # partitioning + clusters_1 = { + frozenset(x) + for x in lukes_partitioning(example_1, limit, node_weight=wtu, edge_weight=EWL) + } + + return clusters_1 + + +# second test from the Lukes original paper +def paper_2_case(explicit_edge_wt=True, directed=False): + + # problem specific constants + byte_block_size = 32 + + # configuration + if directed: + example_2 = nx.DiGraph() + else: + example_2 = nx.Graph() + + if explicit_edge_wt: + edic = {EWL: 1} + wtu = EWL + else: + edic = {} + wtu = None + + # graph creation + example_2.add_edge("name", "home_address", **edic) + example_2.add_edge("name", "education", **edic) + example_2.add_edge("education", "bs", **edic) + example_2.add_edge("education", "ms", **edic) + example_2.add_edge("education", "phd", **edic) + example_2.add_edge("name", "telephone", **edic) + example_2.add_edge("telephone", "home", **edic) + example_2.add_edge("telephone", "office", **edic) + example_2.add_edge("office", "no1", **edic) + example_2.add_edge("office", "no2", **edic) + + example_2.nodes["name"][NWL] = 20 + example_2.nodes["education"][NWL] = 10 + example_2.nodes["bs"][NWL] = 1 + example_2.nodes["ms"][NWL] = 1 + example_2.nodes["phd"][NWL] = 1 + example_2.nodes["home_address"][NWL] = 8 + example_2.nodes["telephone"][NWL] = 8 + example_2.nodes["home"][NWL] = 8 + example_2.nodes["office"][NWL] = 4 + example_2.nodes["no1"][NWL] = 1 + example_2.nodes["no2"][NWL] = 1 + + # partitioning + clusters_2 = { + frozenset(x) + for x in lukes_partitioning( + example_2, byte_block_size, node_weight=NWL, edge_weight=wtu + ) + } + + return clusters_2 + + +def test_paper_1_case(): + ground_truth = {frozenset([1, 4]), frozenset([2, 3, 5])} + + tf = (True, False) + for flt, nwt, drc in product(tf, tf, tf): + part = paper_1_case(flt, nwt, drc) + assert part == ground_truth + + +def test_paper_2_case(): + ground_truth = { + frozenset(["education", "bs", "ms", "phd"]), + frozenset(["name", "home_address"]), + frozenset(["telephone", "home", "office", "no1", "no2"]), + } + + tf = (True, False) + for ewt, drc in product(tf, tf): + part = paper_2_case(ewt, drc) + assert part == ground_truth + + +def test_mandatory_tree(): + not_a_tree = nx.complete_graph(4) + + with pytest.raises(nx.NotATree): + lukes_partitioning(not_a_tree, 5) + + +def test_mandatory_integrality(): + + byte_block_size = 32 + + ex_1_broken = nx.DiGraph() + + ex_1_broken.add_edge(1, 2, **{EWL: 3.2}) + ex_1_broken.add_edge(1, 4, **{EWL: 2.4}) + ex_1_broken.add_edge(2, 3, **{EWL: 4.0}) + ex_1_broken.add_edge(2, 5, **{EWL: 6.3}) + + ex_1_broken.nodes[1][NWL] = 1.2 # ! 
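    # Editor's note (annotation, not part of the vendored test): the float node
    # weight flagged with "# !" above is the deliberate defect -- Lukes'
    # algorithm requires integer node weights, which is why the
    # pytest.raises(TypeError) check below is expected to trigger.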
+ ex_1_broken.nodes[2][NWL] = 1 + ex_1_broken.nodes[3][NWL] = 1 + ex_1_broken.nodes[4][NWL] = 1 + ex_1_broken.nodes[5][NWL] = 2 + + with pytest.raises(TypeError): + lukes_partitioning( + ex_1_broken, byte_block_size, node_weight=NWL, edge_weight=EWL + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_modularity_max.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_modularity_max.py new file mode 100644 index 0000000..acdb19d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_modularity_max.py @@ -0,0 +1,333 @@ +import pytest + +import networkx as nx +from networkx.algorithms.community import ( + greedy_modularity_communities, + naive_greedy_modularity_communities, +) + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities(func): + G = nx.karate_club_graph() + john_a = frozenset( + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + ) + mr_hi = frozenset([0, 4, 5, 6, 10, 11, 16, 19]) + overlap = frozenset([1, 2, 3, 7, 9, 12, 13, 17, 21]) + expected = {john_a, overlap, mr_hi} + assert set(func(G, weight=None)) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_categorical_labels(func): + # Using other than 0-starting contiguous integers as node-labels. + G = nx.Graph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = {frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})} + assert set(func(G)) == expected + + +def test_greedy_modularity_communities_components(): + # Test for gh-5530 + G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)]) + # usual case with 3 components + assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}] + # best_n can make the algorithm continue even when modularity goes down + assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}] + assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}] + assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}] + + +def test_greedy_modularity_communities_relabeled(): + # Test for gh-4966 + G = nx.balanced_tree(2, 2) + mapping = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h"} + G = nx.relabel_nodes(G, mapping) + expected = [frozenset({"e", "d", "a", "b"}), frozenset({"c", "f", "g"})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_directed(): + G = nx.DiGraph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = [frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})] + assert greedy_modularity_communities(G) == expected + + # with loops + G = nx.DiGraph() + G.add_edges_from( + [(1, 1), (1, 2), (1, 3), (2, 3), (1, 4), (4, 4), (5, 5), (4, 5), (4, 6), (5, 6)] + ) + expected = [frozenset({1, 2, 3}), frozenset({4, 5, 6})] + assert greedy_modularity_communities(G) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_weighted(func): + G = nx.balanced_tree(2, 3) + for (a, b) in G.edges: + if ((a 
== 1) or (a == 2)) and (b != 0): + G[a][b]["weight"] = 10.0 + else: + G[a][b]["weight"] = 1.0 + + expected = [{0, 1, 3, 4, 7, 8, 9, 10}, {2, 5, 6, 11, 12, 13, 14}] + + assert func(G, weight="weight") == expected + assert func(G, weight="weight", resolution=0.9) == expected + assert func(G, weight="weight", resolution=0.3) == expected + assert func(G, weight="weight", resolution=1.1) != expected + + +def test_modularity_communities_floating_point(): + # check for floating point error when used as key in the mapped_queue dict. + # Test for gh-4992 and gh-5000 + G = nx.Graph() + G.add_weighted_edges_from( + [(0, 1, 12), (1, 4, 71), (2, 3, 15), (2, 4, 10), (3, 6, 13)] + ) + expected = [{0, 1, 4}, {2, 3, 6}] + assert greedy_modularity_communities(G, weight="weight") == expected + assert ( + greedy_modularity_communities(G, weight="weight", resolution=0.99) == expected + ) + + +def test_modularity_communities_directed_weighted(): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 3, 3), + (2, 3, 6), + (2, 6, 1), + (1, 4, 1), + (4, 5, 3), + (4, 6, 7), + (5, 6, 2), + (5, 7, 5), + (5, 8, 4), + (6, 8, 3), + ] + ) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # A large weight of the edge (2, 6) causes 6 to change group, even if it shares + # only one connection with the new group and 3 with the old one. + G[2][6]["weight"] = 20 + expected = [frozenset({1, 2, 3, 6}), frozenset({4, 5, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greedy_modularity_communities_multigraph(): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (4, 5), + (5, 6), + (5, 7), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G) == expected + + # Converting (4, 5) into a multi-edge causes node 4 to change group. + G.add_edge(4, 5) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_multigraph_weighted(): + G = nx.MultiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (1, 3, 6), + (1, 3, 6), + (2, 3, 4), + (1, 4, 1), + (1, 4, 1), + (2, 4, 3), + (2, 4, 3), + (4, 5, 1), + (5, 6, 3), + (5, 6, 7), + (5, 6, 4), + (5, 7, 9), + (5, 7, 9), + (6, 7, 8), + (7, 8, 2), + (7, 8, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Adding multi-edge (4, 5, 16) causes node 4 to change group. + G.add_edge(4, 5, weight=16) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Increasing the weight of edge (1, 4) causes node 4 to return to the former group. 
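    # Editor's note (annotation, not part of the vendored test): in a
    # MultiGraph, G[1][4] maps parallel-edge *keys* to data dicts; two
    # (1, 4) edges were added above, so key 1 below addresses the second
    # parallel edge and raises its weight from 1 to 3.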
+ G[1][4][1]["weight"] = 3 + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph(): + G = nx.MultiDiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (3, 1), + (2, 3), + (2, 3), + (3, 2), + (1, 4), + (2, 4), + (4, 2), + (4, 5), + (5, 6), + (5, 6), + (6, 5), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + (8, 4), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph_weighted(): + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (3, 1, 6), + (1, 3, 6), + (3, 2, 4), + (1, 4, 2), + (1, 4, 5), + (2, 4, 3), + (3, 2, 8), + (4, 2, 3), + (4, 3, 5), + (4, 5, 2), + (5, 6, 3), + (5, 6, 7), + (6, 5, 4), + (5, 7, 9), + (5, 7, 9), + (7, 6, 8), + (7, 8, 2), + (8, 7, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_resolution_parameter_impact(): + G = nx.barbell_graph(5, 3) + + gamma = 1 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 2.5 + expected = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 0.3 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + +def test_cutoff_parameter(): + G = nx.circular_ladder_graph(4) + + # No aggregation: + expected = [{k} for k in range(8)] + assert greedy_modularity_communities(G, cutoff=8) == expected + + # Aggregation to half order (number of nodes) + expected = [{k, k + 1} for k in range(0, 8, 2)] + assert greedy_modularity_communities(G, cutoff=4) == expected + + # Default aggregation case (here, 2 communities emerge) + expected = [frozenset(range(0, 4)), frozenset(range(4, 8))] + assert greedy_modularity_communities(G, cutoff=1) == expected + + +def test_best_n(): + G = nx.barbell_graph(5, 3) + + # Same result as without enforcing cutoff: + best_n = 3 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # One additional merging step: + best_n = 2 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # Two additional merging steps: + best_n = 1 + expected = [frozenset(range(0, 13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_quality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_quality.py new file mode 100644 index 0000000..1d6aeb8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_quality.py @@ -0,0 +1,147 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.quality` +module. 
+ +""" +import pytest + +import networkx as nx +from networkx import barbell_graph +from networkx.algorithms.community import ( + coverage, + modularity, + partition_quality, + performance, +) +from networkx.algorithms.community.quality import inter_community_edges + + +class TestPerformance: + """Unit tests for the :func:`performance` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 8 / 15 == pytest.approx(performance(G, partition), abs=1e-7) + assert 8 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 14 / 15 == pytest.approx(performance(G, partition), abs=1e-7) + assert 14 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + +class TestCoverage: + """Unit tests for the :func:`coverage` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 3 / 7 == pytest.approx(coverage(G, partition), abs=1e-7) + assert 3 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 6 / 7 == pytest.approx(coverage(G, partition), abs=1e-7) + assert 6 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + +def test_modularity(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert (-16 / (14**2)) == pytest.approx(modularity(G, C), abs=1e-7) + C = [{0, 1, 2}, {3, 4, 5}] + assert (35 * 2) / (14**2) == pytest.approx(modularity(G, C), abs=1e-7) + + n = 1000 + G = nx.erdos_renyi_graph(n, 0.09, seed=42, directed=True) + C = [set(range(n // 2)), set(range(n // 2, n))] + assert 0.00017154251389292754 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.margulis_gabber_galil_graph(10) + mid_value = G.number_of_nodes() // 2 + nodes = list(G.nodes) + C = [set(nodes[:mid_value]), set(nodes[mid_value:])] + assert 0.13 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.DiGraph() + G.add_edges_from([(2, 1), (2, 3), (3, 4)]) + C = [{1, 2}, {3, 4}] + assert 2 / 9 == pytest.approx(modularity(G, C), abs=1e-7) + + +def test_modularity_resolution(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert modularity(G, C) == pytest.approx(3 / 7 - 100 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + + C = [{0, 1, 2}, {3, 4, 5}] + assert modularity(G, C) == pytest.approx(6 / 7 - 98 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + + G = nx.barbell_graph(5, 3) + C = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=1: modularity = 0.518229 + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 2 + result = 
modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + + C = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 2.5 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=2.5: modularity = -0.06553819 + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + + C = [frozenset(range(8)), frozenset(range(8, 13))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 0.3 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=0.3: modularity = 0.805990 + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + + +def test_inter_community_edges_with_digraphs(): + G = nx.complete_graph(2, create_using=nx.DiGraph()) + partition = [{0}, {1}] + assert inter_community_edges(G, partition) == 2 + + G = nx.complete_graph(10, create_using=nx.DiGraph()) + partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}] + assert inter_community_edges(G, partition) == 70 + + G = nx.cycle_graph(4, create_using=nx.DiGraph()) + partition = [{0, 1}, {2, 3}] + assert inter_community_edges(G, partition) == 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_utils.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_utils.py new file mode 100644 index 0000000..a031782 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/community/tests/test_utils.py @@ -0,0 +1,29 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.utils` module. 
+ +""" + +import networkx as nx +from networkx.algorithms.community import is_partition + + +def test_is_partition(): + G = nx.empty_graph(3) + assert is_partition(G, [{0, 1}, {2}]) + assert is_partition(G, ({0, 1}, {2})) + assert is_partition(G, ([0, 1], [2])) + assert is_partition(G, [[0, 1], [2]]) + + +def test_not_covering(): + G = nx.empty_graph(3) + assert not is_partition(G, [{0}, {1}]) + + +def test_not_disjoint(): + G = nx.empty_graph(3) + assert not is_partition(G, [{0, 1}, {1, 2}]) + + +def test_not_node(): + G = nx.empty_graph(3) + assert not is_partition(G, [{0, 1}, {3}]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/__init__.py new file mode 100644 index 0000000..f9ae2ca --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/__init__.py @@ -0,0 +1,6 @@ +from .connected import * +from .strongly_connected import * +from .weakly_connected import * +from .attracting import * +from .biconnected import * +from .semiconnected import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/attracting.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/attracting.py new file mode 100644 index 0000000..8d2cd8b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/attracting.py @@ -0,0 +1,111 @@ +"""Attracting components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_attracting_components", + "attracting_components", + "is_attracting_component", +] + + +@not_implemented_for("undirected") +def attracting_components(G): + """Generates the attracting components in `G`. + + An attracting component in a directed graph `G` is a strongly connected + component with the property that a random walker on the graph will never + leave the component, once it enters the component. + + The nodes in attracting components can also be thought of as recurrent + nodes. If a random walker enters the attractor containing the node, then + the node will be visited infinitely often. + + To obtain induced subgraphs on each component use: + ``(G.subgraph(c).copy() for c in attracting_components(G))`` + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + attractors : generator of sets + A generator of sets of nodes, one for each attracting component of G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + number_attracting_components + is_attracting_component + + """ + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + for n in cG: + if cG.out_degree(n) == 0: + yield scc[n] + + +@not_implemented_for("undirected") +def number_attracting_components(G): + """Returns the number of attracting components in `G`. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + n : int + The number of attracting components in G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + attracting_components + is_attracting_component + + """ + return sum(1 for ac in attracting_components(G)) + + +@not_implemented_for("undirected") +def is_attracting_component(G): + """Returns True if `G` consists of a single attracting component. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. 
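Aside (editor's illustration, not part of the vendored module): a minimal demonstration of the attracting-components API defined above. In the toy chain 1 -> 2 -> 3, only the terminal strongly connected component {3} is attracting:

    import networkx as nx

    G = nx.DiGraph([(1, 2), (2, 3), (3, 3)])  # a random walker ends up stuck at 3
    assert list(nx.attracting_components(G)) == [{3}]
    assert nx.number_attracting_components(G) == 1
    assert not nx.is_attracting_component(G)   # {3} is not all of G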
+ + Returns + ------- + attracting : bool + True if `G` has a single attracting component. Otherwise, False. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + attracting_components + number_attracting_components + + """ + ac = list(attracting_components(G)) + if len(ac) == 1: + return len(ac[0]) == len(G) + return False diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/biconnected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/biconnected.py new file mode 100644 index 0000000..1eebe8a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/biconnected.py @@ -0,0 +1,382 @@ +"""Biconnected components and articulation points.""" +from itertools import chain + +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "biconnected_components", + "biconnected_component_edges", + "is_biconnected", + "articulation_points", +] + + +@not_implemented_for("directed") +def is_biconnected(G): + """Returns True if the graph is biconnected, False otherwise. + + A graph is biconnected if, and only if, it cannot be disconnected by + removing only one node (and all edges incident on that node). If + removing a node increases the number of disconnected components + in the graph, that node is called an articulation point, or cut + vertex. A biconnected graph has no articulation points. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + biconnected : bool + True if the graph is biconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_biconnected(G)) + False + >>> G.add_edge(0, 3) + >>> print(nx.is_biconnected(G)) + True + + See Also + -------- + biconnected_components + articulation_points + biconnected_component_edges + is_strongly_connected + is_weakly_connected + is_connected + is_semiconnected + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + bccs = biconnected_components(G) + try: + bcc = next(bccs) + except StopIteration: + # No bicomponents (empty graph?) + return False + try: + next(bccs) + except StopIteration: + # Only one bicomponent + return len(bcc) == len(G) + else: + # Multiple bicomponents + return False + + +@not_implemented_for("directed") +def biconnected_component_edges(G): + """Returns a generator of lists of edges, one list for each biconnected + component of the input graph. + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. 
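Aside (editor's illustration): the relationship stated here, that the nodes shared by more than one biconnected component are exactly the articulation points, can be checked directly:

    import networkx as nx

    G = nx.barbell_graph(4, 2)  # two K4s joined by a 2-node path
    cut_vertices = set(nx.articulation_points(G))      # {3, 4, 5, 6}
    bicomponents = list(nx.biconnected_components(G))  # 5 components
    for v in cut_vertices:
        # every articulation point lies in at least two bicomponents
        assert sum(v in comp for comp in bicomponents) > 1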
Those nodes are articulation points, or cut vertices. + However, each edge belongs to one, and only one, biconnected component. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + edges : generator of lists + Generator of lists of edges, one list for each bicomponent. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 5 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 1 + + See Also + -------- + is_biconnected, + biconnected_components, + articulation_points, + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + yield from _biconnected_dfs(G, components=True) + + +@not_implemented_for("directed") +def biconnected_components(G): + """Returns a generator of sets of nodes, one set for each biconnected + component of the graph + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. Those nodes are articulation points, or cut vertices. The + removal of articulation points will increase the number of connected + components of the graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + nodes : generator + Generator of sets of nodes, one set for each biconnected component. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.lollipop_graph(5, 1) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 2 + >>> G.add_edge(0, 5) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 1 + + You can generate a sorted list of biconnected components, largest + first, using sort. + + >>> G.remove_edge(0, 5) + >>> [len(c) for c in sorted(nx.biconnected_components(G), key=len, reverse=True)] + [5, 2] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. 
+ + >>> Gc = max(nx.biconnected_components(G), key=len) + + To create the components as subgraphs use: + ``(G.subgraph(c).copy() for c in biconnected_components(G))`` + + See Also + -------- + is_biconnected + articulation_points + biconnected_component_edges + k_components : this function is a special case where k=2 + bridge_components : similar to this function, but is defined using + 2-edge-connectivity instead of 2-node-connectivity. + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + for comp in _biconnected_dfs(G, components=True): + yield set(chain.from_iterable(comp)) + + +@not_implemented_for("directed") +def articulation_points(G): + """Yield the articulation points, or cut vertices, of a graph. + + An articulation point or cut vertex is any node whose removal (along with + all its incident edges) increases the number of connected components of + a graph. An undirected connected graph without articulation points is + biconnected. Articulation points belong to more than one biconnected + component of a graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Yields + ------ + node + An articulation point in the graph. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> len(list(nx.articulation_points(G))) + 4 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> len(list(nx.articulation_points(G))) + 0 + + See Also + -------- + is_biconnected + biconnected_components + biconnected_component_edges + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. 
doi:10.1145/362248.362272 + + """ + seen = set() + for articulation in _biconnected_dfs(G, components=False): + if articulation not in seen: + seen.add(articulation) + yield articulation + + +@not_implemented_for("directed") +def _biconnected_dfs(G, components=True): + # depth-first search algorithm to generate articulation points + # and biconnected components + visited = set() + for start in G: + if start in visited: + continue + discovery = {start: 0} # time of first discovery of node during search + low = {start: 0} + root_children = 0 + visited.add(start) + edge_stack = [] + stack = [(start, start, iter(G[start]))] + while stack: + grandparent, parent, children = stack[-1] + try: + child = next(children) + if grandparent == child: + continue + if child in visited: + if discovery[child] <= discovery[parent]: # back edge + low[parent] = min(low[parent], discovery[child]) + if components: + edge_stack.append((parent, child)) + else: + low[child] = discovery[child] = len(discovery) + visited.add(child) + stack.append((parent, child, iter(G[child]))) + if components: + edge_stack.append((parent, child)) + except StopIteration: + stack.pop() + if len(stack) > 1: + if low[parent] >= discovery[grandparent]: + if components: + ind = edge_stack.index((grandparent, parent)) + yield edge_stack[ind:] + edge_stack = edge_stack[:ind] + else: + yield grandparent + low[grandparent] = min(low[parent], low[grandparent]) + elif stack: # length 1 so grandparent is root + root_children += 1 + if components: + ind = edge_stack.index((grandparent, parent)) + yield edge_stack[ind:] + if not components: + # root node is articulation point if it has more than 1 child + if root_children > 1: + yield start diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/connected.py new file mode 100644 index 0000000..e6b122e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/connected.py @@ -0,0 +1,200 @@ +"""Connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = [ + "number_connected_components", + "connected_components", + "is_connected", + "node_connected_component", +] + + +@not_implemented_for("directed") +def connected_components(G): + """Generate connected components. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each component of G. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + Generate a sorted list of connected components, largest first. + + >>> G = nx.path_graph(4) + >>> nx.add_path(G, [10, 11, 12]) + >>> [len(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)] + [4, 3] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. + + >>> largest_cc = max(nx.connected_components(G), key=len) + + To create the induced subgraph of each component use: + + >>> S = [G.subgraph(c).copy() for c in nx.connected_components(G)] + + See Also + -------- + strongly_connected_components + weakly_connected_components + + Notes + ----- + For undirected graphs only. + + """ + seen = set() + for v in G: + if v not in seen: + c = _plain_bfs(G, v) + seen.update(c) + yield c + + +def number_connected_components(G): + """Returns the number of connected components. 
+ + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + n : integer + Number of connected components + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.number_connected_components(G) + 3 + + See Also + -------- + connected_components + number_weakly_connected_components + number_strongly_connected_components + + Notes + ----- + For undirected graphs only. + + """ + return sum(1 for cc in connected_components(G)) + + +@not_implemented_for("directed") +def is_connected(G): + """Returns True if the graph is connected, False otherwise. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + connected : bool + True if the graph is connected, false otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_connected(G)) + True + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_semiconnected + is_biconnected + connected_components + + Notes + ----- + For undirected graphs only. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined ", "for the null graph." + ) + return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G) + + +@not_implemented_for("directed") +def node_connected_component(G, n): + """Returns the set of nodes in the component of graph containing node n. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + n : node label + A node in G + + Returns + ------- + comp : set + A set of nodes in the component of G containing node n. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.node_connected_component(G, 0) # nodes of component that contains node 0 + {0, 1, 2} + + See Also + -------- + connected_components + + Notes + ----- + For undirected graphs only. + + """ + return _plain_bfs(G, n) + + +def _plain_bfs(G, source): + """A fast BFS node generator""" + G_adj = G.adj + seen = set() + nextlevel = {source} + while nextlevel: + thislevel = nextlevel + nextlevel = set() + for v in thislevel: + if v not in seen: + seen.add(v) + nextlevel.update(G_adj[v]) + return seen diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/semiconnected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/semiconnected.py new file mode 100644 index 0000000..9603f9d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/semiconnected.py @@ -0,0 +1,64 @@ +"""Semiconnectedness.""" +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["is_semiconnected"] + + +@not_implemented_for("undirected") +def is_semiconnected(G, topo_order=None): + """Returns True if the graph is semiconnected, False otherwise. + + A graph is semiconnected if, and only if, for any pair of nodes, either one + is reachable from the other, or they are mutually reachable. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Returns + ------- + semiconnected : bool + True if the graph is semiconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + NetworkXPointlessConcept + If the graph is empty. 
+ + Examples + -------- + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> print(nx.is_semiconnected(G)) + True + >>> G = nx.DiGraph([(1, 2), (3, 2)]) + >>> print(nx.is_semiconnected(G)) + False + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_connected + is_biconnected + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined for the null graph." + ) + + if not nx.is_weakly_connected(G): + return False + + G = nx.condensation(G) + if topo_order is None: + topo_order = nx.topological_sort(G) + + return all(G.has_edge(u, v) for u, v in pairwise(topo_order)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/strongly_connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/strongly_connected.py new file mode 100644 index 0000000..edb33d3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/strongly_connected.py @@ -0,0 +1,414 @@ +"""Strongly connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_strongly_connected_components", + "strongly_connected_components", + "is_strongly_connected", + "strongly_connected_components_recursive", + "kosaraju_strongly_connected_components", + "condensation", +] + + +@not_implemented_for("undirected") +def strongly_connected_components(G): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.strongly_connected_components(G), key=len) + + See Also + -------- + connected_components + weakly_connected_components + kosaraju_strongly_connected_components + + Notes + ----- + Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. + Nonrecursive version of algorithm. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. 
+ + """ + preorder = {} + lowlink = {} + scc_found = set() + scc_queue = [] + i = 0 # Preorder counter + neighbors = {v: iter(G[v]) for v in G} + for source in G: + if source not in scc_found: + queue = [source] + while queue: + v = queue[-1] + if v not in preorder: + i = i + 1 + preorder[v] = i + done = True + for w in neighbors[v]: + if w not in preorder: + queue.append(w) + done = False + break + if done: + lowlink[v] = preorder[v] + for w in G[v]: + if w not in scc_found: + if preorder[w] > preorder[v]: + lowlink[v] = min([lowlink[v], lowlink[w]]) + else: + lowlink[v] = min([lowlink[v], preorder[w]]) + queue.pop() + if lowlink[v] == preorder[v]: + scc = {v} + while scc_queue and preorder[scc_queue[-1]] > preorder[v]: + k = scc_queue.pop() + scc.add(k) + scc_found.update(scc) + yield scc + else: + scc_queue.append(v) + + +@not_implemented_for("undirected") +def kosaraju_strongly_connected_components(G, source=None): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted( + ... nx.kosaraju_strongly_connected_components(G), key=len, reverse=True + ... ) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.kosaraju_strongly_connected_components(G), key=len) + + See Also + -------- + strongly_connected_components + + Notes + ----- + Uses Kosaraju's algorithm. + + """ + post = list(nx.dfs_postorder_nodes(G.reverse(copy=False), source=source)) + + seen = set() + while post: + r = post.pop() + if r in seen: + continue + c = nx.dfs_preorder_nodes(G, r) + new = {v for v in c if v not in seen} + seen.update(new) + yield new + + +@not_implemented_for("undirected") +def strongly_connected_components_recursive(G): + """Generate nodes in strongly connected components of graph. + + Recursive version of algorithm. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted( + ... nx.strongly_connected_components_recursive(G), key=len, reverse=True + ... ) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.strongly_connected_components_recursive(G), key=len) + + To create the induced subgraph of the components use: + >>> S = [G.subgraph(c).copy() for c in nx.weakly_connected_components(G)] + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. 
[2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + + """ + + def visit(v, cnt): + root[v] = cnt + visited[v] = cnt + cnt += 1 + stack.append(v) + for w in G[v]: + if w not in visited: + yield from visit(w, cnt) + if w not in component: + root[v] = min(root[v], root[w]) + if root[v] == visited[v]: + component[v] = root[v] + tmpc = {v} # hold nodes in this component + while stack[-1] != v: + w = stack.pop() + component[w] = root[v] + tmpc.add(w) + stack.remove(v) + yield tmpc + + visited = {} + component = {} + root = {} + cnt = 0 + stack = [] + for source in G: + if source not in visited: + yield from visit(source, cnt) + + +@not_implemented_for("undirected") +def number_strongly_connected_components(G): + """Returns number of strongly connected components in graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of strongly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3), (4, 5), (3, 4), (5, 6), (6, 3), (6, 7)]) + >>> nx.number_strongly_connected_components(G) + 3 + + See Also + -------- + strongly_connected_components + number_connected_components + number_weakly_connected_components + + Notes + ----- + For directed graphs only. + """ + return sum(1 for scc in strongly_connected_components(G)) + + +@not_implemented_for("undirected") +def is_strongly_connected(G): + """Test directed graph for strong connectivity. + + A directed graph is strongly connected if and only if every vertex in + the graph is reachable from every other vertex. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is strongly connected, False otherwise. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (2, 4), (4, 2)]) + >>> nx.is_strongly_connected(G) + True + >>> G.remove_edge(2, 3) + >>> nx.is_strongly_connected(G) + False + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + See Also + -------- + is_weakly_connected + is_semiconnected + is_connected + is_biconnected + strongly_connected_components + + Notes + ----- + For directed graphs only. + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(strongly_connected_components(G))) == len(G) + + +@not_implemented_for("undirected") +def condensation(G, scc=None): + """Returns the condensation of G. + + The condensation of G is the graph with each of the strongly connected + components contracted into a single node. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph. + + scc: list or generator (optional, default=None) + Strongly connected components. If provided, the elements in + `scc` must partition the nodes in `G`. If not provided, it will be + calculated as scc=nx.strongly_connected_components(G). + + Returns + ------- + C : NetworkX DiGraph + The condensation graph C of G. The node labels are integers + corresponding to the index of the component in the list of + strongly connected components of G. C has a graph attribute named + 'mapping' with a dictionary mapping the original nodes to the + nodes in C to which they belong. 
Each node in C also has a node + attribute 'members' with the set of original nodes in G that + form the SCC that the node in C represents. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Notes + ----- + After contracting all strongly connected components to a single node, + the resulting graph is a directed acyclic graph. + + """ + if scc is None: + scc = nx.strongly_connected_components(G) + mapping = {} + members = {} + C = nx.DiGraph() + # Add mapping dict as graph attribute + C.graph["mapping"] = mapping + if len(G) == 0: + return C + for i, component in enumerate(scc): + members[i] = component + mapping.update((n, i) for n in component) + number_of_components = i + 1 + C.add_nodes_from(range(number_of_components)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. + nx.set_node_attributes(C, members, "members") + return C diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_attracting.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_attracting.py new file mode 100644 index 0000000..336c40d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_attracting.py @@ -0,0 +1,70 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestAttractingComponents: + @classmethod + def setup_class(cls): + cls.G1 = nx.DiGraph() + cls.G1.add_edges_from( + [ + (5, 11), + (11, 2), + (11, 9), + (11, 10), + (7, 11), + (7, 8), + (8, 9), + (3, 8), + (3, 10), + ] + ) + cls.G2 = nx.DiGraph() + cls.G2.add_edges_from([(0, 1), (0, 2), (1, 1), (1, 2), (2, 1)]) + + cls.G3 = nx.DiGraph() + cls.G3.add_edges_from([(0, 1), (1, 2), (2, 1), (0, 3), (3, 4), (4, 3)]) + + cls.G4 = nx.DiGraph() + + def test_attracting_components(self): + ac = list(nx.attracting_components(self.G1)) + assert {2} in ac + assert {9} in ac + assert {10} in ac + + ac = list(nx.attracting_components(self.G2)) + ac = [tuple(sorted(x)) for x in ac] + assert ac == [(1, 2)] + + ac = list(nx.attracting_components(self.G3)) + ac = [tuple(sorted(x)) for x in ac] + assert (1, 2) in ac + assert (3, 4) in ac + assert len(ac) == 2 + + ac = list(nx.attracting_components(self.G4)) + assert ac == [] + + def test_number_attacting_components(self): + assert nx.number_attracting_components(self.G1) == 3 + assert nx.number_attracting_components(self.G2) == 1 + assert nx.number_attracting_components(self.G3) == 2 + assert nx.number_attracting_components(self.G4) == 0 + + def test_is_attracting_component(self): + assert not nx.is_attracting_component(self.G1) + assert not nx.is_attracting_component(self.G2) + assert not nx.is_attracting_component(self.G3) + g2 = self.G3.subgraph([1, 2]) + assert nx.is_attracting_component(g2) + assert not nx.is_attracting_component(self.G4) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.attracting_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_attracting_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_attracting_component, G) diff --git 
a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_biconnected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_biconnected.py new file mode 100644 index 0000000..19d2d88 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_biconnected.py @@ -0,0 +1,248 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +def assert_components_edges_equal(x, y): + sx = {frozenset(frozenset(e) for e in c) for c in x} + sy = {frozenset(frozenset(e) for e in c) for c in y} + assert sx == sy + + +def assert_components_equal(x, y): + sx = {frozenset(c) for c in x} + sy = {frozenset(c) for c in y} + assert sx == sy + + +def test_barbell(): + G = nx.barbell_graph(8, 4) + nx.add_path(G, [7, 20, 21, 22]) + nx.add_cycle(G, [22, 23, 24, 25]) + pts = set(nx.articulation_points(G)) + assert pts == {7, 8, 9, 10, 11, 12, 20, 21, 22} + + answer = [ + {12, 13, 14, 15, 16, 17, 18, 19}, + {0, 1, 2, 3, 4, 5, 6, 7}, + {22, 23, 24, 25}, + {11, 12}, + {10, 11}, + {9, 10}, + {8, 9}, + {7, 8}, + {21, 22}, + {20, 21}, + {7, 20}, + ] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + G.add_edge(2, 17) + pts = set(nx.articulation_points(G)) + assert pts == {7, 20, 21, 22} + + +def test_articulation_points_repetitions(): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3)]) + assert list(nx.articulation_points(G)) == [1] + + +def test_articulation_points_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + pts = set(nx.articulation_points(G)) + assert pts == {1} + + +def test_is_biconnected(): + G = nx.cycle_graph(3) + assert nx.is_biconnected(G) + nx.add_cycle(G, [1, 3, 4]) + assert not nx.is_biconnected(G) + + +def test_empty_is_biconnected(): + G = nx.empty_graph(5) + assert not nx.is_biconnected(G) + G.add_edge(0, 1) + assert not nx.is_biconnected(G) + + +def test_biconnected_components_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + answer = [{0, 1, 2}, {1, 3, 4}] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + +def test_biconnected_components1(): + # graph example from + # https://web.archive.org/web/20121229123447/http://www.ibluemojo.com/school/articul_algorithm.html + edges = [ + (0, 1), + (0, 5), + (0, 6), + (0, 14), + (1, 5), + (1, 6), + (1, 14), + (2, 4), + (2, 10), + (3, 4), + (3, 15), + (4, 6), + (4, 7), + (4, 10), + (5, 14), + (6, 14), + (7, 9), + (8, 9), + (8, 12), + (8, 13), + (10, 15), + (11, 12), + (11, 13), + (12, 13), + ] + G = nx.Graph(edges) + pts = set(nx.articulation_points(G)) + assert pts == {4, 6, 7, 8, 9} + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [(3, 4), (15, 3), (10, 15), (10, 4), (2, 10), (4, 2)], + [(13, 12), (13, 8), (11, 13), (12, 11), (8, 12)], + [(9, 8)], + [(7, 9)], + [(4, 7)], + [(6, 4)], + [(14, 0), (5, 1), (5, 0), (14, 5), (14, 1), (6, 14), (6, 0), (1, 6), (0, 1)], + ] + assert_components_edges_equal(comps, answer) + + +def test_biconnected_components2(): + G = nx.Graph() + nx.add_cycle(G, "ABC") + nx.add_cycle(G, "CDE") + nx.add_cycle(G, "FIJHG") + nx.add_cycle(G, "GIJ") + G.add_edge("E", "G") + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [ + tuple("GF"), + tuple("FI"), + tuple("IG"), + tuple("IJ"), + tuple("JG"), + tuple("JH"), + tuple("HG"), + ], + [tuple("EG")], + [tuple("CD"), tuple("DE"), tuple("CE")], + [tuple("AB"), tuple("BC"), tuple("AC")], + ] + assert_components_edges_equal(comps, answer) + + +def 
test_biconnected_davis(): + D = nx.davis_southern_women_graph() + bcc = list(nx.biconnected_components(D))[0] + assert set(D) == bcc # All nodes in a giant bicomponent + # So no articulation points + assert len(list(nx.articulation_points(D))) == 0 + + +def test_biconnected_karate(): + K = nx.karate_club_graph() + answer = [ + { + 0, + 1, + 2, + 3, + 7, + 8, + 9, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + }, + {0, 4, 5, 6, 10, 16}, + {0, 11}, + ] + bcc = list(nx.biconnected_components(K)) + assert_components_equal(bcc, answer) + assert set(nx.articulation_points(K)) == {0} + + +def test_biconnected_eppstein(): + # tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py + G1 = nx.Graph( + { + 0: [1, 2, 5], + 1: [0, 5], + 2: [0, 3, 4], + 3: [2, 4, 5, 6], + 4: [2, 3, 5, 6], + 5: [0, 1, 3, 4], + 6: [3, 4], + } + ) + G2 = nx.Graph( + { + 0: [2, 5], + 1: [3, 8], + 2: [0, 3, 5], + 3: [1, 2, 6, 8], + 4: [7], + 5: [0, 2], + 6: [3, 8], + 7: [4], + 8: [1, 3, 6], + } + ) + assert nx.is_biconnected(G1) + assert not nx.is_biconnected(G2) + answer_G2 = [{1, 3, 6, 8}, {0, 2, 5}, {2, 3}, {4, 7}] + bcc = list(nx.biconnected_components(G2)) + assert_components_equal(bcc, answer_G2) + + +def test_null_graph(): + G = nx.Graph() + assert not nx.is_biconnected(G) + assert list(nx.biconnected_components(G)) == [] + assert list(nx.biconnected_component_edges(G)) == [] + assert list(nx.articulation_points(G)) == [] + + +def test_connected_raise(): + DG = nx.DiGraph() + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_components(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_component_edges(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.articulation_points(DG)) + pytest.raises(NetworkXNotImplemented, nx.is_biconnected, DG) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_connected.py new file mode 100644 index 0000000..bf3954e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_connected.py @@ -0,0 +1,113 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented +from networkx import convert_node_labels_to_integers as cnlti + + +class TestConnected: + @classmethod + def setup_class(cls): + G1 = cnlti(nx.grid_2d_graph(2, 2), first_label=0, ordering="sorted") + G2 = cnlti(nx.lollipop_graph(3, 3), first_label=4, ordering="sorted") + G3 = cnlti(nx.house_graph(), first_label=10, ordering="sorted") + cls.G = nx.union(G1, G2) + cls.G = nx.union(cls.G, G3) + cls.DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1) + + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 
3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + C = [] + cls.gc.append((G, C)) + + def test_connected_components(self): + cc = nx.connected_components + G = self.G + C = { + frozenset([0, 1, 2, 3]), + frozenset([4, 5, 6, 7, 8, 9]), + frozenset([10, 11, 12, 13, 14]), + } + assert {frozenset(g) for g in cc(G)} == C + + def test_number_connected_components(self): + ncc = nx.number_connected_components + assert ncc(self.G) == 3 + + def test_number_connected_components2(self): + ncc = nx.number_connected_components + assert ncc(self.grid) == 1 + + def test_connected_components2(self): + cc = nx.connected_components + G = self.grid + C = {frozenset([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])} + assert {frozenset(g) for g in cc(G)} == C + + def test_node_connected_components(self): + ncc = nx.node_connected_component + G = self.grid + C = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + assert ncc(G, 1) == C + + def test_is_connected(self): + assert nx.is_connected(self.grid) + G = nx.Graph() + G.add_nodes_from([1, 2]) + assert not nx.is_connected(G) + + def test_connected_raise(self): + with pytest.raises(NetworkXNotImplemented): + next(nx.connected_components(self.DG)) + pytest.raises(NetworkXNotImplemented, nx.number_connected_components, self.DG) + pytest.raises(NetworkXNotImplemented, nx.node_connected_component, self.DG, 1) + pytest.raises(NetworkXNotImplemented, nx.is_connected, self.DG) + pytest.raises(nx.NetworkXPointlessConcept, nx.is_connected, nx.Graph()) + + def test_connected_mutability(self): + G = self.grid + seen = set() + for component in nx.connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_semiconnected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_semiconnected.py new file mode 100644 index 0000000..6376bbf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_semiconnected.py @@ -0,0 +1,55 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +class TestIsSemiconnected: + def test_undirected(self): + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.MultiGraph()) + + def test_empty(self): + pytest.raises(nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.DiGraph()) + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.MultiDiGraph() + ) + + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.is_semiconnected(G) + + def test_path(self): + G = nx.path_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G.add_edge(100, 99) + assert not nx.is_semiconnected(G) + + def test_cycle(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G = nx.path_graph(100, create_using=nx.DiGraph()) + G.add_edge(0, 99) + assert nx.is_semiconnected(G) + + def test_tree(self): + G = nx.DiGraph() + G.add_edges_from( + chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)] for i in range(100)) + ) + assert not nx.is_semiconnected(G) + + def test_dumbbell(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100)) + assert not nx.is_semiconnected(G) # G is disconnected. 
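+        # Adding a single edge from the second cycle back into the first makes G
+        # weakly connected and turns its condensation into a two-node path, so G
+        # becomes semiconnected.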
+ G.add_edge(100, 99) + assert nx.is_semiconnected(G) + + def test_alternating_path(self): + G = nx.DiGraph( + chain.from_iterable([(i, i - 1), (i, i + 1)] for i in range(0, 100, 2)) + ) + assert not nx.is_semiconnected(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py new file mode 100644 index 0000000..ab4e893 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py @@ -0,0 +1,205 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestStronglyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = {frozenset([3, 4, 5, 7]), frozenset([1, 2, 8]), frozenset([6])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = {frozenset([2, 3, 4]), frozenset([1])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = {frozenset([1, 2, 3])} + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = { + frozenset([0]), + frozenset([1]), + frozenset([2]), + frozenset([3]), + frozenset([4]), + frozenset([5]), + frozenset([6]), + } + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = {frozenset([0, 1, 2]), frozenset([3, 4])} + cls.gc.append((G, C)) + + def test_tarjan(self): + scc = nx.strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_tarjan_recursive(self): + scc = nx.strongly_connected_components_recursive + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_kosaraju(self): + scc = nx.kosaraju_strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_number_strongly_connected_components(self): + ncc = nx.number_strongly_connected_components + for G, C in self.gc: + assert ncc(G) == len(C) + + def test_is_strongly_connected(self): + for G, C in self.gc: + if len(C) == 1: + assert nx.is_strongly_connected(G) + else: + assert not nx.is_strongly_connected(G) + + def test_contract_scc1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + # DAG + assert nx.is_directed_acyclic_graph(cG) + # nodes + assert sorted(cG.nodes()) == [0, 1, 2, 3] + # edges + mapping = {} + for i, component in enumerate(scc): + for n in component: + mapping[n] = i + edge = (mapping[2], mapping[3]) + assert cG.has_edge(*edge) + edge = (mapping[2], mapping[5]) + assert cG.has_edge(*edge) + edge = (mapping[3], mapping[5]) + assert cG.has_edge(*edge) + + def test_contract_scc_isolate(self): + # Bug found and fixed in [1687]. 
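+        # Nodes 1 and 2 form a single strongly connected component, so the
+        # condensation collapses to one isolated node with no edges.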
+ G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert list(cG.nodes()) == [0] + assert list(cG.edges()) == [] + + def test_contract_scc_edge(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + G.add_edge(2, 3) + G.add_edge(3, 4) + G.add_edge(4, 3) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert sorted(cG.nodes()) == [0, 1] + if 1 in scc[0]: + edge = (0, 1) + else: + edge = (1, 0) + assert list(cG.edges()) == [edge] + + def test_condensation_mapping_and_members(self): + G, C = self.gc[1] + C = sorted(C, key=len, reverse=True) + cG = nx.condensation(G) + mapping = cG.graph["mapping"] + assert all(n in G for n in mapping) + assert all(0 == cN for n, cN in mapping.items() if n in C[0]) + assert all(1 == cN for n, cN in mapping.items() if n in C[1]) + for n, d in cG.nodes(data=True): + assert set(C[n]) == cG.nodes[n]["members"] + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.strongly_connected_components(G)) == [] + assert list(nx.kosaraju_strongly_connected_components(G)) == [] + assert list(nx.strongly_connected_components_recursive(G)) == [] + assert len(nx.condensation(G)) == 0 + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph() + ) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.strongly_connected_components(G)) + with pytest.raises(NetworkXNotImplemented): + next(nx.kosaraju_strongly_connected_components(G)) + with pytest.raises(NetworkXNotImplemented): + next(nx.strongly_connected_components_recursive(G)) + pytest.raises(NetworkXNotImplemented, nx.is_strongly_connected, G) + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph() + ) + pytest.raises(NetworkXNotImplemented, nx.condensation, G) + + strong_cc_methods = ( + nx.strongly_connected_components, + nx.kosaraju_strongly_connected_components, + nx.strongly_connected_components_recursive, + ) + + @pytest.mark.parametrize("get_components", strong_cc_methods) + def test_connected_mutability(self, get_components): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in get_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py new file mode 100644 index 0000000..e313263 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py @@ -0,0 +1,90 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestWeaklyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + 
C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + def test_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = {frozenset(g) for g in nx.weakly_connected_components(G)} + c = {frozenset(g) for g in nx.connected_components(U)} + assert w == c + + def test_number_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = nx.number_weakly_connected_components(G) + c = nx.number_connected_components(U) + assert w == c + + def test_is_weakly_connected(self): + for G, C in self.gc: + U = G.to_undirected() + assert nx.is_weakly_connected(G) == nx.is_connected(U) + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.weakly_connected_components(G)) == [] + assert nx.number_weakly_connected_components(G) == 0 + with pytest.raises(nx.NetworkXPointlessConcept): + next(nx.is_weakly_connected(G)) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.weakly_connected_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_weakly_connected_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_weakly_connected, G) + + def test_connected_mutability(self): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in nx.weakly_connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/components/weakly_connected.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/weakly_connected.py new file mode 100644 index 0000000..822719a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/components/weakly_connected.py @@ -0,0 +1,184 @@ +"""Weakly connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_weakly_connected_components", + "weakly_connected_components", + "is_weakly_connected", +] + + +@not_implemented_for("undirected") +def weakly_connected_components(G): + """Generate weakly connected components of G. + + Parameters + ---------- + G : NetworkX graph + A directed graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each weakly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of weakly connected components, largest first. + + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> nx.add_path(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.weakly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort: + + >>> largest_cc = max(nx.weakly_connected_components(G), key=len) + + See Also + -------- + connected_components + strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + seen = set() + for v in G: + if v not in seen: + c = set(_plain_bfs(G, v)) + seen.update(c) + yield c + + +@not_implemented_for("undirected") +def number_weakly_connected_components(G): + """Returns the number of weakly connected components in G. + + Parameters + ---------- + G : NetworkX graph + A directed graph. 
+ + Returns + ------- + n : integer + Number of weakly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1), (3, 4)]) + >>> nx.number_weakly_connected_components(G) + 2 + + See Also + -------- + weakly_connected_components + number_connected_components + number_strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + return sum(1 for wcc in weakly_connected_components(G)) + + +@not_implemented_for("undirected") +def is_weakly_connected(G): + """Test directed graph for weak connectivity. + + A directed graph is weakly connected if and only if the graph + is connected when the direction of the edge between nodes is ignored. + + Note that if a graph is strongly connected (i.e. the graph is connected + even when we account for directionality), it is by definition weakly + connected as well. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is weakly connected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1)]) + >>> G.add_node(3) + >>> nx.is_weakly_connected(G) # node 3 is not connected to the graph + False + >>> G.add_edge(2, 3) + >>> nx.is_weakly_connected(G) + True + + See Also + -------- + is_strongly_connected + is_semiconnected + is_connected + is_biconnected + weakly_connected_components + + Notes + ----- + For directed graphs only. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(weakly_connected_components(G))) == len(G) + + +def _plain_bfs(G, source): + """A fast BFS node generator + + The direction of the edge between nodes is ignored. + + For directed graphs only. + + """ + Gsucc = G.succ + Gpred = G.pred + + seen = set() + nextlevel = {source} + while nextlevel: + thislevel = nextlevel + nextlevel = set() + for v in thislevel: + if v not in seen: + seen.add(v) + nextlevel.update(Gsucc[v]) + nextlevel.update(Gpred[v]) + yield v diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/__init__.py new file mode 100644 index 0000000..15bc5ab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/__init__.py @@ -0,0 +1,11 @@ +"""Connectivity and cut algorithms +""" +from .connectivity import * +from .cuts import * +from .edge_augmentation import * +from .edge_kcomponents import * +from .disjoint_paths import * +from .kcomponents import * +from .kcutsets import * +from .stoerwagner import * +from .utils import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/connectivity.py new file mode 100644 index 0000000..b782031 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/connectivity.py @@ -0,0 +1,818 @@ +""" +Flow based connectivity algorithms +""" + +import itertools +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# connectivity algorithms. 
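+# The public functions in this module also accept a ``flow_func`` argument, so
+# callers can swap in an alternative routine (e.g. shortest_augmenting_path)
+# without changing this module-level default.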
+from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_residual_network, + dinitz, + edmonds_karp, + shortest_augmenting_path, +) + +default_flow_func = edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = [ + "average_node_connectivity", + "local_node_connectivity", + "node_connectivity", + "local_edge_connectivity", + "edge_connectivity", + "all_pairs_node_connectivity", +] + + +def local_node_connectivity( + G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None +): + r"""Computes local node connectivity for nodes s and t. + + Local node connectivity for two non adjacent nodes s and t is the + minimum number of nodes that must be removed (along with their incident + edges) to disconnect them. + + This is a flow based implementation of node connectivity. We compute the + maximum flow on an auxiliary digraph build from the original input + graph (see below for details). + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node + + t : node + Target node + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + cutoff : integer, float + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This is only for the + algorithms that support the cutoff parameter: :meth:`edmonds_karp` + and :meth:`shortest_augmenting_path`. Other algorithms will ignore + this parameter. Default value: None. + + Returns + ------- + K : integer + local node connectivity for nodes s and t + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import local_node_connectivity + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> local_node_connectivity(G, 0, 6) + 5 + + If you need to compute local connectivity on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local node connectivity among + all pairs of nodes of the platonic icosahedral graph reusing + the data structures. 
+ + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + ... + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = local_node_connectivity(G, u, v, auxiliary=H, residual=R) + ... result[u][v] = k + ... + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing node + connectivity. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> local_node_connectivity(G, 0, 6, flow_func=shortest_augmenting_path) + 5 + + Notes + ----- + This is a flow based implementation of node connectivity. We compute the + maximum flow using, by default, the :meth:`edmonds_karp` algorithm (see: + :meth:`maximum_flow`) on an auxiliary digraph build from the original + input graph: + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph H with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc in H. Then for each edge (`u`, `v`) in G we add two arcs + (`u_B`, `v_A`) and (`v_B`, `u_A`) in H. Finally we set the attribute + capacity = 1 for each arc in H [1]_ . + + For a directed graph G having `n` nodes and `m` arcs we derive a + directed graph H with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc (`v_A`, `v_B`) in H. Then for each arc (`u`, `v`) in G we add one arc + (`u_B`, `v_A`) in H. Finally we set the attribute capacity = 1 for + each arc in H. + + This is equal to the local node connectivity because the value of + a maximum s-t-flow is equal to the capacity of a minimum s-t-cut. + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`node_connectivity` + :meth:`minimum_node_cut` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. 
+ http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + + kwargs = dict(flow_func=flow_func, residual=residual) + if flow_func is shortest_augmenting_path: + kwargs["cutoff"] = cutoff + kwargs["two_phase"] = True + elif flow_func is edmonds_karp: + kwargs["cutoff"] = cutoff + elif flow_func is dinitz: + kwargs["cutoff"] = cutoff + elif flow_func is boykov_kolmogorov: + kwargs["cutoff"] = cutoff + + return nx.maximum_flow_value(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + + +def node_connectivity(G, s=None, t=None, flow_func=None): + r"""Returns node connectivity for a graph or digraph G. + + Node connectivity is equal to the minimum number of nodes that + must be removed to disconnect G or render it trivial. If source + and target nodes are provided, this function returns the local node + connectivity: the minimum number of nodes that must be removed to break + all paths from source to target in G. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + K : integer + Node connectivity of G, or local node connectivity if source + and target are provided. + + Examples + -------- + >>> # Platonic icosahedral graph is 5-node-connected + >>> G = nx.icosahedral_graph() + >>> nx.node_connectivity(G) + 5 + + You can use alternative flow algorithms for the underlying maximum + flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. Alternative + flow functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> nx.node_connectivity(G, flow_func=shortest_augmenting_path) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local node connectivity. + + >>> nx.node_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_node_connectivity` for details. + + Notes + ----- + This is a flow based implementation of node connectivity. The + algorithm works by solving $O((n-\delta-1+\delta(\delta-1)/2))$ + maximum flow problems on an auxiliary digraph. Where $\delta$ + is the minimum degree of G. For details about the auxiliary + digraph and the computation of local node connectivity see + :meth:`local_node_connectivity`. 
This implementation is based + on algorithm 11 in [1]_. + + See also + -------- + :meth:`local_node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local node connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_node_connectivity(G, s, t, flow_func=flow_func) + + # Global node connectivity + if G.is_directed(): + if not nx.is_weakly_connected(G): + return 0 + iter_func = itertools.permutations + # It is necessary to consider both predecessors + # and successors for directed graphs + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + if not nx.is_connected(G): + return 0 + iter_func = itertools.combinations + neighbors = G.neighbors + + # Reuse the auxiliary digraph and the residual network + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) + + # Pick a node with minimum degree + # Node connectivity is bounded by degree. + v, K = min(G.degree(), key=itemgetter(1)) + # compute local node connectivity with all its non-neighbors nodes + for w in set(G) - set(neighbors(v)) - {v}: + kwargs["cutoff"] = K + K = min(K, local_node_connectivity(G, v, w, **kwargs)) + # Also for non adjacent pairs of neighbors of v + for x, y in iter_func(neighbors(v), 2): + if y in G[x]: + continue + kwargs["cutoff"] = K + K = min(K, local_node_connectivity(G, x, y, **kwargs)) + + return K + + +def average_node_connectivity(G, flow_func=None): + r"""Returns the average connectivity of a graph G. + + The average connectivity `\bar{\kappa}` of a graph G is the average + of local node connectivity over all pairs of nodes of G [1]_ . + + .. math:: + + \bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}} + + Parameters + ---------- + + G : NetworkX graph + Undirected graph + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See :meth:`local_node_connectivity` + for details. The choice of the default function may change from + version to version and should not be relied on. Default value: None. + + Returns + ------- + K : float + Average node connectivity + + See also + -------- + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Beineke, L., O. Oellermann, and R. Pippert (2002). The average + connectivity of a graph. Discrete mathematics 252(1-3), 31-45. 
+ http://www.sciencedirect.com/science/article/pii/S0012365X01001807 + + """ + if G.is_directed(): + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + # Reuse the auxiliary digraph and the residual network + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) + + num, den = 0, 0 + for u, v in iter_func(G, 2): + num += local_node_connectivity(G, u, v, **kwargs) + den += 1 + + if den == 0: # Null Graph + return 0 + return num / den + + +def all_pairs_node_connectivity(G, nbunch=None, flow_func=None): + """Compute node connectivity between all pairs of nodes of G. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + nbunch: container + Container of nodes. If provided node connectivity will be computed + only over pairs of nodes in nbunch. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + all_pairs : dict + A dictionary with node connectivity between all pairs of nodes + in G, or in nbunch if provided. + + See also + -------- + :meth:`local_node_connectivity` + :meth:`edge_connectivity` + :meth:`local_edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + """ + if nbunch is None: + nbunch = G + else: + nbunch = set(nbunch) + + directed = G.is_directed() + if directed: + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + all_pairs = {n: {} for n in nbunch} + + # Reuse auxiliary digraph and residual network + H = build_auxiliary_node_connectivity(G) + mapping = H.graph["mapping"] + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) + + for u, v in iter_func(nbunch, 2): + K = local_node_connectivity(G, u, v, **kwargs) + all_pairs[u][v] = K + if not directed: + all_pairs[v][u] = K + + return all_pairs + + +def local_edge_connectivity( + G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None +): + r"""Returns local edge connectivity for nodes s and t in G. + + Local edge connectivity for two nodes s and t is the minimum number + of edges that must be removed to disconnect them. + + This is a flow based implementation of edge connectivity. We compute the + maximum flow on an auxiliary digraph build from the original + network (see below for details). This is equal to the local edge + connectivity because the value of a maximum s-t-flow is equal to the + capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_ . + + Parameters + ---------- + G : NetworkX graph + Undirected or directed graph + + s : node + Source node + + t : node + Target node + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). 
If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph for computing flow based edge connectivity. If + provided it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + cutoff : integer, float + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This is only for the + algorithms that support the cutoff parameter: :meth:`edmonds_karp` + and :meth:`shortest_augmenting_path`. Other algorithms will ignore + this parameter. Default value: None. + + Returns + ------- + K : integer + local edge connectivity for nodes s and t. + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import local_edge_connectivity + + We use in this example the platonic icosahedral graph, which has edge + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> local_edge_connectivity(G, 0, 6) + 5 + + If you need to compute local connectivity on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for edge connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local edge connectivity among + all pairs of nodes of the platonic icosahedral graph reusing + the data structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity + >>> H = build_auxiliary_edge_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = local_edge_connectivity(G, u, v, auxiliary=H, residual=R) + ... result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing edge + connectivity. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> local_edge_connectivity(G, 0, 6, flow_func=shortest_augmenting_path) + 5 + + Notes + ----- + This is a flow based implementation of edge connectivity. 
We compute the + maximum flow using, by default, the :meth:`edmonds_karp` algorithm on an + auxiliary digraph build from the original input graph: + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. This is an implementation of algorithm 1 + in [1]_. + + The maximum flow in the auxiliary network is equal to the local edge + connectivity because the value of a maximum s-t-flow is equal to the + capacity of a minimum s-t-cut (Ford and Fulkerson theorem). + + See also + -------- + :meth:`edge_connectivity` + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + kwargs = dict(flow_func=flow_func, residual=residual) + if flow_func is shortest_augmenting_path: + kwargs["cutoff"] = cutoff + kwargs["two_phase"] = True + elif flow_func is edmonds_karp: + kwargs["cutoff"] = cutoff + elif flow_func is dinitz: + kwargs["cutoff"] = cutoff + elif flow_func is boykov_kolmogorov: + kwargs["cutoff"] = cutoff + + return nx.maximum_flow_value(H, s, t, **kwargs) + + +def edge_connectivity(G, s=None, t=None, flow_func=None, cutoff=None): + r"""Returns the edge connectivity of the graph or digraph G. + + The edge connectivity is equal to the minimum number of edges that + must be removed to disconnect G or render it trivial. If source + and target nodes are provided, this function returns the local edge + connectivity: the minimum number of edges that must be removed to + break all paths from source to target in G. + + Parameters + ---------- + G : NetworkX graph + Undirected or directed graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + cutoff : integer, float + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This is only for the + algorithms that support the cutoff parameter: e.g., :meth:`edmonds_karp` + and :meth:`shortest_augmenting_path`. Other algorithms will ignore + this parameter. Default value: None. + + Returns + ------- + K : integer + Edge connectivity for G, or local edge connectivity if source + and target were provided + + Examples + -------- + >>> # Platonic icosahedral graph is 5-edge-connected + >>> G = nx.icosahedral_graph() + >>> nx.edge_connectivity(G) + 5 + + You can use alternative flow algorithms for the underlying + maximum flow computation. 
In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> nx.edge_connectivity(G, flow_func=shortest_augmenting_path) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local edge connectivity. + + >>> nx.edge_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_edge_connectivity` for details. + + Notes + ----- + This is a flow based implementation of global edge connectivity. + For undirected graphs the algorithm works by finding a 'small' + dominating set of nodes of G (see algorithm 7 in [1]_ ) and + computing local maximum flow (see :meth:`local_edge_connectivity`) + between an arbitrary node in the dominating set and the rest of + nodes in it. This is an implementation of algorithm 6 in [1]_ . + For directed graphs, the algorithm does n calls to the maximum + flow function. This is an implementation of algorithm 8 in [1]_ . + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + :meth:`k_edge_components` + :meth:`k_edge_subgraphs` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local edge connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_edge_connectivity(G, s, t, flow_func=flow_func, cutoff=cutoff) + + # Global edge connectivity + # reuse auxiliary digraph and residual network + H = build_auxiliary_edge_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) + + if G.is_directed(): + # Algorithm 8 in [1] + if not nx.is_weakly_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + nodes = list(G) + n = len(nodes) + + if cutoff is not None: + L = min(cutoff, L) + + for i in range(n): + kwargs["cutoff"] = L + try: + L = min(L, local_edge_connectivity(G, nodes[i], nodes[i + 1], **kwargs)) + except IndexError: # last node! 
+ L = min(L, local_edge_connectivity(G, nodes[i], nodes[0], **kwargs)) + return L + else: # undirected + # Algorithm 6 in [1] + if not nx.is_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + + if cutoff is not None: + L = min(cutoff, L) + + # A dominating set is \lambda-covering + # We need a dominating set with at least two nodes + for node in G: + D = nx.dominating_set(G, start_with=node) + v = D.pop() + if D: + break + else: + # in complete graphs the dominating sets will always be of one node + # thus we return min degree + return L + + for w in D: + kwargs["cutoff"] = L + L = min(L, local_edge_connectivity(G, v, w, **kwargs)) + + return L diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/cuts.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/cuts.py new file mode 100644 index 0000000..92495b1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/cuts.py @@ -0,0 +1,599 @@ +""" +Flow based cut algorithms +""" +import itertools + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# cut algorithms. +from networkx.algorithms.flow import build_residual_network, edmonds_karp + +default_flow_func = edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = [ + "minimum_st_node_cut", + "minimum_node_cut", + "minimum_st_edge_cut", + "minimum_edge_cut", +] + + +def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + """Returns the edges of the cut-set of a minimum (s, t)-cut. + + This function returns the set of edges of minimum cardinality that, + if removed, would destroy all paths among source and target in G. + Edge weights are not considered. See :meth:`minimum_cut` for + computing minimum cuts considering edge weights. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See :meth:`node_connectivity` for + details. The choice of the default function may change from version + to version and should not be relied on. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed from the graph, will disconnect it. 
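A quick way to sanity-check the returned cut-set is to remove it from a copy of
the graph and confirm that the two endpoints end up disconnected. A minimal
sketch, reusing the icosahedral graph from the examples below:

    import networkx as nx
    from networkx.algorithms.connectivity import minimum_st_edge_cut

    G = nx.icosahedral_graph()
    cutset = minimum_st_edge_cut(G, 0, 6)  # 5 edges for this 5-edge-connected graph
    H = G.copy()
    H.remove_edges_from(cutset)            # delete the cut edges
    assert not nx.has_path(H, 0, 6)        # 0 and 6 are now separated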
+ + See also + -------- + :meth:`minimum_cut` + :meth:`minimum_node_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import minimum_st_edge_cut + + We use in this example the platonic icosahedral graph, which has edge + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> len(minimum_st_edge_cut(G, 0, 6)) + 5 + + If you need to compute local edge cuts on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for edge connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local edge cuts among all pairs of + nodes of the platonic icosahedral graph reusing the data + structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity + >>> H = build_auxiliary_edge_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = len(minimum_st_edge_cut(G, u, v, auxiliary=H, residual=R)) + ... result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing edge + cuts. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(minimum_st_edge_cut(G, 0, 6, flow_func=shortest_augmenting_path)) + 5 + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + kwargs = dict(capacity="capacity", flow_func=flow_func, residual=residual) + + cut_value, partition = nx.minimum_cut(H, s, t, **kwargs) + reachable, non_reachable = partition + # Any edge in the original graph linking the two sets in the + # partition is part of the edge cutset + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + + return cutset + + +def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + r"""Returns a set of nodes of minimum cardinality that disconnect source + from target in G. + + This function returns the set of nodes of minimum cardinality that, + if removed, would destroy all paths among source and target in G. 
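The same kind of check works for node cuts: remove the returned nodes from a
copy of the graph and the source and target should no longer be connected. A
minimal sketch, assuming the non-adjacent pair (0, 6) used in the examples
below:

    import networkx as nx
    from networkx.algorithms.connectivity import minimum_st_node_cut

    G = nx.icosahedral_graph()
    cut = minimum_st_node_cut(G, 0, 6)  # separating nodes; neither endpoint is included
    H = G.copy()
    H.remove_nodes_from(cut)
    assert not nx.has_path(H, 0, 6)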
+ + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. + + t : node + Target node. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of nodes that, if removed, would destroy all paths between + source and target in G. + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import minimum_st_node_cut + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> len(minimum_st_node_cut(G, 0, 6)) + 5 + + If you need to compute local st cuts between several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity and node cuts, and the + residual network for the underlying maximum flow computation. + + Example of how to compute local st node cuts reusing the data + structures: + + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> len(minimum_st_node_cut(G, 0, 6, auxiliary=H, residual=R)) + 5 + + You can also use alternative flow algorithms for computing minimum st + node cuts. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(minimum_st_node_cut(G, 0, 6, flow_func=shortest_augmenting_path)) + 5 + + Notes + ----- + This is a flow based implementation of minimum node cut. 
The algorithm + is based in solving a number of maximum flow computations to determine + the capacity of the minimum cut on an auxiliary directed network that + corresponds to the minimum node cut of G. It handles both directed + and undirected graphs. This implementation is based on algorithm 11 + in [1]_. + + See also + -------- + :meth:`minimum_node_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + if G.has_edge(s, t) or G.has_edge(t, s): + return {} + kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H) + + # The edge cut in the auxiliary digraph corresponds to the node cut in the + # original graph. + edge_cut = minimum_st_edge_cut(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + # Each node in the original graph maps to two nodes of the auxiliary graph + node_cut = {H.nodes[node]["id"] for edge in edge_cut for node in edge} + return node_cut - {s, t} + + +def minimum_node_cut(G, s=None, t=None, flow_func=None): + r"""Returns a set of nodes of minimum cardinality that disconnects G. + + If source and target nodes are provided, this function returns the + set of nodes of minimum cardinality that, if removed, would destroy + all paths among source and target in G. If not, it returns a set + of nodes of minimum cardinality that disconnects G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + cutset : set + Set of nodes that, if removed, would disconnect G. If source + and target nodes are provided, the set contains the nodes that + if removed, would destroy all paths between source and target. + + Examples + -------- + >>> # Platonic icosahedral graph has node connectivity 5 + >>> G = nx.icosahedral_graph() + >>> node_cut = nx.minimum_node_cut(G) + >>> len(node_cut) + 5 + + You can use alternative flow algorithms for the underlying maximum + flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. Alternative + flow functions have to be explicitly imported from the flow package. 
+ + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> node_cut == nx.minimum_node_cut(G, flow_func=shortest_augmenting_path) + True + + If you specify a pair of nodes (source and target) as parameters, + this function returns a local st node cut. + + >>> len(nx.minimum_node_cut(G, 3, 7)) + 5 + + If you need to perform several local st cuts among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`minimum_st_node_cut` for details. + + Notes + ----- + This is a flow based implementation of minimum node cut. The algorithm + is based in solving a number of maximum flow computations to determine + the capacity of the minimum cut on an auxiliary directed network that + corresponds to the minimum node cut of G. It handles both directed + and undirected graphs. This implementation is based on algorithm 11 + in [1]_. + + See also + -------- + :meth:`minimum_st_node_cut` + :meth:`minimum_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local minimum node cut. + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return minimum_st_node_cut(G, s, t, flow_func=flow_func) + + # Global minimum node cut. + # Analog to the algorithm 11 for global node connectivity in [1]. + if G.is_directed(): + if not nx.is_weakly_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.permutations + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.combinations + neighbors = G.neighbors + + # Reuse the auxiliary digraph and the residual network. + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) + + # Choose a node with minimum degree. + v = min(G, key=G.degree) + # Initial node cutset is all neighbors of the node with minimum degree. + min_cut = set(G[v]) + # Compute st node cuts between v and all its non-neighbors nodes in G. + for w in set(G) - set(neighbors(v)) - {v}: + this_cut = minimum_st_node_cut(G, v, w, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + # Also for non adjacent pairs of neighbors of v. + for x, y in iter_func(neighbors(v), 2): + if y in G[x]: + continue + this_cut = minimum_st_node_cut(G, x, y, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + + return min_cut + + +def minimum_edge_cut(G, s=None, t=None, flow_func=None): + r"""Returns a set of edges of minimum cardinality that disconnects G. + + If source and target nodes are provided, this function returns the + set of edges of minimum cardinality that, if removed, would break + all paths among source and target in G. If not, it returns a set of + edges of minimum cardinality that disconnects G. 
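For the global form, removing the returned edge set should leave the graph
disconnected. A minimal sketch:

    import networkx as nx

    G = nx.icosahedral_graph()
    cutset = nx.minimum_edge_cut(G)  # 5 edges for the icosahedral graph
    H = G.copy()
    H.remove_edges_from(cutset)
    assert not nx.is_connected(H)    # the graph splits without them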
+ + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed, would disconnect G. If source + and target nodes are provided, the set contains the edges that + if removed, would destroy all paths between source and target. + + Examples + -------- + >>> # Platonic icosahedral graph has edge connectivity 5 + >>> G = nx.icosahedral_graph() + >>> len(nx.minimum_edge_cut(G)) + 5 + + You can use alternative flow algorithms for the underlying + maximum flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(nx.minimum_edge_cut(G, flow_func=shortest_augmenting_path)) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local edge connectivity. + + >>> nx.edge_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_edge_connectivity` for details. + + Notes + ----- + This is a flow based implementation of minimum edge cut. For + undirected graphs the algorithm works by finding a 'small' dominating + set of nodes of G (see algorithm 7 in [1]_) and computing the maximum + flow between an arbitrary node in the dominating set and the rest of + nodes in it. This is an implementation of algorithm 6 in [1]_. For + directed graphs, the algorithm does n calls to the max flow function. + The function raises an error if the directed graph is not weakly + connected and returns an empty set if it is weakly connected. + It is an implementation of algorithm 8 in [1]_. + + See also + -------- + :meth:`minimum_st_edge_cut` + :meth:`minimum_node_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. 
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # reuse auxiliary digraph and residual network + H = build_auxiliary_edge_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = dict(flow_func=flow_func, residual=R, auxiliary=H) + + # Local minimum edge cut if s and t are not None + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return minimum_st_edge_cut(H, s, t, **kwargs) + + # Global minimum edge cut + # Analog to the algorithm for global edge connectivity + if G.is_directed(): + # Based on algorithm 8 in [1] + if not nx.is_weakly_connected(G): + raise nx.NetworkXError("Input graph is not connected") + + # Initial cutset is all edges of a node with minimum degree + node = min(G, key=G.degree) + min_cut = set(G.edges(node)) + nodes = list(G) + n = len(nodes) + for i in range(n): + try: + this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1], **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + except IndexError: # Last node! + this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0], **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + + return min_cut + + else: # undirected + # Based on algorithm 6 in [1] + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is not connected") + + # Initial cutset is all edges of a node with minimum degree + node = min(G, key=G.degree) + min_cut = set(G.edges(node)) + # A dominating set is \lambda-covering + # We need a dominating set with at least two nodes + for node in G: + D = nx.dominating_set(G, start_with=node) + v = D.pop() + if D: + break + else: + # in complete graphs the dominating set will always be of one node + # thus we return min_cut, which now contains the edges of a node + # with minimum degree + return min_cut + for w in D: + this_cut = minimum_st_edge_cut(H, v, w, **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + + return min_cut diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/disjoint_paths.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/disjoint_paths.py new file mode 100644 index 0000000..378a709 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/disjoint_paths.py @@ -0,0 +1,394 @@ +"""Flow based node and edge disjoint paths.""" +import networkx as nx + +# Define the default maximum flow function to use for the undelying +# maximum flow computations +from networkx.algorithms.flow import ( + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) +from networkx.exception import NetworkXNoPath + +default_flow_func = edmonds_karp +from itertools import filterfalse as _filterfalse + +# Functions to build auxiliary data structures. +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = ["edge_disjoint_paths", "node_disjoint_paths"] + + +def edge_disjoint_paths( + G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None +): + """Returns the edges disjoint paths between source and target. + + Edge disjoint paths are paths that do not share any edge. The + number of edge disjoint paths between source and target is equal + to their edge connectivity. 
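The edge-disjointness property can be checked directly by collecting the edges
along each returned path and verifying that none of them repeats. A minimal
sketch for the undirected case:

    import networkx as nx

    G = nx.icosahedral_graph()
    paths = list(nx.edge_disjoint_paths(G, 0, 6))
    used = [frozenset(e) for p in paths for e in zip(p, p[1:])]  # undirected edges on all paths
    assert len(used) == len(set(used))                   # no edge is shared between paths
    assert len(paths) == nx.edge_connectivity(G, 0, 6)   # count equals local edge connectivity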
+ + Parameters + ---------- + G : NetworkX graph + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. The choice of the default function + may change from version to version and should not be relied on. + Default value: None. + + cutoff : int + Maximum number of paths to yield. Some of the maximum flow + algorithms, such as :meth:`edmonds_karp` (the default) and + :meth:`shortest_augmenting_path` support the cutoff parameter, + and will terminate when the flow value reaches or exceeds the + cutoff. Other algorithms will ignore this parameter. + Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based edge connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + paths : generator + A generator of edge independent paths. + + Raises + ------ + NetworkXNoPath + If there is no path between source and target. + + NetworkXError + If source or target are not in the graph G. + + See also + -------- + :meth:`node_disjoint_paths` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Examples + -------- + We use in this example the platonic icosahedral graph, which has node + edge connectivity 5, thus there are 5 edge disjoint paths between any + pair of nodes. + + >>> G = nx.icosahedral_graph() + >>> len(list(nx.edge_disjoint_paths(G, 0, 6))) + 5 + + + If you need to compute edge disjoint paths on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for edge connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute edge disjoint paths among all pairs of + nodes of the platonic icosahedral graph reusing the data + structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity + >>> H = build_auxiliary_edge_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = {n: {} for n in G} + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as arguments + >>> for u, v in itertools.combinations(G, 2): + ... k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R))) + ... 
result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing edge disjoint + paths. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path))) + 5 + + Notes + ----- + This is a flow based implementation of edge disjoint paths. We compute + the maximum flow between source and target on an auxiliary directed + network. The saturated edges in the residual network after running the + maximum flow algorithm correspond to edge disjoint paths between source + and target in the original network. This function handles both directed + and undirected graphs, and can use all flow algorithms from NetworkX flow + package. + + """ + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + # Maximum possible edge disjoint paths + possible = min(H.out_degree(s), H.in_degree(t)) + if not possible: + raise NetworkXNoPath + + if cutoff is None: + cutoff = possible + else: + cutoff = min(cutoff, possible) + + # Compute maximum flow between source and target. Flow functions in + # NetworkX return a residual network. + kwargs = dict( + capacity="capacity", residual=residual, cutoff=cutoff, value_only=True + ) + if flow_func is preflow_push: + del kwargs["cutoff"] + if flow_func is shortest_augmenting_path: + kwargs["two_phase"] = True + R = flow_func(H, s, t, **kwargs) + + if R.graph["flow_value"] == 0: + raise NetworkXNoPath + + # Saturated edges in the residual network form the edge disjoint paths + # between source and target + cutset = [ + (u, v) + for u, v, d in R.edges(data=True) + if d["capacity"] == d["flow"] and d["flow"] > 0 + ] + # This is equivalent of what flow.utils.build_flow_dict returns, but + # only for the nodes with saturated edges and without reporting 0 flows. + flow_dict = {n: {} for edge in cutset for n in edge} + for u, v in cutset: + flow_dict[u][v] = 1 + + # Rebuild the edge disjoint paths from the flow dictionary. + paths_found = 0 + for v in list(flow_dict[s]): + if paths_found >= cutoff: + # preflow_push does not support cutoff: we have to + # keep track of the paths founds and stop at cutoff. + break + path = [s] + if v == t: + path.append(v) + yield path + continue + u = v + while u != t: + path.append(u) + try: + u, _ = flow_dict[u].popitem() + except KeyError: + break + else: + path.append(t) + yield path + paths_found += 1 + + +def node_disjoint_paths( + G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None +): + r"""Computes node disjoint paths between source and target. + + Node disjoint paths are paths that only share their first and last + nodes. The number of node independent paths between two nodes is + equal to their local node connectivity. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. + + t : node + Target node. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. 
+ The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + cutoff : int + Maximum number of paths to yield. Some of the maximum flow + algorithms, such as :meth:`edmonds_karp` (the default) and + :meth:`shortest_augmenting_path` support the cutoff parameter, + and will terminate when the flow value reaches or exceeds the + cutoff. Other algorithms will ignore this parameter. + Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + paths : generator + Generator of node disjoint paths. + + Raises + ------ + NetworkXNoPath + If there is no path between source and target. + + NetworkXError + If source or target are not in the graph G. + + Examples + -------- + We use in this example the platonic icosahedral graph, which has node + connectivity 5, thus there are 5 node disjoint paths between any pair + of non neighbor nodes. + + >>> G = nx.icosahedral_graph() + >>> len(list(nx.node_disjoint_paths(G, 0, 6))) + 5 + + If you need to compute node disjoint paths between several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity and node cuts, and the + residual network for the underlying maximum flow computation. + + Example of how to compute node disjoint paths reusing the data + structures: + + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as arguments + >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R))) + 5 + + You can also use alternative flow algorithms for computing node disjoint + paths. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path))) + 5 + + Notes + ----- + This is a flow based implementation of node disjoint paths. 
We compute + the maximum flow between source and target on an auxiliary directed + network. The saturated edges in the residual network after running the + maximum flow algorithm correspond to node disjoint paths between source + and target in the original network. This function handles both directed + and undirected graphs, and can use all flow algorithms from NetworkX flow + package. + + See also + -------- + :meth:`edge_disjoint_paths` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + """ + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + + # Maximum possible edge disjoint paths + possible = min(H.out_degree(f"{mapping[s]}B"), H.in_degree(f"{mapping[t]}A")) + if not possible: + raise NetworkXNoPath + + if cutoff is None: + cutoff = possible + else: + cutoff = min(cutoff, possible) + + kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H, cutoff=cutoff) + + # The edge disjoint paths in the auxiliary digraph correspond to the node + # disjoint paths in the original graph. + paths_edges = edge_disjoint_paths(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + for path in paths_edges: + # Each node in the original graph maps to two nodes in auxiliary graph + yield list(_unique_everseen(H.nodes[node]["id"] for node in path)) + + +def _unique_everseen(iterable): + # Adapted from https://docs.python.org/3/library/itertools.html examples + "List unique elements, preserving order. Remember all elements ever seen." + # unique_everseen('AAAABBBCCDAABBB') --> A B C D + seen = set() + seen_add = seen.add + for element in _filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_augmentation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_augmentation.py new file mode 100644 index 0000000..a8c5e83 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_augmentation.py @@ -0,0 +1,1256 @@ +""" +Algorithms for finding k-edge-augmentations + +A k-edge-augmentation is a set of edges, that once added to a graph, ensures +that the graph is k-edge-connected; i.e. the graph cannot be disconnected +unless k or more edges are removed. Typically, the goal is to find the +augmentation with minimum weight. In general, it is not guaranteed that a +k-edge-augmentation exists. + +See Also +-------- +:mod:`edge_kcomponents` : algorithms for finding k-edge-connected components +:mod:`connectivity` : algorithms for determening edge connectivity. +""" +import itertools as it +import math +from collections import defaultdict, namedtuple + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["k_edge_augmentation", "is_k_edge_connected", "is_locally_k_edge_connected"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def is_k_edge_connected(G, k): + """Tests to see if a graph is k-edge-connected. + + Is it impossible to disconnect the graph by removing fewer than k edges? + If so, then G is k-edge-connected. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. 
+ + k : integer + edge connectivity to test for + + Returns + ------- + boolean + True if G is k-edge-connected. + + See Also + -------- + :func:`is_locally_k_edge_connected` + + Examples + -------- + >>> G = nx.barbell_graph(10, 0) + >>> nx.is_k_edge_connected(G, k=1) + True + >>> nx.is_k_edge_connected(G, k=2) + False + """ + if k < 1: + raise ValueError(f"k must be positive, not {k}") + # First try to quickly determine if G is not k-edge-connected + if G.number_of_nodes() < k + 1: + return False + elif any(d < k for n, d in G.degree()): + return False + else: + # Otherwise perform the full check + if k == 1: + return nx.is_connected(G) + elif k == 2: + return not nx.has_bridges(G) + else: + return nx.edge_connectivity(G, cutoff=k) >= k + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def is_locally_k_edge_connected(G, s, t, k): + """Tests to see if an edge in a graph is locally k-edge-connected. + + Is it impossible to disconnect s and t by removing fewer than k edges? + If so, then s and t are locally k-edge-connected in G. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + s : node + Source node + + t : node + Target node + + k : integer + local edge connectivity for nodes s and t + + Returns + ------- + boolean + True if s and t are locally k-edge-connected in G. + + See Also + -------- + :func:`is_k_edge_connected` + + Examples + -------- + >>> from networkx.algorithms.connectivity import is_locally_k_edge_connected + >>> G = nx.barbell_graph(10, 0) + >>> is_locally_k_edge_connected(G, 5, 15, k=1) + True + >>> is_locally_k_edge_connected(G, 5, 15, k=2) + False + >>> is_locally_k_edge_connected(G, 1, 5, k=2) + True + """ + if k < 1: + raise ValueError(f"k must be positive, not {k}") + + # First try to quickly determine s, t is not k-locally-edge-connected in G + if G.degree(s) < k or G.degree(t) < k: + return False + else: + # Otherwise perform the full check + if k == 1: + return nx.has_path(G, s, t) + else: + localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k) + return localk >= k + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def k_edge_augmentation(G, k, avail=None, weight=None, partial=False): + """Finds set of edges to k-edge-connect G. + + Adding edges from the augmentation to G make it impossible to disconnect G + unless k or more edges are removed. This function uses the most efficient + function available (depending on the value of k and if the problem is + weighted or unweighted) to search for a minimum weight subset of available + edges that k-edge-connects G. In general, finding a k-edge-augmentation is + NP-hard, so solutions are not guaranteed to be minimal. Furthermore, a + k-edge-augmentation may not exist. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + The available edges that can be used in the augmentation. + + If unspecified, then all edges in the complement of G are available. + Otherwise, each item is an available edge (with an optional weight). + + In the unweighted case, each item is an edge ``(u, v)``. + + In the weighted case, each item is a 3-tuple ``(u, v, d)`` or a dict + with items ``(u, v): d``. The third item, ``d``, can be a dictionary + or a real number. If ``d`` is a dictionary ``d[weight]`` + correspondings to the weight. 
+ + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples where the + third item in each tuple is a dictionary. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then all + a partial k-edge-augmentation is generated. Adding the edges in a + partial augmentation to G, minimizes the number of k-edge-connected + components and maximizes the edge connectivity between those + components. For details, see :func:`partial_k_edge_augmentation`. + + Yields + ------ + edge : tuple + Edges that, once added to G, would cause G to become k-edge-connected. + If partial is False, an error is raised if this is not possible. + Otherwise, generated edges form a partial augmentation, which + k-edge-connects any part of G where it is possible, and maximally + connects the remaining parts. + + Raises + ------ + NetworkXUnfeasible + If partial is False and no k-edge-augmentation exists. + + NetworkXNotImplemented + If the input graph is directed or a multigraph. + + ValueError: + If k is less than 1 + + Notes + ----- + When k=1 this returns an optimal solution. + + When k=2 and ``avail`` is None, this returns an optimal solution. + Otherwise when k=2, this returns a 2-approximation of the optimal solution. + + For k>3, this problem is NP-hard and this uses a randomized algorithm that + produces a feasible solution, but provides no guarantees on the + solution weight. + + Examples + -------- + >>> # Unweighted cases + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> G.add_node(5) + >>> sorted(nx.k_edge_augmentation(G, k=1)) + [(1, 5)] + >>> sorted(nx.k_edge_augmentation(G, k=2)) + [(1, 5), (5, 4)] + >>> sorted(nx.k_edge_augmentation(G, k=3)) + [(1, 4), (1, 5), (2, 5), (3, 5), (4, 5)] + >>> complement = list(nx.k_edge_augmentation(G, k=5, partial=True)) + >>> G.add_edges_from(complement) + >>> nx.edge_connectivity(G) + 4 + + >>> # Weighted cases + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> G.add_node(5) + >>> # avail can be a tuple with a dict + >>> avail = [(1, 5, {"weight": 11}), (2, 5, {"weight": 10})] + >>> sorted(nx.k_edge_augmentation(G, k=1, avail=avail, weight="weight")) + [(2, 5)] + >>> # or avail can be a 3-tuple with a real number + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)] + >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail)) + [(1, 5), (2, 5), (4, 5)] + >>> # or avail can be a dict + >>> avail = {(1, 5): 11, (2, 5): 10, (4, 3): 1, (4, 5): 51} + >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail)) + [(1, 5), (2, 5), (4, 5)] + >>> # If augmentation is infeasible, then a partial solution can be found + >>> avail = {(1, 5): 11} + >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True)) + [(1, 5)] + """ + try: + if k <= 0: + raise ValueError(f"k must be a positive integer, not {k}") + elif G.number_of_nodes() < k + 1: + msg = f"impossible to {k} connect in graph with less than {k + 1} nodes" + raise nx.NetworkXUnfeasible(msg) + elif avail is not None and len(avail) == 0: + if not nx.is_k_edge_connected(G, k): + raise nx.NetworkXUnfeasible("no available edges") + aug_edges = [] + elif k == 1: + aug_edges = one_edge_augmentation( + G, avail=avail, weight=weight, partial=partial + ) + elif k == 2: + aug_edges = bridge_augmentation(G, avail=avail, weight=weight) + else: + # raise NotImplementedError(f'not implemented for k>2. k={k}') + aug_edges = greedy_k_edge_augmentation( + G, k=k, avail=avail, weight=weight, seed=0 + ) + # Do eager evaulation so we can catch any exceptions + # Before executing partial code. 
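+ # (Converting the generator to a list here forces any lazily raised
+ # nx.NetworkXUnfeasible to surface inside this try block, so the except
+ # clause below can fall back to a partial augmentation instead of the
+ # error escaping to the caller mid-iteration.)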
+ yield from list(aug_edges) + except nx.NetworkXUnfeasible: + if partial: + # Return all available edges + if avail is None: + aug_edges = complement_edges(G) + else: + # If we can't k-edge-connect the entire graph, try to + # k-edge-connect as much as possible + aug_edges = partial_k_edge_augmentation( + G, k=k, avail=avail, weight=weight + ) + yield from aug_edges + else: + raise + + +def partial_k_edge_augmentation(G, k, avail, weight=None): + """Finds augmentation that k-edge-connects as much of the graph as possible. + + When a k-edge-augmentation is not possible, we can still try to find a + small set of edges that partially k-edge-connects as much of the graph as + possible. All possible edges are generated between remaining parts. + This minimizes the number of k-edge-connected subgraphs in the resulting + graph and maxmizes the edge connectivity between those subgraphs. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + Yields + ------ + edge : tuple + Edges in the partial augmentation of G. These edges k-edge-connect any + part of G where it is possible, and maximally connects the remaining + parts. In other words, all edges from avail are generated except for + those within subgraphs that have already become k-edge-connected. + + Notes + ----- + Construct H that augments G with all edges in avail. + Find the k-edge-subgraphs of H. + For each k-edge-subgraph, if the number of nodes is more than k, then find + the k-edge-augmentation of that graph and add it to the solution. Then add + all edges in avail between k-edge subgraphs to the solution. 
+ + See Also + -------- + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> G.add_node(8) + >>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)] + >>> sorted(partial_k_edge_augmentation(G, k=2, avail=avail)) + [(1, 5), (1, 8)] + """ + + def _edges_between_disjoint(H, only1, only2): + """finds edges between disjoint nodes""" + only1_adj = {u: set(H.adj[u]) for u in only1} + for u, neighbs in only1_adj.items(): + # Find the neighbors of u in only1 that are also in only2 + neighbs12 = neighbs.intersection(only2) + for v in neighbs12: + yield (u, v) + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + + # Find which parts of the graph can be k-edge-connected + H = G.copy() + H.add_edges_from( + ( + (u, v, {"weight": w, "generator": (u, v)}) + for (u, v), w in zip(avail, avail_w) + ) + ) + k_edge_subgraphs = list(nx.k_edge_subgraphs(H, k=k)) + + # Generate edges to k-edge-connect internal subgraphs + for nodes in k_edge_subgraphs: + if len(nodes) > 1: + # Get the k-edge-connected subgraph + C = H.subgraph(nodes).copy() + # Find the internal edges that were available + sub_avail = { + d["generator"]: d["weight"] + for (u, v, d) in C.edges(data=True) + if "generator" in d + } + # Remove potential augmenting edges + C.remove_edges_from(sub_avail.keys()) + # Find a subset of these edges that makes the compoment + # k-edge-connected and ignore the rest + yield from nx.k_edge_augmentation(C, k=k, avail=sub_avail) + + # Generate all edges between CCs that could not be k-edge-connected + for cc1, cc2 in it.combinations(k_edge_subgraphs, 2): + for (u, v) in _edges_between_disjoint(H, cc1, cc2): + d = H.get_edge_data(u, v) + edge = d.get("generator", None) + if edge is not None: + yield edge + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def one_edge_augmentation(G, avail=None, weight=None, partial=False): + """Finds minimum weight set of edges to connect G. + + Equivalent to :func:`k_edge_augmentation` when k=1. Adding the resulting + edges to G will make it 1-edge-connected. The solution is optimal for both + weighted and non-weighted variants. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the one-augmentation of G + + Raises + ------ + NetworkXUnfeasible + If partial is False and no one-edge-augmentation exists. + + Notes + ----- + Uses either :func:`unconstrained_one_edge_augmentation` or + :func:`weighted_one_edge_augmentation` depending on whether ``avail`` is + specified. Both algorithms are based on finding a minimum spanning tree. + As such both algorithms find optimal solutions and run in linear time. + + See Also + -------- + :func:`k_edge_augmentation` + """ + if avail is None: + return unconstrained_one_edge_augmentation(G) + else: + return weighted_one_edge_augmentation( + G, avail=avail, weight=weight, partial=partial + ) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def bridge_augmentation(G, avail=None, weight=None): + """Finds the a set of edges that bridge connects G. 
+ + Equivalent to :func:`k_edge_augmentation` when k=2, and partial=False. + Adding the resulting edges to G will make it 2-edge-connected. If no + constraints are specified the returned set of edges is minimum an optimal, + otherwise the solution is approximated. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + Yields + ------ + edge : tuple + Edges in the bridge-augmentation of G + + Raises + ------ + NetworkXUnfeasible + If no bridge-augmentation exists. + + Notes + ----- + If there are no constraints the solution can be computed in linear time + using :func:`unconstrained_bridge_augmentation`. Otherwise, the problem + becomes NP-hard and is the solution is approximated by + :func:`weighted_bridge_augmentation`. + + See Also + -------- + :func:`k_edge_augmentation` + """ + if G.number_of_nodes() < 3: + raise nx.NetworkXUnfeasible("impossible to bridge connect less than 3 nodes") + if avail is None: + return unconstrained_bridge_augmentation(G) + else: + return weighted_bridge_augmentation(G, avail, weight=weight) + + +# --- Algorithms and Helpers --- + + +def _ordered(u, v): + """Returns the nodes in an undirected edge in lower-triangular order""" + return (u, v) if u < v else (v, u) + + +def _unpack_available_edges(avail, weight=None, G=None): + """Helper to separate avail into edges and corresponding weights""" + if weight is None: + weight = "weight" + if isinstance(avail, dict): + avail_uv = list(avail.keys()) + avail_w = list(avail.values()) + else: + + def _try_getitem(d): + try: + return d[weight] + except TypeError: + return d + + avail_uv = [tup[0:2] for tup in avail] + avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail] + + if G is not None: + # Edges already in the graph are filtered + flags = [not G.has_edge(u, v) for u, v in avail_uv] + avail_uv = list(it.compress(avail_uv, flags)) + avail_w = list(it.compress(avail_w, flags)) + return avail_uv, avail_w + + +MetaEdge = namedtuple("MetaEdge", ("meta_uv", "uv", "w")) + + +def _lightest_meta_edges(mapping, avail_uv, avail_w): + """Maps available edges in the original graph to edges in the metagraph. + + Parameters + ---------- + mapping : dict + mapping produced by :func:`collapse`, that maps each node in the + original graph to a node in the meta graph + + avail_uv : list + list of edges + + avail_w : list + list of edge weights + + Notes + ----- + Each node in the metagraph is a k-edge-connected component in the original + graph. We don't care about any edge within the same k-edge-connected + component, so we ignore self edges. We also are only intereseted in the + minimum weight edge bridging each k-edge-connected component so, we group + the edges by meta-edge and take the lightest in each group. 
+ + Examples + -------- + >>> # Each group represents a meta-node + >>> groups = ([1, 2, 3], [4, 5], [6]) + >>> mapping = {n: meta_n for meta_n, ns in enumerate(groups) for n in ns} + >>> avail_uv = [(1, 2), (3, 6), (1, 4), (5, 2), (6, 1), (2, 6), (3, 1)] + >>> avail_w = [20, 99, 20, 15, 50, 99, 20] + >>> sorted(_lightest_meta_edges(mapping, avail_uv, avail_w)) + [MetaEdge(meta_uv=(0, 1), uv=(5, 2), w=15), MetaEdge(meta_uv=(0, 2), uv=(6, 1), w=50)] + """ + grouped_wuv = defaultdict(list) + for w, (u, v) in zip(avail_w, avail_uv): + # Order the meta-edge so it can be used as a dict key + meta_uv = _ordered(mapping[u], mapping[v]) + # Group each available edge using the meta-edge as a key + grouped_wuv[meta_uv].append((w, u, v)) + + # Now that all available edges are grouped, choose one per group + for (mu, mv), choices_wuv in grouped_wuv.items(): + # Ignore available edges within the same meta-node + if mu != mv: + # Choose the lightest available edge belonging to each meta-edge + w, u, v = min(choices_wuv) + yield MetaEdge((mu, mv), (u, v), w) + + +def unconstrained_one_edge_augmentation(G): + """Finds the smallest set of edges to connect G. + + This is a variant of the unweighted MST problem. + If G is not empty, a feasible solution always exists. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Yields + ------ + edge : tuple + Edges in the one-edge-augmentation of G + + See Also + -------- + :func:`one_edge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> G.add_nodes_from([6, 7, 8]) + >>> sorted(unconstrained_one_edge_augmentation(G)) + [(1, 4), (4, 6), (6, 7), (7, 8)] + """ + ccs1 = list(nx.connected_components(G)) + C = collapse(G, ccs1) + # When we are not constrained, we can just make a meta graph tree. + meta_nodes = list(C.nodes()) + # build a path in the metagraph + meta_aug = list(zip(meta_nodes, meta_nodes[1:])) + # map that path to the original graph + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + for mu, mv in meta_aug: + yield (inverse[mu][0], inverse[mv][0]) + + +def weighted_one_edge_augmentation(G, avail, weight=None, partial=False): + """Finds the minimum weight set of edges to connect G if one exists. + + This is a variant of the weighted MST problem. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to connect G. 
+ + See Also + -------- + :func:`one_edge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> G.add_nodes_from([6, 7, 8]) + >>> # any edge not in avail has an implicit weight of infinity + >>> avail = [(1, 3), (1, 5), (4, 7), (4, 8), (6, 1), (8, 1), (8, 2)] + >>> sorted(weighted_one_edge_augmentation(G, avail)) + [(1, 5), (4, 7), (6, 1), (8, 1)] + >>> # find another solution by giving large weights to edges in the + >>> # previous solution (note some of the old edges must be used) + >>> avail = [(1, 3), (1, 5, 99), (4, 7, 9), (6, 1, 99), (8, 1, 99), (8, 2)] + >>> sorted(weighted_one_edge_augmentation(G, avail)) + [(1, 5), (4, 7), (6, 1), (8, 2)] + """ + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + # Collapse CCs in the original graph into nodes in a metagraph + # Then find an MST of the metagraph instead of the original graph + C = collapse(G, nx.connected_components(G)) + mapping = C.graph["mapping"] + # Assign each available edge to an edge in the metagraph + candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w) + # nx.set_edge_attributes(C, name='weight', values=0) + C.add_edges_from( + (mu, mv, {"weight": w, "generator": uv}) + for (mu, mv), uv, w in candidate_mapping + ) + # Find MST of the meta graph + meta_mst = nx.minimum_spanning_tree(C) + if not partial and not nx.is_connected(meta_mst): + raise nx.NetworkXUnfeasible("Not possible to connect G with available edges") + # Yield the edge that generated the meta-edge + for mu, mv, d in meta_mst.edges(data=True): + if "generator" in d: + edge = d["generator"] + yield edge + + +def unconstrained_bridge_augmentation(G): + """Finds an optimal 2-edge-augmentation of G using the fewest edges. + + This is an implementation of the algorithm detailed in [1]_. + The basic idea is to construct a meta-graph of bridge-ccs, connect leaf + nodes of the trees to connect the entire graph, and finally connect the + leafs of the tree in dfs-preorder to bridge connect the entire graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Yields + ------ + edge : tuple + Edges in the bridge augmentation of G + + Notes + ----- + Input: a graph G. + First find the bridge components of G and collapse each bridge-cc into a + node of a metagraph graph C, which is guaranteed to be a forest of trees. + + C contains p "leafs" --- nodes with exactly one incident edge. + C contains q "isolated nodes" --- nodes with no incident edges. + + Theorem: If p + q > 1, then at least :math:`ceil(p / 2) + q` edges are + needed to bridge connect C. This algorithm achieves this min number. + + The method first adds enough edges to make G into a tree and then pairs + leafs in a simple fashion. + + Let n be the number of trees in C. Let v(i) be an isolated vertex in the + i-th tree if one exists, otherwise it is a pair of distinct leafs nodes + in the i-th tree. Alternating edges from these sets (i.e. adding edges + A1 = [(v(i)[0], v(i + 1)[1]), v(i + 1)[0], v(i + 2)[1])...]) connects C + into a tree T. This tree has p' = p + 2q - 2(n -1) leafs and no isolated + vertices. A1 has n - 1 edges. The next step finds ceil(p' / 2) edges to + biconnect any tree with p' leafs. + + Convert T into an arborescence T' by picking an arbitrary root node with + degree >= 2 and directing all edges away from the root. Note the + implementation implicitly constructs T'. + + The leafs of T are the nodes with no existing edges in T'. 
+ Order the leafs of T' by DFS prorder. Then break this list in half + and add the zipped pairs to A2. + + The set A = A1 + A2 is the minimum augmentation in the metagraph. + + To convert this to edges in the original graph + + References + ---------- + .. [1] Eswaran, Kapali P., and R. Endre Tarjan. (1975) Augmentation problems. + http://epubs.siam.org/doi/abs/10.1137/0205044 + + See Also + -------- + :func:`bridge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 7)] + >>> G = nx.path_graph((1, 2, 3, 2, 4, 5, 6, 7)) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 3), (3, 7)] + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> G.add_node(4) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 4), (4, 0)] + """ + # ----- + # Mapping of terms from (Eswaran and Tarjan): + # G = G_0 - the input graph + # C = G_0' - the bridge condensation of G. (This is a forest of trees) + # A1 = A_1 - the edges to connect the forest into a tree + # leaf = pendant - a node with degree of 1 + + # alpha(v) = maps the node v in G to its meta-node in C + # beta(x) = maps the meta-node x in C to any node in the bridge + # component of G corresponding to x. + + # find the 2-edge-connected components of G + bridge_ccs = list(nx.connectivity.bridge_components(G)) + # condense G into an forest C + C = collapse(G, bridge_ccs) + + # Choose pairs of distinct leaf nodes in each tree. If this is not + # possible then make a pair using the single isolated node in the tree. + vset1 = [ + tuple(cc) * 2 # case1: an isolated node + if len(cc) == 1 + else sorted(cc, key=C.degree)[0:2] # case2: pair of leaf nodes + for cc in nx.connected_components(C) + ] + if len(vset1) > 1: + # Use this set to construct edges that connect C into a tree. + nodes1 = [vs[0] for vs in vset1] + nodes2 = [vs[1] for vs in vset1] + A1 = list(zip(nodes1[1:], nodes2)) + else: + A1 = [] + # Connect each tree in the forest to construct an arborescence + T = C.copy() + T.add_edges_from(A1) + + # If there are only two leaf nodes, we simply connect them. + leafs = [n for n, d in T.degree() if d == 1] + if len(leafs) == 1: + A2 = [] + if len(leafs) == 2: + A2 = [tuple(leafs)] + else: + # Choose an arbitrary non-leaf root + try: + root = next(n for n, d in T.degree() if d > 1) + except StopIteration: # no nodes found with degree > 1 + return + # order the leaves of C by (induced directed) preorder + v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1] + # connecting first half of the leafs in pre-order to the second + # half will bridge connect the tree with the fewest edges. 
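+ # For example, with preorder leafs v2 = [a, b, c, d], half = 2 and
+ # A2 = [(a, c), (b, d)]: each leaf in the first half is paired with the
+ # corresponding leaf in the second half.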
+ half = math.ceil(len(v2) / 2) + A2 = list(zip(v2[:half], v2[-half:])) + + # collect the edges used to augment the original forest + aug_tree_edges = A1 + A2 + + # Construct the mapping (beta) from meta-nodes to regular nodes + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + # sort so we choose minimum degree nodes first + inverse = { + mu: sorted(mapped, key=lambda u: (G.degree(u), u)) + for mu, mapped in inverse.items() + } + + # For each meta-edge, map back to an arbitrary pair in the original graph + G2 = G.copy() + for mu, mv in aug_tree_edges: + # Find the first available edge that doesn't exist and return it + for u, v in it.product(inverse[mu], inverse[mv]): + if not G2.has_edge(u, v): + G2.add_edge(u, v) + yield u, v + break + + +def weighted_bridge_augmentation(G, avail, weight=None): + """Finds an approximate min-weight 2-edge-augmentation of G. + + This is an implementation of the approximation algorithm detailed in [1]_. + It chooses a set of edges from avail to add to G that renders it + 2-edge-connected if such a subset exists. This is done by finding a + minimum spanning arborescence of a specially constructed metagraph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : set of 2 or 3 tuples. + candidate edges (with optional weights) to choose from + + weight : string + key to use to find weights if avail is a set of 3-tuples where the + third item in each tuple is a dictionary. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to bridge augment G. + + Notes + ----- + Finding a weighted 2-edge-augmentation is NP-hard. + Any edge not in ``avail`` is considered to have a weight of infinity. + The approximation factor is 2 if ``G`` is connected and 3 if it is not. + Runs in :math:`O(m + n log(n))` time + + References + ---------- + .. [1] Khuller, Samir, and Ramakrishna Thurimella. (1993) Approximation + algorithms for graph augmentation. + http://www.sciencedirect.com/science/article/pii/S0196677483710102 + + See Also + -------- + :func:`bridge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> # When the weights are equal, (1, 4) is the best + >>> avail = [(1, 4, 1), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 4)] + >>> # Giving (1, 4) a high weight makes the two edge solution the best. + >>> avail = [(1, 4, 1000), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 3), (2, 4)] + >>> # ------ + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> G.add_node(5) + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (4, 5)] + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (2, 5), (4, 5)] + """ + + if weight is None: + weight = "weight" + + # If input G is not connected the approximation factor increases to 3 + if not nx.is_connected(G): + H = G.copy() + connectors = list(one_edge_augmentation(H, avail=avail, weight=weight)) + H.add_edges_from(connectors) + + yield from connectors + else: + connectors = [] + H = G + + if len(avail) == 0: + if nx.has_bridges(H): + raise nx.NetworkXUnfeasible("no augmentation possible") + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H) + + # Collapse input into a metagraph. 
Meta nodes are bridge-ccs + bridge_ccs = nx.connectivity.bridge_components(H) + C = collapse(H, bridge_ccs) + + # Use the meta graph to shrink avail to a small feasible subset + mapping = C.graph["mapping"] + # Choose the minimum weight feasible edge in each group + meta_to_wuv = { + (mu, mv): (w, uv) + for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w) + } + + # Mapping of terms from (Khuller and Thurimella): + # C : G_0 = (V, E^0) + # This is the metagraph where each node is a 2-edge-cc in G. + # The edges in C represent bridges in the original graph. + # (mu, mv) : E - E^0 # they group both avail and given edges in E + # T : \Gamma + # D : G^D = (V, E_D) + + # The paper uses ancestor because children point to parents, which is + # contrary to networkx standards. So, we actually need to run + # nx.least_common_ancestor on the reversed Tree. + + # Pick an arbitrary leaf from C as the root + try: + root = next(n for n, d in C.degree() if d == 1) + except StopIteration: # no nodes found with degree == 1 + return + # Root C into a tree TR by directing all edges away from the root + # Note in their paper T directs edges towards the root + TR = nx.dfs_tree(C, root) + + # Add to D the directed edges of T and set their weight to zero + # This indicates that it costs nothing to use edges that were given. + D = nx.reverse(TR).copy() + + nx.set_edge_attributes(D, name="weight", values=0) + + # The LCA of mu and mv in T is the shared ancestor of mu and mv that is + # located farthest from the root. + lca_gen = nx.tree_all_pairs_lowest_common_ancestor( + TR, root=root, pairs=meta_to_wuv.keys() + ) + + for (mu, mv), lca in lca_gen: + w, uv = meta_to_wuv[(mu, mv)] + if lca == mu: + # If u is an ancestor of v in TR, then add edge u->v to D + D.add_edge(lca, mv, weight=w, generator=uv) + elif lca == mv: + # If v is an ancestor of u in TR, then add edge v->u to D + D.add_edge(lca, mu, weight=w, generator=uv) + else: + # If neither u nor v is a ancestor of the other in TR + # let t = lca(TR, u, v) and add edges t->u and t->v + # Track the original edge that GENERATED these edges. + D.add_edge(lca, mu, weight=w, generator=uv) + D.add_edge(lca, mv, weight=w, generator=uv) + + # Then compute a minimum rooted branching + try: + # Note the original edges must be directed towards to root for the + # branching to give us a bridge-augmentation. + A = _minimum_rooted_branching(D, root) + except nx.NetworkXException as err: + # If there is no branching then augmentation is not possible + raise nx.NetworkXUnfeasible("no 2-edge-augmentation possible") from err + + # For each edge e, in the branching that did not belong to the directed + # tree T, add the corresponding edge that **GENERATED** it (this is not + # necesarilly e itself!) + + # ensure the third case does not generate edges twice + bridge_connectors = set() + for mu, mv in A.edges(): + data = D.get_edge_data(mu, mv) + if "generator" in data: + # Add the avail edge that generated the branching edge. + edge = data["generator"] + bridge_connectors.add(edge) + + yield from bridge_connectors + + +def _minimum_rooted_branching(D, root): + """Helper function to compute a minimum rooted branching (aka rooted + arborescence) + + Before the branching can be computed, the directed graph must be rooted by + removing the predecessors of root. + + A branching / arborescence of rooted graph G is a subgraph that contains a + directed path from the root to every other vertex. It is the directed + analog of the minimum spanning tree problem. 
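+
+ If the rooted graph has no spanning arborescence, the call to
+ ``nx.minimum_spanning_arborescence`` below raises a ``NetworkXException``,
+ which :func:`weighted_bridge_augmentation` catches and converts into
+ ``NetworkXUnfeasible``.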
+ + References + ---------- + [1] Khuller, Samir (2002) Advanced Algorithms Lecture 24 Notes. + https://web.archive.org/web/20121030033722/https://www.cs.umd.edu/class/spring2011/cmsc651/lec07.pdf + """ + rooted = D.copy() + # root the graph by removing all predecessors to `root`. + rooted.remove_edges_from([(u, root) for u in D.predecessors(root)]) + # Then compute the branching / arborescence. + A = nx.minimum_spanning_arborescence(rooted) + return A + + +def collapse(G, grouped_nodes): + """Collapses each group of nodes into a single node. + + This is similar to condensation, but works on undirected graphs. + + Parameters + ---------- + G : NetworkX Graph + + grouped_nodes: list or generator + Grouping of nodes to collapse. The grouping must be disjoint. + If grouped_nodes are strongly_connected_components then this is + equivalent to :func:`condensation`. + + Returns + ------- + C : NetworkX Graph + The collapsed graph C of G with respect to the node grouping. The node + labels are integers corresponding to the index of the component in the + list of grouped_nodes. C has a graph attribute named 'mapping' with a + dictionary mapping the original nodes to the nodes in C to which they + belong. Each node in C also has a node attribute 'members' with the set + of original nodes in G that form the group that the node in C + represents. + + Examples + -------- + >>> # Collapses a graph using disjoint groups, but not necesarilly connected + >>> G = nx.Graph([(1, 0), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (5, 7)]) + >>> G.add_node("A") + >>> grouped_nodes = [{0, 1, 2, 3}, {5, 6, 7}] + >>> C = collapse(G, grouped_nodes) + >>> members = nx.get_node_attributes(C, "members") + >>> sorted(members.keys()) + [0, 1, 2, 3] + >>> member_values = set(map(frozenset, members.values())) + >>> assert {0, 1, 2, 3} in member_values + >>> assert {4} in member_values + >>> assert {5, 6, 7} in member_values + >>> assert {"A"} in member_values + """ + mapping = {} + members = {} + C = G.__class__() + i = 0 # required if G is empty + remaining = set(G.nodes()) + for i, group in enumerate(grouped_nodes): + group = set(group) + assert remaining.issuperset( + group + ), "grouped nodes must exist in G and be disjoint" + remaining.difference_update(group) + members[i] = group + mapping.update((n, i) for n in group) + # remaining nodes are in their own group + for i, node in enumerate(remaining, start=i + 1): + group = {node} + members[i] = group + mapping.update((n, i) for n in group) + number_of_groups = i + 1 + C.add_nodes_from(range(number_of_groups)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. 
+ nx.set_node_attributes(C, name="members", values=members) + # Add mapping dict as graph attribute + C.graph["mapping"] = mapping + return C + + +def complement_edges(G): + """Returns only the edges in the complement of G + + Parameters + ---------- + G : NetworkX Graph + + Yields + ------ + edge : tuple + Edges in the complement of G + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> sorted(complement_edges(G)) + [(1, 3), (1, 4), (2, 4)] + >>> G = nx.path_graph((1, 2, 3, 4), nx.DiGraph()) + >>> sorted(complement_edges(G)) + [(1, 3), (1, 4), (2, 1), (2, 4), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)] + >>> G = nx.complete_graph(1000) + >>> sorted(complement_edges(G)) + [] + """ + G_adj = G._adj # Store as a variable to eliminate attribute lookup + if G.is_directed(): + for u, v in it.combinations(G.nodes(), 2): + if v not in G_adj[u]: + yield (u, v) + if u not in G_adj[v]: + yield (v, u) + else: + for u, v in it.combinations(G.nodes(), 2): + if v not in G_adj[u]: + yield (u, v) + + +def _compat_shuffle(rng, input): + """wrapper around rng.shuffle for python 2 compatibility reasons""" + rng.shuffle(input) + + +@py_random_state(4) +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None): + """Greedy algorithm for finding a k-edge-augmentation + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + edge : tuple + Edges in the greedy augmentation of G + + Notes + ----- + The algorithm is simple. Edges are incrementally added between parts of the + graph that are not yet locally k-edge-connected. Then edges are from the + augmenting set are pruned as long as local-edge-connectivity is not broken. + + This algorithm is greedy and does not provide optimality guarantees. It + exists only to provide :func:`k_edge_augmentation` with the ability to + generate a feasible solution for arbitrary k. + + See Also + -------- + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> sorted(greedy_k_edge_augmentation(G, k=2)) + [(1, 7)] + >>> sorted(greedy_k_edge_augmentation(G, k=1, avail=[])) + [] + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> avail = {(u, v): 1 for (u, v) in complement_edges(G)} + >>> # randomized pruning process can produce different solutions + >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=2)) + [(1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 4), (2, 6), (3, 7), (5, 7)] + >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=3)) + [(1, 3), (1, 5), (1, 6), (2, 4), (2, 6), (3, 7), (4, 7), (5, 7)] + """ + # Result set + aug_edges = [] + + done = is_k_edge_connected(G, k) + if done: + return + if avail is None: + # all edges are available + avail_uv = list(complement_edges(G)) + avail_w = [1] * len(avail_uv) + else: + # Get the unique set of unweighted edges + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + + # Greedy: order lightest edges. 
Use degree sum to tie-break + tiebreaker = [sum(map(G.degree, uv)) for uv in avail_uv] + avail_wduv = sorted(zip(avail_w, tiebreaker, avail_uv)) + avail_uv = [uv for w, d, uv in avail_wduv] + + # Incrementally add edges in until we are k-connected + H = G.copy() + for (u, v) in avail_uv: + done = False + if not is_locally_k_edge_connected(H, u, v, k=k): + # Only add edges in parts that are not yet locally k-edge-connected + aug_edges.append((u, v)) + H.add_edge(u, v) + # Did adding this edge help? + if H.degree(u) >= k and H.degree(v) >= k: + done = is_k_edge_connected(H, k) + if done: + break + + # Check for feasibility + if not done: + raise nx.NetworkXUnfeasible("not able to k-edge-connect with available edges") + + # Randomized attempt to reduce the size of the solution + _compat_shuffle(seed, aug_edges) + for (u, v) in list(aug_edges): + # Don't remove if we know it would break connectivity + if H.degree(u) <= k or H.degree(v) <= k: + continue + H.remove_edge(u, v) + aug_edges.remove((u, v)) + if not is_k_edge_connected(H, k=k): + # If removing this edge breaks feasibility, undo + H.add_edge(u, v) + aug_edges.append((u, v)) + + # Generate results + yield from aug_edges diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py new file mode 100644 index 0000000..a5d6977 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py @@ -0,0 +1,581 @@ +""" +Algorithms for finding k-edge-connected components and subgraphs. + +A k-edge-connected component (k-edge-cc) is a maximal set of nodes in G, such +that all pairs of node have an edge-connectivity of at least k. + +A k-edge-connected subgraph (k-edge-subgraph) is a maximal set of nodes in G, +such that the subgraph of G defined by the nodes has an edge-connectivity at +least k. +""" +import itertools as it +from functools import partial + +import networkx as nx +from networkx.algorithms import bridges +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = [ + "k_edge_components", + "k_edge_subgraphs", + "bridge_components", + "EdgeComponentAuxGraph", +] + + +@not_implemented_for("multigraph") +def k_edge_components(G, k): + """Generates nodes in each maximal k-edge-connected component in G. + + Parameters + ---------- + G : NetworkX graph + + k : Integer + Desired edge connectivity + + Returns + ------- + k_edge_components : a generator of k-edge-ccs. Each set of returned nodes + will have k-edge-connectivity in the graph G. + + See Also + -------- + :func:`local_edge_connectivity` + :func:`k_edge_subgraphs` : similar to this function, but the subgraph + defined by the nodes must also have k-edge-connectivity. + :func:`k_components` : similar to this function, but uses node-connectivity + instead of edge-connectivity + + Raises + ------ + NetworkXNotImplemented + If the input graph is a multigraph. + + ValueError: + If k is less than 1 + + Notes + ----- + Attempts to use the most efficient implementation available based on k. + If k=1, this is simply connected components for directed graphs and + connected components for undirected graphs. + If k=2 on an efficient bridge connected component algorithm from _[1] is + run based on the chain decomposition. + Otherwise, the algorithm from _[2] is used. + + Examples + -------- + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> paths = [ + ... (1, 2, 4, 3, 1, 4), + ... 
(5, 6, 7, 8, 5, 7, 8, 6), + ... ] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> # note this returns {1, 4} unlike k_edge_subgraphs + >>> sorted(map(sorted, nx.k_edge_components(G, k=3))) + [[1, 4], [2], [3], [5, 6, 7, 8]] + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29 + .. [2] Wang, Tianhao, et al. (2015) A simple algorithm for finding all + k-edge-connected components. + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + """ + # Compute k-edge-ccs using the most efficient algorithms available. + if k < 1: + raise ValueError("k cannot be less than 1") + if G.is_directed(): + if k == 1: + return nx.strongly_connected_components(G) + else: + # TODO: investigate https://arxiv.org/abs/1412.6466 for k=2 + aux_graph = EdgeComponentAuxGraph.construct(G) + return aux_graph.k_edge_components(k) + else: + if k == 1: + return nx.connected_components(G) + elif k == 2: + return bridge_components(G) + else: + aux_graph = EdgeComponentAuxGraph.construct(G) + return aux_graph.k_edge_components(k) + + +@not_implemented_for("multigraph") +def k_edge_subgraphs(G, k): + """Generates nodes in each maximal k-edge-connected subgraph in G. + + Parameters + ---------- + G : NetworkX graph + + k : Integer + Desired edge connectivity + + Returns + ------- + k_edge_subgraphs : a generator of k-edge-subgraphs + Each k-edge-subgraph is a maximal set of nodes that defines a subgraph + of G that is k-edge-connected. + + See Also + -------- + :func:`edge_connectivity` + :func:`k_edge_components` : similar to this function, but nodes only + need to have k-edge-connctivity within the graph G and the subgraphs + might not be k-edge-connected. + + Raises + ------ + NetworkXNotImplemented + If the input graph is a multigraph. + + ValueError: + If k is less than 1 + + Notes + ----- + Attempts to use the most efficient implementation available based on k. + If k=1, or k=2 and the graph is undirected, then this simply calls + `k_edge_components`. Otherwise the algorithm from _[1] is used. + + Examples + -------- + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> paths = [ + ... (1, 2, 4, 3, 1, 4), + ... (5, 6, 7, 8, 5, 7, 8, 6), + ... ] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> # note this does not return {1, 4} unlike k_edge_components + >>> sorted(map(sorted, nx.k_edge_subgraphs(G, k=3))) + [[1], [2], [3], [4], [5, 6, 7, 8]] + + References + ---------- + .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs + from a large graph. ACM International Conference on Extending Database + Technology 2012 480-–491. + https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf + """ + if k < 1: + raise ValueError("k cannot be less than 1") + if G.is_directed(): + if k <= 1: + # For directed graphs , + # When k == 1, k-edge-ccs and k-edge-subgraphs are the same + return k_edge_components(G, k) + else: + return _k_edge_subgraphs_nodes(G, k) + else: + if k <= 2: + # For undirected graphs, + # when k <= 2, k-edge-ccs and k-edge-subgraphs are the same + return k_edge_components(G, k) + else: + return _k_edge_subgraphs_nodes(G, k) + + +def _k_edge_subgraphs_nodes(G, k): + """Helper to get the nodes from the subgraphs. + + This allows k_edge_subgraphs to return a generator. 
+ """ + for C in general_k_edge_subgraphs(G, k): + yield set(C.nodes()) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def bridge_components(G): + """Finds all bridge-connected components G. + + Parameters + ---------- + G : NetworkX undirected graph + + Returns + ------- + bridge_components : a generator of 2-edge-connected components + + + See Also + -------- + :func:`k_edge_subgraphs` : this function is a special case for an + undirected graph where k=2. + :func:`biconnected_components` : similar to this function, but is defined + using 2-node-connectivity instead of 2-edge-connectivity. + + Raises + ------ + NetworkXNotImplemented + If the input graph is directed or a multigraph. + + Notes + ----- + Bridge-connected components are also known as 2-edge-connected components. + + Examples + -------- + >>> # The barbell graph with parameter zero has a single bridge + >>> G = nx.barbell_graph(5, 0) + >>> from networkx.algorithms.connectivity.edge_kcomponents import bridge_components + >>> sorted(map(sorted, bridge_components(G))) + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + """ + H = G.copy() + H.remove_edges_from(bridges(G)) + yield from nx.connected_components(H) + + +class EdgeComponentAuxGraph: + r"""A simple algorithm to find all k-edge-connected components in a graph. + + Constructing the AuxillaryGraph (which may take some time) allows for the + k-edge-ccs to be found in linear time for arbitrary k. + + Notes + ----- + This implementation is based on [1]_. The idea is to construct an auxiliary + graph from which the k-edge-ccs can be extracted in linear time. The + auxiliary graph is constructed in $O(|V|\cdot F)$ operations, where F is the + complexity of max flow. Querying the components takes an additional $O(|V|)$ + operations. This algorithm can be slow for large graphs, but it handles an + arbitrary k and works for both directed and undirected inputs. + + The undirected case for k=1 is exactly connected components. + The undirected case for k=2 is exactly bridge connected components. + The directed case for k=1 is exactly strongly connected components. + + References + ---------- + .. [1] Wang, Tianhao, et al. (2015) A simple algorithm for finding all + k-edge-connected components. + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + + Examples + -------- + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph + >>> # Build an interesting graph with multiple levels of k-edge-ccs + >>> paths = [ + ... (1, 2, 3, 4, 1, 3, 4, 2), # a 3-edge-cc (a 4 clique) + ... (5, 6, 7, 5), # a 2-edge-cc (a 3 clique) + ... (1, 5), # combine first two ccs into a 1-edge-cc + ... (0,), # add an additional disconnected 1-edge-cc + ... 
] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> # Constructing the AuxGraph takes about O(n ** 4) + >>> aux_graph = EdgeComponentAuxGraph.construct(G) + >>> # Once constructed, querying takes O(n) + >>> sorted(map(sorted, aux_graph.k_edge_components(k=1))) + [[0], [1, 2, 3, 4, 5, 6, 7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=2))) + [[0], [1, 2, 3, 4], [5, 6, 7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=3))) + [[0], [1, 2, 3, 4], [5], [6], [7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=4))) + [[0], [1], [2], [3], [4], [5], [6], [7]] + + The auxiliary graph is primarilly used for k-edge-ccs but it + can also speed up the queries of k-edge-subgraphs by refining the + search space. + + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph + >>> paths = [ + ... (1, 2, 4, 3, 1, 4), + ... ] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> aux_graph = EdgeComponentAuxGraph.construct(G) + >>> sorted(map(sorted, aux_graph.k_edge_subgraphs(k=3))) + [[1], [2], [3], [4]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=3))) + [[1, 4], [2], [3]] + """ + + # @not_implemented_for('multigraph') # TODO: fix decor for classmethods + @classmethod + def construct(EdgeComponentAuxGraph, G): + """Builds an auxiliary graph encoding edge-connectivity between nodes. + + Notes + ----- + Given G=(V, E), initialize an empty auxiliary graph A. + Choose an arbitrary source node s. Initialize a set N of available + nodes (that can be used as the sink). The algorithm picks an + arbitrary node t from N - {s}, and then computes the minimum st-cut + (S, T) with value w. If G is directed the minimum of the st-cut or + the ts-cut is used instead. Then, the edge (s, t) is added to the + auxiliary graph with weight w. The algorithm is called recursively + first using S as the available nodes and s as the source, and then + using T and t. Recursion stops when the source is the only available + node. + + Parameters + ---------- + G : NetworkX graph + """ + # workaround for classmethod decorator + not_implemented_for("multigraph")(lambda G: G)(G) + + def _recursive_build(H, A, source, avail): + # Terminate once the flow has been compute to every node. 
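+ # (When the source is the only node left in avail there is nothing to
+ # separate it from, so the recursion bottoms out here.)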
+ if {source} == avail: + return + # pick an arbitrary node as the sink + sink = arbitrary_element(avail - {source}) + # find the minimum cut and its weight + value, (S, T) = nx.minimum_cut(H, source, sink) + if H.is_directed(): + # check if the reverse direction has a smaller cut + value_, (T_, S_) = nx.minimum_cut(H, sink, source) + if value_ < value: + value, S, T = value_, S_, T_ + # add edge with weight of cut to the aux graph + A.add_edge(source, sink, weight=value) + # recursively call until all but one node is used + _recursive_build(H, A, source, avail.intersection(S)) + _recursive_build(H, A, sink, avail.intersection(T)) + + # Copy input to ensure all edges have unit capacity + H = G.__class__() + H.add_nodes_from(G.nodes()) + H.add_edges_from(G.edges(), capacity=1) + + # A is the auxiliary graph to be constructed + # It is a weighted undirected tree + A = nx.Graph() + + # Pick an arbitrary node as the source + if H.number_of_nodes() > 0: + source = arbitrary_element(H.nodes()) + # Initialize a set of elements that can be chosen as the sink + avail = set(H.nodes()) + + # This constructs A + _recursive_build(H, A, source, avail) + + # This class is a container the holds the auxiliary graph A and + # provides access the k_edge_components function. + self = EdgeComponentAuxGraph() + self.A = A + self.H = H + return self + + def k_edge_components(self, k): + """Queries the auxiliary graph for k-edge-connected components. + + Parameters + ---------- + k : Integer + Desired edge connectivity + + Returns + ------- + k_edge_components : a generator of k-edge-ccs + + Notes + ----- + Given the auxiliary graph, the k-edge-connected components can be + determined in linear time by removing all edges with weights less than + k from the auxiliary graph. The resulting connected components are the + k-edge-ccs in the original graph. + """ + if k < 1: + raise ValueError("k cannot be less than 1") + A = self.A + # "traverse the auxiliary graph A and delete all edges with weights less + # than k" + aux_weights = nx.get_edge_attributes(A, "weight") + # Create a relevant graph with the auxiliary edges with weights >= k + R = nx.Graph() + R.add_nodes_from(A.nodes()) + R.add_edges_from(e for e, w in aux_weights.items() if w >= k) + + # Return the nodes that are k-edge-connected in the original graph + yield from nx.connected_components(R) + + def k_edge_subgraphs(self, k): + """Queries the auxiliary graph for k-edge-connected subgraphs. + + Parameters + ---------- + k : Integer + Desired edge connectivity + + Returns + ------- + k_edge_subgraphs : a generator of k-edge-subgraphs + + Notes + ----- + Refines the k-edge-ccs into k-edge-subgraphs. The running time is more + than $O(|V|)$. + + For single values of k it is faster to use `nx.k_edge_subgraphs`. + But for multiple values of k, it can be faster to build AuxGraph and + then use this method. 
+ """ + if k < 1: + raise ValueError("k cannot be less than 1") + H = self.H + A = self.A + # "traverse the auxiliary graph A and delete all edges with weights less + # than k" + aux_weights = nx.get_edge_attributes(A, "weight") + # Create a relevant graph with the auxiliary edges with weights >= k + R = nx.Graph() + R.add_nodes_from(A.nodes()) + R.add_edges_from(e for e, w in aux_weights.items() if w >= k) + + # Return the components whose subgraphs are k-edge-connected + for cc in nx.connected_components(R): + if len(cc) < k: + # Early return optimization + for node in cc: + yield {node} + else: + # Call subgraph solution to refine the results + C = H.subgraph(cc) + yield from k_edge_subgraphs(C, k) + + +def _low_degree_nodes(G, k, nbunch=None): + """Helper for finding nodes with degree less than k.""" + # Nodes with degree less than k cannot be k-edge-connected. + if G.is_directed(): + # Consider both in and out degree in the directed case + seen = set() + for node, degree in G.out_degree(nbunch): + if degree < k: + seen.add(node) + yield node + for node, degree in G.in_degree(nbunch): + if node not in seen and degree < k: + seen.add(node) + yield node + else: + # Only the degree matters in the undirected case + for node, degree in G.degree(nbunch): + if degree < k: + yield node + + +def _high_degree_components(G, k): + """Helper for filtering components that can't be k-edge-connected. + + Removes and generates each node with degree less than k. Then generates + remaining components where all nodes have degree at least k. + """ + # Iteravely remove parts of the graph that are not k-edge-connected + H = G.copy() + singletons = set(_low_degree_nodes(H, k)) + while singletons: + # Only search neighbors of removed nodes + nbunch = set(it.chain.from_iterable(map(H.neighbors, singletons))) + nbunch.difference_update(singletons) + H.remove_nodes_from(singletons) + for node in singletons: + yield {node} + singletons = set(_low_degree_nodes(H, k, nbunch)) + + # Note: remaining connected components may not be k-edge-connected + if G.is_directed(): + yield from nx.strongly_connected_components(H) + else: + yield from nx.connected_components(H) + + +def general_k_edge_subgraphs(G, k): + """General algorithm to find all maximal k-edge-connected subgraphs in G. + + Returns + ------- + k_edge_subgraphs : a generator of nx.Graphs that are k-edge-subgraphs + Each k-edge-subgraph is a maximal set of nodes that defines a subgraph + of G that is k-edge-connected. + + Notes + ----- + Implementation of the basic algorithm from _[1]. The basic idea is to find + a global minimum cut of the graph. If the cut value is at least k, then the + graph is a k-edge-connected subgraph and can be added to the results. + Otherwise, the cut is used to split the graph in two and the procedure is + applied recursively. If the graph is just a single node, then it is also + added to the results. At the end, each result is either guaranteed to be + a single node or a subgraph of G that is k-edge-connected. + + This implementation contains optimizations for reducing the number of calls + to max-flow, but there are other optimizations in _[1] that could be + implemented. + + References + ---------- + .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs + from a large graph. ACM International Conference on Extending Database + Technology 2012 480-–491. + https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf + + Examples + -------- + >>> from networkx.utils import pairwise + >>> paths = [ + ... 
(11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique + ... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique + ... # connect the cliques with high degree but low connectivity + ... (50, 13), + ... (12, 50, 22), + ... (13, 102, 23), + ... (14, 101, 24), + ... ] + >>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + >>> sorted(map(len, k_edge_subgraphs(G, k=3))) + [1, 1, 1, 4, 4] + """ + if k < 1: + raise ValueError("k cannot be less than 1") + + # Node pruning optimization (incorporates early return) + # find_ccs is either connected_components/strongly_connected_components + find_ccs = partial(_high_degree_components, k=k) + + # Quick return optimization + if G.number_of_nodes() < k: + for node in G.nodes(): + yield G.subgraph([node]).copy() + return + + # Intermediate results + R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)} + # Subdivide CCs in the intermediate results until they are k-conn + while R0: + G1 = R0.pop() + if G1.number_of_nodes() == 1: + yield G1 + else: + # Find a global minimum cut + cut_edges = nx.minimum_edge_cut(G1) + cut_value = len(cut_edges) + if cut_value < k: + # G1 is not k-edge-connected, so subdivide it + G1.remove_edges_from(cut_edges) + for cc in find_ccs(G1): + R0.add(G1.subgraph(cc).copy()) + else: + # Otherwise we found a k-edge-connected subgraph + yield G1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcomponents.py new file mode 100644 index 0000000..4e88d32 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcomponents.py @@ -0,0 +1,224 @@ +""" +Moody and White algorithm for k-components +""" +from collections import defaultdict +from itertools import combinations +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function. +from networkx.algorithms.flow import edmonds_karp +from networkx.utils import not_implemented_for + +default_flow_func = edmonds_karp + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +def k_components(G, flow_func=None): + r"""Returns the k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. `k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + Parameters + ---------- + G : NetworkX graph + + flow_func : function + Function to perform the underlying flow computations. Default value + :meth:`edmonds_karp`. This function performs better in sparse graphs with + right tailed degree distributions. :meth:`shortest_augmenting_path` will + perform better in denser graphs. + + Returns + ------- + k_components : dict + Dictionary with all connectivity levels `k` in the input Graph as keys + and a list of sets of nodes that form a k-component of level `k` as + values. + + Raises + ------ + NetworkXNotImplemented + If the input graph is directed. 
+ + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> G = nx.petersen_graph() + >>> k_components = nx.k_components(G) + + Notes + ----- + Moody and White [1]_ (appendix A) provide an algorithm for identifying + k-components in a graph, which is based on Kanevsky's algorithm [2]_ + for finding all minimum-size node cut-sets of a graph (implemented in + :meth:`all_node_cuts` function): + + 1. Compute node connectivity, k, of the input graph G. + + 2. Identify all k-cutsets at the current level of connectivity using + Kanevsky's algorithm. + + 3. Generate new graph components based on the removal of + these cutsets. Nodes in a cutset belong to both sides + of the induced cut. + + 4. If the graph is neither complete nor trivial, return to 1; + else end. + + This implementation also uses some heuristics (see [3]_ for details) + to speed up the computation. + + See also + -------- + node_connectivity + all_node_cuts + biconnected_components : special case of this function when k=2 + k_edge_components : similar to this function, but uses edge-connectivity + instead of node-connectivity + + References + ---------- + .. [1] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf + + .. [2] Kanevsky, A. (1993). Finding all minimum-size separating vertex + sets in a graph. Networks 23(6), 533--541. + http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract + + .. [3] Torrents, J. and F. Ferraro (2015). Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values. Note that + # k-compoents can overlap (but only k - 1 nodes). + k_components = defaultdict(list) + # Define default flow function + if flow_func is None: + flow_func = default_flow_func + # Bicomponents as a base to check for higher order k-components + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + bicomponents = [G.subgraph(c) for c in nx.biconnected_components(G)] + for bicomponent in bicomponents: + bicomp = set(bicomponent) + # avoid considering dyads as bicomponents + if len(bicomp) > 2: + k_components[2].append(bicomp) + for B in bicomponents: + if len(B) <= 2: + continue + k = nx.node_connectivity(B, flow_func=flow_func) + if k > 2: + k_components[k].append(set(B)) + # Perform cuts in a DFS like order. + cuts = list(nx.all_node_cuts(B, k=k, flow_func=flow_func)) + stack = [(k, _generate_partition(B, cuts, k))] + while stack: + (parent_k, partition) = stack[-1] + try: + nodes = next(partition) + C = B.subgraph(nodes) + this_k = nx.node_connectivity(C, flow_func=flow_func) + if this_k > parent_k and this_k > 2: + k_components[this_k].append(set(C)) + cuts = list(nx.all_node_cuts(C, k=this_k, flow_func=flow_func)) + if cuts: + stack.append((this_k, _generate_partition(C, cuts, this_k))) + except StopIteration: + stack.pop() + + # This is necessary because k-components may only be reported at their + # maximum k level. 
But we want to return a dictionary in which keys are + # connectivity levels and values list of sets of components, without + # skipping any connectivity level. Also, it's possible that subsets of + # an already detected k-component appear at a level k. Checking for this + # in the while loop above penalizes the common case. Thus we also have to + # _consolidate all connectivity levels in _reconstruct_k_components. + return _reconstruct_k_components(k_components) + + +def _consolidate(sets, k): + """Merge sets that share k or more elements. + + See: http://rosettacode.org/wiki/Set_consolidation + + The iterative python implementation posted there is + faster than this because of the overhead of building a + Graph and calling nx.connected_components, but it's not + clear for us if we can use it in NetworkX because there + is no licence for the code. + + """ + G = nx.Graph() + nodes = {i: s for i, s in enumerate(sets)} + G.add_nodes_from(nodes) + G.add_edges_from( + (u, v) for u, v in combinations(nodes, 2) if len(nodes[u] & nodes[v]) >= k + ) + for component in nx.connected_components(G): + yield set.union(*[nodes[n] for n in component]) + + +def _generate_partition(G, cuts, k): + def has_nbrs_in_partition(G, node, partition): + for n in G[node]: + if n in partition: + return True + return False + + components = [] + nodes = {n for n, d in G.degree() if d > k} - {n for cut in cuts for n in cut} + H = G.subgraph(nodes) + for cc in nx.connected_components(H): + component = set(cc) + for cut in cuts: + for node in cut: + if has_nbrs_in_partition(G, node, cc): + component.add(node) + if len(component) < G.order(): + components.append(component) + yield from _consolidate(components, k + 1) + + +def _reconstruct_k_components(k_comps): + result = dict() + max_k = max(k_comps) + for k in reversed(range(1, max_k + 1)): + if k == max_k: + result[k] = list(_consolidate(k_comps[k], k)) + elif k not in k_comps: + result[k] = list(_consolidate(result[k + 1], k)) + else: + nodes_at_k = set.union(*k_comps[k]) + to_add = [c for c in result[k + 1] if any(n not in nodes_at_k for n in c)] + if to_add: + result[k] = list(_consolidate(k_comps[k] + to_add, k)) + else: + result[k] = list(_consolidate(k_comps[k], k)) + return result + + +def build_k_number_dict(kcomps): + result = {} + for k, comps in sorted(kcomps.items(), key=itemgetter(0)): + for comp in comps: + for node in comp: + result[node] = k + return result diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcutsets.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcutsets.py new file mode 100644 index 0000000..c3104c8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/kcutsets.py @@ -0,0 +1,231 @@ +""" +Kanevsky all minimum node k cutsets algorithm. +""" +import copy +from collections import defaultdict +from itertools import combinations +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.flow import ( + build_residual_network, + edmonds_karp, + shortest_augmenting_path, +) + +from .utils import build_auxiliary_node_connectivity + +default_flow_func = edmonds_karp + + +__all__ = ["all_node_cuts"] + + +def all_node_cuts(G, k=None, flow_func=None): + r"""Returns all minimum k cutsets of an undirected graph G. + + This implementation is based on Kanevsky's algorithm [1]_ for finding all + minimum-size node cut-sets of an undirected graph G; ie the set (or sets) + of nodes of cardinality equal to the node connectivity of G. 
Thus if + removed, would break G into two or more connected components. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + k : Integer + Node connectivity of the input graph. If k is None, then it is + computed. Default value: None. + + flow_func : function + Function to perform the underlying flow computations. Default value + edmonds_karp. This function performs better in sparse graphs with + right tailed degree distributions. shortest_augmenting_path will + perform better in denser graphs. + + + Returns + ------- + cuts : a generator of node cutsets + Each node cutset has cardinality equal to the node connectivity of + the input graph. + + Examples + -------- + >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2 + >>> G = nx.grid_2d_graph(5, 5) + >>> cutsets = list(nx.all_node_cuts(G)) + >>> len(cutsets) + 4 + >>> all(2 == len(cutset) for cutset in cutsets) + True + >>> nx.node_connectivity(G) + 2 + + Notes + ----- + This implementation is based on the sequential algorithm for finding all + minimum-size separating vertex sets in a graph [1]_. The main idea is to + compute minimum cuts using local maximum flow computations among a set + of nodes of highest degree and all other non-adjacent nodes in the Graph. + Once we find a minimum cut, we add an edge between the high degree + node and the target node of the local maximum flow computation to make + sure that we will not find that minimum cut again. + + See also + -------- + node_connectivity + edmonds_karp + shortest_augmenting_path + + References + ---------- + .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex + sets in a graph. Networks 23(6), 533--541. + http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract + + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is disconnected.") + + # Address some corner cases first. + # For complete Graphs + if nx.density(G) == 1: + for cut_set in combinations(G, len(G) - 1): + yield set(cut_set) + return + # Initialize data structures. + # Keep track of the cuts already computed so we do not repeat them. + seen = [] + # Even-Tarjan reduction is what we call auxiliary digraph + # for node connectivity. + H = build_auxiliary_node_connectivity(G) + H_nodes = H.nodes # for speed + mapping = H.graph["mapping"] + # Keep a copy of original predecessors, H will be modified later. + # Shallow copy is enough. + original_H_pred = copy.copy(H._pred) + R = build_residual_network(H, "capacity") + kwargs = dict(capacity="capacity", residual=R) + # Define default flow function + if flow_func is None: + flow_func = default_flow_func + if flow_func is shortest_augmenting_path: + kwargs["two_phase"] = True + # Begin the actual algorithm + # step 1: Find node connectivity k of G + if k is None: + k = nx.node_connectivity(G, flow_func=flow_func) + # step 2: + # Find k nodes with top degree, call it X: + X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]} + # Check if X is a k-node-cutset + if _is_separating_set(G, X): + seen.append(X) + yield X + + for x in X: + # step 3: Compute local connectivity flow of x with all other + # non adjacent nodes in G + non_adjacent = set(G) - X - set(G[x]) + for v in non_adjacent: + # step 4: compute maximum flow in an Even-Tarjan reduction H of G + # and step 5: build the associated residual network R + R = flow_func(H, f"{mapping[x]}B", f"{mapping[v]}A", **kwargs) + flow_value = R.graph["flow_value"] + + if flow_value == k: + # Find the nodes incident to the flow. 
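+ # (i.e. the endpoints of edges that carry non-zero flow in the residual + # network R; saturated edges of a minimum cut always carry flow, so only + # these nodes can appear in the cuts built below.)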
+ E1 = flowed_edges = [ + (u, w) for (u, w, d) in R.edges(data=True) if d["flow"] != 0 + ] + VE1 = incident_nodes = {n for edge in E1 for n in edge} + # Remove saturated edges from the residual network. + # Note that reversed edges are introduced with capacity 0 + # in the residual graph and they need to be removed too. + saturated_edges = [ + (u, w, d) + for (u, w, d) in R.edges(data=True) + if d["capacity"] == d["flow"] or d["capacity"] == 0 + ] + R.remove_edges_from(saturated_edges) + R_closure = nx.transitive_closure(R) + # step 6: shrink the strongly connected components of + # residual flow network R and call it L. + L = nx.condensation(R) + cmap = L.graph["mapping"] + inv_cmap = defaultdict(list) + for n, scc in cmap.items(): + inv_cmap[scc].append(n) + # Find the incident nodes in the condensed graph. + VE1 = {cmap[n] for n in VE1} + # step 7: Compute all antichains of L; + # they map to closed sets in H. + # Any edge in H that links a closed set is part of a cutset. + for antichain in nx.antichains(L): + # Only antichains that are subsets of incident nodes count. + # Lemma 8 in reference. + if not set(antichain).issubset(VE1): + continue + # Nodes in an antichain of the condensation graph of + # the residual network map to a closed set of nodes that + # define a node partition of the auxiliary digraph H + # through taking all of the antichain's predecessors in the + # transitive closure. + S = set() + for scc in antichain: + S.update(inv_cmap[scc]) + S_ancestors = set() + for n in S: + S_ancestors.update(R_closure._pred[n]) + S.update(S_ancestors) + if f"{mapping[x]}B" not in S or f"{mapping[v]}A" in S: + continue + # Find the cutset that links the node partition (S, ~S) in H + cutset = set() + for u in S: + cutset.update((u, w) for w in original_H_pred[u] if w not in S) + # The edges in H that form the cutset are internal edges + # (ie edges that represent a node of the original graph G) + if any(H_nodes[u]["id"] != H_nodes[w]["id"] for u, w in cutset): + continue + node_cut = {H_nodes[u]["id"] for u, _ in cutset} + + if len(node_cut) == k: + # The cut is invalid if it includes internal edges of + # end nodes. The other half of Lemma 8 in ref. + if x in node_cut or v in node_cut: + continue + if node_cut not in seen: + yield node_cut + seen.append(node_cut) + + # Add an edge (x, v) to make sure that we do not + # find this cutset again. This is equivalent + # to adding the edge in the input graph + # (G.add_edge(x, v)) and then regenerating H and R: + # Add edges to the auxiliary digraph. + # See build_residual_network for the convention we used + # in residual graphs. + H.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + H.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + # Add edges to the residual network.
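+ # (Each pair below adds a forward arc with capacity 1 and a paired + # reverse arc with capacity 0, following the build_residual_network + # convention referenced above.)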
+ R.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + R.add_edge(f"{mapping[v]}A", f"{mapping[x]}B", capacity=0) + R.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + R.add_edge(f"{mapping[x]}A", f"{mapping[v]}B", capacity=0) + + # Add again the saturated edges to reuse the residual network + R.add_edges_from(saturated_edges) + + +def _is_separating_set(G, cut): + """Assumes that the input graph is connected""" + if len(cut) == len(G) - 1: + return True + + H = nx.restricted_view(G, cut, []) + if nx.is_connected(H): + return False + return True diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/stoerwagner.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/stoerwagner.py new file mode 100644 index 0000000..7bdb79c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/stoerwagner.py @@ -0,0 +1,149 @@ +""" +Stoer-Wagner minimum cut algorithm. +""" +from itertools import islice + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + +__all__ = ["stoer_wagner"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def stoer_wagner(G, weight="weight", heap=BinaryHeap): + r"""Returns the weighted minimum edge cut using the Stoer-Wagner algorithm. + + Determine the minimum edge cut of a connected graph using the + Stoer-Wagner algorithm. In weighted cases, all weights must be + nonnegative. + + The running time of the algorithm depends on the type of heaps used: + + ============== ============================================= + Type of heap Running time + ============== ============================================= + Binary heap $O(n (m + n) \log n)$ + Fibonacci heap $O(nm + n^2 \log n)$ + Pairing heap $O(2^{2 \sqrt{\log \log n}} nm + n^2 \log n)$ + ============== ============================================= + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute named by the + weight parameter below. If this attribute is not present, the edge is + considered to have unit weight. + + weight : string + Name of the weight attribute of the edges. If the attribute is not + present, unit weight is assumed. Default value: 'weight'. + + heap : class + Type of heap to be used in the algorithm. It should be a subclass of + :class:`MinHeap` or implement a compatible interface. + + If a stock heap implementation is to be used, :class:`BinaryHeap` is + recommended over :class:`PairingHeap` for Python implementations without + optimized attribute accesses (e.g., CPython) despite a slower + asymptotic running time. For Python implementations with optimized + attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better + performance. Default value: :class:`BinaryHeap`. + + Returns + ------- + cut_value : integer or float + The sum of weights of edges in a minimum cut. + + partition : pair of node lists + A partitioning of the nodes that defines a minimum cut. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or a multigraph. + + NetworkXError + If the graph has less than two nodes, is not connected or has a + negative-weighted edge. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge("x", "a", weight=3) + >>> G.add_edge("x", "b", weight=1) + >>> G.add_edge("a", "c", weight=3) + >>> G.add_edge("b", "c", weight=5) + >>> G.add_edge("b", "d", weight=4) + >>> G.add_edge("d", "e", weight=2) + >>> G.add_edge("c", "y", weight=2) + >>> G.add_edge("e", "y", weight=3) + >>> cut_value, partition = nx.stoer_wagner(G) + >>> cut_value + 4 + """ + n = len(G) + if n < 2: + raise nx.NetworkXError("graph has less than two nodes.") + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + # Make a copy of the graph for internal use. + G = nx.Graph( + (u, v, {"weight": e.get(weight, 1)}) for u, v, e in G.edges(data=True) if u != v + ) + + for u, v, e in G.edges(data=True): + if e["weight"] < 0: + raise nx.NetworkXError("graph has a negative-weighted edge.") + + cut_value = float("inf") + nodes = set(G) + contractions = [] # contracted node pairs + + # Repeatedly pick a pair of nodes to contract until only one node is left. + for i in range(n - 1): + # Pick an arbitrary node u and create a set A = {u}. + u = arbitrary_element(G) + A = {u} + # Repeatedly pick the node "most tightly connected" to A and add it to + # A. The tightness of connectivity of a node not in A is defined by the + # sum of the weights of the edges connecting it to nodes in A. + h = heap() # min-heap emulating a max-heap + for v, e in G[u].items(): + h.insert(v, -e["weight"]) + # Repeat until all but one node has been added to A. + for j in range(n - i - 2): + u = h.pop()[0] + A.add(u) + for v, e in G[u].items(): + if v not in A: + h.insert(v, h.get(v, 0) - e["weight"]) + # A and the remaining node v define a "cut of the phase". There is a + # minimum cut of the original graph that is also a cut of the phase. + # Due to contractions in earlier phases, v may in fact represent + # multiple nodes in the original graph. + v, w = h.min() + w = -w + if w < cut_value: + cut_value = w + best_phase = i + # Contract v and the last node added to A. + contractions.append((u, v)) + for w, e in G[v].items(): + if w != u: + if w not in G[u]: + G.add_edge(u, w, weight=e["weight"]) + else: + G[u][w]["weight"] += e["weight"] + G.remove_node(v) + + # Recover the optimal partitioning from the contractions.
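+ # (Replay the first best_phase contractions as edges of a helper graph; + # the nodes reachable from the last node contracted in the best phase form + # one side of the minimum cut, and all remaining nodes form the other.)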
+ G = nx.Graph(islice(contractions, best_phase)) + v = contractions[best_phase][1] + G.add_node(v) + reachable = set(nx.single_source_shortest_path_length(G, v)) + partition = (list(reachable), list(nodes - reachable)) + + return cut_value, partition diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py new file mode 100644 index 0000000..a13aa07 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py @@ -0,0 +1,421 @@ +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import ( + local_edge_connectivity, + local_node_connectivity, +) + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +# helper functions for tests + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {max_attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_average_connectivity(): + # figure 1 from: + # Beineke, L., O. Oellermann, and R. Pippert (2002). The average + # connectivity of a graph. Discrete mathematics 252(1-3), 31-45 + # http://www.sciencedirect.com/science/article/pii/S0012365X01001807 + G1 = nx.path_graph(3) + G1.add_edges_from([(1, 3), (1, 4)]) + G2 = nx.path_graph(3) + G2.add_edges_from([(1, 3), (1, 4), (0, 3), (0, 4), (3, 4)]) + G3 = nx.Graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G1, **kwargs) == 1, errmsg + assert nx.average_node_connectivity(G2, **kwargs) == 2.2, errmsg + assert nx.average_node_connectivity(G3, **kwargs) == 0, errmsg + + +def test_average_connectivity_directed(): + G = nx.DiGraph([(1, 3), (1, 4), (1, 5)]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G) == 0.25, errmsg + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + for i in range(3): + G = next(Ggen) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G, flow_func=flow_func) == 1, errmsg + + +def test_brandes_erlebach(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == local_edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 3 == nx.edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == 
local_node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.edge_connectivity(G, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, **kwargs), errmsg + if flow_func is flow.preflow_push: + assert 3 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + else: + assert 2 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + + +def test_white_harary_1(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (vertex connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_white_harary_2(): + # Figure 8 white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.add_edge(0, 4) + # kappa <= lambda <= delta + assert 3 == min(nx.core_number(G).values()) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_complete_graphs(): + for n in range(5, 20, 5): + for flow_func in flow_funcs: + G = nx.complete_graph(n) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert n - 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.node_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + assert n - 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.edge_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 0 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 0 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_petersen(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_tutte(): + G = nx.tutte_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_octahedral(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 4 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 4 == nx.edge_connectivity(G, 
flow_func=flow_func), errmsg + + +def test_icosahedral(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 5 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 5 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_edge_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_edge_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_directed_edge_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert 1 == local_edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, flow_func=flow_func), errmsg + assert 2 == local_edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + + +def test_cutoff(): + G = nx.complete_graph(5) + for local_func in [local_edge_connectivity, local_node_connectivity]: + for flow_func in flow_funcs: + if flow_func is flow.preflow_push: + # cutoff is not supported by preflow_push + continue + for cutoff in [3, 2, 1]: + result = local_func(G, 0, 4, flow_func=flow_func, cutoff=cutoff) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, local_node_connectivity, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) + + +def test_edge_connectivity_flow_vs_stoer_wagner(): + graph_funcs = [nx.icosahedral_graph, nx.octahedral_graph, nx.dodecahedral_graph] + for graph_func in graph_funcs: + G = graph_func() + assert nx.stoer_wagner(G)[0] == nx.edge_connectivity(G) + + +class 
TestAllPairsNodeConnectivity: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1, seed=42) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True, seed=42) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = nx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = nx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = nx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = nx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = nx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) + + def test_all_pairs_connectivity_icosahedral(self): + G = nx.icosahedral_graph() + C = nx.all_pairs_node_connectivity(G) + assert all(5 == C[u][v] for u, v in itertools.combinations(G, 2)) + + def test_all_pairs_connectivity(self): + G = nx.Graph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.combinations(nodes, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_directed(self): + G = nx.DiGraph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.permutations(nodes, 2): + A[u][v] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_combinations(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_iter(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch)) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py new file mode 100644 index 0000000..c4af519 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py @@ -0,0 +1,309 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import minimum_st_edge_cut, minimum_st_node_cut +from networkx.utils import arbitrary_element + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + +# Tests for node and edge cutsets + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(1): # change 1 to 3 or more for more realizations. + G = next(Ggen) + cut = nx.minimum_node_cut(G, flow_func=flow_func) + assert len(cut) == 1, errmsg + assert cut.pop() in set(nx.articulation_points(G)), errmsg + + +def test_brandes_erlebach_book(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cutsets + assert 3 == len(nx.minimum_edge_cut(G, 1, 11, **kwargs)), errmsg + edge_cut = nx.minimum_edge_cut(G, **kwargs) + # Node 5 has only two edges + assert 2 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + assert {6, 7} == minimum_st_node_cut(G, 1, 11, **kwargs), errmsg + assert {6, 7} == nx.minimum_node_cut(G, 1, 11, **kwargs), errmsg + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 2 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_white_harary_paper(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert {0} == node_cut, errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_petersen_cutset(): + G = nx.petersen_graph() + 
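# The Petersen graph is 3-regular with node and edge connectivity 3, so + # both minimum cuts computed below must contain exactly 3 elements. +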
for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 3 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_octahedral_cutset(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 4 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 4 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_icosahedral_cutset(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 5 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 5 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_node_cutset_exception(): + G = nx.Graph() + G.add_edges_from([(1, 2), (3, 4)]) + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, nx.minimum_node_cut, G, flow_func=flow_func) + + +def test_node_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_node_cut(G, flow_func=flow_func) + assert nx.node_connectivity(G) == len(cutset), errmsg + G.remove_nodes_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_edge_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_edge_cut(G, flow_func=flow_func) + assert nx.edge_connectivity(G) == len(cutset), errmsg + G.remove_edges_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_empty_graphs(): + G = nx.Graph() + D = nx.DiGraph() + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, G, flow_func=flow_func + ) + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, D, flow_func=flow_func + ) + + +def test_unbounded(): + G = nx.complete_graph(5) + for flow_func in flow_funcs: + assert 4 == len(minimum_st_edge_cut(G, 1, 4, flow_func=flow_func)) + + +def test_missing_source(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, 
nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def tests_min_cut_complete(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_min_cut_complete_directed(): + G = nx.complete_graph(5) + G = G.to_directed() + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_minimum_st_node_cut(): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12]) + G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)]) + nodelist = minimum_st_node_cut(G, 7, 11) + assert nodelist == {} + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py new file mode 100644 index 0000000..74bb3f2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py @@ -0,0 +1,249 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.utils import pairwise + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.edmonds_karp, + flow.dinitz, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +def is_path(G, path): + return all(v in G[u] for u, v in pairwise(path)) + + +def are_edge_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + paths_edges = [list(pairwise(p)) for p in paths] + num_of_edges = sum(len(e) for e in paths_edges) + num_unique_edges = len(set.union(*[set(es) for es in paths_edges])) + if num_of_edges == num_unique_edges: + return True + return False + + +def are_node_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + # first and last nodes are source and target + st = {paths[0][0], paths[0][-1]} + num_of_nodes = len([n for path in paths for n in path if n not in st]) + num_unique_nodes = len({n for path 
in paths for n in path if n not in st}) + if num_of_nodes == num_unique_nodes: + return True + return False + + +def test_graph_from_pr_2053(): + G = nx.Graph() + G.add_edges_from( + [ + ("A", "B"), + ("A", "D"), + ("A", "F"), + ("A", "G"), + ("B", "C"), + ("B", "D"), + ("B", "G"), + ("C", "D"), + ("C", "E"), + ("C", "Z"), + ("D", "E"), + ("D", "F"), + ("E", "F"), + ("E", "Z"), + ("F", "Z"), + ("G", "Z"), + ] + ) + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_paths = list(nx.edge_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_edge_disjoint_paths(G, edge_paths), errmsg + assert nx.edge_connectivity(G, "A", "Z") == len(edge_paths), errmsg + # node disjoint paths + node_paths = list(nx.node_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_node_disjoint_paths(G, node_paths), errmsg + assert nx.node_connectivity(G, "A", "Z") == len(node_paths), errmsg + + +def test_florentine_families(): + G = nx.florentine_families_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, "Medici", "Strozzi") == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, "Medici", "Strozzi") == len(node_dpaths), errmsg + + +def test_karate(): + G = nx.karate_club_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 33, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, 0, 33) == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 33, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, 0, 33) == len(node_dpaths), errmsg + + +def test_petersen_disjoint_paths(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 3 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 3 == len(node_dpaths), errmsg + + +def test_octahedral_disjoint_paths(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 5, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 4 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 5, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 4 == len(node_dpaths), errmsg + + +def test_icosahedral_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = 
dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 5 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 5 == len(node_dpaths), errmsg + + +def test_cutoff_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = dict(flow_func=flow_func) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for cutoff in [2, 4]: + kwargs["cutoff"] = cutoff + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert cutoff == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert cutoff == len(node_dpaths), errmsg + + +def test_missing_source_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 10, 1)) + + +def test_missing_source_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 10, 1)) + + +def test_missing_target_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 1, 10)) + + +def test_missing_target_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 1, 10)) + + +def test_not_weakly_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_weakly_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_not_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_isolated_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_isolated_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_invalid_auxiliary(): + with pytest.raises(nx.NetworkXError): + G = nx.complete_graph(5) + list(nx.node_disjoint_paths(G, 0, 3, auxiliary=G)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py new file mode 100644 index 0000000..37ee297 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py @@ -0,0 +1,497 @@ +import itertools as it +import random + +import pytest + +import networkx as nx +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity.edge_augmentation import ( + 
_unpack_available_edges, + collapse, + complement_edges, + is_k_edge_connected, + is_locally_k_edge_connected, +) +from networkx.utils import pairwise + +# This should be set to the largest k for which an efficient algorithm is +# explicitly defined. +MAX_EFFICIENT_K = 2 + + +def tarjan_bridge_graph(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. + # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + return G + + +def test_weight_key(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + G.add_edges_from([(3, 8), (1, 2), (2, 3)]) + impossible = {(3, 6), (3, 9)} + rng = random.Random(0) + avail_uv = list(set(complement_edges(G)) - impossible) + avail = [(u, v, {"cost": rng.random()}) for u, v in avail_uv] + + _augment_and_check(G, k=1) + _augment_and_check(G, k=1, avail=avail_uv) + _augment_and_check(G, k=1, avail=avail, weight="cost") + + _check_augmentations(G, avail, weight="cost") + + +def test_is_locally_k_edge_connected_exceptions(): + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.DiGraph(), k=0) + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.MultiGraph(), k=0) + pytest.raises(ValueError, is_k_edge_connected, nx.Graph(), k=0) + + +def test_is_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.complete_graph(5) + assert is_k_edge_connected(G, k=1) + assert is_k_edge_connected(G, k=2) + assert is_k_edge_connected(G, k=3) + assert is_k_edge_connected(G, k=4) + + +def test_is_k_edge_connected_exceptions(): + pytest.raises( + nx.NetworkXNotImplemented, is_locally_k_edge_connected, nx.DiGraph(), 1, 2, k=0 + ) + pytest.raises( + nx.NetworkXNotImplemented, + is_locally_k_edge_connected, + nx.MultiGraph(), + 1, + 2, + k=0, + ) + pytest.raises(ValueError, is_locally_k_edge_connected, nx.Graph(), 1, 2, k=0) + + +def test_is_locally_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_locally_k_edge_connected(G, 5, 15, k=1) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + +def test_null_graph(): + G = nx.Graph() + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_cliques(): + for n in range(1, 10): + G = nx.complete_graph(n) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_clique_and_node(): + for n in range(1, 10): + G = nx.complete_graph(n) + G.add_node(n + 1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_point_graph(): + G = nx.Graph() + G.add_node(1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_edgeless_graph(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + _check_augmentations(G) + + +def test_invalid_k(): + G = nx.Graph() + pytest.raises(ValueError, list, k_edge_augmentation(G, k=-1)) + pytest.raises(ValueError, list, k_edge_augmentation(G, k=0)) + + +def test_unfeasible(): + G = tarjan_bridge_graph() + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=1, 
avail=[])) + + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[])) + + pytest.raises( + nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[(7, 9)]) + ) + + # partial solutions should not error if real solutions are infeasible + aug_edges = list(k_edge_augmentation(G, k=2, avail=[(7, 9)], partial=True)) + assert aug_edges == [(7, 9)] + + _check_augmentations(G, avail=[], max_k=MAX_EFFICIENT_K + 2) + + _check_augmentations(G, avail=[(7, 9)], max_k=MAX_EFFICIENT_K + 2) + + +def test_tarjan(): + G = tarjan_bridge_graph() + + aug_edges = set(_augment_and_check(G, k=2)[0]) + print(f"aug_edges = {aug_edges!r}") + # can't assert edge exactly equality due to non-determinant edge order + # but we do know the size of the solution must be 3 + assert len(aug_edges) == 3 + + avail = [ + (9, 7), + (8, 5), + (2, 10), + (6, 13), + (11, 18), + (1, 17), + (2, 3), + (16, 17), + (18, 14), + (15, 14), + ] + aug_edges = set(_augment_and_check(G, avail=avail, k=2)[0]) + + # Can't assert exact length since approximation depends on the order of a + # dict traversal. + assert len(aug_edges) <= 3 * 2 + + _check_augmentations(G, avail) + + +def test_configuration(): + # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497] + seeds = [1001, 1002, 1003, 1004] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.Graph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_augmentations(G) + + +def test_shell(): + # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425] + seeds = [18] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed) + _check_augmentations(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_augmentations(G) + + +def test_star(): + G = nx.star_graph(3) + _check_augmentations(G) + + G = nx.star_graph(5) + _check_augmentations(G) + + G = nx.star_graph(10) + _check_augmentations(G) + + +def test_barbell(): + G = nx.barbell_graph(5, 0) + _check_augmentations(G) + + G = nx.barbell_graph(5, 2) + _check_augmentations(G) + + G = nx.barbell_graph(5, 3) + _check_augmentations(G) + + G = nx.barbell_graph(5, 4) + _check_augmentations(G) + + +def test_bridge(): + G = nx.Graph([(2393, 2257), (2393, 2685), (2685, 2257), (1758, 2257)]) + _check_augmentations(G) + + +def test_gnp_augmentation(): + rng = random.Random(0) + G = nx.gnp_random_graph(30, 0.005, seed=0) + # Randomly make edges available + avail = { + (u, v): 1 + rng.random() for u, v in complement_edges(G) if rng.random() < 0.25 + } + _check_augmentations(G, avail) + + +def _assert_solution_properties(G, aug_edges, avail_dict=None): + """Checks that aug_edges are consistently formatted""" + if avail_dict is not None: + assert all( + e in avail_dict for e in aug_edges + ), "when avail is specified aug-edges should be in avail" + + unique_aug = set(map(tuple, map(sorted, aug_edges))) + unique_aug = list(map(tuple, map(sorted, aug_edges))) + assert len(aug_edges) == len(unique_aug), "edges should be unique" + + assert not any(u == v for u, v in unique_aug), "should be no self-edges" + + assert not any( + G.has_edge(u, v) for u, v in unique_aug + ), "aug edges and G.edges should be disjoint" + + +def _augment_and_check( + G, k, avail=None, weight=None, verbose=False, orig_k=None, max_aug_k=None +): + """ + Does one specific augmentation and checks for properties of the result + """ + if orig_k is None: + try: + orig_k = 
nx.edge_connectivity(G) + except nx.NetworkXPointlessConcept: + orig_k = 0 + info = {} + try: + if avail is not None: + # ensure avail is in dict form + avail_dict = dict(zip(*_unpack_available_edges(avail, weight=weight))) + else: + avail_dict = None + try: + # Find the augmentation if possible + generator = nx.k_edge_augmentation(G, k=k, weight=weight, avail=avail) + assert not isinstance(generator, list), "should always return an iter" + aug_edges = [] + for edge in generator: + aug_edges.append(edge) + except nx.NetworkXUnfeasible: + infeasible = True + info["infeasible"] = True + assert len(aug_edges) == 0, "should not generate anything if unfeasible" + + if avail is None: + n_nodes = G.number_of_nodes() + assert n_nodes <= k, ( + "unconstrained cases are only unfeasible if |V| <= k. " + f"Got |V|={n_nodes} and k={k}" + ) + else: + if max_aug_k is None: + G_aug_all = G.copy() + G_aug_all.add_edges_from(avail_dict.keys()) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + + assert max_aug_k < k, ( + "avail should only be unfeasible if using all edges " + "does not achieve k-edge-connectivity" + ) + + # Test for a partial solution + partial_edges = list( + nx.k_edge_augmentation(G, k=k, weight=weight, partial=True, avail=avail) + ) + + info["n_partial_edges"] = len(partial_edges) + + if avail_dict is None: + assert set(partial_edges) == set( + complement_edges(G) + ), "unweighted partial solutions should be the complement" + elif len(avail_dict) > 0: + H = G.copy() + + # Find the partial / full augmented connectivity + H.add_edges_from(partial_edges) + partial_conn = nx.edge_connectivity(H) + + H.add_edges_from(set(avail_dict.keys())) + full_conn = nx.edge_connectivity(H) + + # Full connectivity should be no better than our partial + # solution. 
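+ # (adding the rest of the available edges must not raise the achieved + # edge connectivity any further.)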
+ assert ( + partial_conn == full_conn + ), "adding more edges should not increase k-conn" + + # Find the new edge-connectivity after adding the augmenting edges + aug_edges = partial_edges + else: + infeasible = False + + # Find the weight of the augmentation + num_edges = len(aug_edges) + if avail is not None: + total_weight = sum(avail_dict[e] for e in aug_edges) + else: + total_weight = num_edges + + info["total_weight"] = total_weight + info["num_edges"] = num_edges + + # Find the new edge-connectivity after adding the augmenting edges + G_aug = G.copy() + G_aug.add_edges_from(aug_edges) + try: + aug_k = nx.edge_connectivity(G_aug) + except nx.NetworkXPointlessConcept: + aug_k = 0 + info["aug_k"] = aug_k + + # Do checks + if not infeasible and orig_k < k: + assert info["aug_k"] >= k, f"connectivity should increase to k={k} or more" + + assert info["aug_k"] >= orig_k, "augmenting should never reduce connectivity" + + _assert_solution_properties(G, aug_edges, avail_dict) + + except Exception: + info["failed"] = True + print(f"edges = {list(G.edges())}") + print(f"nodes = {list(G.nodes())}") + print(f"aug_edges = {list(aug_edges)}") + print(f"info = {info}") + raise + else: + if verbose: + print(f"info = {info}") + + if infeasible: + aug_edges = None + return aug_edges, info + + +def _check_augmentations(G, avail=None, max_k=None, weight=None, verbose=False): + """Helper to check weighted/unweighted cases with multiple values of k""" + # Using all available edges, find the maximum edge-connectivity + try: + orig_k = nx.edge_connectivity(G) + except nx.NetworkXPointlessConcept: + orig_k = 0 + + if avail is not None: + all_aug_edges = _unpack_available_edges(avail, weight=weight)[0] + G_aug_all = G.copy() + G_aug_all.add_edges_from(all_aug_edges) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + else: + max_aug_k = G.number_of_nodes() - 1 + + if max_k is None: + max_k = min(4, max_aug_k) + + avail_uniform = {e: 1 for e in complement_edges(G)} + + if verbose: + print("\n=== CHECK_AUGMENTATION ===") + print(f"G.number_of_nodes = {G.number_of_nodes()!r}") + print(f"G.number_of_edges = {G.number_of_edges()!r}") + print(f"max_k = {max_k!r}") + print(f"max_aug_k = {max_aug_k!r}") + print(f"orig_k = {orig_k!r}") + + # check augmentation for multiple values of k + for k in range(1, max_k + 1): + if verbose: + print("---------------") + print(f"Checking k = {k}") + + # Check the unweighted version + if verbose: + print("unweighted case") + aug_edges1, info1 = _augment_and_check(G, k=k, verbose=verbose, orig_k=orig_k) + + # Check that the weighted version with all available edges and uniform + # weights gives a similar solution to the unweighted case. 
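+ # (Exact agreement is only expected for k=1; for k=2 the approximation- + # ratio checks at the end of this helper allow a factor of 2, or 3 when + # G starts out disconnected.)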
+ if verbose: + print("weighted uniform case") + aug_edges2, info2 = _augment_and_check( + G, + k=k, + avail=avail_uniform, + verbose=verbose, + orig_k=orig_k, + max_aug_k=G.number_of_nodes() - 1, + ) + + # Check the weighted version + if avail is not None: + if verbose: + print("weighted case") + aug_edges3, info3 = _augment_and_check( + G, + k=k, + avail=avail, + weight=weight, + verbose=verbose, + max_aug_k=max_aug_k, + orig_k=orig_k, + ) + + if aug_edges1 is not None: + # Check approximation ratios + if k == 1: + # when k=1, both solutions should be optimal + assert info2["total_weight"] == info1["total_weight"] + if k == 2: + # when k=2, the weighted version is an approximation + if orig_k == 0: + # the approximation ratio is 3 if G is not connected + assert info2["total_weight"] <= info1["total_weight"] * 3 + else: + # the approximation ratio is 2 if G is was connected + assert info2["total_weight"] <= info1["total_weight"] * 2 + _check_unconstrained_bridge_property(G, info1) + + +def _check_unconstrained_bridge_property(G, info1): + # Check Theorem 5 from Eswaran and Tarjan. (1975) Augmentation problems + import math + + bridge_ccs = list(nx.connectivity.bridge_components(G)) + # condense G into an forest C + C = collapse(G, bridge_ccs) + + p = len([n for n, d in C.degree() if d == 1]) # leafs + q = len([n for n, d in C.degree() if d == 0]) # isolated + if p + q > 1: + size_target = math.ceil(p / 2) + q + size_aug = info1["num_edges"] + assert ( + size_aug == size_target + ), "augmentation size is different from what theory predicts" diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py new file mode 100644 index 0000000..4a1f681 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py @@ -0,0 +1,488 @@ +import itertools as it + +import pytest + +import networkx as nx +from networkx.algorithms.connectivity import EdgeComponentAuxGraph, bridge_components +from networkx.algorithms.connectivity.edge_kcomponents import general_k_edge_subgraphs +from networkx.utils import pairwise + +# ---------------- +# Helper functions +# ---------------- + + +def fset(list_of_sets): + """allows == to be used for list of sets""" + return set(map(frozenset, list_of_sets)) + + +def _assert_subgraph_edge_connectivity(G, ccs_subgraph, k): + """ + tests properties of k-edge-connected subgraphs + + the actual edge connectivity should be no less than k unless the cc is a + single node. 
+ """ + for cc in ccs_subgraph: + C = G.subgraph(cc) + if len(cc) > 1: + connectivity = nx.edge_connectivity(C) + assert connectivity >= k + + +def _memo_connectivity(G, u, v, memo): + edge = (u, v) + if edge in memo: + return memo[edge] + if not G.is_directed(): + redge = (v, u) + if redge in memo: + return memo[redge] + memo[edge] = nx.edge_connectivity(G, *edge) + return memo[edge] + + +def _all_pairs_connectivity(G, cc, k, memo): + # Brute force check + for u, v in it.combinations(cc, 2): + # Use a memoization dict to save on computation + connectivity = _memo_connectivity(G, u, v, memo) + if G.is_directed(): + connectivity = min(connectivity, _memo_connectivity(G, v, u, memo)) + assert connectivity >= k + + +def _assert_local_cc_edge_connectivity(G, ccs_local, k, memo): + """ + tests properties of k-edge-connected components + + the local edge connectivity between each pair of nodes in the original + graph should be no less than k unless the cc is a single node. + """ + for cc in ccs_local: + if len(cc) > 1: + # Strategy for testing a bit faster: If the subgraph has high edge + # connectivity then it must have local connectivity + C = G.subgraph(cc) + connectivity = nx.edge_connectivity(C) + if connectivity < k: + # Otherwise do the brute force (with memoization) check + _all_pairs_connectivity(G, cc, k, memo) + + +# Helper function +def _check_edge_connectivity(G): + """ + Helper - generates all k-edge-components using the aux graph. Checks the + both local and subgraph edge connectivity of each cc. Also checks that + alternate methods of computing the k-edge-ccs generate the same result. + """ + # Construct the auxiliary graph that can be used to make each k-cc or k-sub + aux_graph = EdgeComponentAuxGraph.construct(G) + + # memoize the local connectivity in this graph + memo = {} + + for k in it.count(1): + # Test "local" k-edge-components and k-edge-subgraphs + ccs_local = fset(aux_graph.k_edge_components(k)) + ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) + + # Check connectivity properties that should be guaranteed by the + # algorithms. + _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) + _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) + + if k == 1 or k == 2 and not G.is_directed(): + assert ( + ccs_local == ccs_subgraph + ), "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())" + + if G.is_directed(): + # Test special case methods are the same as the aux graph + if k == 1: + alt_sccs = fset(nx.strongly_connected_components(G)) + assert alt_sccs == ccs_local, "k=1 failed alt" + assert alt_sccs == ccs_subgraph, "k=1 failed alt" + else: + # Test special case methods are the same as the aux graph + if k == 1: + alt_ccs = fset(nx.connected_components(G)) + assert alt_ccs == ccs_local, "k=1 failed alt" + assert alt_ccs == ccs_subgraph, "k=1 failed alt" + elif k == 2: + alt_bridge_ccs = fset(bridge_components(G)) + assert alt_bridge_ccs == ccs_local, "k=2 failed alt" + assert alt_bridge_ccs == ccs_subgraph, "k=2 failed alt" + # if new methods for k == 3 or k == 4 are implemented add them here + + # Check the general subgraph method works by itself + alt_subgraph_ccs = fset( + [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] + ) + assert alt_subgraph_ccs == ccs_subgraph, "alt subgraph method failed" + + # Stop once k is larger than all special case methods + # and we cannot break down ccs any further. 
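# For orientation, a standalone hedged sketch of the auxiliary-graph API that
# _check_edge_connectivity drives (the barbell graph is an illustrative choice):
import networkx as nx
from networkx.algorithms.connectivity import EdgeComponentAuxGraph

B = nx.barbell_graph(4, 0)                    # two K4 cliques joined by a single bridge edge
aux = EdgeComponentAuxGraph.construct(B)      # build once, then query any k
ccs3 = sorted(sorted(cc) for cc in aux.k_edge_components(k=3))
assert ccs3 == [[0, 1, 2, 3], [4, 5, 6, 7]]   # the bridge separates the two 3-edge-ccs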
+ if k > 2 and all(len(cc) == 1 for cc in ccs_local): + break + + +# ---------------- +# Misc tests +# ---------------- + + +def test_zero_k_exception(): + G = nx.Graph() + # functions that return generators error immediately + pytest.raises(ValueError, nx.k_edge_components, G, k=0) + pytest.raises(ValueError, nx.k_edge_subgraphs, G, k=0) + + # actual generators only error when you get the first item + aux_graph = EdgeComponentAuxGraph.construct(G) + pytest.raises(ValueError, list, aux_graph.k_edge_components(k=0)) + pytest.raises(ValueError, list, aux_graph.k_edge_subgraphs(k=0)) + + pytest.raises(ValueError, list, general_k_edge_subgraphs(G, k=0)) + + +def test_empty_input(): + G = nx.Graph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + G = nx.DiGraph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + +def test_not_implemented(): + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, EdgeComponentAuxGraph.construct, G) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_components, G, k=2) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_subgraphs, G, k=2) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(G)) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(nx.DiGraph())) + + +def test_general_k_edge_subgraph_quick_return(): + # tests quick return optimization + G = nx.Graph() + G.add_node(0) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 1 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + G.add_node(1) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 2 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + +# ---------------- +# Undirected tests +# ---------------- + + +def test_random_gnp(): + # seeds = [1550709854, 1309423156, 4208992358, 2785630813, 1915069929] + seeds = [12, 13] + + for seed in seeds: + G = nx.gnp_random_graph(20, 0.2, seed=seed) + _check_edge_connectivity(G) + + +def test_configuration(): + # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497] + seeds = [14, 15] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.Graph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_edge_connectivity(G) + + +def test_shell(): + # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425] + seeds = [20] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed) + _check_edge_connectivity(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_edge_connectivity(G) + + +def test_tarjan_bridge(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. 
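# A small hedged aside on the bridge utilities this test relies on (the
# two-triangle graph below is illustrative, not the graph from Tarjan's paper):
import networkx as nx
from networkx.algorithms.connectivity import bridge_components

T = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)])
assert {frozenset(e) for e in nx.bridges(T)} == {frozenset({2, 3})}
assert sorted(sorted(cc) for cc in bridge_components(T)) == [[0, 1, 2], [3, 4, 5]]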
+ # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + _check_edge_connectivity(G) + + +def test_bridge_cc(): + # define 2-connected components and bridges + cc2 = [(1, 2, 4, 3, 1, 4), (8, 9, 10, 8), (11, 12, 13, 11)] + bridges = [(4, 8), (3, 5), (20, 21), (22, 23, 24)] + G = nx.Graph(it.chain(*(pairwise(path) for path in cc2 + bridges))) + bridge_ccs = fset(bridge_components(G)) + target_ccs = fset( + [{1, 2, 3, 4}, {5}, {8, 9, 10}, {11, 12, 13}, {20}, {21}, {22}, {23}, {24}] + ) + assert bridge_ccs == target_ccs + _check_edge_connectivity(G) + + +def test_undirected_aux_graph(): + # Graph similar to the one in + # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + a, b, c, d, e, f, g, h, i = "abcdefghi" + paths = [ + (a, d, b, f, c), + (a, e, b), + (a, e, b, c, g, b, a), + (c, b), + (f, g, f), + (h, i), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + components_1 = fset(aux_graph.k_edge_subgraphs(k=1)) + target_1 = fset([{a, b, c, d, e, f, g}, {h, i}]) + assert target_1 == components_1 + + # Check that the undirected case for k=1 agrees with CCs + alt_1 = fset(nx.k_edge_subgraphs(G, k=1)) + assert alt_1 == components_1 + + components_2 = fset(aux_graph.k_edge_subgraphs(k=2)) + target_2 = fset([{a, b, c, d, e, f, g}, {h}, {i}]) + assert target_2 == components_2 + + # Check that the undirected case for k=2 agrees with bridge components + alt_2 = fset(nx.k_edge_subgraphs(G, k=2)) + assert alt_2 == components_2 + + components_3 = fset(aux_graph.k_edge_subgraphs(k=3)) + target_3 = fset([{a}, {b, c, f, g}, {d}, {e}, {h}, {i}]) + assert target_3 == components_3 + + components_4 = fset(aux_graph.k_edge_subgraphs(k=4)) + target_4 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}]) + assert target_4 == components_4 + + _check_edge_connectivity(G) + + +def test_local_subgraph_difference(): + paths = [ + (11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique + (21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique + # paths connecting each node of the 4 cliques + (11, 101, 21), + (12, 102, 22), + (13, 103, 23), + (14, 104, 24), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + # Each clique is returned separately in k-edge-subgraphs + subgraph_ccs = fset(aux_graph.k_edge_subgraphs(3)) + subgraph_target = fset( + [{101}, {102}, {103}, {104}, {21, 22, 23, 24}, {11, 12, 13, 14}] + ) + assert subgraph_ccs == subgraph_target + + # But in k-edge-ccs they are returned together + # because they are locally 3-edge-connected + local_ccs = fset(aux_graph.k_edge_components(3)) + local_target = fset([{101}, {102}, {103}, {104}, {11, 12, 13, 14, 21, 22, 23, 24}]) + assert local_ccs == local_target + + +def test_local_subgraph_difference_directed(): + dipaths = [(1, 2, 3, 4, 1), (1, 3, 1)] + G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths])) + + assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1)) + + # Unlike undirected graphs, when k=2, for directed graphs there is a case + # where the k-edge-ccs are not the same as the k-edge-subgraphs. 
+ # (in directed graphs ccs and subgraphs are the same when k=2) + assert fset(nx.k_edge_components(G, k=2)) != fset(nx.k_edge_subgraphs(G, k=2)) + + assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3)) + + _check_edge_connectivity(G) + + +def test_triangles(): + paths = [ + (11, 12, 13, 11), # first 3-clique + (21, 22, 23, 21), # second 3-clique + (11, 21), # connected by an edge + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + + # subgraph and ccs are the same in all cases here + assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1)) + + assert fset(nx.k_edge_components(G, k=2)) == fset(nx.k_edge_subgraphs(G, k=2)) + + assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3)) + + _check_edge_connectivity(G) + + +def test_four_clique(): + paths = [ + (11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique + (21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique + # paths connecting the 4 cliques such that they are + # 3-connected in G, but not in the subgraph. + # Case where the nodes bridging them do not have degree less than 3. + (100, 13), + (12, 100, 22), + (13, 200, 23), + (14, 300, 24), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + + # The subgraphs and ccs are different for k=3 + local_ccs = fset(nx.k_edge_components(G, k=3)) + subgraphs = fset(nx.k_edge_subgraphs(G, k=3)) + assert local_ccs != subgraphs + + # The cliques ares in the same cc + clique1 = frozenset(paths[0]) + clique2 = frozenset(paths[1]) + assert clique1.union(clique2).union({100}) in local_ccs + + # but different subgraphs + assert clique1 in subgraphs + assert clique2 in subgraphs + + assert G.degree(100) == 3 + + _check_edge_connectivity(G) + + +def test_five_clique(): + # Make a graph that can be disconnected less than 4 edges, but no node has + # degree less than 4. 
+ G = nx.disjoint_union(nx.complete_graph(5), nx.complete_graph(5)) + paths = [ + # add aux-connections + (1, 100, 6), + (2, 100, 7), + (3, 200, 8), + (4, 200, 100), + ] + G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + assert min(dict(nx.degree(G)).values()) == 4 + + # For k=3 they are the same + assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3)) + + # For k=4 they are the different + # the aux nodes are in the same CC as clique 1 but no the same subgraph + assert fset(nx.k_edge_components(G, k=4)) != fset(nx.k_edge_subgraphs(G, k=4)) + + # For k=5 they are not the same + assert fset(nx.k_edge_components(G, k=5)) != fset(nx.k_edge_subgraphs(G, k=5)) + + # For k=6 they are the same + assert fset(nx.k_edge_components(G, k=6)) == fset(nx.k_edge_subgraphs(G, k=6)) + _check_edge_connectivity(G) + + +# ---------------- +# Undirected tests +# ---------------- + + +def test_directed_aux_graph(): + # Graph similar to the one in + # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + a, b, c, d, e, f, g, h, i = "abcdefghi" + dipaths = [ + (a, d, b, f, c), + (a, e, b), + (a, e, b, c, g, b, a), + (c, b), + (f, g, f), + (h, i), + ] + G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + components_1 = fset(aux_graph.k_edge_subgraphs(k=1)) + target_1 = fset([{a, b, c, d, e, f, g}, {h}, {i}]) + assert target_1 == components_1 + + # Check that the directed case for k=1 agrees with SCCs + alt_1 = fset(nx.strongly_connected_components(G)) + assert alt_1 == components_1 + + components_2 = fset(aux_graph.k_edge_subgraphs(k=2)) + target_2 = fset([{i}, {e}, {d}, {b, c, f, g}, {h}, {a}]) + assert target_2 == components_2 + + components_3 = fset(aux_graph.k_edge_subgraphs(k=3)) + target_3 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}]) + assert target_3 == components_3 + + +def test_random_gnp_directed(): + # seeds = [3894723670, 500186844, 267231174, 2181982262, 1116750056] + seeds = [21] + for seed in seeds: + G = nx.gnp_random_graph(20, 0.2, directed=True, seed=seed) + _check_edge_connectivity(G) + + +def test_configuration_directed(): + # seeds = [671221681, 2403749451, 124433910, 672335939, 1193127215] + seeds = [67] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.DiGraph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_edge_connectivity(G) + + +def test_shell_directed(): + # seeds = [3134027055, 4079264063, 1350769518, 1405643020, 530038094] + seeds = [31] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed).to_directed() + _check_edge_connectivity(G) + + +def test_karate_directed(): + G = nx.karate_club_graph().to_directed() + _check_edge_connectivity(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py new file mode 100644 index 0000000..f4436ac --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py @@ -0,0 +1,296 @@ +# Test for Moody and White k-components algorithm +import pytest + +import networkx as nx +from networkx.algorithms.connectivity.kcomponents import ( + _consolidate, + build_k_number_dict, +) + +## +# A nice synthetic graph +## + + +def torrents_and_ferraro_graph(): + # Graph from 
https://arxiv.org/pdf/1503.04476v1 p.26 + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # This edge makes the graph biconnected; it's + # needed because K5s share only one node. + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.2, directed=True, seed=42) + nx.k_components(G) + + +# Helper function +def _check_connectivity(G, k_components): + for k, components in k_components.items(): + if k < 3: + continue + # check that k-components have node connectivity >= k. + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +@pytest.mark.slow +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. 
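# For orientation, a hedged standalone sketch of the nx.k_components output
# shape (the karate club graph mirrors the dedicated tests further below):
import networkx as nx

K = nx.karate_club_graph()
k_components = nx.k_components(K)             # dict: k -> list of k-component node sets
assert max(k_components) == 4                 # consistent with the k-numbers asserted below
for comp in k_components[4]:
    assert nx.node_connectivity(K.subgraph(comp)) >= 4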
+ assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + + +@pytest.mark.slow +def test_random_gnp(): + G = nx.gnp_random_graph(50, 0.2, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +@pytest.mark.slow +def test_shell(): + constructor = [(20, 80, 0.8), (80, 180, 0.6)] + G = nx.random_shell_graph(constructor, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_configuration(): + deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(nx.selfloop_edges(G)) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate(): + G = nx.karate_club_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate_component_number(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + G = nx.karate_club_graph() + k_components = nx.k_components(G) + k_num = build_k_number_dict(k_components) + assert karate_k_num == k_num + + +def test_davis_southern_women(): + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_davis_southern_women_detail_3_and_4(): + solution = { + 3: [ + { + "Nora Fayette", + "E10", + "Myra Liddel", + "E12", + "E14", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "E1", + "Verne Sanderson", + "E3", + "E2", + "Theresa Anderson", + "Pearl Oglethorpe", + "Katherina Rogers", + "Brenda Rogers", + "E13", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + 4: [ + { + "Nora Fayette", + "E10", + "Verne Sanderson", + "E12", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "Myra Liddel", + "E3", + "Theresa Anderson", + "Katherina Rogers", + "Brenda Rogers", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + } + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + for k, components in result.items(): + if k < 3: + continue + assert len(components) == len(solution[k]) + for component in components: + assert component in solution[k] + + +def test_set_consolidation_rosettacode(): + # Tests from http://rosettacode.org/wiki/Set_consolidation + def list_of_sets_equal(result, solution): + assert {frozenset(s) for s in result} == {frozenset(s) for s in solution} + + question = [{"A", "B"}, {"C", "D"}] + solution = [{"A", "B"}, {"C", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"B", "C"}] + solution = [{"A", "B", "C"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"C", "D"}, {"D", "B"}] + solution = [{"A", "C", "B", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"H", "I", "K"}, {"A", "B"}, {"C", "D"}, {"D", "B"}, {"F", "G", "H"}] + solution = [{"A", "C", "B", "D"}, {"G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"A", "H"}, + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", 
"I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + {"A", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py new file mode 100644 index 0000000..91426f1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py @@ -0,0 +1,267 @@ +# Jordi Torrents +# Test for k-cutsets +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity.kcutsets import _is_separating_set + +MAX_CUTSETS_TO_TEST = 4 # originally 100. cut to decrease testing time + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +## +# Some nice synthetic graphs +## +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+ # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +# Helper function +def _check_separating_sets(G): + for cc in nx.connected_components(G): + if len(cc) < 3: + continue + Gc = G.subgraph(cc) + node_conn = nx.node_connectivity(Gc) + all_cuts = nx.all_node_cuts(Gc) + # Only test a limited number of cut sets to reduce test time. + for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST): + assert node_conn == len(cut) + assert not nx.is_connected(nx.restricted_view(G, cut, [])) + + +@pytest.mark.slow +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_separating_sets(G) + + +def test_example_1(): + G = graph_example_1() + _check_separating_sets(G) + + +def test_random_gnp(): + G = nx.gnp_random_graph(100, 0.1, seed=42) + _check_separating_sets(G) + + +def test_shell(): + constructor = [(20, 80, 0.8), (80, 180, 0.6)] + G = nx.random_shell_graph(constructor, seed=42) + _check_separating_sets(G) + + +def test_configuration(): + deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_separating_sets(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_separating_sets(G) + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for i in range(1): # change 1 to 3 or more for more realizations. + G = next(Ggen) + articulation_points = list({a} for a in nx.articulation_points(G)) + for cut in nx.all_node_cuts(G): + assert cut in articulation_points + + +def test_grid_2d_graph(): + # All minimum node cuts of a 2d grid + # are the four pairs of nodes that are + # neighbors of the four corner nodes. 
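# A hedged standalone sketch of the all_node_cuts contract (the 5-cycle is an
# illustrative choice that mirrors test_cycle_graph further below):
import networkx as nx

C = nx.cycle_graph(5)
assert nx.node_connectivity(C) == 2
for cut in nx.all_node_cuts(C):
    assert len(cut) == 2                                         # every minimum cut has size 2
    assert not nx.is_connected(nx.restricted_view(C, cut, []))   # removing it disconnects C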
+ G = nx.grid_2d_graph(5, 5) + solution = [{(0, 1), (1, 0)}, {(3, 0), (4, 1)}, {(3, 4), (4, 3)}, {(0, 3), (1, 4)}] + for cut in nx.all_node_cuts(G): + assert cut in solution + + +def test_disconnected_graph(): + G = nx.fast_gnp_random_graph(100, 0.01, seed=42) + cuts = nx.all_node_cuts(G) + pytest.raises(nx.NetworkXError, next, cuts) + + +@pytest.mark.slow +def test_alternative_flow_functions(): + graphs = [nx.grid_2d_graph(4, 4), nx.cycle_graph(5)] + for G in graphs: + node_conn = nx.node_connectivity(G) + for flow_func in flow_funcs: + all_cuts = nx.all_node_cuts(G, flow_func=flow_func) + # Only test a limited number of cut sets to reduce test time. + for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST): + assert node_conn == len(cut) + assert not nx.is_connected(nx.restricted_view(G, cut, [])) + + +def test_is_separating_set_complete_graph(): + G = nx.complete_graph(5) + assert _is_separating_set(G, {0, 1, 2, 3}) + + +def test_is_separating_set(): + for i in [5, 10, 15]: + G = nx.star_graph(i) + max_degree_node = max(G, key=G.degree) + assert _is_separating_set(G, {max_degree_node}) + + +def test_non_repeated_cuts(): + # The algorithm was repeating the cut {0, 1} for the giant biconnected + # component of the Karate club graph. + K = nx.karate_club_graph() + bcc = max(list(nx.biconnected_components(K)), key=len) + G = K.subgraph(bcc) + solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}] + cuts = list(nx.all_node_cuts(G)) + if len(solution) != len(cuts): + print(nx.info(G)) + print(f"Solution: {solution}") + print(f"Result: {cuts}") + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_cycle_graph(): + G = nx.cycle_graph(5) + solution = [{0, 2}, {0, 3}, {1, 3}, {1, 4}, {2, 4}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_complete_graph(): + G = nx.complete_graph(5) + solution = [{0, 1, 2, 3}, {0, 1, 2, 4}, {0, 1, 3, 4}, {0, 2, 3, 4}, {1, 2, 3, 4}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py new file mode 100644 index 0000000..2b9e2ba --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py @@ -0,0 +1,102 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +def _check_partition(G, cut_value, partition, weight): + assert isinstance(partition, tuple) + assert len(partition) == 2 + assert isinstance(partition[0], list) + assert isinstance(partition[1], list) + assert len(partition[0]) > 0 + assert len(partition[1]) > 0 + assert sum(map(len, partition)) == len(G) + assert set(chain.from_iterable(partition)) == set(G) + partition = tuple(map(set, partition)) + w = 0 + for u, v, e in G.edges(data=True): + if (u in partition[0]) == (v in partition[1]): + w += e.get(weight, 1) + assert w == cut_value + + +def _test_stoer_wagner(G, answer, weight="weight"): + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.PairingHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.BinaryHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + + +def test_graph1(): + G = nx.Graph() + 
G.add_edge("x", "a", weight=3) + G.add_edge("x", "b", weight=1) + G.add_edge("a", "c", weight=3) + G.add_edge("b", "c", weight=5) + G.add_edge("b", "d", weight=4) + G.add_edge("d", "e", weight=2) + G.add_edge("c", "y", weight=2) + G.add_edge("e", "y", weight=3) + _test_stoer_wagner(G, 4) + + +def test_graph2(): + G = nx.Graph() + G.add_edge("x", "a") + G.add_edge("x", "b") + G.add_edge("a", "c") + G.add_edge("b", "c") + G.add_edge("b", "d") + G.add_edge("d", "e") + G.add_edge("c", "y") + G.add_edge("e", "y") + _test_stoer_wagner(G, 2) + + +def test_graph3(): + # Source: + # Stoer, M. and Wagner, F. (1997). "A simple min-cut algorithm". Journal of + # the ACM 44 (4), 585-591. + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=3) + G.add_edge(2, 5, weight=2) + G.add_edge(2, 6, weight=2) + G.add_edge(3, 4, weight=4) + G.add_edge(3, 7, weight=2) + G.add_edge(4, 7, weight=2) + G.add_edge(4, 8, weight=2) + G.add_edge(5, 6, weight=3) + G.add_edge(6, 7, weight=1) + G.add_edge(7, 8, weight=3) + _test_stoer_wagner(G, 4) + + +def test_weight_name(): + G = nx.Graph() + G.add_edge(1, 2, weight=1, cost=8) + G.add_edge(1, 3, cost=2) + G.add_edge(2, 3, cost=4) + _test_stoer_wagner(G, 6, weight="cost") + + +def test_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(1) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_edge(1, 2, weight=-2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiDiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/utils.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/utils.py new file mode 100644 index 0000000..06d2fdc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/connectivity/utils.py @@ -0,0 +1,85 @@ +""" +Utilities for connectivity package +""" +import networkx as nx + +__all__ = ["build_auxiliary_node_connectivity", "build_auxiliary_edge_connectivity"] + + +def build_auxiliary_node_connectivity(G): + r"""Creates a directed graph D from an undirected graph G to compute flow + based node connectivity. + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph D with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc in D. Then for each edge (`u`, `v`) in G we add two arcs (`uB`, `vA`) + and (`vB`, `uA`) in D. Finally we set the attribute capacity = 1 for each + arc in D [1]_. + + For a directed graph having `n` nodes and `m` arcs we derive a + directed graph D with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc (`vA`, `vB`) in D. Then for each arc (`u`, `v`) in G we add one + arc (`uB`, `vA`) in D. Finally we set the attribute capacity = 1 for + each arc in D. + + A dictionary with a mapping between nodes in the original graph and the + auxiliary digraph is stored as a graph attribute: H.graph['mapping']. + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. 
in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31955-9_7 + + """ + directed = G.is_directed() + + mapping = {} + H = nx.DiGraph() + + for i, node in enumerate(G): + mapping[node] = i + H.add_node(f"{i}A", id=node) + H.add_node(f"{i}B", id=node) + H.add_edge(f"{i}A", f"{i}B", capacity=1) + + edges = [] + for (source, target) in G.edges(): + edges.append((f"{mapping[source]}B", f"{mapping[target]}A")) + if not directed: + edges.append((f"{mapping[target]}B", f"{mapping[source]}A")) + H.add_edges_from(edges, capacity=1) + + # Store mapping as graph attribute + H.graph["mapping"] = mapping + return H + + +def build_auxiliary_edge_connectivity(G): + """Auxiliary digraph for computing flow based edge connectivity + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. Part of algorithm 1 in [1]_ . + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. (this is a + chapter, look for the reference of the book). + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + """ + if G.is_directed(): + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + H.add_edges_from(G.edges(), capacity=1) + return H + else: + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + for (source, target) in G.edges(): + H.add_edges_from([(source, target), (target, source)], capacity=1) + return H diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/core.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/core.py new file mode 100644 index 0000000..e39eb84 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/core.py @@ -0,0 +1,544 @@ +""" +Find the k-cores of a graph. + +The k-core is found by recursively pruning nodes with degrees less than k. + +See the following references for details: + +An O(m) Algorithm for Cores Decomposition of Networks +Vladimir Batagelj and Matjaz Zaversnik, 2003. +https://arxiv.org/abs/cs.DS/0310049 + +Generalized Cores +Vladimir Batagelj and Matjaz Zaversnik, 2002. +https://arxiv.org/pdf/cs/0202039 + +For directed graphs a more general notion is that of D-cores which +looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core +is the k-core. + +D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy +Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011. +http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf + +Multi-scale structure and topological anomaly detection via a new network \ +statistic: The onion decomposition +L. Hébert-Dufresne, J. A. Grochow, and A. Allard +Scientific Reports 6, 31708 (2016) +http://doi.org/10.1038/srep31708 + +""" +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = [ + "core_number", + "find_cores", + "k_core", + "k_shell", + "k_crust", + "k_corona", + "k_truss", + "onion_layers", +] + + +@not_implemented_for("multigraph") +def core_number(G): + """Returns the core number for each vertex. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + The core number of a node is the largest value k of a k-core containing + that node. 
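    A quick illustrative example, hand-worked from the definition above (the
    triangle-plus-pendant graph is just a convenient choice):

    >>> import networkx as nx
    >>> G = nx.complete_graph(3)
    >>> G.add_edge(2, 3)
    >>> sorted(nx.core_number(G).items())
    [(0, 2), (1, 2), (2, 2), (3, 1)]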
+ + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + + Returns + ------- + core_number : dictionary + A dictionary keyed by node to the core number. + + Raises + ------ + NetworkXError + The k-core is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + degrees = dict(G.degree()) + # Sort nodes by degree. + nodes = sorted(degrees, key=degrees.get) + bin_boundaries = [0] + curr_degree = 0 + for i, v in enumerate(nodes): + if degrees[v] > curr_degree: + bin_boundaries.extend([i] * (degrees[v] - curr_degree)) + curr_degree = degrees[v] + node_pos = {v: pos for pos, v in enumerate(nodes)} + # The initial guess for the core number of a node is its degree. + core = degrees + nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} + for v in nodes: + for u in nbrs[v]: + if core[u] > core[v]: + nbrs[u].remove(v) + pos = node_pos[u] + bin_start = bin_boundaries[core[u]] + node_pos[u] = bin_start + node_pos[nodes[bin_start]] = pos + nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start] + bin_boundaries[core[u]] += 1 + core[u] -= 1 + return core + + +def find_cores(G): + import warnings + + msg = ( + "\nfind_cores is deprecated as of version 2.7 and will be removed " + "in version 3.0.\n" + "The find_cores function is renamed core_number\n" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return nx.core_number(G) + + +def _core_subgraph(G, k_filter, k=None, core=None): + """Returns the subgraph induced by nodes passing filter `k_filter`. + + Parameters + ---------- + G : NetworkX graph + The graph or directed graph to process + k_filter : filter function + This function filters the nodes chosen. It takes three inputs: + A node of G, the filter's cutoff, and the core dict of the graph. + The function should return a Boolean value. + k : int, optional + The order of the core. If not specified use the max core number. + This value is used as the cutoff for the filter. + core : dict, optional + Precomputed core numbers keyed by node for the graph `G`. + If not specified, the core numbers will be computed from `G`. + + """ + if core is None: + core = core_number(G) + if k is None: + k = max(core.values()) + nodes = (v for v in core if k_filter(v, k, core)) + return G.subgraph(nodes).copy() + + +def k_core(G, k=None, core_number=None): + """Returns the k-core of G. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int, optional + The order of the core. If not specified return the main core. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-core subgraph + + Raises + ------ + NetworkXError + The k-core is not defined for graphs with self loops or parallel edges. + + Notes + ----- + The main core is the core with the largest degree. + + Not implemented for graphs with parallel edges or self loops. 
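    A quick illustrative example, hand-worked from the definition above (K4
    plus a pendant node is just a convenient choice):

    >>> import networkx as nx
    >>> G = nx.complete_graph(4)
    >>> G.add_edge(3, 4)
    >>> sorted(nx.k_core(G, k=3).nodes())
    [0, 1, 2, 3]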
+ + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + + def k_filter(v, k, c): + return c[v] >= k + + return _core_subgraph(G, k_filter, k, core_number) + + +def k_shell(G, k=None, core_number=None): + """Returns the k-shell of G. + + The k-shell is the subgraph induced by nodes with core number k. + That is, nodes in the k-core that are not in the (k+1)-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the outer shell. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + + Returns + ------- + G : NetworkX graph + The k-shell subgraph + + Raises + ------ + NetworkXError + The k-shell is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This is similar to k_corona but in that case only neighbors in the + k-core are considered. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + k_corona + + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + + def k_filter(v, k, c): + return c[v] == k + + return _core_subgraph(G, k_filter, k, core_number) + + +def k_crust(G, k=None, core_number=None): + """Returns the k-crust of G. + + The k-crust is the graph G with the edges of the k-core removed + and isolated nodes found after the removal of edges are also removed. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the main crust. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-crust subgraph + + Raises + ------ + NetworkXError + The k-crust is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This definition of k-crust is different than the definition in [1]_. + The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + # Default for k is one less than in _core_subgraph, so just inline. 
+ # Filter is c[v] <= k + if core_number is None: + core_number = nx.core_number(G) + if k is None: + k = max(core_number.values()) - 1 + nodes = (v for v in core_number if core_number[v] <= k) + return G.subgraph(nodes).copy() + + +def k_corona(G, k, core_number=None): + """Returns the k-corona of G. + + The k-corona is the subgraph of nodes in the k-core which have + exactly k neighbours in the k-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int + The order of the corona. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-corona subgraph + + Raises + ------ + NetworkXError + The k-cornoa is not defined for graphs with self loops or + parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] k -core (bootstrap) percolation on complex networks: + Critical phenomena and nonlocal effects, + A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes, + Phys. Rev. E 73, 056101 (2006) + http://link.aps.org/doi/10.1103/PhysRevE.73.056101 + """ + + def func(v, k, c): + return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k) + + return _core_subgraph(G, func, k, core_number) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def k_truss(G, k): + """Returns the k-truss of `G`. + + The k-truss is the maximal induced subgraph of `G` which contains at least + three vertices where every edge is incident to at least `k-2` triangles. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + k : int + The order of the truss + + Returns + ------- + H : NetworkX graph + The k-truss subgraph + + Raises + ------ + NetworkXError + + The k-truss is not defined for graphs with self loops or parallel edges + or directed graphs. + + Notes + ----- + A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core. + + Not implemented for digraphs or graphs with parallel edges or self loops. + + Graph, node, and edge attributes are copied to the subgraph. + + K-trusses were originally defined in [2] which states that the k-truss + is the maximal induced subgraph where each edge belongs to at least + `k-2` triangles. A more recent paper, [1], uses a slightly different + definition requiring that each edge belong to at least `k` triangles. + This implementation uses the original definition of `k-2` triangles. + + References + ---------- + .. [1] Bounds and Algorithms for k-truss. Paul Burkhardt, Vance Faber, + David G. Harris, 2018. https://arxiv.org/abs/1806.05523v2 + .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan + Cohen, 2005. + """ + H = G.copy() + + n_dropped = 1 + while n_dropped > 0: + n_dropped = 0 + to_drop = [] + seen = set() + for u in H: + nbrs_u = set(H[u]) + seen.add(u) + new_nbrs = [v for v in nbrs_u if v not in seen] + for v in new_nbrs: + if len(nbrs_u & set(H[v])) < (k - 2): + to_drop.append((u, v)) + H.remove_edges_from(to_drop) + n_dropped = len(to_drop) + H.remove_nodes_from(list(nx.isolates(H))) + + return H + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def onion_layers(G): + """Returns the layer of each vertex in an onion decomposition of the graph. 
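    A quick illustrative example, hand-worked from the procedure described
    below (the star graph is just a convenient choice):

    >>> import networkx as nx
    >>> G = nx.star_graph(3)
    >>> sorted(nx.onion_layers(G).items())
    [(0, 2), (1, 1), (2, 1), (3, 1)]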
+ + The onion decomposition refines the k-core decomposition by providing + information on the internal organization of each k-shell. It is usually + used alongside the `core numbers`. + + Parameters + ---------- + G : NetworkX graph + A simple graph without self loops or parallel edges + + Returns + ------- + od_layers : dictionary + A dictionary keyed by vertex to the onion layer. The layers are + contiguous integers starting at 1. + + Raises + ------ + NetworkXError + The onion decomposition is not implemented for graphs with self loops + or parallel edges or for directed graphs. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + Not implemented for directed graphs. + + See Also + -------- + core_number + + References + ---------- + .. [1] Multi-scale structure and topological anomaly detection via a new + network statistic: The onion decomposition + L. Hébert-Dufresne, J. A. Grochow, and A. Allard + Scientific Reports 6, 31708 (2016) + http://doi.org/10.1038/srep31708 + .. [2] Percolation and the effective structure of complex networks + A. Allard and L. Hébert-Dufresne + Physical Review X 9, 011023 (2019) + http://doi.org/10.1103/PhysRevX.9.011023 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph contains self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + # Dictionaries to register the k-core/onion decompositions. + od_layers = {} + # Adjacency list + neighbors = {v: list(nx.all_neighbors(G, v)) for v in G} + # Effective degree of nodes. + degrees = dict(G.degree()) + # Performs the onion decomposition. + current_core = 1 + current_layer = 1 + # Sets vertices of degree 0 to layer 1, if any. + isolated_nodes = [v for v in nx.isolates(G)] + if len(isolated_nodes) > 0: + for v in isolated_nodes: + od_layers[v] = current_layer + degrees.pop(v) + current_layer = 2 + # Finds the layer for the remaining nodes. + while len(degrees) > 0: + # Sets the order for looking at nodes. + nodes = sorted(degrees, key=degrees.get) + # Sets properly the current core. + min_degree = degrees[nodes[0]] + if min_degree > current_core: + current_core = min_degree + # Identifies vertices in the current layer. + this_layer = [] + for n in nodes: + if degrees[n] > current_core: + break + this_layer.append(n) + # Identifies the core/layer of the vertices in the current layer. + for v in this_layer: + od_layers[v] = current_layer + for n in neighbors[v]: + neighbors[n].remove(v) + degrees[n] = degrees[n] - 1 + degrees.pop(v) + # Updates the layer count. + current_layer = current_layer + 1 + # Returns the dictionaries containing the onion layer of each vertices. + return od_layers diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/covering.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/covering.py new file mode 100644 index 0000000..b683128 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/covering.py @@ -0,0 +1,140 @@ +""" Functions related to graph covers.""" + +from functools import partial +from itertools import chain + +import networkx as nx +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = ["min_edge_cover", "is_edge_cover"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def min_edge_cover(G, matching_algorithm=None): + """Returns the min cardinality edge cover of the graph as a set of edges. 
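    A quick illustrative example, hand-worked from the matching-based bound
    |min cover| = |V| - |maximum matching| for graphs without isolated nodes
    (the 5-cycle is just a convenient choice):

    >>> import networkx as nx
    >>> G = nx.cycle_graph(5)
    >>> cover = nx.min_edge_cover(G)
    >>> len(cover)
    3
    >>> nx.is_edge_cover(G, cover)
    True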
+ + A smallest edge cover can be found in polynomial time by finding + a maximum matching and extending it greedily so that all nodes + are covered. This function follows that process. A maximum matching + algorithm can be specified for the first step of the algorithm. + The resulting set may return a set with one 2-tuple for each edge, + (the usual case) or with both 2-tuples `(u, v)` and `(v, u)` for + each edge. The latter is only done when a bipartite matching algorithm + is specified as `matching_algorithm`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + matching_algorithm : function + A function that returns a maximum cardinality matching for `G`. + The function must take one input, the graph `G`, and return + either a set of edges (with only one direction for the pair of nodes) + or a dictionary mapping each node to its mate. If not specified, + :func:`~networkx.algorithms.matching.max_weight_matching` is used. + Common bipartite matching functions include + :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching` + or + :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`. + + Returns + ------- + min_cover : set + + A set of the edges in a minimum edge cover in the form of tuples. + It contains only one of the equivalent 2-tuples `(u, v)` and `(v, u)` + for each edge. If a bipartite method is used to compute the matching, + the returned set contains both the 2-tuples `(u, v)` and `(v, u)` + for each edge of a minimum edge cover. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> sorted(nx.min_edge_cover(G)) + [(2, 1), (3, 0)] + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + The minimum edge cover is an edge covering of smallest cardinality. + + Due to its implementation, the worst-case running time of this algorithm + is bounded by the worst-case running time of the function + ``matching_algorithm``. + + Minimum edge cover for `G` can also be found using the `min_edge_covering` + function in :mod:`networkx.algorithms.bipartite.covering` which is + simply this function with a default matching algorithm of + :func:`~networkx.algorithms.bipartite.matching.hopcraft_karp_matching` + """ + if len(G) == 0: + return set() + if nx.number_of_isolates(G) > 0: + # ``min_cover`` does not exist as there is an isolated node + raise nx.NetworkXException( + "Graph has a node with no edge incident on it, " "so no edge cover exists." + ) + if matching_algorithm is None: + matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True) + maximum_matching = matching_algorithm(G) + # ``min_cover`` is superset of ``maximum_matching`` + try: + # bipartite matching algs return dict so convert if needed + min_cover = set(maximum_matching.items()) + bipartite_cover = True + except AttributeError: + min_cover = maximum_matching + bipartite_cover = False + # iterate for uncovered nodes + uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover} + for v in uncovered_nodes: + # Since `v` is uncovered, each edge incident to `v` will join it + # with a covered node (otherwise, if there were an edge joining + # uncovered nodes `u` and `v`, the maximum matching algorithm + # would have found it), so we can choose an arbitrary edge + # incident to `v`. (This applies only in a simple graph, not a + # multigraph.) 
+ u = arbitrary_element(G[v]) + min_cover.add((u, v)) + if bipartite_cover: + min_cover.add((v, u)) + return min_cover + + +@not_implemented_for("directed") +def is_edge_cover(G, cover): + """Decides whether a set of edges is a valid edge cover of the graph. + + Given a set of edges, whether it is an edge covering can + be decided if we just check whether all nodes of the graph + has an edge from the set, incident on it. + + Parameters + ---------- + G : NetworkX graph + An undirected bipartite graph. + + cover : set + Set of edges to be checked. + + Returns + ------- + bool + Whether the set of edges is a valid edge cover of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> cover = {(2, 1), (3, 0)} + >>> nx.is_edge_cover(G, cover) + True + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + """ + return set(G) <= set(chain.from_iterable(cover)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/cuts.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/cuts.py new file mode 100644 index 0000000..ae1cb02 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/cuts.py @@ -0,0 +1,392 @@ +"""Functions for finding and evaluating cuts in a graph. + +""" + +from itertools import chain + +import networkx as nx + +__all__ = [ + "boundary_expansion", + "conductance", + "cut_size", + "edge_expansion", + "mixing_expansion", + "node_expansion", + "normalized_cut_size", + "volume", +] + + +# TODO STILL NEED TO UPDATE ALL THE DOCUMENTATION! + + +def cut_size(G, S, T=None, weight=None): + """Returns the size of the cut between two sets of nodes. + + A *cut* is a partition of the nodes of a graph into two sets. The + *cut size* is the sum of the weights of the edges "between" the two + sets of nodes. + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. If not specified, this is taken to + be the set complement of `S`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + Total weight of all edges from nodes in set `S` to nodes in + set `T` (and, in the case of directed graphs, all edges from + nodes in `T` to nodes in `S`). + + Examples + -------- + In the graph with two cliques joined by a single edges, the natural + bipartition of the graph into two blocks, one for each clique, + yields a cut of weight one:: + + >>> G = nx.barbell_graph(3, 0) + >>> S = {0, 1, 2} + >>> T = {3, 4, 5} + >>> nx.cut_size(G, S, T) + 1 + + Each parallel edge in a multigraph is counted when determining the + cut size:: + + >>> G = nx.MultiGraph(["ab", "ab"]) + >>> S = {"a"} + >>> T = {"b"} + >>> nx.cut_size(G, S, T) + 2 + + Notes + ----- + In a multigraph, the cut size is the total weight of edges including + multiplicity. + + """ + edges = nx.edge_boundary(G, S, T, data=weight, default=1) + if G.is_directed(): + edges = chain(edges, nx.edge_boundary(G, T, S, data=weight, default=1)) + return sum(weight for u, v, weight in edges) + + +def volume(G, S, weight=None): + """Returns the volume of a set of nodes. + + The *volume* of a set *S* is the sum of the (out-)degrees of nodes + in *S* (taking into account parallel edges in multigraphs). [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. 
+ + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The volume of the set of nodes represented by `S` in the graph + `G`. + + See also + -------- + conductance + cut_size + edge_expansion + edge_boundary + normalized_cut_size + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + degree = G.out_degree if G.is_directed() else G.degree + return sum(d for v, d in degree(S, weight=weight)) + + +def normalized_cut_size(G, S, T=None, weight=None): + """Returns the normalized size of the cut between two sets of nodes. + + The *normalized cut size* is the cut size times the sum of the + reciprocal sizes of the volumes of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The normalized cut size between the two sets `S` and `T`. + + Notes + ----- + In a multigraph, the cut size is the total weight of edges including + multiplicity. + + See also + -------- + conductance + cut_size + edge_expansion + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges * ((1 / volume_S) + (1 / volume_T)) + + +def conductance(G, S, T=None, weight=None): + """Returns the conductance of two sets of nodes. + + The *conductance* is the quotient of the cut size and the smaller of + the volumes of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The conductance between the two sets `S` and `T`. + + See also + -------- + cut_size + edge_expansion + normalized_cut_size + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges / min(volume_S, volume_T) + + +def edge_expansion(G, S, T=None, weight=None): + """Returns the edge expansion between two node sets. + + The *edge expansion* is the quotient of the cut size and the smaller + of the cardinalities of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The edge expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Fan Chung. + *Spectral Graph Theory*. + (CBMS Regional Conference Series in Mathematics, No. 
92), + American Mathematical Society, 1997, ISBN 0-8218-0315-8 + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + return num_cut_edges / min(len(S), len(T)) + + +def mixing_expansion(G, S, T=None, weight=None): + """Returns the mixing expansion between two node sets. + + The *mixing expansion* is the quotient of the cut size and twice the + number of edges in the graph. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The mixing expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + edge_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + num_cut_edges = cut_size(G, S, T=T, weight=weight) + num_total_edges = G.number_of_edges() + return num_cut_edges / (2 * num_total_edges) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +def node_expansion(G, S): + """Returns the node expansion of the set `S`. + + The *node expansion* is the quotient of the size of the node + boundary of *S* and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The node expansion of the set `S`. + + See also + -------- + boundary_expansion + edge_expansion + mixing_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + neighborhood = set(chain.from_iterable(G.neighbors(v) for v in S)) + return len(neighborhood) / len(S) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +def boundary_expansion(G, S): + """Returns the boundary expansion of the set `S`. + + The *boundary expansion* is the quotient of the size + of the node boundary and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The boundary expansion of the set `S`. + + See also + -------- + edge_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends in Theoretical Computer Science* + 7.1–3 (2011): 1–336. 
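+
+    Examples
+    --------
+    A small sanity check; the value follows directly from the definition
+    above (size of the node boundary divided by ``len(S)``):
+
+    >>> G = nx.path_graph(4)
+    >>> nx.boundary_expansion(G, {1, 2})
+    1.0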
+ + + """ + return len(nx.node_boundary(G, S)) / len(S) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/cycles.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/cycles.py new file mode 100644 index 0000000..48d32ae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/cycles.py @@ -0,0 +1,617 @@ +""" +======================== +Cycle finding algorithms +======================== +""" + +from collections import defaultdict + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "cycle_basis", + "simple_cycles", + "recursive_simple_cycles", + "find_cycle", + "minimum_cycle_basis", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def cycle_basis(G, root=None): + """Returns a list of cycles which form a basis for cycles of G. + + A basis for cycles of a network is a minimal collection of + cycles such that any cycle in the network can be written + as a sum of cycles in the basis. Here summation of cycles + is defined as "exclusive or" of the edges. Cycle bases are + useful, e.g. when deriving equations for electric circuits + using Kirchhoff's Laws. + + Parameters + ---------- + G : NetworkX Graph + root : node, optional + Specify starting node for basis. + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> print(nx.cycle_basis(G, 0)) + [[3, 4, 5, 0], [1, 2, 3, 0]] + + Notes + ----- + This is adapted from algorithm CACM 491 [1]_. + + References + ---------- + .. [1] Paton, K. An algorithm for finding a fundamental set of + cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. + + See Also + -------- + simple_cycles + """ + gnodes = set(G.nodes()) + cycles = [] + while gnodes: # loop over connected components + if root is None: + root = gnodes.pop() + stack = [root] + pred = {root: root} + used = {root: set()} + while stack: # walk the spanning tree finding cycles + z = stack.pop() # use last-in so cycles easier to find + zused = used[z] + for nbr in G[z]: + if nbr not in used: # new node + pred[nbr] = z + stack.append(nbr) + used[nbr] = {z} + elif nbr == z: # self loops + cycles.append([z]) + elif nbr not in zused: # found a cycle + pn = used[nbr] + cycle = [nbr, z] + p = pred[z] + while p not in pn: + cycle.append(p) + p = pred[p] + cycle.append(p) + cycles.append(cycle) + used[nbr].add(z) + gnodes -= set(pred) + root = None + return cycles + + +@not_implemented_for("undirected") +def simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. + + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. + + This is a nonrecursive, iterator/generator version of Johnson's + algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. 
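+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If `G` is undirected (the function is decorated with
+        ``not_implemented_for("undirected")``).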
+ + Examples + -------- + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> sorted(nx.simple_cycles(G)) + [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + + To filter the cycles so that they don't include certain nodes or edges, + copy your graph and eliminate those nodes or edges before calling. + For example, to exclude self-loops from the above example: + + >>> H = G.copy() + >>> H.remove_edges_from(nx.selfloop_edges(G)) + >>> sorted(nx.simple_cycles(H)) + [[0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy. + G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. + .. [3] A search strategy for the elementary cycles of a directed graph. + J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, + v. 16, no. 2, 192-204, 1976. + + See Also + -------- + cycle_basis + """ + + def _unblock(thisnode, blocked, B): + stack = {thisnode} + while stack: + node = stack.pop() + if node in blocked: + blocked.remove(node) + stack.update(B[node]) + B[node].clear() + + # Johnson's algorithm requires some ordering of the nodes. + # We assign the arbitrary ordering given by the strongly connected comps + # There is no need to track the ordering as each node removed as processed. + # Also we save the actual graph so we can mutate it. We only take the + # edges because we do not want to copy edge and node attributes here. + subG = type(G)(G.edges()) + sccs = [scc for scc in nx.strongly_connected_components(subG) if len(scc) > 1] + + # Johnson's algorithm exclude self cycle edges like (v, v) + # To be backward compatible, we record those cycles in advance + # and then remove from subG + for v in subG: + if subG.has_edge(v, v): + yield [v] + subG.remove_edge(v, v) + + while sccs: + scc = sccs.pop() + sccG = subG.subgraph(scc) + # order of scc determines ordering of nodes + startnode = scc.pop() + # Processing node runs "circuit" routine from recursive version + path = [startnode] + blocked = set() # vertex: blocked from search? + closed = set() # nodes involved in a cycle + blocked.add(startnode) + B = defaultdict(set) # graph portions that yield no elementary circuit + stack = [(startnode, list(sccG[startnode]))] # sccG gives comp nbrs + while stack: + thisnode, nbrs = stack[-1] + if nbrs: + nextnode = nbrs.pop() + if nextnode == startnode: + yield path[:] + closed.update(path) + # print "Found a cycle", path, closed + elif nextnode not in blocked: + path.append(nextnode) + stack.append((nextnode, list(sccG[nextnode]))) + closed.discard(nextnode) + blocked.add(nextnode) + continue + # done with nextnode... 
look for more neighbors + if not nbrs: # no more nbrs + if thisnode in closed: + _unblock(thisnode, blocked, B) + else: + for nbr in sccG[thisnode]: + if thisnode not in B[nbr]: + B[nbr].add(thisnode) + stack.pop() + # assert path[-1] == thisnode + path.pop() + # done processing this node + H = subG.subgraph(scc) # make smaller to avoid work in SCC routine + sccs.extend(scc for scc in nx.strongly_connected_components(H) if len(scc) > 1) + + +@not_implemented_for("undirected") +def recursive_simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. + + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. + + This version uses a recursive algorithm to build a list of cycles. + You should probably use the iterator version called simple_cycles(). + Warning: This recursive version uses lots of RAM! + It appears in NetworkX for pedagogical value. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of cycles, where each cycle is represented by a list of nodes + along the cycle. + + Example: + + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> nx.recursive_simple_cycles(G) + [[0], [2], [0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + See Also + -------- + simple_cycles, cycle_basis + """ + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path[:]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? + B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + + # Johnson's algorithm exclude self cycle edges like (v, v) + # To be backward compatible, we record those cycles in advance + # and then remove from subG + for v in G: + if G.has_edge(v, v): + result.append([v]) + G.remove_edge(v, v) + + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. 
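+    # (dict preserves insertion order, so this simply numbers the nodes
+    # 0..n-1 in the graph's iteration order.)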
+ ordering = dict(zip(G, range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns)) + component = G.subgraph(mincomp) + if len(component) > 1: + # smallest node in the component according to the ordering + startnode = min(component, key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy = circuit(startnode, startnode, component) + return result + + +def find_cycle(G, source=None, orientation=None): + """Returns a cycle found via depth-first traversal. + + The cycle is a list of edges indicating the cyclic path. + Orientation of directed edges is controlled by `orientation`. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Returns + ------- + edges : directed edges + A list of directed edges indicating the path taken for the loop. + If no cycle is found, then an exception is raised. + For graphs, an edge is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, an edge is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Raises + ------ + NetworkXNoCycle + If no cycle was found. + + Examples + -------- + In this example, we construct a DAG and find, in the first call, that there + are no directed cycles, and so an exception is raised. In the second call, + we ignore edge orientations and find that there is an undirected cycle. + Note that the second call finds a directed cycle while effectively + traversing an undirected graph, and so, we found an "undirected cycle". + This means that this DAG structure does not form a directed tree (which + is also known as a polytree). + + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + >>> nx.find_cycle(G, orientation="original") + Traceback (most recent call last): + ... + networkx.exception.NetworkXNoCycle: No cycle found. 
+ >>> list(nx.find_cycle(G, orientation="ignore")) + [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')] + + See Also + -------- + simple_cycles + """ + if not G.is_directed() or orientation in (None, "original"): + + def tailhead(edge): + return edge[:2] + + elif orientation == "reverse": + + def tailhead(edge): + return edge[1], edge[0] + + elif orientation == "ignore": + + def tailhead(edge): + if edge[-1] == "reverse": + return edge[1], edge[0] + return edge[:2] + + explored = set() + cycle = [] + final_node = None + for start_node in G.nbunch_iter(source): + if start_node in explored: + # No loop is possible. + continue + + edges = [] + # All nodes seen in this iteration of edge_dfs + seen = {start_node} + # Nodes in active path. + active_nodes = {start_node} + previous_head = None + + for edge in nx.edge_dfs(G, start_node, orientation): + # Determine if this edge is a continuation of the active path. + tail, head = tailhead(edge) + if head in explored: + # Then we've already explored it. No loop is possible. + continue + if previous_head is not None and tail != previous_head: + # This edge results from backtracking. + # Pop until we get a node whose head equals the current tail. + # So for example, we might have: + # (0, 1), (1, 2), (2, 3), (1, 4) + # which must become: + # (0, 1), (1, 4) + while True: + try: + popped_edge = edges.pop() + except IndexError: + edges = [] + active_nodes = {tail} + break + else: + popped_head = tailhead(popped_edge)[1] + active_nodes.remove(popped_head) + + if edges: + last_head = tailhead(edges[-1])[1] + if tail == last_head: + break + edges.append(edge) + + if head in active_nodes: + # We have a loop! + cycle.extend(edges) + final_node = head + break + else: + seen.add(head) + active_nodes.add(head) + previous_head = head + + if cycle: + break + else: + explored.update(seen) + + else: + assert len(cycle) == 0 + raise nx.exception.NetworkXNoCycle("No cycle found.") + + # We now have a list of edges which ends on a cycle. + # So we need to remove from the beginning edges that are not relevant. + + for i, edge in enumerate(cycle): + tail, head = tailhead(edge) + if tail == final_node: + break + + return cycle[i:] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def minimum_cycle_basis(G, weight=None): + """Returns a minimum weight cycle basis for G + + Minimum weight means a cycle basis for which the total weight + (length for unweighted graphs) of all the cycles is minimum. + + Parameters + ---------- + G : NetworkX Graph + weight: string + name of the edge attribute to use for edge weights + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. Note that the nodes are not + necessarily returned in a order by which they appear in the cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> print([sorted(c) for c in nx.minimum_cycle_basis(G)]) + [[0, 1, 2, 3], [0, 3, 4, 5]] + + References: + [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for + Minimum Cycle Basis of Graphs." + http://link.springer.com/article/10.1007/s00453-007-9064-z + [2] de Pina, J. 1995. Applications of shortest path methods. + Ph.D. 
thesis, University of Amsterdam, Netherlands
+
+    See Also
+    --------
+    simple_cycles, cycle_basis
+    """
+    # We first split the graph into connected subgraphs
+    return sum(
+        (_min_cycle_basis(G.subgraph(c), weight) for c in nx.connected_components(G)),
+        [],
+    )
+
+
+def _min_cycle_basis(comp, weight):
+    cb = []
+    # We extract the edges not in a spanning tree. We do not really need a
+    # *minimum* spanning tree. That is why we call the next function with
+    # weight=None. Depending on implementation, it may be faster as well.
+    spanning_tree_edges = list(nx.minimum_spanning_edges(comp, weight=None, data=False))
+    edges_excl = [frozenset(e) for e in comp.edges() if e not in spanning_tree_edges]
+    N = len(edges_excl)
+
+    # We maintain a set of vectors orthogonal to the cycles found so far
+    set_orth = [{edge} for edge in edges_excl]
+    for k in range(N):
+        # kth cycle is "parallel" to kth vector in set_orth
+        new_cycle = _min_cycle(comp, set_orth[k], weight=weight)
+        cb.append(list(set().union(*new_cycle)))
+        # now update set_orth so that k+1,k+2... th elements are
+        # orthogonal to the newly found cycle, as per [p. 336, 1]
+        base = set_orth[k]
+        set_orth[k + 1 :] = [
+            orth ^ base if len(orth & new_cycle) % 2 else orth
+            for orth in set_orth[k + 1 :]
+        ]
+    return cb
+
+
+def _min_cycle(G, orth, weight=None):
+    """
+    Computes the minimum weight cycle in G,
+    orthogonal to the vector orth as per [p. 338, 1]
+    """
+    T = nx.Graph()
+
+    nodes_idx = {node: idx for idx, node in enumerate(G.nodes())}
+    idx_nodes = {idx: node for node, idx in nodes_idx.items()}
+
+    nnodes = len(nodes_idx)
+
+    # Add 2 copies of each edge in G to T. If edge is in orth, add cross edge;
+    # otherwise in-plane edge
+    for u, v, data in G.edges(data=True):
+        uidx, vidx = nodes_idx[u], nodes_idx[v]
+        edge_w = data.get(weight, 1)
+        if frozenset((u, v)) in orth:
+            T.add_edges_from(
+                [(uidx, nnodes + vidx), (nnodes + uidx, vidx)], weight=edge_w
+            )
+        else:
+            T.add_edges_from(
+                [(uidx, vidx), (nnodes + uidx, nnodes + vidx)], weight=edge_w
+            )
+
+    all_shortest_pathlens = dict(nx.shortest_path_length(T, weight=weight))
+    cross_paths_w_lens = {
+        n: all_shortest_pathlens[n][nnodes + n] for n in range(nnodes)
+    }
+
+    # Now compute shortest paths in T, which translates to cycles in G
+    start = min(cross_paths_w_lens, key=cross_paths_w_lens.get)
+    end = nnodes + start
+    min_path = nx.shortest_path(T, source=start, target=end, weight="weight")
+
+    # Now we obtain the actual path, re-map nodes in T to those in G
+    min_path_nodes = [node if node < nnodes else node - nnodes for node in min_path]
+    # Now remove the edges that occur twice
+    mcycle_pruned = _path_to_cycle(min_path_nodes)
+
+    return {frozenset((idx_nodes[u], idx_nodes[v])) for u, v in mcycle_pruned}
+
+
+def _path_to_cycle(path):
+    """
+    Removes the edges from path that occur an even number of times.
+    Returns a set of edges
+    """
+    edges = set()
+    for edge in pairwise(path):
+        # Toggle whether to keep the current edge.
+        edges ^= {edge}
+    return edges
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/d_separation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/d_separation.py
new file mode 100644
index 0000000..caf26d0
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/d_separation.py
@@ -0,0 +1,142 @@
+"""
+Algorithm for testing d-separation in DAGs.
+
+*d-separation* is a test for conditional independence in probability
+distributions that can be factorized using DAGs.
It is a purely +graphical test that uses the underlying graph and makes no reference +to the actual distribution parameters. See [1]_ for a formal +definition. + +The implementation is based on the conceptually simple linear time +algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of +alternative algorithms. + + +Examples +-------- + +>>> +>>> # HMM graph with five states and observation nodes +... g = nx.DiGraph() +>>> g.add_edges_from( +... [ +... ("S1", "S2"), +... ("S2", "S3"), +... ("S3", "S4"), +... ("S4", "S5"), +... ("S1", "O1"), +... ("S2", "O2"), +... ("S3", "O3"), +... ("S4", "O4"), +... ("S5", "O5"), +... ] +... ) +>>> +>>> # states/obs before 'S3' are d-separated from states/obs after 'S3' +... nx.d_separated(g, {"S1", "S2", "O1", "O2"}, {"S4", "S5", "O4", "O5"}, {"S3"}) +True + + +References +---------- + +.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press. + +.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks. + Cambridge: Cambridge University Press. + +.. [3] Shachter, R. D. (1998). + Bayes-ball: rational pastime (for determining irrelevance and requisite + information in belief networks and influence diagrams). + In , Proceedings of the Fourteenth Conference on Uncertainty in Artificial + Intelligence (pp. 480–487). + San Francisco, CA, USA: Morgan Kaufmann Publishers Inc. + +.. [4] Koller, D., & Friedman, N. (2009). + Probabilistic graphical models: principles and techniques. The MIT Press. + +""" + +from collections import deque + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for + +__all__ = ["d_separated"] + + +@not_implemented_for("undirected") +def d_separated(G, x, y, z): + """ + Return whether node sets ``x`` and ``y`` are d-separated by ``z``. + + Parameters + ---------- + G : graph + A NetworkX DAG. + + x : set + First set of nodes in ``G``. + + y : set + Second set of nodes in ``G``. + + z : set + Set of conditioning nodes in ``G``. Can be empty set. + + Returns + ------- + b : bool + A boolean that is true if ``x`` is d-separated from ``y`` given ``z`` in ``G``. + + Raises + ------ + NetworkXError + The *d-separation* test is commonly used with directed + graphical models which are acyclic. Accordingly, the algorithm + raises a :exc:`NetworkXError` if the input graph is not a DAG. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + """ + + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + union_xyz = x.union(y).union(z) + + if any(n not in G.nodes for n in union_xyz): + raise nx.NodeNotFound("one or more specified nodes not found in the graph") + + G_copy = G.copy() + + # transform the graph by removing leaves that are not in x | y | z + # until no more leaves can be removed. + leaves = deque([n for n in G_copy.nodes if G_copy.out_degree[n] == 0]) + while len(leaves) > 0: + leaf = leaves.popleft() + if leaf not in union_xyz: + for p in G_copy.predecessors(leaf): + if G_copy.out_degree[p] == 1: + leaves.append(p) + G_copy.remove_node(leaf) + + # transform the graph by removing outgoing edges from the + # conditioning set. + edges_to_remove = list(G_copy.out_edges(z)) + G_copy.remove_edges_from(edges_to_remove) + + # use disjoint-set data structure to check if any node in `x` + # occurs in the same weakly connected component as a node in `y`. 
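+    # Merging every component, then all of `x` and all of `y`, reduces the
+    # d-connectivity test to a single comparison of representatives below.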
+ disjoint_set = UnionFind(G_copy.nodes()) + for component in nx.weakly_connected_components(G_copy): + disjoint_set.union(*component) + disjoint_set.union(*x) + disjoint_set.union(*y) + + if x and y and disjoint_set[next(iter(x))] == disjoint_set[next(iter(y))]: + return False + else: + return True diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/dag.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/dag.py new file mode 100644 index 0000000..c96032d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/dag.py @@ -0,0 +1,1136 @@ +"""Algorithms for directed acyclic graphs (DAGs). + +Note that most of these functions are only guaranteed to work for DAGs. +In general, these functions do not check for acyclic-ness, so it is up +to the user to check for that. +""" + +import heapq +from collections import deque +from functools import partial +from itertools import chain, product, starmap +from math import gcd + +import networkx as nx +from networkx.utils import arbitrary_element, not_implemented_for, pairwise + +__all__ = [ + "descendants", + "ancestors", + "topological_sort", + "lexicographical_topological_sort", + "all_topological_sorts", + "topological_generations", + "is_directed_acyclic_graph", + "is_aperiodic", + "transitive_closure", + "transitive_closure_dag", + "transitive_reduction", + "antichains", + "dag_longest_path", + "dag_longest_path_length", + "dag_to_branching", +] + +chaini = chain.from_iterable + + +def descendants(G, source): + """Returns all nodes reachable from `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The descendants of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. + + Examples + -------- + >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(list(nx.descendants(DG, 2))) + [3, 4] + + See also + -------- + ancestors + """ + return {child for parent, child in nx.bfs_edges(G, source)} + + +def ancestors(G, source): + """Returns all nodes having a path to `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The ancestors of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. + + Examples + -------- + >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(list(nx.ancestors(DG, 2))) + [0, 1] + + See also + -------- + descendants + """ + return {child for parent, child in nx.bfs_edges(G, source, reverse=True)} + + +def has_cycle(G): + """Decides whether the directed graph has a cycle.""" + try: + # Feed the entire iterator into a zero-length deque. + deque(topological_sort(G), maxlen=0) + except nx.NetworkXUnfeasible: + return True + else: + return False + + +def is_directed_acyclic_graph(G): + """Returns True if the graph `G` is a directed acyclic graph (DAG) or + False if not. 
+ + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + True if `G` is a DAG, False otherwise + + Examples + -------- + Undirected graph:: + + >>> G = nx.Graph([(1, 2), (2, 3)]) + >>> nx.is_directed_acyclic_graph(G) + False + + Directed graph with cycle:: + + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> nx.is_directed_acyclic_graph(G) + False + + Directed acyclic graph:: + + >>> G = nx.DiGraph([(1, 2), (2, 3)]) + >>> nx.is_directed_acyclic_graph(G) + True + + See also + -------- + topological_sort + """ + return G.is_directed() and not has_cycle(G) + + +def topological_generations(G): + """Stratifies a DAG into generations. + + A topological generation is node collection in which ancestors of a node in each + generation are guaranteed to be in a previous generation, and any descendants of + a node are guaranteed to be in a following generation. Nodes are guaranteed to + be in the earliest possible generation that they can belong to. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + sets of nodes + Yields sets of nodes representing each generation. + + Raises + ------ + NetworkXError + Generations are defined for directed graphs only. If the graph + `G` is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological generations + exist and a :exc:`NetworkXUnfeasible` exception is raised. This can also + be raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (3, 1)]) + >>> [sorted(generation) for generation in nx.topological_generations(DG)] + [[2, 3], [1]] + + Notes + ----- + The generation in which a node resides can also be determined by taking the + max-path-distance from the node to the farthest leaf node. That value can + be obtained with this function using `enumerate(topological_generations(G))`. + + See also + -------- + topological_sort + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + multigraph = G.is_multigraph() + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + zero_indegree = [v for v, d in G.in_degree() if d == 0] + + while zero_indegree: + this_generation = zero_indegree + zero_indegree = [] + for node in this_generation: + if node not in G: + raise RuntimeError("Graph changed during iteration") + for child in G.neighbors(node): + try: + indegree_map[child] -= len(G[node][child]) if multigraph else 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + zero_indegree.append(child) + del indegree_map[child] + yield this_generation + + if indegree_map: + raise nx.NetworkXUnfeasible( + "Graph contains a cycle or graph changed during iteration" + ) + + +def topological_sort(G): + """Returns a generator of nodes in topologically sorted order. + + A topological sort is a nonunique permutation of the nodes of a + directed graph such that an edge from u to v implies that u + appears before v in the topological sort order. This ordering is + valid only if the graph has no directed cycles. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + nodes + Yields the nodes in topological sorted order. 
+ + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + To get the reverse order of the topological sort: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> list(reversed(list(nx.topological_sort(DG)))) + [3, 2, 1] + + If your DiGraph naturally has the edges representing tasks/inputs + and nodes representing people/processes that initiate tasks, then + topological_sort is not quite what you need. You will have to change + the tasks to nodes with dependence reflected by edges. The result is + a kind of topological sort of the edges. This can be done + with :func:`networkx.line_graph` as follows: + + >>> list(nx.topological_sort(nx.line_graph(DG))) + [(1, 2), (2, 3)] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + is_directed_acyclic_graph, lexicographical_topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + for generation in nx.topological_generations(G): + yield from generation + + +def lexicographical_topological_sort(G, key=None): + """Returns a generator of nodes in lexicographically topologically sorted + order. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + key : function, optional + This function maps nodes to keys with which to resolve ambiguities in + the sort order. Defaults to the identity function. + + Yields + ------ + nodes + Yields the nodes in lexicographical topological sort order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)]) + >>> list(nx.lexicographical_topological_sort(DG)) + [2, 1, 3, 5, 4] + >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x)) + [2, 5, 1, 4, 3] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + if not G.is_directed(): + msg = "Topological sort not defined on undirected graphs." 
+ raise nx.NetworkXError(msg) + + if key is None: + + def key(node): + return node + + nodeid_map = {n: i for i, n in enumerate(G)} + + def create_tuple(node): + return key(node), nodeid_map[node], node + + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + # These nodes have zero indegree and ready to be returned. + zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] + heapq.heapify(zero_indegree) + + while zero_indegree: + _, _, node = heapq.heappop(zero_indegree) + + if node not in G: + raise RuntimeError("Graph changed during iteration") + for _, child in G.edges(node): + try: + indegree_map[child] -= 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + heapq.heappush(zero_indegree, create_tuple(child)) + del indegree_map[child] + + yield node + + if indegree_map: + msg = "Graph contains a cycle or graph changed during iteration" + raise nx.NetworkXUnfeasible(msg) + + +@not_implemented_for("undirected") +def all_topological_sorts(G): + """Returns a generator of _all_ topological sorts of the directed graph G. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Yields + ------ + topological_sort_order : list + a list of nodes in `G`, representing one of the topological sort orders + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` is not acyclic + + Examples + -------- + To enumerate all topological sorts of directed graph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + >>> list(nx.all_topological_sorts(DG)) + [[1, 2, 4, 3], [1, 2, 3, 4]] + + Notes + ----- + Implements an iterative version of the algorithm given in [1]. + + References + ---------- + .. [1] Knuth, Donald E., Szwarcfiter, Jayme L. (1974). + "A Structured Program to Generate All Topological Sorting Arrangements" + Information Processing Letters, Volume 2, Issue 6, 1974, Pages 153-157, + ISSN 0020-0190, + https://doi.org/10.1016/0020-0190(74)90001-5. 
+ Elsevier (North-Holland), Amsterdam + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + # the names of count and D are chosen to match the global variables in [1] + # number of edges originating in a vertex v + count = dict(G.in_degree()) + # vertices with indegree 0 + D = deque([v for v, d in G.in_degree() if d == 0]) + # stack of first value chosen at a position k in the topological sort + bases = [] + current_sort = [] + + # do-while construct + while True: + assert all([count[v] == 0 for v in D]) + + if len(current_sort) == len(G): + yield list(current_sort) + + # clean-up stack + while len(current_sort) > 0: + assert len(bases) == len(current_sort) + q = current_sort.pop() + + # "restores" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] += 1 + assert count[j] >= 0 + # remove entries from D + while len(D) > 0 and count[D[-1]] > 0: + D.pop() + + # corresponds to a circular shift of the values in D + # if the first value chosen (the base) is in the first + # position of D again, we are done and need to consider the + # previous condition + D.appendleft(q) + if D[-1] == bases[-1]: + # all possible values have been chosen at current position + # remove corresponding marker + bases.pop() + else: + # there are still elements that have not been fixed + # at the current position in the topological sort + # stop removing elements, escape inner loop + break + + else: + if len(D) == 0: + raise nx.NetworkXUnfeasible("Graph contains a cycle.") + + # choose next node + q = D.pop() + # "erase" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] -= 1 + assert count[j] >= 0 + if count[j] == 0: + D.append(j) + current_sort.append(q) + + # base for current position might _not_ be fixed yet + if len(bases) < len(current_sort): + bases.append(q) + + if len(bases) == 0: + break + + +def is_aperiodic(G): + """Returns True if `G` is aperiodic. + + A directed graph is aperiodic if there is no integer k > 1 that + divides the length of every cycle in the graph. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + bool + True if the graph is aperiodic False otherwise + + Raises + ------ + NetworkXError + If `G` is not directed + + Examples + -------- + A graph consisting of one cycle, the length of which is 2. Therefore ``k = 2`` + divides the length of every cycle in the graph and thus the graph + is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 1)]) + >>> nx.is_aperiodic(DG) + False + + A graph consisting of two cycles: one of length 2 and the other of length 3. + The cycle lengths are coprime, so there is no single value of k where ``k > 1`` + that divides each cycle length and therefore the graph is *aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)]) + >>> nx.is_aperiodic(DG) + True + + A graph consisting of two cycles: one of length 2 and the other of length 4. 
+ The lengths of the cycles share a common factor ``k = 2``, and therefore + the graph is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 1), (3, 4), (4, 5), (5, 6), (6, 3)]) + >>> nx.is_aperiodic(DG) + False + + An acyclic graph, therefore the graph is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> nx.is_aperiodic(DG) + False + + Notes + ----- + This uses the method outlined in [1]_, which runs in $O(m)$ time + given $m$ edges in `G`. Note that a graph is not aperiodic if it is + acyclic as every integer trivial divides length 0 cycles. + + References + ---------- + .. [1] Jarvis, J. P.; Shier, D. R. (1996), + "Graph-theoretic analysis of finite Markov chains," + in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling: + A Multidisciplinary Approach, CRC Press. + """ + if not G.is_directed(): + raise nx.NetworkXError("is_aperiodic not defined for undirected graphs") + + s = arbitrary_element(G) + levels = {s: 0} + this_level = [s] + g = 0 + lev = 1 + while this_level: + next_level = [] + for u in this_level: + for v in G[u]: + if v in levels: # Non-Tree Edge + g = gcd(g, levels[u] - levels[v] + 1) + else: # Tree Edge + next_level.append(v) + levels[v] = lev + this_level = next_level + lev += 1 + if len(levels) == len(G): # All nodes in tree + return g == 1 + else: + return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels))) + + +def transitive_closure(G, reflexive=False): + """Returns transitive closure of a graph + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a path from v to w in G. + + Handling of paths from v to v has some flexibility within this definition. + A reflexive transitive closure creates a self-loop for the path + from v to v of length 0. The usual transitive closure creates a + self-loop only if a cycle exists (a path from v to v with length > 0). + We also allow an option for no self-loops. + + Parameters + ---------- + G : NetworkX Graph + A directed/undirected graph/multigraph. + reflexive : Bool or None, optional (default: False) + Determines when cycles create self-loops in the Transitive Closure. + If True, trivial cycles (length 0) create self-loops. The result + is a reflexive transitive closure of G. + If False (the default) non-trivial cycles create self-loops. + If None, self-loops are not created. + + Returns + ------- + NetworkX graph + The transitive closure of `G` + + Raises + ------ + NetworkXError + If `reflexive` not in `{None, True, False}` + + Examples + -------- + The treatment of trivial (i.e. length 0) cycles is controlled by the + `reflexive` parameter. + + Trivial (i.e. length 0) cycles do not create self-loops when + ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + However, nontrivial (i.e. 
length greater then 0) cycles create self-loops + when ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (1, 1), (2, 3), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]) + + Trivial cycles (length 0) create self-loops when ``reflexive=True``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=True) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 1), (1, 3), (2, 3), (2, 2), (3, 3)]) + + And the third option is not to create self-loops at all when ``reflexive=None``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=None) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3), (2, 1), (3, 1), (3, 2)]) + + References + ---------- + .. [1] https://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py + """ + TC = G.copy() + + if reflexive not in {None, True, False}: + raise nx.NetworkXError("Incorrect value for the parameter `reflexive`") + + for v in G: + if reflexive is None: + TC.add_edges_from((v, u) for u in nx.descendants(G, v) if u not in TC[v]) + elif reflexive is True: + TC.add_edges_from( + (v, u) for u in nx.descendants(G, v) | {v} if u not in TC[v] + ) + elif reflexive is False: + TC.add_edges_from((v, e[1]) for e in nx.edge_bfs(G, v) if e[1] not in TC[v]) + + return TC + + +@not_implemented_for("undirected") +def transitive_closure_dag(G, topo_order=None): + """Returns the transitive closure of a directed acyclic graph. + + This function is faster than the function `transitive_closure`, but fails + if the graph has a cycle. + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a non-null path from v to w in G. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Returns + ------- + NetworkX DiGraph + The transitive closure of `G` + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` has a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure_dag(DG) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + Notes + ----- + This algorithm is probably simple enough to be well-known but I didn't find + a mention in the literature. + """ + if topo_order is None: + topo_order = list(topological_sort(G)) + + TC = G.copy() + + # idea: traverse vertices following a reverse topological order, connecting + # each vertex to its descendants at distance 2 as we go + for v in reversed(topo_order): + TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2)) + + return TC + + +@not_implemented_for("undirected") +def transitive_reduction(G): + """Returns transitive reduction of a directed graph + + The transitive reduction of G = (V,E) is a graph G- = (V,E-) such that + for all v,w in V there is an edge (v,w) in E- if and only if (v,w) is + in E and there is no path from v to w in G with length greater than 1. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + Returns + ------- + NetworkX DiGraph + The transitive reduction of `G` + + Raises + ------ + NetworkXError + If `G` is not a directed acyclic graph (DAG) transitive reduction is + not uniquely defined and a :exc:`NetworkXError` exception is raised. 
+ + Examples + -------- + To perform transitive reduction on a DiGraph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)]) + >>> TR = nx.transitive_reduction(DG) + >>> list(TR.edges) + [(1, 2), (2, 3)] + + To avoid unnecessary data copies, this implementation does not return a + DiGraph with node/edge data. + To perform transitive reduction on a DiGraph and transfer node/edge data: + + >>> DG = nx.DiGraph() + >>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color='red') + >>> TR = nx.transitive_reduction(DG) + >>> TR.add_nodes_from(DG.nodes(data=True)) + >>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges) + >>> list(TR.edges(data=True)) + [(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})] + + References + ---------- + https://en.wikipedia.org/wiki/Transitive_reduction + + """ + if not is_directed_acyclic_graph(G): + msg = "Directed Acyclic Graph required for transitive_reduction" + raise nx.NetworkXError(msg) + TR = nx.DiGraph() + TR.add_nodes_from(G.nodes()) + descendants = {} + # count before removing set stored in descendants + check_count = dict(G.in_degree) + for u in G: + u_nbrs = set(G[u]) + for v in G[u]: + if v in u_nbrs: + if v not in descendants: + descendants[v] = {y for x, y in nx.dfs_edges(G, v)} + u_nbrs -= descendants[v] + check_count[v] -= 1 + if check_count[v] == 0: + del descendants[v] + TR.add_edges_from((u, v) for v in u_nbrs) + return TR + + +@not_implemented_for("undirected") +def antichains(G, topo_order=None): + """Generates antichains from a directed acyclic graph (DAG). + + An antichain is a subset of a partially ordered set such that any + two elements in the subset are incomparable. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Yields + ------ + antichain : list + a list of nodes in `G` representing an antichain + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + NetworkXUnfeasible + If `G` contains a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (1, 3)]) + >>> list(nx.antichains(DG)) + [[], [3], [2], [2, 3], [1]] + + Notes + ----- + This function was originally developed by Peter Jipsen and Franco Saliola + for the SAGE project. It's included in NetworkX with permission from the + authors. Original SAGE code at: + + https://github.com/sagemath/sage/blob/master/src/sage/combinat/posets/hasse_diagram.py + + References + ---------- + .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation, + AMS, Vol 42, 1995, p. 226. + """ + if topo_order is None: + topo_order = list(nx.topological_sort(G)) + + TC = nx.transitive_closure_dag(G, topo_order) + antichains_stacks = [([], list(reversed(topo_order)))] + + while antichains_stacks: + (antichain, stack) = antichains_stacks.pop() + # Invariant: + # - the elements of antichain are independent + # - the elements of stack are independent from those of antichain + yield antichain + while stack: + x = stack.pop() + new_antichain = antichain + [x] + new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))] + antichains_stacks.append((new_antichain, new_stack)) + + +@not_implemented_for("undirected") +def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None): + """Returns the longest path in a directed acyclic graph (DAG). + + If `G` has edges with `weight` attribute the edge data are used as + weight values. 
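+    Edges missing the attribute are assigned ``default_weight``.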
+ + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : str, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + topo_order: list or tuple, optional + A topological order for `G` (if None, the function will compute one) + + Returns + ------- + list + Longest path + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path(DG) + [0, 1, 2] + >>> nx.dag_longest_path(DG, weight="cost") + [0, 2] + + In the case where multiple valid topological orderings exist, `topo_order` + can be used to specify a specific ordering: + + >>> DG = nx.DiGraph([(0, 1), (0, 2)]) + >>> sorted(nx.all_topological_sorts(DG)) # Valid topological orderings + [[0, 1, 2], [0, 2, 1]] + >>> nx.dag_longest_path(DG, topo_order=[0, 1, 2]) + [0, 1] + >>> nx.dag_longest_path(DG, topo_order=[0, 2, 1]) + [0, 2] + + See also + -------- + dag_longest_path_length + + """ + if not G: + return [] + + if topo_order is None: + topo_order = nx.topological_sort(G) + + dist = {} # stores {v : (length, u)} + for v in topo_order: + us = [ + (dist[u][0] + data.get(weight, default_weight), u) + for u, data in G.pred[v].items() + ] + + # Use the best predecessor if there is one and its distance is + # non-negative, otherwise terminate. + maxu = max(us, key=lambda x: x[0]) if us else (0, v) + dist[v] = maxu if maxu[0] >= 0 else (0, v) + + u = None + v = max(dist, key=lambda x: dist[x][0]) + path = [] + while u != v: + path.append(v) + u = v + v = dist[v][1] + + path.reverse() + return path + + +@not_implemented_for("undirected") +def dag_longest_path_length(G, weight="weight", default_weight=1): + """Returns the longest path length in a DAG + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : string, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + Returns + ------- + int + Longest path length + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path_length(DG) + 2 + >>> nx.dag_longest_path_length(DG, weight="cost") + 42 + + See also + -------- + dag_longest_path + """ + path = nx.dag_longest_path(G, weight, default_weight) + path_length = 0 + for (u, v) in pairwise(path): + path_length += G[u][v].get(weight, default_weight) + + return path_length + + +def root_to_leaf_paths(G): + """Yields root-to-leaf paths in a directed acyclic graph. + + `G` must be a directed acyclic graph. If not, the behavior of this + function is undefined. A "root" in this graph is a node of in-degree + zero and a "leaf" a node of out-degree zero. + + When invoked, this function iterates over each path from any root to + any leaf. A path is a list of nodes. + + """ + roots = (v for v, d in G.in_degree() if d == 0) + leaves = (v for v, d in G.out_degree() if d == 0) + all_paths = partial(nx.all_simple_paths, G) + # TODO In Python 3, this would be better as `yield from ...`. 
+ return chaini(starmap(all_paths, product(roots, leaves))) + + +@not_implemented_for("multigraph") +@not_implemented_for("undirected") +def dag_to_branching(G): + """Returns a branching representing all (overlapping) paths from + root nodes to leaf nodes in the given directed acyclic graph. + + As described in :mod:`networkx.algorithms.tree.recognition`, a + *branching* is a directed forest in which each node has at most one + parent. In other words, a branching is a disjoint union of + *arborescences*. For this function, each node of in-degree zero in + `G` becomes a root of one of the arborescences, and there will be + one leaf node for each distinct path from that root to a leaf node + in `G`. + + Each node `v` in `G` with *k* parents becomes *k* distinct nodes in + the returned branching, one for each parent, and the sub-DAG rooted + at `v` is duplicated for each copy. The algorithm then recurses on + the children of each copy of `v`. + + Parameters + ---------- + G : NetworkX graph + A directed acyclic graph. + + Returns + ------- + DiGraph + The branching in which there is a bijection between root-to-leaf + paths in `G` (in which multiple paths may share the same leaf) + and root-to-leaf paths in the branching (in which there is a + unique path from a root to a leaf). + + Each node has an attribute 'source' whose value is the original + node to which this node corresponds. No other graph, node, or + edge attributes are copied into this new graph. + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed, or if `G` is a multigraph. + + HasACycle + If `G` is not acyclic. + + Examples + -------- + To examine which nodes in the returned branching were produced by + which original node in the directed acyclic graph, we can collect + the mapping from source node to new nodes into a dictionary. For + example, consider the directed diamond graph:: + + >>> from collections import defaultdict + >>> from operator import itemgetter + >>> + >>> G = nx.DiGraph(nx.utils.pairwise("abd")) + >>> G.add_edges_from(nx.utils.pairwise("acd")) + >>> B = nx.dag_to_branching(G) + >>> + >>> sources = defaultdict(set) + >>> for v, source in B.nodes(data="source"): + ... sources[source].add(v) + >>> len(sources["a"]) + 1 + >>> len(sources["d"]) + 2 + + To copy node attributes from the original graph to the new graph, + you can use a dictionary like the one constructed in the above + example:: + + >>> for source, nodes in sources.items(): + ... for v in nodes: + ... B.nodes[v].update(G.nodes[source]) + + Notes + ----- + This function is not idempotent in the sense that the node labels in + the returned branching may be uniquely generated each time the + function is invoked. In fact, the node labels may not be integers; + in order to relabel the nodes to be more readable, you can use the + :func:`networkx.convert_node_labels_to_integers` function. + + The current implementation of this function uses + :func:`networkx.prefix_tree`, so it is subject to the limitations of + that function. 
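Following up on the note about :func:`networkx.convert_node_labels_to_integers`, a short hedged sketch (assuming a standard `networkx` install) of relabeling the branching for readability::

    import networkx as nx

    # Diamond DAG a -> b -> d and a -> c -> d, as in the example above.
    G = nx.DiGraph(nx.utils.pairwise("abd"))
    G.add_edges_from(nx.utils.pairwise("acd"))

    B = nx.dag_to_branching(G)
    # Relabel the generated node identifiers to small integers; the original
    # node remains available through the 'source' node attribute.
    B = nx.convert_node_labels_to_integers(B)

    # The diamond has two root-to-leaf paths, so the branching has two leaves.
    assert sum(1 for _, d in B.out_degree() if d == 0) == 2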
+ + """ + if has_cycle(G): + msg = "dag_to_branching is only defined for acyclic graphs" + raise nx.HasACycle(msg) + paths = root_to_leaf_paths(G) + B = nx.prefix_tree(paths) + # Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree + B.remove_node(0) + B.remove_node(-1) + return B diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_measures.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_measures.py new file mode 100644 index 0000000..3f59a2a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_measures.py @@ -0,0 +1,707 @@ +"""Graph diameter, radius, eccentricity and other properties.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "extrema_bounding", + "eccentricity", + "diameter", + "radius", + "periphery", + "center", + "barycenter", + "resistance_distance", +] + + +def extrema_bounding(G, compute="diameter"): + """Compute requested extreme distance metric of undirected graph G + + .. deprecated:: 2.8 + + extrema_bounding is deprecated and will be removed in NetworkX 3.0. + Use the corresponding distance measure with the `usebounds=True` option + instead. + + Computation is based on smart lower and upper bounds, and in practice + linear in the number of nodes, rather than quadratic (except for some + border cases such as complete graphs or circle shaped graphs). + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + compute : string denoting the requesting metric + "diameter" for the maximal eccentricity value, + "radius" for the minimal eccentricity value, + "periphery" for the set of nodes with eccentricity equal to the diameter, + "center" for the set of nodes with eccentricity equal to the radius, + "eccentricities" for the maximum distance from each node to all other nodes in G + + Returns + ------- + value : value of the requested metric + int for "diameter" and "radius" or + list of nodes for "center" and "periphery" or + dictionary of eccentricity values keyed by node for "eccentricities" + + Raises + ------ + NetworkXError + If the graph consists of multiple components + ValueError + If `compute` is not one of "diameter", "radius", "periphery", "center", + or "eccentricities". + + Notes + ----- + This algorithm was proposed in the following papers: + + F.W. Takes and W.A. Kosters, Determining the Diameter of Small World + Networks, in Proceedings of the 20th ACM International Conference on + Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011. + doi: https://doi.org/10.1145/2063576.2063748 + + F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of + Large Graphs, Algorithms 6(1): 100-118, 2013. + doi: https://doi.org/10.3390/a6010100 + + M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes, + Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected) + Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015. + doi: https://doi.org/10.1016/j.tcs.2015.02.033 + """ + import warnings + + msg = "extrema_bounding is deprecated and will be removed in networkx 3.0\n" + # NOTE: _extrema_bounding does input checking, so it is skipped here + if compute in {"diameter", "radius", "periphery", "center"}: + msg += f"Use nx.{compute}(G, usebounds=True) instead." + if compute == "eccentricities": + msg += f"Use nx.eccentricity(G) instead." 
+ warnings.warn(msg, DeprecationWarning, stacklevel=2) + + return _extrema_bounding(G, compute=compute) + + +def _extrema_bounding(G, compute="diameter"): + """Compute requested extreme distance metric of undirected graph G + + Computation is based on smart lower and upper bounds, and in practice + linear in the number of nodes, rather than quadratic (except for some + border cases such as complete graphs or circle shaped graphs). + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + compute : string denoting the requesting metric + "diameter" for the maximal eccentricity value, + "radius" for the minimal eccentricity value, + "periphery" for the set of nodes with eccentricity equal to the diameter, + "center" for the set of nodes with eccentricity equal to the radius, + "eccentricities" for the maximum distance from each node to all other nodes in G + + Returns + ------- + value : value of the requested metric + int for "diameter" and "radius" or + list of nodes for "center" and "periphery" or + dictionary of eccentricity values keyed by node for "eccentricities" + + Raises + ------ + NetworkXError + If the graph consists of multiple components + ValueError + If `compute` is not one of "diameter", "radius", "periphery", "center", or "eccentricities". + Notes + ----- + This algorithm was proposed in the following papers: + + F.W. Takes and W.A. Kosters, Determining the Diameter of Small World + Networks, in Proceedings of the 20th ACM International Conference on + Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011. + doi: https://doi.org/10.1145/2063576.2063748 + + F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of + Large Graphs, Algorithms 6(1): 100-118, 2013. + doi: https://doi.org/10.3390/a6010100 + + M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes, + Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected) + Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015. + doi: https://doi.org/10.1016/j.tcs.2015.02.033 + """ + + # init variables + degrees = dict(G.degree()) # start with the highest degree node + minlowernode = max(degrees, key=degrees.get) + N = len(degrees) # number of nodes + # alternate between smallest lower and largest upper bound + high = False + # status variables + ecc_lower = dict.fromkeys(G, 0) + ecc_upper = dict.fromkeys(G, N) + candidates = set(G) + + # (re)set bound extremes + minlower = N + maxlower = 0 + minupper = N + maxupper = 0 + + # repeat the following until there are no more candidates + while candidates: + if high: + current = maxuppernode # select node with largest upper bound + else: + current = minlowernode # select node with smallest lower bound + high = not high + + # get distances from/to current node and derive eccentricity + dist = dict(nx.single_source_shortest_path_length(G, current)) + if len(dist) != N: + msg = "Cannot compute metric because graph is not connected." 
+ raise nx.NetworkXError(msg) + current_ecc = max(dist.values()) + + # print status update + # print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/" + # + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is " + # + str(current_ecc)) + # print(ecc_upper) + + # (re)set bound extremes + maxuppernode = None + minlowernode = None + + # update node bounds + for i in candidates: + # update eccentricity bounds + d = dist[i] + ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d))) + ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d) + + # update min/max values of lower and upper bounds + minlower = min(ecc_lower[i], minlower) + maxlower = max(ecc_lower[i], maxlower) + minupper = min(ecc_upper[i], minupper) + maxupper = max(ecc_upper[i], maxupper) + + # update candidate set + if compute == "diameter": + ruled_out = { + i + for i in candidates + if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper + } + elif compute == "radius": + ruled_out = { + i + for i in candidates + if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower + } + elif compute == "periphery": + ruled_out = { + i + for i in candidates + if ecc_upper[i] < maxlower + and (maxlower == maxupper or ecc_lower[i] > maxupper) + } + elif compute == "center": + ruled_out = { + i + for i in candidates + if ecc_lower[i] > minupper + and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower) + } + elif compute == "eccentricities": + ruled_out = set() + else: + msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'" + raise ValueError(msg) + + ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i]) + candidates -= ruled_out + + # for i in ruled_out: + # print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper)) + # print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower)) + # print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower + # and 2 * ecc_lower[4] >= maxupper)) + + # updating maxuppernode and minlowernode for selection in next round + for i in candidates: + if ( + minlowernode is None + or ( + ecc_lower[i] == ecc_lower[minlowernode] + and degrees[i] > degrees[minlowernode] + ) + or (ecc_lower[i] < ecc_lower[minlowernode]) + ): + minlowernode = i + + if ( + maxuppernode is None + or ( + ecc_upper[i] == ecc_upper[maxuppernode] + and degrees[i] > degrees[maxuppernode] + ) + or (ecc_upper[i] > ecc_upper[maxuppernode]) + ): + maxuppernode = i + + # print status update + # print (" min=" + str(minlower) + "/" + str(minupper) + + # " max=" + str(maxlower) + "/" + str(maxupper) + + # " candidates: " + str(len(candidates))) + # print("cand:",candidates) + # print("ecc_l",ecc_lower) + # print("ecc_u",ecc_upper) + # wait = input("press Enter to continue") + + # return the correct value of the requested metric + if compute == "diameter": + return maxlower + elif compute == "radius": + return minupper + elif compute == "periphery": + p = [v for v in G if ecc_lower[v] == maxlower] + return p + elif compute == "center": + c = [v for v in G if ecc_upper[v] == minupper] + return c + elif compute == "eccentricities": + return ecc_lower + return None + + +def eccentricity(G, v=None, sp=None): + """Returns the eccentricity of nodes in G. + + The eccentricity of a node v is the maximum distance from v to + all other nodes in G. 
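A small cross-check of the bounding heuristic above against the plain eccentricity scan (illustrative only; assumes `networkx` is importable as `nx`)::

    import networkx as nx

    G = nx.petersen_graph()

    # With usebounds=True the bounded search is used on undirected graphs;
    # both routes should agree on this small, well-known graph.
    assert nx.diameter(G, usebounds=True) == nx.diameter(G) == 2
    assert nx.radius(G, usebounds=True) == nx.radius(G) == 2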
+ + Parameters + ---------- + G : NetworkX graph + A graph + + v : node, optional + Return value of specified node + + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + Returns + ------- + ecc : dictionary + A dictionary of eccentricity values keyed by node. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> dict(nx.eccentricity(G)) + {1: 2, 2: 3, 3: 2, 4: 2, 5: 3} + + >>> dict(nx.eccentricity(G, v=[1, 5])) # This returns the eccentrity of node 1 & 5 + {1: 2, 5: 3} + + """ + # if v is None: # none, use entire graph + # nodes=G.nodes() + # elif v in G: # is v a single node + # nodes=[v] + # else: # assume v is a container of nodes + # nodes=v + order = G.order() + + e = {} + for n in G.nbunch_iter(v): + if sp is None: + length = nx.single_source_shortest_path_length(G, n) + L = len(length) + else: + try: + length = sp[n] + L = len(length) + except TypeError as err: + raise nx.NetworkXError('Format of "sp" is invalid.') from err + if L != order: + if G.is_directed(): + msg = ( + "Found infinite path length because the digraph is not" + " strongly connected" + ) + else: + msg = "Found infinite path length because the graph is not" " connected" + raise nx.NetworkXError(msg) + + e[n] = max(length.values()) + + if v in G: + return e[v] # return single value + else: + return e + + +def diameter(G, e=None, usebounds=False): + """Returns the diameter of the graph G. + + The diameter is the maximum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + Returns + ------- + d : integer + Diameter of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.diameter(G) + 3 + + See Also + -------- + eccentricity + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="diameter") + if e is None: + e = eccentricity(G) + return max(e.values()) + + +def periphery(G, e=None, usebounds=False): + """Returns the periphery of the graph G. + + The periphery is the set of nodes with eccentricity equal to the diameter. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + Returns + ------- + p : list + List of nodes in periphery + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.periphery(G) + [2, 5] + + See Also + -------- + barycenter + center + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="periphery") + if e is None: + e = eccentricity(G) + diameter = max(e.values()) + p = [v for v in e if e[v] == diameter] + return p + + +def radius(G, e=None, usebounds=False): + """Returns the radius of the graph G. + + The radius is the minimum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. 
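One eccentricity computation can be reused across several of the measures defined here; a minimal sketch (assuming `networkx` is importable as `nx`)::

    import networkx as nx

    G = nx.path_graph(5)  # 0 - 1 - 2 - 3 - 4
    ecc = nx.eccentricity(G)

    # Pass the precomputed dictionary through the `e` parameter.
    assert nx.diameter(G, e=ecc) == 4
    assert nx.radius(G, e=ecc) == 2
    assert nx.center(G, e=ecc) == [2]
    assert nx.periphery(G, e=ecc) == [0, 4]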
+ + Returns + ------- + r : integer + Radius of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.radius(G) + 2 + + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="radius") + if e is None: + e = eccentricity(G) + return min(e.values()) + + +def center(G, e=None, usebounds=False): + """Returns the center of the graph G. + + The center is the set of nodes with eccentricity equal to radius. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + Returns + ------- + c : list + List of nodes in center + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> list(nx.center(G)) + [1, 3, 4] + + See Also + -------- + barycenter + periphery + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="center") + if e is None: + e = eccentricity(G) + radius = min(e.values()) + p = [v for v in e if e[v] == radius] + return p + + +def barycenter(G, weight=None, attr=None, sp=None): + r"""Calculate barycenter of a connected graph, optionally with edge weights. + + The :dfn:`barycenter` a + :func:`connected ` graph + :math:`G` is the subgraph induced by the set of its nodes :math:`v` + minimizing the objective function + + .. math:: + + \sum_{u \in V(G)} d_G(u, v), + + where :math:`d_G` is the (possibly weighted) :func:`path length + `. + The barycenter is also called the :dfn:`median`. See [West01]_, p. 78. + + Parameters + ---------- + G : :class:`networkx.Graph` + The connected graph :math:`G`. + weight : :class:`str`, optional + Passed through to + :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`. + attr : :class:`str`, optional + If given, write the value of the objective function to each node's + `attr` attribute. Otherwise do not store the value. + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + Returns + ------- + list + Nodes of `G` that induce the barycenter of `G`. + + Raises + ------ + NetworkXNoPath + If `G` is disconnected. `G` may appear disconnected to + :func:`barycenter` if `sp` is given but is missing shortest path + lengths for any pairs. + ValueError + If `sp` and `weight` are both given. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.barycenter(G) + [1, 3, 4] + + See Also + -------- + center + periphery + """ + if sp is None: + sp = nx.shortest_path_length(G, weight=weight) + else: + sp = sp.items() + if weight is not None: + raise ValueError("Cannot use both sp, weight arguments together") + smallest, barycenter_vertices, n = float("inf"), [], len(G) + for v, dists in sp: + if len(dists) < n: + raise nx.NetworkXNoPath( + f"Input graph {G} is disconnected, so every induced subgraph " + "has infinite barycentricity." 
+ ) + barycentricity = sum(dists.values()) + if attr is not None: + G.nodes[v][attr] = barycentricity + if barycentricity < smallest: + smallest = barycentricity + barycenter_vertices = [v] + elif barycentricity == smallest: + barycenter_vertices.append(v) + return barycenter_vertices + + +def _count_lu_permutations(perm_array): + """Counts the number of permutations in SuperLU perm_c or perm_r""" + perm_cnt = 0 + arr = perm_array.tolist() + for i in range(len(arr)): + if i != arr[i]: + perm_cnt += 1 + n = arr.index(i) + arr[n] = arr[i] + arr[i] = i + + return perm_cnt + + +@not_implemented_for("directed") +def resistance_distance(G, nodeA, nodeB, weight=None, invert_weight=True): + """Returns the resistance distance between node A and node B on graph G. + + The resistance distance between two nodes of a graph is akin to treating + the graph as a grid of resistorses with a resistance equal to the provided + weight. + + If weight is not provided, then a weight of 1 is used for all edges. + + Parameters + ---------- + G : NetworkX graph + A graph + + nodeA : node + A node within graph G. + + nodeB : node + A node within graph G, exclusive of Node A. + + weight : string or None, optional (default=None) + The edge data key used to compute the resistance distance. + If None, then each edge has weight 1. + + invert_weight : boolean (default=True) + Proper calculation of resistance distance requires building the + Laplacian matrix with the reciprocal of the weight. Not required + if the weight is already inverted. Weight cannot be zero. + + Returns + ------- + rd : float + Value of effective resistance distance + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.resistance_distance(G, 1, 3) + 0.625 + + Notes + ----- + Overview discussion: + * https://en.wikipedia.org/wiki/Resistance_distance + * http://mathworld.wolfram.com/ResistanceDistance.html + + Additional details: + Vaya Sapobi Samui Vos, “Methods for determining the effective resistance,” M.S., + Mathematisch Instituut, Universiteit Leiden, Leiden, Netherlands, 2016 + Available: `Link to thesis `_ + """ + import numpy as np + import scipy as sp + import scipy.sparse.linalg # call as sp.sparse.linalg + + if not nx.is_connected(G): + msg = "Graph G must be strongly connected." + raise nx.NetworkXError(msg) + elif nodeA not in G: + msg = "Node A is not in graph G." + raise nx.NetworkXError(msg) + elif nodeB not in G: + msg = "Node B is not in graph G." + raise nx.NetworkXError(msg) + elif nodeA == nodeB: + msg = "Node A and Node B cannot be the same." + raise nx.NetworkXError(msg) + + G = G.copy() + node_list = list(G) + + if invert_weight and weight is not None: + if G.is_multigraph(): + for (u, v, k, d) in G.edges(keys=True, data=True): + d[weight] = 1 / d[weight] + else: + for (u, v, d) in G.edges(data=True): + d[weight] = 1 / d[weight] + # Replace with collapsing topology or approximated zero? 
+ + # Using determinants to compute the effective resistance is more memory + # efficent than directly calculating the psuedo-inverse + L = nx.laplacian_matrix(G, node_list, weight=weight).asformat("csc") + indices = list(range(L.shape[0])) + # w/ nodeA removed + indices.remove(node_list.index(nodeA)) + L_a = L[indices, :][:, indices] + # Both nodeA and nodeB removed + indices.remove(node_list.index(nodeB)) + L_ab = L[indices, :][:, indices] + + # Factorize Laplacian submatrixes and extract diagonals + # Order the diagonals to minimize the likelihood over overflows + # during computing the determinant + lu_a = sp.sparse.linalg.splu(L_a, options=dict(SymmetricMode=True)) + LdiagA = lu_a.U.diagonal() + LdiagA_s = np.product(np.sign(LdiagA)) * np.product(lu_a.L.diagonal()) + LdiagA_s *= (-1) ** _count_lu_permutations(lu_a.perm_r) + LdiagA_s *= (-1) ** _count_lu_permutations(lu_a.perm_c) + LdiagA = np.absolute(LdiagA) + LdiagA = np.sort(LdiagA) + + lu_ab = sp.sparse.linalg.splu(L_ab, options=dict(SymmetricMode=True)) + LdiagAB = lu_ab.U.diagonal() + LdiagAB_s = np.product(np.sign(LdiagAB)) * np.product(lu_ab.L.diagonal()) + LdiagAB_s *= (-1) ** _count_lu_permutations(lu_ab.perm_r) + LdiagAB_s *= (-1) ** _count_lu_permutations(lu_ab.perm_c) + LdiagAB = np.absolute(LdiagAB) + LdiagAB = np.sort(LdiagAB) + + # Calculate the ratio of determinant, rd = det(L_ab)/det(L_a) + Ldet = np.product(np.divide(np.append(LdiagAB, [1]), LdiagA)) + rd = Ldet * LdiagAB_s / LdiagA_s + + return rd diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_regular.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_regular.py new file mode 100644 index 0000000..bef9f01 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/distance_regular.py @@ -0,0 +1,231 @@ +""" +======================= +Distance-regular graphs +======================= +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from .distance_measures import diameter + +__all__ = [ + "is_distance_regular", + "is_strongly_regular", + "intersection_array", + "global_parameters", +] + + +def is_distance_regular(G): + """Returns True if the graph is distance regular, False otherwise. + + A connected graph G is distance-regular if for any nodes x,y + and any integers i,j=0,1,...,d (where d is the graph + diameter), the number of vertices at distance i from x and + distance j from y depends only on i,j and the graph distance + between x and y, independently of the choice of x and y. + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + bool + True if the graph is Distance Regular, False otherwise + + Examples + -------- + >>> G = nx.hypercube_graph(6) + >>> nx.is_distance_regular(G) + True + + See Also + -------- + intersection_array, global_parameters + + Notes + ----- + For undirected and simple graphs only + + References + ---------- + .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A. + Distance-Regular Graphs. New York: Springer-Verlag, 1989. + .. [2] Weisstein, Eric W. "Distance-Regular Graph." + http://mathworld.wolfram.com/Distance-RegularGraph.html + + """ + try: + intersection_array(G) + return True + except nx.NetworkXError: + return False + + +def global_parameters(b, c): + """Returns global parameters for a given intersection array. 
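A brief illustrative check of the intersection array machinery on a classic distance-regular graph (assuming `networkx` is importable as `nx`)::

    import networkx as nx

    G = nx.petersen_graph()
    assert nx.is_distance_regular(G)

    # The Petersen graph has intersection array {3, 2; 1, 1} ...
    b, c = nx.intersection_array(G)
    assert (b, c) == ([3, 2], [1, 1])

    # ... which expands to the global parameters (c_i, a_i, b_i).
    assert list(nx.global_parameters(b, c)) == [(0, 0, 3), (1, 0, 2), (1, 2, 0)]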
+ + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + Thus, a distance regular graph has the global parameters, + [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the + intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + where a_i+b_i+c_i=k , k= degree of every vertex. + + Parameters + ---------- + b : list + + c : list + + Returns + ------- + iterable + An iterable over three tuples. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> b, c = nx.intersection_array(G) + >>> list(nx.global_parameters(b, c)) + [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)] + + References + ---------- + .. [1] Weisstein, Eric W. "Global Parameters." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/GlobalParameters.html + + See Also + -------- + intersection_array + """ + return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c)) + + +@not_implemented_for("directed", "multigraph") +def intersection_array(G): + """Returns the intersection array of a distance-regular graph. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + A distance regular graph's intersection array is given by, + [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + b,c: tuple of lists + + Examples + -------- + >>> G = nx.icosahedral_graph() + >>> nx.intersection_array(G) + ([5, 2, 1], [1, 2, 5]) + + References + ---------- + .. [1] Weisstein, Eric W. "Intersection Array." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/IntersectionArray.html + + See Also + -------- + global_parameters + """ + # test for regular graph (all degrees must be equal) + degree = iter(G.degree()) + (_, k) = next(degree) + for _, knext in degree: + if knext != k: + raise nx.NetworkXError("Graph is not distance regular.") + k = knext + path_length = dict(nx.all_pairs_shortest_path_length(G)) + diameter = max(max(path_length[n].values()) for n in path_length) + bint = {} # 'b' intersection array + cint = {} # 'c' intersection array + for u in G: + for v in G: + try: + i = path_length[u][v] + except KeyError as err: # graph must be connected + raise nx.NetworkXError("Graph is not distance regular.") from err + # number of neighbors of v at a distance of i-1 from u + c = len([n for n in G[v] if path_length[n][u] == i - 1]) + # number of neighbors of v at a distance of i+1 from u + b = len([n for n in G[v] if path_length[n][u] == i + 1]) + # b,c are independent of u and v + if cint.get(i, c) != c or bint.get(i, b) != b: + raise nx.NetworkXError("Graph is not distance regular") + bint[i] = b + cint[i] = c + return ( + [bint.get(j, 0) for j in range(diameter)], + [cint.get(j + 1, 0) for j in range(diameter)], + ) + + +# TODO There is a definition for directed strongly regular graphs. +@not_implemented_for("directed", "multigraph") +def is_strongly_regular(G): + """Returns True if and only if the given graph is strongly + regular. 
+ + An undirected graph is *strongly regular* if + + * it is regular, + * each pair of adjacent vertices has the same number of neighbors in + common, + * each pair of nonadjacent vertices has the same number of neighbors + in common. + + Each strongly regular graph is a distance-regular graph. + Conversely, if a distance-regular graph has diameter two, then it is + a strongly regular graph. For more information on distance-regular + graphs, see :func:`is_distance_regular`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + bool + Whether `G` is strongly regular. + + Examples + -------- + + The cycle graph on five vertices is strongly regular. It is + two-regular, each pair of adjacent vertices has no shared neighbors, + and each pair of nonadjacent vertices has one shared neighbor:: + + >>> G = nx.cycle_graph(5) + >>> nx.is_strongly_regular(G) + True + + """ + # Here is an alternate implementation based directly on the + # definition of strongly regular graphs: + # + # return (all_equal(G.degree().values()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in G.edges()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in non_edges(G))) + # + # We instead use the fact that a distance-regular graph of diameter + # two is strongly regular. + return is_distance_regular(G) and diameter(G) == 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/dominance.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/dominance.py new file mode 100644 index 0000000..cd119ba --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/dominance.py @@ -0,0 +1,133 @@ +""" +Dominance algorithms. +""" + +from functools import reduce + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["immediate_dominators", "dominance_frontiers"] + + +@not_implemented_for("undirected") +def immediate_dominators(G, start): + """Returns the immediate dominators of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + idom : dict keyed by nodes + A dict containing the immediate dominators of each node reachable from + `start`. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. + + Notes + ----- + Except for `start`, the immediate dominators are the parents of their + corresponding nodes in the dominator tree. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted(nx.immediate_dominators(G, 1).items()) + [(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. 
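Building an explicit dominator tree from the immediate-dominator mapping is straightforward; a hedged sketch reusing the graph from the docstring example (assuming `networkx` is importable as `nx`)::

    import networkx as nx

    G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
    idom = nx.immediate_dominators(G, 1)

    # Every node except the start hangs under its immediate dominator.
    dom_tree = nx.DiGraph(
        (parent, node) for node, parent in idom.items() if node != parent
    )
    assert sorted(dom_tree.edges()) == [(1, 2), (1, 3), (1, 5), (3, 4)]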
+ """ + if start not in G: + raise nx.NetworkXError("start is not in G") + + idom = {start: start} + + order = list(nx.dfs_postorder_nodes(G, start)) + dfn = {u: i for i, u in enumerate(order)} + order.pop() + order.reverse() + + def intersect(u, v): + while u != v: + while dfn[u] < dfn[v]: + u = idom[u] + while dfn[u] > dfn[v]: + v = idom[v] + return u + + changed = True + while changed: + changed = False + for u in order: + new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom)) + if u not in idom or idom[u] != new_idom: + idom[u] = new_idom + changed = True + + return idom + + +def dominance_frontiers(G, start): + """Returns the dominance frontiers of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + df : dict keyed by nodes + A dict containing the dominance frontiers of each node reachable from + `start` as lists. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items()) + [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. + """ + idom = nx.immediate_dominators(G, start) + + df = {u: set() for u in idom} + for u in idom: + if len(G.pred[u]) >= 2: + for v in G.pred[u]: + if v in idom: + while v != idom[u]: + df[v].add(u) + v = idom[v] + return df diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/dominating.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/dominating.py new file mode 100644 index 0000000..32fff4d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/dominating.py @@ -0,0 +1,92 @@ +"""Functions for computing dominating sets in a graph.""" +from itertools import chain + +import networkx as nx +from networkx.utils import arbitrary_element + +__all__ = ["dominating_set", "is_dominating_set"] + + +def dominating_set(G, start_with=None): + r"""Finds a dominating set for the graph G. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + start_with : node (default=None) + Node to use as a starting point for the algorithm. + + Returns + ------- + D : set + A dominating set for G. + + Notes + ----- + This function is an implementation of algorithm 7 in [2]_ which + finds some dominating set, not necessarily the smallest one. + + See also + -------- + is_dominating_set + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dominating_set + + .. [2] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + all_nodes = set(G) + if start_with is None: + start_with = arbitrary_element(all_nodes) + if start_with not in G: + raise nx.NetworkXError(f"node {start_with} is not in G") + dominating_set = {start_with} + dominated_nodes = set(G[start_with]) + remaining_nodes = all_nodes - dominated_nodes - dominating_set + while remaining_nodes: + # Choose an arbitrary node and determine its undominated neighbors. 
+ v = remaining_nodes.pop() + undominated_neighbors = set(G[v]) - dominating_set + # Add the node to the dominating set and the neighbors to the + # dominated set. Finally, remove all of those nodes from the set + # of remaining nodes. + dominating_set.add(v) + dominated_nodes |= undominated_neighbors + remaining_nodes -= undominated_neighbors + return dominating_set + + +def is_dominating_set(G, nbunch): + """Checks if `nbunch` is a dominating set for `G`. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + nbunch : iterable + An iterable of nodes in the graph `G`. + + See also + -------- + dominating_set + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dominating_set + + """ + testset = {n for n in nbunch if n in G} + nbrs = set(chain.from_iterable(G[n] for n in testset)) + return len(set(G) - testset - nbrs) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/efficiency_measures.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/efficiency_measures.py new file mode 100644 index 0000000..45f19cd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/efficiency_measures.py @@ -0,0 +1,147 @@ +"""Provides functions for computing the efficiency of nodes and graphs.""" + +import networkx as nx +from networkx.exception import NetworkXNoPath + +from ..utils import not_implemented_for + +__all__ = ["efficiency", "local_efficiency", "global_efficiency"] + + +@not_implemented_for("directed") +def efficiency(G, u, v): + """Returns the efficiency of a pair of nodes in a graph. + + The *efficiency* of a pair of nodes is the multiplicative inverse of the + shortest path distance between the nodes [1]_. Returns 0 if no path + between nodes. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + u, v : node + Nodes in the graph ``G``. + + Returns + ------- + float + Multiplicative inverse of the shortest path distance between the nodes. + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + try: + eff = 1 / nx.shortest_path_length(G, u, v) + except NetworkXNoPath: + eff = 0 + return eff + + +@not_implemented_for("directed") +def global_efficiency(G): + """Returns the average global efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *average + global efficiency* of a graph is the average efficiency of all pairs of + nodes [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average global efficiency. + + Returns + ------- + float + The average global efficiency of the graph. + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. 
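A small sanity sketch for the efficiency measures in this module (assuming `networkx` is importable as `nx`): a complete graph attains the maximal value, and deleting an edge lowers it::

    import networkx as nx

    K4 = nx.complete_graph(4)
    # Every pair is at distance 1, so both measures evaluate to 1.0.
    assert nx.global_efficiency(K4) == 1.0
    assert nx.local_efficiency(K4) == 1.0

    # Removing an edge pushes one pair to distance 2 and lowers the average.
    K4.remove_edge(0, 1)
    assert nx.global_efficiency(K4) < 1.0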
+ + + """ + n = len(G) + denom = n * (n - 1) + if denom != 0: + lengths = nx.all_pairs_shortest_path_length(G) + g_eff = 0 + for source, targets in lengths: + for target, distance in targets.items(): + if distance > 0: + g_eff += 1 / distance + g_eff /= denom + # g_eff = sum(1 / d for s, tgts in lengths + # for t, d in tgts.items() if d > 0) / denom + else: + g_eff = 0 + # TODO This can be made more efficient by computing all pairs shortest + # path lengths in parallel. + return g_eff + + +@not_implemented_for("directed") +def local_efficiency(G): + """Returns the average local efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *local + efficiency* of a node in the graph is the average global efficiency of the + subgraph induced by the neighbors of the node. The *average local + efficiency* is the average of the local efficiencies of each node [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + + Returns + ------- + float + The average local efficiency of the graph. + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + # TODO This summation can be trivially parallelized. + efficiency_list = (global_efficiency(G.subgraph(G[v])) for v in G) + return sum(efficiency_list) / len(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/euler.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/euler.py new file mode 100644 index 0000000..4643a9f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/euler.py @@ -0,0 +1,452 @@ +""" +Eulerian circuits and graphs. +""" +from itertools import combinations + +import networkx as nx + +from ..utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_eulerian", + "eulerian_circuit", + "eulerize", + "is_semieulerian", + "has_eulerian_path", + "eulerian_path", +] + + +def is_eulerian(G): + """Returns True if and only if `G` is Eulerian. + + A graph is *Eulerian* if it has an Eulerian circuit. An *Eulerian + circuit* is a closed walk that includes each edge of a graph exactly + once. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not + considered to have Eulerian circuits. Therefore, if the graph is not + connected (or not strongly connected, for directed graphs), this function + returns False. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + Examples + -------- + >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]})) + True + >>> nx.is_eulerian(nx.complete_graph(5)) + True + >>> nx.is_eulerian(nx.petersen_graph()) + False + + If you prefer to allow graphs with isolated vertices to have Eulerian circuits, + you can first remove such vertices and then call `is_eulerian` as below example shows. 
+ + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.is_eulerian(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.is_eulerian(G) + True + + + """ + if G.is_directed(): + # Every node must have equal in degree and out degree and the + # graph must be strongly connected + return all( + G.in_degree(n) == G.out_degree(n) for n in G + ) and nx.is_strongly_connected(G) + # An undirected Eulerian graph has no vertices of odd degree and + # must be connected. + return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G) + + +def is_semieulerian(G): + """Return True iff `G` is semi-Eulerian. + + G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit. + + See Also + -------- + has_eulerian_path + is_eulerian + """ + return has_eulerian_path(G) and not is_eulerian(G) + + +def _find_path_start(G): + """Return a suitable starting vertex for an Eulerian path. + + If no path exists, return None. + """ + if not has_eulerian_path(G): + return None + + if is_eulerian(G): + return arbitrary_element(G) + + if G.is_directed(): + v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v)) + # Determines which is the 'start' node (as opposed to the 'end') + if G.out_degree(v1) > G.in_degree(v1): + return v1 + else: + return v2 + + else: + # In an undirected graph randomly choose one of the possibilities + start = [v for v in G if G.degree(v) % 2 != 0][0] + return start + + +def _simplegraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [source] + last_vertex = None + while vertex_stack: + current_vertex = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex) + last_vertex = current_vertex + vertex_stack.pop() + else: + _, next_vertex = arbitrary_element(edges(current_vertex)) + vertex_stack.append(next_vertex) + G.remove_edge(current_vertex, next_vertex) + + +def _multigraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [(source, None)] + last_vertex = None + last_key = None + while vertex_stack: + current_vertex, current_key = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex, last_key) + last_vertex, last_key = current_vertex, current_key + vertex_stack.pop() + else: + triple = arbitrary_element(edges(current_vertex, keys=True)) + _, next_vertex, next_key = triple + vertex_stack.append((next_vertex, next_key)) + G.remove_edge(current_vertex, next_vertex, next_key) + + +def eulerian_circuit(G, source=None, keys=False): + """Returns an iterator over the edges of an Eulerian circuit in `G`. + + An *Eulerian circuit* is a closed walk that includes each edge of a + graph exactly once. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + source : node, optional + Starting node for circuit. + + keys : bool + If False, edges generated by this function will be of the form + ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``. + This option is ignored unless `G` is a multigraph. + + Returns + ------- + edges : iterator + An iterator over edges in the Eulerian circuit. + + Raises + ------ + NetworkXError + If the graph is not Eulerian. 
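The relationship between the Eulerian predicates above can be checked on two tiny graphs; a minimal sketch (assuming `networkx` is importable as `nx`)::

    import networkx as nx

    # A path has exactly two odd-degree nodes: Eulerian path, but no circuit.
    P = nx.path_graph(4)
    assert nx.has_eulerian_path(P)
    assert not nx.is_eulerian(P)
    assert nx.is_semieulerian(P)

    # A cycle is Eulerian, and therefore not merely semi-Eulerian.
    C = nx.cycle_graph(4)
    assert nx.is_eulerian(C)
    assert not nx.is_semieulerian(C)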
+ + See Also + -------- + is_eulerian + + Notes + ----- + This is a linear time implementation of an algorithm adapted from [1]_. + + For general information about Euler tours, see [2]_. + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. [2] https://en.wikipedia.org/wiki/Eulerian_path + + Examples + -------- + To get an Eulerian circuit in an undirected graph:: + + >>> G = nx.complete_graph(3) + >>> list(nx.eulerian_circuit(G)) + [(0, 2), (2, 1), (1, 0)] + >>> list(nx.eulerian_circuit(G, source=1)) + [(1, 2), (2, 0), (0, 1)] + + To get the sequence of vertices in an Eulerian circuit:: + + >>> [u for u, v in nx.eulerian_circuit(G)] + [0, 2, 1] + + """ + if not is_eulerian(G): + raise nx.NetworkXError("G is not Eulerian.") + if G.is_directed(): + G = G.reverse() + else: + G = G.copy() + if source is None: + source = arbitrary_element(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + + +def has_eulerian_path(G, source=None): + """Return True iff `G` has an Eulerian path. + + An Eulerian path is a path in a graph which uses each edge of a graph + exactly once. If `source` is specified, then this function checks + whether an Eulerian path that starts at node `source` exists. + + A directed graph has an Eulerian path iff: + - at most one vertex has out_degree - in_degree = 1, + - at most one vertex has in_degree - out_degree = 1, + - every other vertex has equal in_degree and out_degree, + - and all of its vertices belong to a single connected + component of the underlying undirected graph. + + If `source` is not None, an Eulerian path starting at `source` exists if no + other node has out_degree - in_degree = 1. This is equivalent to either + there exists an Eulerian circuit or `source` has out_degree - in_degree = 1 + and the conditions above hold. + + An undirected graph has an Eulerian path iff: + - exactly zero or two vertices have odd degree, + - and all of its vertices belong to a single connected component. + + If `source` is not None, an Eulerian path starting at `source` exists if + either there exists an Eulerian circuit or `source` has an odd degree and the + conditions above hold. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not considered + to have an Eulerian path. Therefore, if the graph is not connected (or not strongly + connected, for directed graphs), this function returns False. + + Parameters + ---------- + G : NetworkX Graph + The graph to find an euler path in. + + source : node, optional + Starting node for path. + + Returns + ------- + Bool : True if G has an Eulerian path. + + Example + ------- + If you prefer to allow graphs with isolated vertices to have Eulerian path, + you can first remove such vertices and then call `has_eulerian_path` as below example shows. 
+ + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.has_eulerian_path(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.has_eulerian_path(G) + True + + See Also + -------- + is_eulerian + eulerian_path + """ + if nx.is_eulerian(G): + return True + + if G.is_directed(): + ins = G.in_degree + outs = G.out_degree + # Since we know it is not eulerian, outs - ins must be 1 for source + if source is not None and outs[source] - ins[source] != 1: + return False + + unbalanced_ins = 0 + unbalanced_outs = 0 + for v in G: + if ins[v] - outs[v] == 1: + unbalanced_ins += 1 + elif outs[v] - ins[v] == 1: + unbalanced_outs += 1 + elif ins[v] != outs[v]: + return False + + return ( + unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G) + ) + else: + # We know it is not eulerian, so degree of source must be odd. + if source is not None and G.degree[source] % 2 != 1: + return False + + # Sum is 2 since we know it is not eulerian (which implies sum is 0) + return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G) + + +def eulerian_path(G, source=None, keys=False): + """Return an iterator over the edges of an Eulerian path in `G`. + + Parameters + ---------- + G : NetworkX Graph + The graph in which to look for an eulerian path. + source : node or None (default: None) + The node at which to start the search. None means search over all + starting nodes. + keys : Bool (default: False) + Indicates whether to yield edge 3-tuples (u, v, edge_key). + The default yields edge 2-tuples + + Yields + ------ + Edge tuples along the eulerian path. + + Warning: If `source` provided is not the start node of an Euler path + will raise error even if an Euler Path exists. + """ + if not has_eulerian_path(G, source): + raise nx.NetworkXError("Graph has no Eulerian paths.") + if G.is_directed(): + G = G.reverse() + if source is None or nx.is_eulerian(G) is False: + source = _find_path_start(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + else: + G = G.copy() + if source is None: + source = _find_path_start(G) + if G.is_multigraph(): + if keys: + yield from reversed( + [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)] + ) + + +@not_implemented_for("directed") +def eulerize(G): + """Transforms a graph into an Eulerian graph + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + G : NetworkX multigraph + + Raises + ------ + NetworkXError + If the graph is not connected. + + See Also + -------- + is_eulerian + eulerian_circuit + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. [2] https://en.wikipedia.org/wiki/Eulerian_path + .. 
[3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf + + Examples + -------- + >>> G = nx.complete_graph(10) + >>> H = nx.eulerize(G) + >>> nx.is_eulerian(H) + True + + """ + if G.order() == 0: + raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph") + if not nx.is_connected(G): + raise nx.NetworkXError("G is not connected") + odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1] + G = nx.MultiGraph(G) + if len(odd_degree_nodes) == 0: + return G + + # get all shortest paths between vertices of odd degree + odd_deg_pairs_paths = [ + (m, {n: nx.shortest_path(G, source=m, target=n)}) + for m, n in combinations(odd_degree_nodes, 2) + ] + + # use inverse path lengths as edge-weights in a new graph + # store the paths in the graph for easy indexing later + Gp = nx.Graph() + for n, Ps in odd_deg_pairs_paths: + for m, P in Ps.items(): + if n != m: + Gp.add_edge(m, n, weight=1 / len(P), path=P) + + # find the minimum weight matching of edges in the weighted graph + best_matching = nx.Graph(list(nx.max_weight_matching(Gp))) + + # duplicate each edge along each path in the set of paths in Gp + for m, n in best_matching.edges(): + path = Gp[m][n]["path"] + G.add_edges_from(nx.utils.pairwise(path)) + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/__init__.py new file mode 100644 index 0000000..c5d19ab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/__init__.py @@ -0,0 +1,11 @@ +from .maxflow import * +from .mincost import * +from .boykovkolmogorov import * +from .dinitz_alg import * +from .edmondskarp import * +from .gomory_hu import * +from .preflowpush import * +from .shortestaugmentingpath import * +from .capacityscaling import * +from .networksimplex import * +from .utils import build_flow_dict, build_residual_network diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/boykovkolmogorov.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/boykovkolmogorov.py new file mode 100644 index 0000000..fd96681 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/boykovkolmogorov.py @@ -0,0 +1,367 @@ +""" +Boykov-Kolmogorov algorithm for maximum flow problems. +""" +from collections import deque +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network + +__all__ = ["boykov_kolmogorov"] + + +def boykov_kolmogorov( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + r"""Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has worse case complexity $O(n^2 m |C|)$ for $n$ nodes, $m$ + edges, and $|C|$ the cost of the minimum cut [1]_. This implementation + uses the marking heuristic defined in [2]_ which improves its running + time in many practical problems. + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. 
If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import boykov_kolmogorov + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = boykov_kolmogorov(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + A nice feature of the Boykov-Kolmogorov algorithm is that a partition + of the nodes that defines a minimum cut can be easily computed based + on the search trees used during the algorithm. These trees are stored + in the graph attribute `trees` of the residual network. 
+ + >>> source_tree, target_tree = R.graph["trees"] + >>> partition = (set(source_tree), set(G) - set(source_tree)) + + Or equivalently: + + >>> partition = (set(G) - set(target_tree), set(target_tree)) + + References + ---------- + .. [1] Boykov, Y., & Kolmogorov, V. (2004). An experimental comparison + of min-cut/max-flow algorithms for energy minimization in vision. + Pattern Analysis and Machine Intelligence, IEEE Transactions on, + 26(9), 1124-1137. + https://doi.org/10.1109/TPAMI.2004.60 + + .. [2] Vladimir Kolmogorov. Graph-based Algorithms for Multi-camera + Reconstruction Problem. PhD thesis, Cornell University, CS Department, + 2003. pp. 109-114. + https://web.archive.org/web/20170809091249/https://pub.ist.ac.at/~vnk/papers/thesis.pdf + + """ + R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "boykov_kolmogorov" + return R + + +def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + # This is way too slow + # nx.set_edge_attributes(R, 0, 'flow') + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrary high value as infinite. It is computed + # when building the residual network. + INF = R.graph["inf"] + + if cutoff is None: + cutoff = INF + + R_succ = R.succ + R_pred = R.pred + + def grow(): + """Bidirectional breadth-first search for the growth stage. + + Returns a connecting edge, that is and edge that connects + a node from the source search tree with a node from the + target search tree. + The first node in the connecting edge is always from the + source tree and the last node from the target tree. + """ + while active: + u = active[0] + if u in source_tree: + this_tree = source_tree + other_tree = target_tree + neighbors = R_succ + else: + this_tree = target_tree + other_tree = source_tree + neighbors = R_pred + for v, attr in neighbors[u].items(): + if attr["capacity"] - attr["flow"] > 0: + if v not in this_tree: + if v in other_tree: + return (u, v) if this_tree is source_tree else (v, u) + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + active.append(v) + elif v in this_tree and _is_closer(u, v): + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + _ = active.popleft() + return None, None + + def augment(u, v): + """Augmentation stage. + + Reconstruct path and determine its residual capacity. + We start from a connecting edge, which links a node + from the source tree to a node from the target tree. + The connecting edge is the output of the grow function + and the input of this function. + """ + attr = R_succ[u][v] + flow = min(INF, attr["capacity"] - attr["flow"]) + path = [u] + # Trace a path from u to s in source_tree. + w = u + while w != s: + n = w + w = source_tree[n] + attr = R_pred[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + path.reverse() + # Trace a path from v to t in target_tree. + path.append(v) + w = v + while w != t: + n = w + w = target_tree[n] + attr = R_succ[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + # Augment flow along the path and check for saturated edges. 
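+        # Each saturated tree edge orphans the endpoint farther from that
+        # tree's root; orphans are queued by distance label and re-attached
+        # or discarded in the adoption stage that follows.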
+ it = iter(path) + u = next(it) + these_orphans = [] + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + if R_succ[u][v]["flow"] == R_succ[u][v]["capacity"]: + if v in source_tree: + source_tree[v] = None + these_orphans.append(v) + if u in target_tree: + target_tree[u] = None + these_orphans.append(u) + u = v + orphans.extend(sorted(these_orphans, key=dist.get)) + return flow + + def adopt(): + """Adoption stage. + + Reconstruct search trees by adopting or discarding orphans. + During augmentation stage some edges got saturated and thus + the source and target search trees broke down to forests, with + orphans as roots of some of its trees. We have to reconstruct + the search trees rooted to source and target before we can grow + them again. + """ + while orphans: + u = orphans.popleft() + if u in source_tree: + tree = source_tree + neighbors = R_pred + else: + tree = target_tree + neighbors = R_succ + nbrs = ((n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree) + for v, attr, d in sorted(nbrs, key=itemgetter(2)): + if attr["capacity"] - attr["flow"] > 0: + if _has_valid_root(v, tree): + tree[u] = v + dist[u] = dist[v] + 1 + timestamp[u] = time + break + else: + nbrs = ( + (n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree + ) + for v, attr, d in sorted(nbrs, key=itemgetter(2)): + if attr["capacity"] - attr["flow"] > 0: + if v not in active: + active.append(v) + if tree[v] == u: + tree[v] = None + orphans.appendleft(v) + if u in active: + active.remove(u) + del tree[u] + + def _has_valid_root(n, tree): + path = [] + v = n + while v is not None: + path.append(v) + if v == s or v == t: + base_dist = 0 + break + elif timestamp[v] == time: + base_dist = dist[v] + break + v = tree[v] + else: + return False + length = len(path) + for i, u in enumerate(path, 1): + dist[u] = base_dist + length - i + timestamp[u] = time + return True + + def _is_closer(u, v): + return timestamp[v] <= timestamp[u] and dist[v] > dist[u] + 1 + + source_tree = {s: None} + target_tree = {t: None} + active = deque([s, t]) + orphans = deque() + flow_value = 0 + # data structures for the marking heuristic + time = 1 + timestamp = {s: time, t: time} + dist = {s: 0, t: 0} + while flow_value < cutoff: + # Growth stage + u, v = grow() + if u is None: + break + time += 1 + # Augmentation stage + flow_value += augment(u, v) + # Adoption stage + adopt() + + if flow_value * 2 > INF: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + + # Add source and target tree in a graph attribute. + # A partition that defines a minimum cut can be directly + # computed from the search trees as explained in the docstrings. + R.graph["trees"] = (source_tree, target_tree) + # Add the standard flow_value graph attribute. + R.graph["flow_value"] = flow_value + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/capacityscaling.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/capacityscaling.py new file mode 100644 index 0000000..b565077 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/capacityscaling.py @@ -0,0 +1,404 @@ +""" +Capacity scaling minimum cost flow algorithm. 
+""" + +__all__ = ["capacity_scaling"] + +from itertools import chain +from math import log + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + + +def _detect_unboundedness(R): + """Detect infinite-capacity negative cycles.""" + G = nx.DiGraph() + G.add_nodes_from(R) + + # Value simulating infinity. + inf = R.graph["inf"] + # True infinity. + f_inf = float("inf") + for u in R: + for v, e in R[u].items(): + # Compute the minimum weight of infinite-capacity (u, v) edges. + w = f_inf + for k, e in e.items(): + if e["capacity"] == inf: + w = min(w, e["weight"]) + if w != f_inf: + G.add_edge(u, v, weight=w) + + if nx.negative_edge_cycle(G): + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." + ) + + +@not_implemented_for("undirected") +def _build_residual_network(G, demand, capacity, weight): + """Build a residual network and initialize a zero flow.""" + if sum(G.nodes[u].get(demand, 0) for u in G) != 0: + raise nx.NetworkXUnfeasible("Sum of the demands should be 0.") + + R = nx.MultiDiGraph() + R.add_nodes_from( + (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G + ) + + inf = float("inf") + # Detect selfloops with infinite capacities and negative weights. + for u, v, e in nx.selfloop_edges(G, data=True): + if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf: + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." + ) + + # Extract edges with positive capacities. Self loops excluded. + if G.is_multigraph(): + edge_list = [ + (u, v, k, e) + for u, v, k, e in G.edges(data=True, keys=True) + if u != v and e.get(capacity, inf) > 0 + ] + else: + edge_list = [ + (u, v, 0, e) + for u, v, e in G.edges(data=True) + if u != v and e.get(capacity, inf) > 0 + ] + # Simulate infinity with the larger of the sum of absolute node imbalances + # the sum of finite edge capacities or any positive value if both sums are + # zero. This allows the infinite-capacity edges to be distinguished for + # unboundedness detection and directly participate in residual capacity + # calculation. + inf = ( + max( + sum(abs(R.nodes[u]["excess"]) for u in R), + 2 + * sum( + e[capacity] + for u, v, k, e in edge_list + if capacity in e and e[capacity] != inf + ), + ) + or 1 + ) + for u, v, k, e in edge_list: + r = min(e.get(capacity, inf), inf) + w = e.get(weight, 0) + # Add both (u, v) and (v, u) into the residual network marked with the + # original key. (key[1] == True) indicates the (u, v) is in the + # original network. + R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0) + R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0) + + # Record the value simulating infinity. + R.graph["inf"] = inf + + _detect_unboundedness(R) + + return R + + +def _build_flow_dict(G, R, capacity, weight): + """Build a flow dictionary from a residual network.""" + inf = float("inf") + flow_dict = {} + if G.is_multigraph(): + for u in G: + flow_dict[u] = {} + for v, es in G[u].items(): + flow_dict[u][v] = { + # Always saturate negative selfloops. + k: ( + 0 + if ( + u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + ) + else e[capacity] + ) + for k, e in es.items() + } + for v, es in R[u].items(): + if v in flow_dict[u]: + flow_dict[u][v].update( + (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0 + ) + else: + for u in G: + flow_dict[u] = { + # Always saturate negative selfloops. 
+ v: ( + 0 + if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0) + else e[capacity] + ) + for v, e in G[u].items() + } + flow_dict[u].update( + (v, e["flow"]) + for v, es in R[u].items() + for e in es.values() + if e["flow"] > 0 + ) + return flow_dict + + +def capacity_scaling( + G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap +): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a capacity scaling successive shortest augmenting path algorithm. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph or MultiDiGraph on which a minimum cost flow satisfying all + demands is to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + heap : class + Type of heap to be used in the algorithm. It should be a subclass of + :class:`MinHeap` or implement a compatible interface. + + If a stock heap implementation is to be used, :class:`BinaryHeap` is + recommended over :class:`PairingHeap` for Python implementations without + optimized attribute accesses (e.g., CPython) despite a slower + asymptotic running time. For Python implementations with optimized + attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better + performance. Default value: :class:`BinaryHeap`. + + Returns + ------- + flowCost : integer + Cost of a minimum cost flow satisfying all demands. + + flowDict : dictionary + If G is a digraph, a dict-of-dicts keyed by nodes such that + flowDict[u][v] is the flow on edge (u, v). + If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes + so that flowDict[u][v][key] is the flow on edge (u, v, key). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed, + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + Notes + ----- + This algorithm does not work if edge weights are floating-point numbers. + + See also + -------- + :meth:`network_simplex` + + Examples + -------- + A simple example of a min cost flow problem. 
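+    (In the optimal flow found below, four units take the cheaper route
+    ``a -> b -> d``, which is limited by the capacity of edge ``(a, b)``,
+    and the remaining unit takes ``a -> c -> d``, for a total cost of 24.)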
+ + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost, flowDict = nx.capacity_scaling(G) + >>> flowCost + 24 + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + + It is possible to change the name of the attributes used for the + algorithm. + + >>> G = nx.DiGraph() + >>> G.add_node("p", spam=-4) + >>> G.add_node("q", spam=2) + >>> G.add_node("a", spam=-2) + >>> G.add_node("d", spam=-1) + >>> G.add_node("t", spam=2) + >>> G.add_node("w", spam=3) + >>> G.add_edge("p", "q", cost=7, vacancies=5) + >>> G.add_edge("p", "a", cost=1, vacancies=4) + >>> G.add_edge("q", "d", cost=2, vacancies=3) + >>> G.add_edge("t", "q", cost=1, vacancies=2) + >>> G.add_edge("a", "t", cost=2, vacancies=4) + >>> G.add_edge("d", "w", cost=3, vacancies=4) + >>> G.add_edge("t", "w", cost=4, vacancies=1) + >>> flowCost, flowDict = nx.capacity_scaling( + ... G, demand="spam", capacity="vacancies", weight="cost" + ... ) + >>> flowCost + 37 + >>> flowDict + {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}} + """ + R = _build_residual_network(G, demand, capacity, weight) + + inf = float("inf") + # Account cost of negative selfloops. + flow_cost = sum( + 0 + if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + else e[capacity] * e[weight] + for u, v, e in nx.selfloop_edges(G, data=True) + ) + + # Determine the maxmimum edge capacity. + wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True)))) + if wmax == -inf: + # Residual network has no edges. + return flow_cost, _build_flow_dict(G, R, capacity, weight) + + R_nodes = R.nodes + R_succ = R.succ + + delta = 2 ** int(log(wmax, 2)) + while delta >= 1: + # Saturate Δ-residual edges with negative reduced costs to achieve + # Δ-optimality. + for u in R: + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + for k, e in es.items(): + flow = e["capacity"] - e["flow"] + if e["weight"] - p_u + R_nodes[v]["potential"] < 0: + flow = e["capacity"] - e["flow"] + if flow >= delta: + e["flow"] += flow + R_succ[v][u][(k[0], not k[1])]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + # Determine the Δ-active nodes. + S = set() + T = set() + S_add = S.add + S_remove = S.remove + T_add = T.add + T_remove = T.remove + for u in R: + excess = R_nodes[u]["excess"] + if excess >= delta: + S_add(u) + elif excess <= -delta: + T_add(u) + # Repeatedly augment flow from S to T along shortest paths until + # Δ-feasibility is achieved. + while S and T: + s = arbitrary_element(S) + t = None + # Search for a shortest path in terms of reduce costs from s to + # any t in T in the Δ-residual network. + d = {} + pred = {s: None} + h = heap() + h_insert = h.insert + h_get = h.get + h_insert(s, 0) + while h: + u, d_u = h.pop() + d[u] = d_u + if u in T: + # Path found. + t = u + break + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + if v in d: + continue + wmin = inf + # Find the minimum-weighted (u, v) Δ-residual edge. + for k, e in es.items(): + if e["capacity"] - e["flow"] >= delta: + w = e["weight"] + if w < wmin: + wmin = w + kmin = k + emin = e + if wmin == inf: + continue + # Update the distance label of v. 
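+                    # d_v is a distance in reduced costs: the edge weight wmin
+                    # shifted by the node potentials. The saturation pass at
+                    # the top of each phase leaves every Δ-residual edge with a
+                    # nonnegative reduced cost, so this Dijkstra-style search
+                    # over the heap remains valid.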
+ d_v = d_u + wmin - p_u + R_nodes[v]["potential"] + if h_insert(v, d_v): + pred[v] = (u, kmin, emin) + if t is not None: + # Augment Δ units of flow from s to t. + while u != s: + v = u + u, k, e = pred[v] + e["flow"] += delta + R_succ[v][u][(k[0], not k[1])]["flow"] -= delta + # Account node excess and deficit. + R_nodes[s]["excess"] -= delta + R_nodes[t]["excess"] += delta + if R_nodes[s]["excess"] < delta: + S_remove(s) + if R_nodes[t]["excess"] > -delta: + T_remove(t) + # Update node potentials. + d_t = d[t] + for u, d_u in d.items(): + R_nodes[u]["potential"] -= d_u - d_t + else: + # Path not found. + S_remove(s) + delta //= 2 + + if any(R.nodes[u]["excess"] != 0 for u in R): + raise nx.NetworkXUnfeasible("No flow satisfying all demands.") + + # Calculate the flow cost. + for u in R: + for v, es in R_succ[u].items(): + for e in es.values(): + flow = e["flow"] + if flow > 0: + flow_cost += flow * e["weight"] + + return flow_cost, _build_flow_dict(G, R, capacity, weight) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/dinitz_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/dinitz_alg.py new file mode 100644 index 0000000..51860fa --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/dinitz_alg.py @@ -0,0 +1,211 @@ +""" +Dinitz' algorithm for maximum flow problems. +""" +from collections import deque + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network +from networkx.utils import pairwise + +__all__ = ["dinitz"] + + +def dinitz(G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None): + """Find a maximum single-commodity flow using Dinitz' algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges [1]_. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. 
+ + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import dinitz + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = dinitz(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + References + ---------- + .. [1] Dinitz' Algorithm: The Original Version and Even's Version. + 2006. Yefim Dinitz. In Theoretical Computer Science. Lecture + Notes in Computer Science. Volume 3895. pp 218-240. + https://doi.org/10.1007/11685654_10 + + """ + R = dinitz_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "dinitz" + return R + + +def dinitz_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrary high value as infinite. It is computed + # when building the residual network. 
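+    # INF also serves as the default cutoff below, so with cutoff=None the
+    # main loop only stops once breadth-first search can no longer reach t.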
+ INF = R.graph["inf"] + + if cutoff is None: + cutoff = INF + + R_succ = R.succ + R_pred = R.pred + + def breath_first_search(): + parents = {} + queue = deque([s]) + while queue: + if t in parents: + break + u = queue.popleft() + for v in R_succ[u]: + attr = R_succ[u][v] + if v not in parents and attr["capacity"] - attr["flow"] > 0: + parents[v] = u + queue.append(v) + return parents + + def depth_first_search(parents): + """Build a path using DFS starting from the sink""" + path = [] + u = t + flow = INF + while u != s: + path.append(u) + v = parents[u] + flow = min(flow, R_pred[u][v]["capacity"] - R_pred[u][v]["flow"]) + u = v + path.append(s) + # Augment the flow along the path found + if flow > 0: + for u, v in pairwise(path): + R_pred[u][v]["flow"] += flow + R_pred[v][u]["flow"] -= flow + return flow + + flow_value = 0 + while flow_value < cutoff: + parents = breath_first_search() + if t not in parents: + break + this_flow = depth_first_search(parents) + if this_flow * 2 > INF: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + flow_value += this_flow + + R.graph["flow_value"] = flow_value + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/edmondskarp.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/edmondskarp.py new file mode 100644 index 0000000..ef5cc0c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/edmondskarp.py @@ -0,0 +1,239 @@ +""" +Edmonds-Karp algorithm for maximum flow problems. +""" + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network + +__all__ = ["edmonds_karp"] + + +def edmonds_karp_core(R, s, t, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def bidirectional_bfs(): + """Bidirectional breadth-first search for an augmenting path.""" + pred = {s: None} + q_s = [s] + succ = {t: None} + q_t = [t] + while True: + q = [] + if len(q_s) <= len(q_t): + for u in q_s: + for v, attr in R_succ[u].items(): + if v not in pred and attr["flow"] < attr["capacity"]: + pred[v] = u + if v in succ: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_s = q + else: + for u in q_t: + for v, attr in R_pred[u].items(): + if v not in succ and attr["flow"] < attr["capacity"]: + succ[v] = u + if v in pred: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_t = q + + # Look for shortest augmenting paths using breadth-first search. + flow_value = 0 + while flow_value < cutoff: + v, pred, succ = bidirectional_bfs() + if pred is None: + break + path = [v] + # Trace a path from s to v. + u = v + while u != s: + u = pred[u] + path.append(u) + path.reverse() + # Trace a path from v to t. 
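+        # (v is where the forward search from s and the backward search from t
+        # met: `pred` links v back to s and `succ` links v on to t, so together
+        # they yield a shortest augmenting path.)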
+ u = v + while u != t: + u = succ[u] + path.append(u) + flow_value += augment(path) + + return flow_value + + +def edmonds_karp_impl(G, s, t, capacity, residual, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + if cutoff is None: + cutoff = float("inf") + R.graph["flow_value"] = edmonds_karp_core(R, s, t, cutoff) + + return R + + +def edmonds_karp( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + """Find a maximum single-commodity flow using the Edmonds-Karp algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n m^2)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. 
This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import edmonds_karp + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = edmonds_karp(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "edmonds_karp" + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/gomory_hu.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/gomory_hu.py new file mode 100644 index 0000000..f244339 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/gomory_hu.py @@ -0,0 +1,176 @@ +""" +Gomory-Hu tree of undirected Graphs. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from .edmondskarp import edmonds_karp +from .utils import build_residual_network + +default_flow_func = edmonds_karp + +__all__ = ["gomory_hu_tree"] + + +@not_implemented_for("directed") +def gomory_hu_tree(G, capacity="capacity", flow_func=None): + r"""Returns the Gomory-Hu tree of an undirected graph G. + + A Gomory-Hu tree of an undirected graph with capacities is a + weighted tree that represents the minimum s-t cuts for all s-t + pairs in the graph. + + It only requires `n-1` minimum cut computations instead of the + obvious `n(n-1)/2`. The tree represents all s-t cuts as the + minimum cut value among any pair of nodes is the minimum edge + weight in the shortest path between the two nodes in the + Gomory-Hu tree. + + The Gomory-Hu tree also has the property that removing the + edge with the minimum weight in the shortest path between + any two nodes leaves two connected components that form + a partition of the nodes in G that defines the minimum s-t + cut. + + See Examples section below for details. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + Function to perform the underlying flow computations. Default value + :func:`edmonds_karp`. This function performs better in sparse graphs + with right tailed degree distributions. + :func:`shortest_augmenting_path` will perform better in denser + graphs. 
+ + Returns + ------- + Tree : NetworkX graph + A NetworkX graph representing the Gomory-Hu tree of the input graph. + + Raises + ------ + NetworkXNotImplemented + Raised if the input graph is directed. + + NetworkXError + Raised if the input graph is an empty Graph. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nx.set_edge_attributes(G, 1, "capacity") + >>> T = nx.gomory_hu_tree(G) + >>> # The value of the minimum cut between any pair + ... # of nodes in G is the minimum edge weight in the + ... # shortest path between the two nodes in the + ... # Gomory-Hu tree. + ... def minimum_edge_weight_in_shortest_path(T, u, v): + ... path = nx.shortest_path(T, u, v, weight="weight") + ... return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + >>> u, v = 0, 33 + >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> cut_value + 10 + >>> nx.minimum_cut_value(G, u, v) + 10 + >>> # The Comory-Hu tree also has the property that removing the + ... # edge with the minimum weight in the shortest path between + ... # any two nodes leaves two connected components that form + ... # a partition of the nodes in G that defines the minimum s-t + ... # cut. + ... cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> T.remove_edge(*edge) + >>> U, V = list(nx.connected_components(T)) + >>> # Thus U and V form a partition that defines a minimum cut + ... # between u and v in G. You can compute the edge cut set, + ... # that is, the set of edges that if removed from G will + ... # disconnect u from v in G, with this information: + ... cutset = set() + >>> for x, nbrs in ((n, G[n]) for n in U): + ... cutset.update((x, y) for y in nbrs if y in V) + >>> # Because we have set the capacities of all edges to 1 + ... # the cutset contains ten edges + ... len(cutset) + 10 + >>> # You can use any maximum flow algorithm for the underlying + ... # flow computations using the argument flow_func + ... from networkx.algorithms import flow + >>> T = nx.gomory_hu_tree(G, flow_func=flow.boykov_kolmogorov) + >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> cut_value + 10 + >>> nx.minimum_cut_value(G, u, v, flow_func=flow.boykov_kolmogorov) + 10 + + Notes + ----- + This implementation is based on Gusfield approach [1]_ to compute + Comory-Hu trees, which does not require node contractions and has + the same computational complexity than the original method. + + See also + -------- + :func:`minimum_cut` + :func:`maximum_flow` + + References + ---------- + .. [1] Gusfield D: Very simple methods for all pairs network flow analysis. + SIAM J Comput 19(1):143-155, 1990. + + """ + if flow_func is None: + flow_func = default_flow_func + + if len(G) == 0: # empty graph + msg = "Empty Graph does not have a Gomory-Hu tree representation" + raise nx.NetworkXError(msg) + + # Start the tree as a star graph with an arbitrary node at the center + tree = {} + labels = {} + iter_nodes = iter(G) + root = next(iter_nodes) + for n in iter_nodes: + tree[n] = root + + # Reuse residual network + R = build_residual_network(G, capacity) + + # For all the leaves in the star graph tree (that is n-1 nodes). 
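+    # Gusfield's scheme: separate each node from its current tree neighbour
+    # with a single minimum-cut computation, then re-hang any node that ends
+    # up on `source`'s side of the cut. No node contractions are required.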
+ for source in tree: + # Find neighbor in the tree + target = tree[source] + # compute minimum cut + cut_value, partition = nx.minimum_cut( + G, source, target, capacity=capacity, flow_func=flow_func, residual=R + ) + labels[(source, target)] = cut_value + # Update the tree + # Source will always be in partition[0] and target in partition[1] + for node in partition[0]: + if node != source and node in tree and tree[node] == target: + tree[node] = source + labels[node, source] = labels.get((node, target), cut_value) + # + if target != root and tree[target] in partition[0]: + labels[source, tree[target]] = labels[target, tree[target]] + labels[target, source] = cut_value + tree[source] = tree[target] + tree[target] = source + + # Build the tree + T = nx.Graph() + T.add_nodes_from(G) + T.add_weighted_edges_from(((u, v, labels[u, v]) for u, v in tree.items())) + return T diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/maxflow.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/maxflow.py new file mode 100644 index 0000000..8d2fb8f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/maxflow.py @@ -0,0 +1,611 @@ +""" +Maximum flow (and minimum cut) algorithms on capacitated graphs. +""" +import networkx as nx + +from .boykovkolmogorov import boykov_kolmogorov +from .dinitz_alg import dinitz +from .edmondskarp import edmonds_karp +from .preflowpush import preflow_push +from .shortestaugmentingpath import shortest_augmenting_path +from .utils import build_flow_dict + +# Define the default flow function for computing maximum flow. +default_flow_func = preflow_push +# Functions that don't support cutoff for minimum cut computations. +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + +__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"] + + +def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find a maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + flow_dict : dict + A dictionary containing the value of the flow that went through + each edge. 
+ + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow_value` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should supports an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + maximum_flow returns both the value of the maximum flow and a + dictionary with all flows. + + >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y") + >>> flow_value + 3.0 + >>> print(flow_dict["x"]["b"]) + 1.0 + + You can also use alternative algorithms for computing the + maximum flow by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> flow_value == nx.maximum_flow(G, "x", "y", flow_func=shortest_augmenting_path)[ + ... 0 + ... ] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." 
+ ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs) + flow_dict = build_flow_dict(flowG, R) + + return (R.graph["flow_value"], flow_dict) + + +def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find the value of maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. 
Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should supports an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + maximum_flow_value computes only the value of the + maximum flow: + + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + + You can also use alternative algorithms for computing the + maximum flow by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> flow_value == nx.maximum_flow_value( + ... G, "x", "y", flow_func=shortest_augmenting_path + ... ) + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + + return R.graph["flow_value"] + + +def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Compute the value and the node partition of a minimum (s, t)-cut. + + Use the max-flow min-cut theorem, i.e., the capacity of a minimum + capacity cut is equal to the flow value of a maximum flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + cut_value : integer, float + Value of the minimum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. + + Raises + ------ + NetworkXUnbounded + If the graph has a path of infinite capacity, all cuts have + infinite capacity and the function raises a NetworkXError. 
+ + See also + -------- + :meth:`maximum_flow` + :meth:`maximum_flow_value` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should supports an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + minimum_cut computes both the value of the + minimum cut and the node partition: + + >>> cut_value, partition = nx.minimum_cut(G, "x", "y") + >>> reachable, non_reachable = partition + + 'partition' here is a tuple with the two sets of nodes that define + the minimum cut. You can compute the cut set of edges that induce + the minimum cut as follows: + + >>> cutset = set() + >>> for u, nbrs in ((n, G[n]) for n in reachable): + ... cutset.update((u, v) for v in nbrs if v in non_reachable) + >>> print(sorted(cutset)) + [('c', 'y'), ('x', 'b')] + >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset) + True + + You can also use alternative algorithms for computing the + minimum cut by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> cut_value == nx.minimum_cut(G, "x", "y", flow_func=shortest_augmenting_path)[0] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." 
+ ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + if kwargs.get("cutoff") is not None and flow_func in flow_funcs: + raise nx.NetworkXError("cutoff should not be specified.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + # Remove saturated edges from the residual network + cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]] + R.remove_edges_from(cutset) + + # Then, reachable and non reachable nodes from source in the + # residual network form the node partition that defines + # the minimum cut. + non_reachable = set(dict(nx.shortest_path_length(R, target=_t))) + partition = (set(flowG) - non_reachable, non_reachable) + # Finally add again cutset edges to the residual network to make + # sure that it is reusable. + if cutset is not None: + R.add_edges_from(cutset) + return (R.graph["flow_value"], partition) + + +def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Compute the value of a minimum (s, t)-cut. + + Use the max-flow min-cut theorem, i.e., the capacity of a minimum + capacity cut is equal to the flow value of a maximum flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + cut_value : integer, float + Value of the minimum cut. + + Raises + ------ + NetworkXUnbounded + If the graph has a path of infinite capacity, all cuts have + infinite capacity and the function raises a NetworkXError. + + See also + -------- + :meth:`maximum_flow` + :meth:`maximum_flow_value` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. 
If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should supports an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + minimum_cut_value computes only the value of the + minimum cut: + + >>> cut_value = nx.minimum_cut_value(G, "x", "y") + >>> cut_value + 3.0 + + You can also use alternative algorithms for computing the + minimum cut by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> cut_value == nx.minimum_cut_value( + ... G, "x", "y", flow_func=shortest_augmenting_path + ... ) + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + if kwargs.get("cutoff") is not None and flow_func in flow_funcs: + raise nx.NetworkXError("cutoff should not be specified.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + + return R.graph["flow_value"] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/mincost.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/mincost.py new file mode 100644 index 0000000..6089c71 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/mincost.py @@ -0,0 +1,331 @@ +""" +Minimum cost flow algorithms on directed connected graphs. +""" + +__all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"] + +import networkx as nx + + +def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"): + r"""Find the cost of a minimum cost flow satisfying all demands in digraph G. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. 
+ + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowCost : integer, float + Cost of a minimum cost flow satisfying all demands. + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + Examples + -------- + A simple example of a min cost flow problem. + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost = nx.min_cost_flow_cost(G) + >>> flowCost + 24 + """ + return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[0] + + +def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"): + r"""Returns a minimum cost flow satisfying all demands in digraph G. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. 
+ + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + Examples + -------- + A simple example of a min cost flow problem. + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowDict = nx.min_cost_flow(G) + """ + return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[1] + + +def cost_of_flow(G, flowDict, weight="weight"): + """Compute the cost of the flow given by flowDict on graph G. + + Note that this function does not check for the validity of the + flow flowDict. This function will fail if the graph G and the + flow don't have the same edge set. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Returns + ------- + cost : Integer, float + The total cost of the flow. This is given by the sum over all + edges of the product of the edge's flow and the edge's weight. + + See also + -------- + max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). 
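Because cost_of_flow simply sums flow times weight over all edges, applying it to the dictionary returned by min_cost_flow reproduces min_cost_flow_cost. A minimal sketch reusing the small example from the docstrings above:

import networkx as nx

G = nx.DiGraph()
G.add_node("a", demand=-5)
G.add_node("d", demand=5)
G.add_edge("a", "b", weight=3, capacity=4)
G.add_edge("a", "c", weight=6, capacity=10)
G.add_edge("b", "d", weight=1, capacity=9)
G.add_edge("c", "d", weight=2, capacity=5)

flow_dict = nx.min_cost_flow(G)
# Summing flow * weight over the edges gives the same cost reported by
# min_cost_flow_cost (24 for this graph).
assert nx.cost_of_flow(G, flow_dict) == nx.min_cost_flow_cost(G) == 24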
+ """ + return sum((flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data=True))) + + +def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"): + """Returns a maximum (s, t)-flow of minimum cost. + + G is a digraph with edge costs and capacities. There is a source + node s and a sink node t. This function finds a maximum flow from + s to t whose total cost is minimized. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + s: node label + Source of the flow. + + t: node label + Destination of the flow. + + capacity: string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight: string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowDict: dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnbounded + This exception is raised if there is an infinite capacity path + from s to t in G. In this case there is no maximum flow. This + exception is also raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + is unbounded below. + + See also + -------- + cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... [ + ... (1, 2, {"capacity": 12, "weight": 4}), + ... (1, 3, {"capacity": 20, "weight": 6}), + ... (2, 3, {"capacity": 6, "weight": -3}), + ... (2, 6, {"capacity": 14, "weight": 1}), + ... (3, 4, {"weight": 9}), + ... (3, 5, {"capacity": 10, "weight": 5}), + ... (4, 2, {"capacity": 19, "weight": 13}), + ... (4, 5, {"capacity": 4, "weight": 0}), + ... (5, 7, {"capacity": 28, "weight": 2}), + ... (6, 5, {"capacity": 11, "weight": 1}), + ... (6, 7, {"weight": 8}), + ... (7, 4, {"capacity": 6, "weight": 6}), + ... ] + ... ) + >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7) + >>> mincost = nx.cost_of_flow(G, mincostFlow) + >>> mincost + 373 + >>> from networkx.algorithms.flow import maximum_flow + >>> maxFlow = maximum_flow(G, 1, 7)[1] + >>> nx.cost_of_flow(G, maxFlow) >= mincost + True + >>> mincostFlowValue = sum((mincostFlow[u][7] for u in G.predecessors(7))) - sum( + ... (mincostFlow[7][v] for v in G.successors(7)) + ... 
) + >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7) + True + + """ + maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity) + H = nx.DiGraph(G) + H.add_node(s, demand=-maxFlow) + H.add_node(t, demand=maxFlow) + return min_cost_flow(H, capacity=capacity, weight=weight) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/networksimplex.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/networksimplex.py new file mode 100644 index 0000000..3e2eaf5 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/networksimplex.py @@ -0,0 +1,664 @@ +""" +Minimum cost flow algorithms on directed connected graphs. +""" + +__all__ = ["network_simplex"] + +from itertools import chain, islice, repeat +from math import ceil, sqrt + +import networkx as nx +from networkx.utils import not_implemented_for + + +class _DataEssentialsAndFunctions: + def __init__( + self, G, multigraph, demand="demand", capacity="capacity", weight="weight" + ): + + # Number all nodes and edges and hereafter reference them using ONLY their numbers + self.node_list = list(G) # nodes + self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices + self.node_demands = [ + G.nodes[u].get(demand, 0) for u in self.node_list + ] # node demands + + self.edge_sources = [] # edge sources + self.edge_targets = [] # edge targets + if multigraph: + self.edge_keys = [] # edge keys + self.edge_indices = {} # edge indices + self.edge_capacities = [] # edge capacities + self.edge_weights = [] # edge weights + + if not multigraph: + edges = G.edges(data=True) + else: + edges = G.edges(data=True, keys=True) + + inf = float("inf") + edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0) + for i, e in enumerate(edges): + self.edge_sources.append(self.node_indices[e[0]]) + self.edge_targets.append(self.node_indices[e[1]]) + if multigraph: + self.edge_keys.append(e[2]) + self.edge_indices[e[:-1]] = i + self.edge_capacities.append(e[-1].get(capacity, inf)) + self.edge_weights.append(e[-1].get(weight, 0)) + + # spanning tree specific data to be initialized + + self.edge_count = None # number of edges + self.edge_flow = None # edge flows + self.node_potentials = None # node potentials + self.parent = None # parent nodes + self.parent_edge = None # edges to parents + self.subtree_size = None # subtree sizes + self.next_node_dft = None # next nodes in depth-first thread + self.prev_node_dft = None # previous nodes in depth-first thread + self.last_descendent_dft = None # last descendants in depth-first thread + self._spanning_tree_initialized = ( + False # False until initialize_spanning_tree() is called + ) + + def initialize_spanning_tree(self, n, faux_inf): + self.edge_count = len(self.edge_indices) # number of edges + self.edge_flow = list( + chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands)) + ) # edge flows + self.node_potentials = [ + faux_inf if d <= 0 else -faux_inf for d in self.node_demands + ] # node potentials + self.parent = list(chain(repeat(-1, n), [None])) # parent nodes + self.parent_edge = list( + range(self.edge_count, self.edge_count + n) + ) # edges to parents + self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes + self.next_node_dft = list( + chain(range(1, n), [-1, 0]) + ) # next nodes in depth-first thread + self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first thread + self.last_descendent_dft = list( + chain(range(n), [n - 1]) + ) # last descendants in depth-first thread + 
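+ # At this point the tree is a star rooted at the artificial node (the
+ # extra entry at index n, also addressable as -1): every real node's
+ # parent is the root, its parent edge is the dummy edge created for it,
+ # each dummy edge initially carries abs(demand) units of flow, and the
+ # depth-first thread visits the real nodes in index order.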
self._spanning_tree_initialized = True # True only if all the assignments pass + + def find_apex(self, p, q): + """ + Find the lowest common ancestor of nodes p and q in the spanning tree. + """ + size_p = self.subtree_size[p] + size_q = self.subtree_size[q] + while True: + while size_p < size_q: + p = self.parent[p] + size_p = self.subtree_size[p] + while size_p > size_q: + q = self.parent[q] + size_q = self.subtree_size[q] + if size_p == size_q: + if p != q: + p = self.parent[p] + size_p = self.subtree_size[p] + q = self.parent[q] + size_q = self.subtree_size[q] + else: + return p + + def trace_path(self, p, w): + """ + Returns the nodes and edges on the path from node p to its ancestor w. + """ + Wn = [p] + We = [] + while p != w: + We.append(self.parent_edge[p]) + p = self.parent[p] + Wn.append(p) + return Wn, We + + def find_cycle(self, i, p, q): + """ + Returns the nodes and edges on the cycle containing edge i == (p, q) + when the latter is added to the spanning tree. + + The cycle is oriented in the direction from p to q. + """ + w = self.find_apex(p, q) + Wn, We = self.trace_path(p, w) + Wn.reverse() + We.reverse() + if We != [i]: + We.append(i) + WnR, WeR = self.trace_path(q, w) + del WnR[-1] + Wn += WnR + We += WeR + return Wn, We + + def augment_flow(self, Wn, We, f): + """ + Augment f units of flow along a cycle represented by Wn and We. + """ + for i, p in zip(We, Wn): + if self.edge_sources[i] == p: + self.edge_flow[i] += f + else: + self.edge_flow[i] -= f + + def trace_subtree(self, p): + """ + Yield the nodes in the subtree rooted at a node p. + """ + yield p + l = self.last_descendent_dft[p] + while p != l: + p = self.next_node_dft[p] + yield p + + def remove_edge(self, s, t): + """ + Remove an edge (s, t) where parent[t] == s from the spanning tree. + """ + size_t = self.subtree_size[t] + prev_t = self.prev_node_dft[t] + last_t = self.last_descendent_dft[t] + next_last_t = self.next_node_dft[last_t] + # Remove (s, t). + self.parent[t] = None + self.parent_edge[t] = None + # Remove the subtree rooted at t from the depth-first thread. + self.next_node_dft[prev_t] = next_last_t + self.prev_node_dft[next_last_t] = prev_t + self.next_node_dft[last_t] = t + self.prev_node_dft[t] = last_t + # Update the subtree sizes and last descendants of the (old) acenstors + # of t. + while s is not None: + self.subtree_size[s] -= size_t + if self.last_descendent_dft[s] == last_t: + self.last_descendent_dft[s] = prev_t + s = self.parent[s] + + def make_root(self, q): + """ + Make a node q the root of its containing subtree. + """ + ancestors = [] + while q is not None: + ancestors.append(q) + q = self.parent[q] + ancestors.reverse() + for p, q in zip(ancestors, islice(ancestors, 1, None)): + size_p = self.subtree_size[p] + last_p = self.last_descendent_dft[p] + prev_q = self.prev_node_dft[q] + last_q = self.last_descendent_dft[q] + next_last_q = self.next_node_dft[last_q] + # Make p a child of q. + self.parent[p] = q + self.parent[q] = None + self.parent_edge[p] = self.parent_edge[q] + self.parent_edge[q] = None + self.subtree_size[p] = size_p - self.subtree_size[q] + self.subtree_size[q] = size_p + # Remove the subtree rooted at q from the depth-first thread. + self.next_node_dft[prev_q] = next_last_q + self.prev_node_dft[next_last_q] = prev_q + self.next_node_dft[last_q] = q + self.prev_node_dft[q] = last_q + if last_p == last_q: + self.last_descendent_dft[p] = prev_q + last_p = prev_q + # Add the remaining parts of the subtree rooted at p as a subtree + # of q in the depth-first thread. 
+ self.prev_node_dft[p] = last_q + self.next_node_dft[last_q] = p + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.last_descendent_dft[q] = last_p + + def add_edge(self, i, p, q): + """ + Add an edge (p, q) to the spanning tree where q is the root of a subtree. + """ + last_p = self.last_descendent_dft[p] + next_last_p = self.next_node_dft[last_p] + size_q = self.subtree_size[q] + last_q = self.last_descendent_dft[q] + # Make q a child of p. + self.parent[q] = p + self.parent_edge[q] = i + # Insert the subtree rooted at q into the depth-first thread. + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.prev_node_dft[next_last_p] = last_q + self.next_node_dft[last_q] = next_last_p + # Update the subtree sizes and last descendants of the (new) ancestors + # of q. + while p is not None: + self.subtree_size[p] += size_q + if self.last_descendent_dft[p] == last_p: + self.last_descendent_dft[p] = last_q + p = self.parent[p] + + def update_potentials(self, i, p, q): + """ + Update the potentials of the nodes in the subtree rooted at a node + q connected to its parent p by an edge i. + """ + if q == self.edge_targets[i]: + d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q] + else: + d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q] + for q in self.trace_subtree(q): + self.node_potentials[q] += d + + def reduced_cost(self, i): + """Returns the reduced cost of an edge i.""" + c = ( + self.edge_weights[i] + - self.node_potentials[self.edge_sources[i]] + + self.node_potentials[self.edge_targets[i]] + ) + return c if self.edge_flow[i] == 0 else -c + + def find_entering_edges(self): + """Yield entering edges until none can be found.""" + if self.edge_count == 0: + return + + # Entering edges are found by combining Dantzig's rule and Bland's + # rule. The edges are cyclically grouped into blocks of size B. Within + # each block, Dantzig's rule is applied to find an entering edge. The + # blocks to search is determined following Bland's rule. + B = int(ceil(sqrt(self.edge_count))) # pivot block size + M = (self.edge_count + B - 1) // B # number of blocks needed to cover all edges + m = 0 # number of consecutive blocks without eligible + # entering edges + f = 0 # first edge in block + while m < M: + # Determine the next block of edges. + l = f + B + if l <= self.edge_count: + edges = range(f, l) + else: + l -= self.edge_count + edges = chain(range(f, self.edge_count), range(l)) + f = l + # Find the first edge with the lowest reduced cost. + i = min(edges, key=self.reduced_cost) + c = self.reduced_cost(i) + if c >= 0: + # No entering edge found in the current block. + m += 1 + else: + # Entering edge found. + if self.edge_flow[i] == 0: + p = self.edge_sources[i] + q = self.edge_targets[i] + else: + p = self.edge_targets[i] + q = self.edge_sources[i] + yield i, p, q + m = 0 + # All edges have nonnegative reduced costs. The current flow is + # optimal. + + def residual_capacity(self, i, p): + """Returns the residual capacity of an edge i in the direction away + from its endpoint p. 
+ """ + return ( + self.edge_capacities[i] - self.edge_flow[i] + if self.edge_sources[i] == p + else self.edge_flow[i] + ) + + def find_leaving_edge(self, Wn, We): + """Returns the leaving edge in a cycle represented by Wn and We.""" + j, s = min( + zip(reversed(We), reversed(Wn)), + key=lambda i_p: self.residual_capacity(*i_p), + ) + t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j] + return j, s, t + + +@not_implemented_for("undirected") +def network_simplex(G, demand="demand", capacity="capacity", weight="weight"): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a primal network simplex algorithm that uses the leaving + arc rule to prevent cycling. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowCost : integer, float + Cost of a minimum cost flow satisfying all demands. + + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost + + Examples + -------- + A simple example of a min cost flow problem. 
+ + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost, flowDict = nx.network_simplex(G) + >>> flowCost + 24 + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + + The mincost flow algorithm can also be used to solve shortest path + problems. To find the shortest path between two nodes u and v, + give all edges an infinite capacity, give node u a demand of -1 and + node v a demand a 1. Then run the network simplex. The value of a + min cost flow will be the distance between u and v and edges + carrying positive flow will indicate the path. + + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... [ + ... ("s", "u", 10), + ... ("s", "x", 5), + ... ("u", "v", 1), + ... ("u", "x", 2), + ... ("v", "y", 1), + ... ("x", "u", 3), + ... ("x", "v", 5), + ... ("x", "y", 2), + ... ("y", "s", 7), + ... ("y", "v", 6), + ... ] + ... ) + >>> G.add_node("s", demand=-1) + >>> G.add_node("v", demand=1) + >>> flowCost, flowDict = nx.network_simplex(G) + >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight") + True + >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0]) + [('s', 'x'), ('u', 'v'), ('x', 'u')] + >>> nx.shortest_path(G, "s", "v", weight="weight") + ['s', 'x', 'u', 'v'] + + It is possible to change the name of the attributes used for the + algorithm. + + >>> G = nx.DiGraph() + >>> G.add_node("p", spam=-4) + >>> G.add_node("q", spam=2) + >>> G.add_node("a", spam=-2) + >>> G.add_node("d", spam=-1) + >>> G.add_node("t", spam=2) + >>> G.add_node("w", spam=3) + >>> G.add_edge("p", "q", cost=7, vacancies=5) + >>> G.add_edge("p", "a", cost=1, vacancies=4) + >>> G.add_edge("q", "d", cost=2, vacancies=3) + >>> G.add_edge("t", "q", cost=1, vacancies=2) + >>> G.add_edge("a", "t", cost=2, vacancies=4) + >>> G.add_edge("d", "w", cost=3, vacancies=4) + >>> G.add_edge("t", "w", cost=4, vacancies=1) + >>> flowCost, flowDict = nx.network_simplex( + ... G, demand="spam", capacity="vacancies", weight="cost" + ... ) + >>> flowCost + 37 + >>> flowDict + {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}} + + References + ---------- + .. [1] Z. Kiraly, P. Kovacs. + Efficient implementation of minimum-cost flow algorithms. + Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012. + .. [2] R. Barr, F. Glover, D. Klingman. + Enhancement of spanning tree labeling procedures for network + optimization. + INFOR 17(1):16--34. 1979. 
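A minimal sketch of the feasibility check performed below (the two-node graph is illustrative only): when the node demands do not sum to zero, network_simplex raises NetworkXUnfeasible before any pivoting takes place.

import networkx as nx

G = nx.DiGraph()
G.add_node("a", demand=-3)
G.add_node("b", demand=5)  # demands sum to +2, so no feasible flow exists
G.add_edge("a", "b", weight=1, capacity=10)

try:
    nx.network_simplex(G)
except nx.NetworkXUnfeasible as err:
    print(err)  # total node demand is not zero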
+ """ + ########################################################################### + # Problem essentials extraction and sanity check + ########################################################################### + + if len(G) == 0: + raise nx.NetworkXError("graph has no nodes") + + multigraph = G.is_multigraph() + + # extracting data essential to problem + DEAF = _DataEssentialsAndFunctions( + G, multigraph, demand=demand, capacity=capacity, weight=weight + ) + + ########################################################################### + # Quick Error Detection + ########################################################################### + + inf = float("inf") + for u, d in zip(DEAF.node_list, DEAF.node_demands): + if abs(d) == inf: + raise nx.NetworkXError(f"node {u!r} has infinite demand") + for e, w in zip(DEAF.edge_indices, DEAF.edge_weights): + if abs(w) == inf: + raise nx.NetworkXError(f"edge {e!r} has infinite weight") + if not multigraph: + edges = nx.selfloop_edges(G, data=True) + else: + edges = nx.selfloop_edges(G, data=True, keys=True) + for e in edges: + if abs(e[-1].get(weight, 0)) == inf: + raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight") + + ########################################################################### + # Quick Infeasibility Detection + ########################################################################### + + if sum(DEAF.node_demands) != 0: + raise nx.NetworkXUnfeasible("total node demand is not zero") + for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities): + if c < 0: + raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity") + if not multigraph: + edges = nx.selfloop_edges(G, data=True) + else: + edges = nx.selfloop_edges(G, data=True, keys=True) + for e in edges: + if e[-1].get(capacity, inf) < 0: + raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity") + + ########################################################################### + # Initialization + ########################################################################### + + # Add a dummy node -1 and connect all existing nodes to it with infinite- + # capacity dummy edges. Node -1 will serve as the root of the + # spanning tree of the network simplex method. The new edges will used to + # trivially satisfy the node demands and create an initial strongly + # feasible spanning tree. + for i, d in enumerate(DEAF.node_demands): + # Must be greater-than here. Zero-demand nodes must have + # edges pointing towards the root to ensure strong feasibility. + if d > 0: + DEAF.edge_sources.append(-1) + DEAF.edge_targets.append(i) + else: + DEAF.edge_sources.append(i) + DEAF.edge_targets.append(-1) + faux_inf = ( + 3 + * max( + chain( + [ + sum(c for c in DEAF.edge_capacities if c < inf), + sum(abs(w) for w in DEAF.edge_weights), + ], + (abs(d) for d in DEAF.node_demands), + ) + ) + or 1 + ) + + n = len(DEAF.node_list) # number of nodes + DEAF.edge_weights.extend(repeat(faux_inf, n)) + DEAF.edge_capacities.extend(repeat(faux_inf, n)) + + # Construct the initial spanning tree. + DEAF.initialize_spanning_tree(n, faux_inf) + + ########################################################################### + # Pivot loop + ########################################################################### + + for i, p, q in DEAF.find_entering_edges(): + Wn, We = DEAF.find_cycle(i, p, q) + j, s, t = DEAF.find_leaving_edge(Wn, We) + DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s)) + # Do nothing more if the entering edge is the same as the leaving edge. 
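+ # Otherwise perform the pivot: detach the leaving edge (s, t) from the
+ # tree, re-root the detached subtree at q, reattach it to p through the
+ # entering edge i (which joins p and q), and update the potentials of
+ # the subtree now rooted at q.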
+ if i != j: + if DEAF.parent[t] != s: + # Ensure that s is the parent of t. + s, t = t, s + if We.index(i) > We.index(j): + # Ensure that q is in the subtree rooted at t. + p, q = q, p + DEAF.remove_edge(s, t) + DEAF.make_root(q) + DEAF.add_edge(i, p, q) + DEAF.update_potentials(i, p, q) + + ########################################################################### + # Infeasibility and unboundedness detection + ########################################################################### + + if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)): + raise nx.NetworkXUnfeasible("no flow satisfies all node demands") + + if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any( + e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0 + for e in nx.selfloop_edges(G, data=True) + ): + raise nx.NetworkXUnbounded("negative cycle with infinite capacity found") + + ########################################################################### + # Flow cost calculation and flow dict construction + ########################################################################### + + del DEAF.edge_flow[DEAF.edge_count :] + flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow)) + flow_dict = {n: {} for n in DEAF.node_list} + + def add_entry(e): + """Add a flow dict entry.""" + d = flow_dict[e[0]] + for k in e[1:-2]: + try: + d = d[k] + except KeyError: + t = {} + d[k] = t + d = t + d[e[-2]] = e[-1] + + DEAF.edge_sources = ( + DEAF.node_list[s] for s in DEAF.edge_sources + ) # Use original nodes. + DEAF.edge_targets = ( + DEAF.node_list[t] for t in DEAF.edge_targets + ) # Use original nodes. + if not multigraph: + for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow): + add_entry(e) + edges = G.edges(data=True) + else: + for e in zip( + DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow + ): + add_entry(e) + edges = G.edges(data=True, keys=True) + for e in edges: + if e[0] != e[1]: + if e[-1].get(capacity, inf) == 0: + add_entry(e[:-1] + (0,)) + else: + w = e[-1].get(weight, 0) + if w >= 0: + add_entry(e[:-1] + (0,)) + else: + c = e[-1][capacity] + flow_cost += w * c + add_entry(e[:-1] + (c,)) + + return flow_cost, flow_dict diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/preflowpush.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/preflowpush.py new file mode 100644 index 0000000..aa6647b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/preflowpush.py @@ -0,0 +1,423 @@ +""" +Highest-label preflow-push algorithm for maximum flow problems. 
+""" + +from collections import deque +from itertools import islice + +import networkx as nx + +from ...utils import arbitrary_element +from .utils import ( + CurrentEdge, + GlobalRelabelThreshold, + Level, + build_residual_network, + detect_unboundedness, +) + +__all__ = ["preflow_push"] + + +def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only): + """Implementation of the highest-label preflow-push algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if global_relabel_freq is None: + global_relabel_freq = 0 + if global_relabel_freq < 0: + raise nx.NetworkXError("global_relabel_freq must be nonnegative.") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + detect_unboundedness(R, s, t) + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + R_nodes[u]["excess"] = 0 + for e in R_succ[u].values(): + e["flow"] = 0 + + def reverse_bfs(src): + """Perform a reverse breadth-first search from src in the residual + network. + """ + heights = {src: 0} + q = deque([(src, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + return heights + + # Initialize heights of the nodes. + heights = reverse_bfs(t) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(R) + # max_height represents the height of the highest level below level n with + # at least one active node. + max_height = max(heights[u] for u in heights if u != s) + heights[s] = n + + grt = GlobalRelabelThreshold(n, R.size(), global_relabel_freq) + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + 1 + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + def push(u, v, flow): + """Push flow units of flow from u to v.""" + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + + # The maximum flow must be nonzero now. Initialize the preflow by + # saturating all edges emanating from s. + for u, attr in R_succ[s].items(): + flow = attr["capacity"] + if flow > 0: + push(s, u, flow) + + # Partition nodes into levels. + levels = [Level() for i in range(2 * n)] + for u in R: + if u != s and u != t: + level = levels[R_nodes[u]["height"]] + if R_nodes[u]["excess"] > 0: + level.active.add(u) + else: + level.inactive.add(u) + + def activate(v): + """Move a node from the inactive set to the active set of its level.""" + if v != s and v != t: + level = levels[R_nodes[v]["height"]] + if v in level.inactive: + level.inactive.remove(v) + level.active.add(v) + + def relabel(u): + """Relabel a node to create an admissible edge.""" + grt.add_work(len(R_succ[u])) + return ( + min( + R_nodes[v]["height"] + for v, attr in R_succ[u].items() + if attr["flow"] < attr["capacity"] + ) + + 1 + ) + + def discharge(u, is_phase1): + """Discharge a node until it becomes inactive or, during phase 1 (see + below), its height reaches at least n. The node is known to have the + largest height among active nodes. 
+ """ + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # next_height represents the next height to examine after discharging + # the current node. During phase 1, it is capped to below n. + next_height = height + levels[height].active.remove(u) + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + flow = min(R_nodes[u]["excess"], attr["capacity"] - attr["flow"]) + push(u, v, flow) + activate(v) + if R_nodes[u]["excess"] == 0: + # The node has become inactive. + levels[height].inactive.add(u) + break + try: + curr_edge.move_to_next() + except StopIteration: + # We have run off the end of the adjacency list, and there can + # be no more admissible edges. Relabel the node to create one. + height = relabel(u) + if is_phase1 and height >= n - 1: + # Although the node is still active, with a height at least + # n - 1, it is now known to be on the s side of the minimum + # s-t cut. Stop processing it until phase 2. + levels[height].active.add(u) + break + # The first relabel operation after global relabeling may not + # increase the height of the node since the 'current edge' data + # structure is not rewound. Use height instead of (height - 1) + # in case other active nodes at the same level are missed. + next_height = height + R_nodes[u]["height"] = height + return next_height + + def gap_heuristic(height): + """Apply the gap heuristic.""" + # Move all nodes at levels (height + 1) to max_height to level n + 1. + for level in islice(levels, height + 1, max_height + 1): + for u in level.active: + R_nodes[u]["height"] = n + 1 + for u in level.inactive: + R_nodes[u]["height"] = n + 1 + levels[n + 1].active.update(level.active) + level.active.clear() + levels[n + 1].inactive.update(level.inactive) + level.inactive.clear() + + def global_relabel(from_sink): + """Apply the global relabeling heuristic.""" + src = t if from_sink else s + heights = reverse_bfs(src) + if not from_sink: + # s must be reachable from t. Remove t explicitly. + del heights[t] + max_height = max(heights.values()) + if from_sink: + # Also mark nodes from which t is unreachable for relabeling. This + # serves the same purpose as the gap heuristic. + for u in R: + if u not in heights and R_nodes[u]["height"] < n: + heights[u] = n + 1 + else: + # Shift the computed heights because the height of s is n. + for u in heights: + heights[u] += n + max_height += n + del heights[src] + for u, new_height in heights.items(): + old_height = R_nodes[u]["height"] + if new_height != old_height: + if u in levels[old_height].active: + levels[old_height].active.remove(u) + levels[new_height].active.add(u) + else: + levels[old_height].inactive.remove(u) + levels[new_height].inactive.add(u) + R_nodes[u]["height"] = new_height + return max_height + + # Phase 1: Find the maximum preflow by pushing as much flow as possible to + # t. + + height = max_height + while height > 0: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + # Record the old height and level for the gap heuristic. + old_height = height + old_level = level + u = arbitrary_element(level.active) + height = discharge(u, True) + if grt.is_reached(): + # Global relabeling heuristic: Recompute the exact heights of + # all nodes. 
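+ # Resume the sweep from the recomputed highest active level and reset
+ # the work counter that decides when the next global relabeling runs.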
+ height = global_relabel(True) + max_height = height + grt.clear_work() + elif not old_level.active and not old_level.inactive: + # Gap heuristic: If the level at old_height is empty (a 'gap'), + # a minimum cut has been identified. All nodes with heights + # above old_height can have their heights set to n + 1 and not + # be further processed before a maximum preflow is found. + gap_heuristic(old_height) + height = old_height - 1 + max_height = height + else: + # Update the height of the highest level with at least one + # active node. + max_height = max(max_height, height) + + # A maximum preflow has been found. The excess at t is the maximum flow + # value. + if value_only: + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + # Phase 2: Convert the maximum preflow into a maximum flow by returning the + # excess to s. + + # Relabel all nodes so that they have accurate heights. + height = global_relabel(False) + grt.clear_work() + + # Continue to discharge the active nodes. + while height > n: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + u = arbitrary_element(level.active) + height = discharge(u, False) + if grt.is_reached(): + # Global relabeling heuristic. + height = global_relabel(False) + grt.clear_work() + + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + +def preflow_push( + G, s, t, capacity="capacity", residual=None, global_relabel_freq=1, value_only=False +): + r"""Find a maximum single-commodity flow using the highest-label + preflow-push algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 \sqrt{m})$ for $n$ nodes and + $m$ edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + global_relabel_freq : integer, float + Relative frequency of applying the global relabeling heuristic to speed + up the algorithm. If it is None, the heuristic is disabled. Default + value: 1. + + value_only : bool + If False, compute a maximum flow; otherwise, compute a maximum preflow + which is enough for computing the maximum flow value. Default value: + False. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. 
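The residual network returned by this function can also be turned into an explicit flow assignment. A small sketch, assuming the usual reading of the convention above (the positive part of R[u][v]['flow'] is the amount actually sent along an edge of G); the toy graph is illustrative only:

import networkx as nx
from networkx.algorithms.flow import preflow_push

G = nx.DiGraph()
G.add_edge("x", "a", capacity=3.0)
G.add_edge("a", "y", capacity=2.0)

R = preflow_push(G, "x", "y")

# Positive residual flow on an edge of G is the flow actually sent on it.
flow_dict = {u: {v: max(0, R[u][v]["flow"]) for v in G[u]} for u in G}

total_into_sink = sum(flow_dict[u]["y"] for u in G.pred["y"])
assert total_into_sink == R.graph["flow_value"]  # 2.0 for this toy graph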
+ + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. For each node :samp:`u` in :samp:`R`, + :samp:`R.nodes[u]['excess']` represents the difference between flow into + :samp:`u` and flow out of :samp:`u`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import preflow_push + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = preflow_push(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value == R.graph["flow_value"] + True + >>> # preflow_push also stores the maximum flow value + >>> # in the excess attribute of the sink node t + >>> flow_value == R.nodes["y"]["excess"] + True + >>> # For some problems, you might only want to compute a + >>> # maximum preflow. + >>> R = preflow_push(G, "x", "y", value_only=True) + >>> flow_value == R.graph["flow_value"] + True + >>> flow_value == R.nodes["y"]["excess"] + True + + """ + R = preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only) + R.graph["algorithm"] = "preflow_push" + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py new file mode 100644 index 0000000..0a91cf4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py @@ -0,0 +1,298 @@ +""" +Shortest augmenting path algorithm for maximum flow problems. 
+""" + +from collections import deque + +import networkx as nx + +from .edmondskarp import edmonds_karp_core +from .utils import CurrentEdge, build_residual_network + +__all__ = ["shortest_augmenting_path"] + + +def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff): + """Implementation of the shortest augmenting path algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + for e in R_succ[u].values(): + e["flow"] = 0 + + # Initialize heights of the nodes. + heights = {t: 0} + q = deque([(t, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(G) + m = R.size() / 2 + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + # Initialize counts of nodes in each level. + counts = [0] * (2 * n - 1) + for u in R: + counts[R_nodes[u]["height"]] += 1 + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def relabel(u): + """Relabel a node to create an admissible edge.""" + height = n - 1 + for v, attr in R_succ[u].items(): + if attr["flow"] < attr["capacity"]: + height = min(height, R_nodes[v]["height"]) + return height + 1 + + if cutoff is None: + cutoff = float("inf") + + # Phase 1: Look for shortest augmenting paths using depth-first search. + + flow_value = 0 + path = [s] + u = s + d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3))) + done = R_nodes[s]["height"] >= d + while not done: + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # Depth-first search for the next node on the path to t. + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + # Advance to the next node following an admissible edge. + path.append(v) + u = v + break + try: + curr_edge.move_to_next() + except StopIteration: + counts[height] -= 1 + if counts[height] == 0: + # Gap heuristic: If relabeling causes a level to become + # empty, a minimum cut has been identified. The algorithm + # can now be terminated. + R.graph["flow_value"] = flow_value + return R + height = relabel(u) + if u == s and height >= d: + if not two_phase: + # t is disconnected from s in the residual network. No + # more augmenting paths exist. 
+ R.graph["flow_value"] = flow_value + return R + else: + # t is at least d steps away from s. End of phase 1. + done = True + break + counts[height] += 1 + R_nodes[u]["height"] = height + if u != s: + # After relabeling, the last edge on the path is no longer + # admissible. Retreat one step to look for an alternative. + path.pop() + u = path[-1] + break + if u == t: + # t is reached. Augment flow along the path and reset it for a new + # depth-first search. + flow_value += augment(path) + if flow_value >= cutoff: + R.graph["flow_value"] = flow_value + return R + path = [s] + u = s + + # Phase 2: Look for shortest augmenting paths using breadth-first search. + flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value) + + R.graph["flow_value"] = flow_value + return R + + +def shortest_augmenting_path( + G, + s, + t, + capacity="capacity", + residual=None, + value_only=False, + two_phase=False, + cutoff=None, +): + r"""Find a maximum single-commodity flow using the shortest augmenting path + algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + two_phase : bool + If True, a two-phase variant is used. The two-phase variant improves + the running time on unit-capacity networks from $O(nm)$ to + $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. 
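A short sketch of the cutoff parameter described above (the four-edge graph is illustrative only): with two disjoint unit-capacity paths and cutoff=1.0, the search is expected to stop after the first augmenting path, so the reported value can be below the true maximum flow of 2.0 and no minimum cut is implied.

import networkx as nx
from networkx.algorithms.flow import shortest_augmenting_path

G = nx.DiGraph()
G.add_edge("s", "a", capacity=1.0)
G.add_edge("a", "t", capacity=1.0)
G.add_edge("s", "b", capacity=1.0)
G.add_edge("b", "t", capacity=1.0)

R = shortest_augmenting_path(G, "s", "t", cutoff=1.0)
print(R.graph["flow_value"])  # expected to be 1.0, i.e. the cutoff, not 2.0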
+ + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import shortest_augmenting_path + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = shortest_augmenting_path(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff) + R.graph["algorithm"] = "shortest_augmenting_path" + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 new file mode 100644 index 0000000..e6ed574 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 new file mode 100644 index 0000000..abd0e8a Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 new file mode 100644 index 0000000..cd3ea80 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py new file mode 100644 index 0000000..1649ec8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py @@ -0,0 +1,128 @@ +from itertools import combinations + +import pytest + +import networkx as nx +from 
networkx.algorithms.flow import ( + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +class TestGomoryHuTree: + def minimum_edge_weight(self, T, u, v): + path = nx.shortest_path(T, u, v, weight="weight") + return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + + def compute_cutset(self, G, T_orig, edge): + T = T_orig.copy() + T.remove_edge(*edge) + U, V = list(nx.connected_components(T)) + cutset = set() + for x, nbrs in ((n, G[n]) for n in U): + cutset.update((x, y) for y in nbrs if y in V) + return cutset + + def test_default_flow_function_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_davis_southern_women_graph(self): + G = nx.davis_southern_women_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_florentine_families_graph(self): + G = nx.florentine_families_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + @pytest.mark.slow + def test_les_miserables_graph_cutset(self): + G = nx.les_miserables_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph_cutset(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + u, v = 0, 33 + cut_value, edge = self.minimum_edge_weight(T, u, v) + cutset = self.compute_cutset(G, T, edge) + assert cut_value == len(cutset) + + def test_wikipedia_example(self): + # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree + G = nx.Graph() + G.add_weighted_edges_from( + ( + (0, 1, 1), + (0, 2, 7), + (1, 2, 1), + (1, 3, 3), + (1, 4, 2), + (2, 4, 4), + (3, 4, 1), + (3, 5, 6), + (4, 5, 2), + ) + ) + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v, capacity="weight") == cut_value + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + T = nx.gomory_hu_tree(G) + + def test_empty_raises(self): + with 
pytest.raises(nx.NetworkXError): + G = nx.empty_graph() + T = nx.gomory_hu_tree(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow.py new file mode 100644 index 0000000..36bc5ec --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow.py @@ -0,0 +1,551 @@ +"""Maximum flow algorithms test suite. +""" +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = { + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +} +max_min_funcs = {nx.maximum_flow, nx.minimum_cut} +flow_value_funcs = {nx.maximum_flow_value, nx.minimum_cut_value} +interface_funcs = max_min_funcs & flow_value_funcs +all_funcs = flow_funcs & interface_funcs + + +def compute_cutset(G, partition): + reachable, non_reachable = partition + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + return cutset + + +def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert set(G) == set(flowDict), errmsg + for u in G: + assert set(G[u]) == set(flowDict[u]), errmsg + excess = {u: 0 for u in flowDict} + for u in flowDict: + for v, flow in flowDict[u].items(): + if capacity in G[u][v]: + assert flow <= G[u][v][capacity] + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -solnValue, errmsg + elif u == t: + assert exc == solnValue, errmsg + else: + assert exc == 0, errmsg + + +def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert all(n in G for n in partition[0]), errmsg + assert all(n in G for n in partition[1]), errmsg + cutset = compute_cutset(G, partition) + assert all(G.has_edge(u, v) for (u, v) in cutset), errmsg + assert solnValue == sum(G[u][v][capacity] for (u, v) in cutset), errmsg + H = G.copy() + H.remove_edges_from(cutset) + if not G.is_directed(): + assert not nx.is_connected(H), errmsg + else: + assert not nx.is_strongly_connected(H), errmsg + + +def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="capacity"): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + R = flow_func(G, s, t, capacity) + # Test both legacy and new implementations. 
+ flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + assert flow_value == solnValue, errmsg + validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func) + # Minimum cut + cut_value, partition = nx.minimum_cut( + G, s, t, capacity=capacity, flow_func=flow_func + ) + validate_cuts(G, s, t, solnValue, partition, capacity, flow_func) + + +class TestMaxflowMinCutCommon: + def test_graph1(self): + # Trivial undirected graph + G = nx.Graph() + G.add_edge(1, 2, capacity=1.0) + + solnFlows = {1: {2: 1.0}, 2: {1: 1.0}} + + compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0) + + def test_graph2(self): + # A more complex undirected graph + # adapted from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow + G = nx.Graph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 3, "b": 1}, + "a": {"c": 3, "x": 3}, + "b": {"c": 1, "d": 2, "x": 1}, + "c": {"a": 3, "b": 1, "y": 2}, + "d": {"b": 2, "e": 2}, + "e": {"d": 2, "y": 2}, + "y": {"c": 2, "e": 2}, + } + + compare_flows_and_cuts(G, "x", "y", H, 4.0) + + def test_digraph1(self): + # The classic directed graph example + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1000.0) + G.add_edge("a", "c", capacity=1000.0) + G.add_edge("b", "c", capacity=1.0) + G.add_edge("b", "d", capacity=1000.0) + G.add_edge("c", "d", capacity=1000.0) + + H = { + "a": {"b": 1000.0, "c": 1000.0}, + "b": {"c": 0, "d": 1000.0}, + "c": {"d": 1000.0}, + "d": {}, + } + + compare_flows_and_cuts(G, "a", "d", H, 2000.0) + + def test_digraph2(self): + # An example in which some edges end up with zero flow. + G = nx.DiGraph() + G.add_edge("s", "b", capacity=2) + G.add_edge("s", "c", capacity=1) + G.add_edge("c", "d", capacity=1) + G.add_edge("d", "a", capacity=1) + G.add_edge("b", "a", capacity=2) + G.add_edge("a", "t", capacity=2) + + H = { + "s": {"b": 2, "c": 0}, + "c": {"d": 0}, + "d": {"a": 0}, + "b": {"a": 2}, + "a": {"t": 2}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 2) + + def test_digraph3(self): + # A directed graph example from Cormen et al. 
+ G = nx.DiGraph() + G.add_edge("s", "v1", capacity=16.0) + G.add_edge("s", "v2", capacity=13.0) + G.add_edge("v1", "v2", capacity=10.0) + G.add_edge("v2", "v1", capacity=4.0) + G.add_edge("v1", "v3", capacity=12.0) + G.add_edge("v3", "v2", capacity=9.0) + G.add_edge("v2", "v4", capacity=14.0) + G.add_edge("v4", "v3", capacity=7.0) + G.add_edge("v3", "t", capacity=20.0) + G.add_edge("v4", "t", capacity=4.0) + + H = { + "s": {"v1": 12.0, "v2": 11.0}, + "v2": {"v1": 0, "v4": 11.0}, + "v1": {"v2": 0, "v3": 12.0}, + "v3": {"v2": 0, "t": 19.0}, + "v4": {"v3": 7.0, "t": 4.0}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 23.0) + + def test_digraph4(self): + # A more complex directed graph + # from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + + compare_flows_and_cuts(G, "x", "y", H, 3.0) + + def test_wikipedia_dinitz_example(self): + # Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm + G = nx.DiGraph() + G.add_edge("s", 1, capacity=10) + G.add_edge("s", 2, capacity=10) + G.add_edge(1, 3, capacity=4) + G.add_edge(1, 4, capacity=8) + G.add_edge(1, 2, capacity=2) + G.add_edge(2, 4, capacity=9) + G.add_edge(3, "t", capacity=10) + G.add_edge(4, 3, capacity=6) + G.add_edge(4, "t", capacity=10) + + solnFlows = { + 1: {2: 0, 3: 4, 4: 6}, + 2: {4: 9}, + 3: {"t": 9}, + 4: {3: 5, "t": 10}, + "s": {1: 10, 2: 9}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", solnFlows, 19) + + def test_optional_capacity(self): + # Test optional capacity parameter. 
+ G = nx.DiGraph() + G.add_edge("x", "a", spam=3.0) + G.add_edge("x", "b", spam=1.0) + G.add_edge("a", "c", spam=3.0) + G.add_edge("b", "c", spam=5.0) + G.add_edge("b", "d", spam=4.0) + G.add_edge("d", "e", spam=2.0) + G.add_edge("c", "y", spam=2.0) + G.add_edge("e", "y", spam=3.0) + + solnFlows = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + solnValue = 3.0 + s = "x" + t = "y" + + compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="spam") + + def test_digraph_infcap_edges(self): + # DiGraph with infinite capacity edges + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "b": {"c": 12}, + "c": {"t": 37}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + # DiGraph with infinite capacity digon + G = nx.DiGraph() + G.add_edge("s", "a", capacity=85) + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("c", "a") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t", capacity=37) + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "c": {"a": 0, "t": 37}, + "b": {"c": 12}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph_infcap_path(self): + # Graph with infinite capacity (s, t)-path + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + for flow_func in all_funcs: + pytest.raises(nx.NetworkXUnbounded, flow_func, G, "s", "t") + + def test_graph_infcap_edges(self): + # Undirected graph with infinite capacity edges + G = nx.Graph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "s": 85, "t": 60}, + "b": {"c": 12, "s": 12}, + "c": {"a": 25, "b": 12, "t": 37}, + "t": {"a": 60, "c": 37}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph5(self): + # From ticket #429 by mfrasca. 
+ G = nx.DiGraph() + G.add_edge("s", "a", capacity=2) + G.add_edge("s", "b", capacity=2) + G.add_edge("a", "b", capacity=5) + G.add_edge("a", "t", capacity=1) + G.add_edge("b", "a", capacity=1) + G.add_edge("b", "t", capacity=3) + flowSoln = { + "a": {"b": 1, "t": 1}, + "b": {"a": 0, "t": 3}, + "s": {"a": 2, "b": 2}, + "t": {}, + } + compare_flows_and_cuts(G, "s", "t", flowSoln, 4) + + def test_disconnected(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(1) + assert nx.maximum_flow_value(G, 0, 3) == 0 + flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}} + compare_flows_and_cuts(G, 0, 3, flowSoln, 0) + + def test_source_target_not_in_graph(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(3) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + + def test_source_target_coincide(self): + G = nx.Graph() + G.add_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + def test_multigraphs_raise(self): + G = nx.MultiGraph() + M = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (1, 0)], capacity=True) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + +class TestMaxFlowMinCutInterface: + def setup(self): + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + self.G = G + H = nx.DiGraph() + H.add_edge(0, 1, capacity=1.0) + H.add_edge(1, 2, capacity=1.0) + self.H = H + + def test_flow_func_not_callable(self): + elements = ["this_should_be_callable", 10, {1, 2, 3}] + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + for flow_func in interface_funcs: + for element in elements: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + + def test_flow_func_parameters(self): + G = self.G + fv = 3.0 + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, "x", "y", flow_func=flow_func) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + def test_minimum_cut_no_cutoff(self): + G = self.G + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, + nx.minimum_cut, + G, + "x", + "y", + flow_func=flow_func, + cutoff=1.0, + ) + pytest.raises( + nx.NetworkXError, + nx.minimum_cut_value, + G, + "x", + "y", + flow_func=flow_func, + cutoff=1.0, + ) + + def test_kwargs(self): + G = self.H + fv = 1.0 + to_test = ( + (shortest_augmenting_path, dict(two_phase=True)), + (preflow_push, dict(global_relabel_freq=5)), + ) + for interface_func in interface_funcs: + for flow_func, kwargs in to_test: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs) + if interface_func in 
max_min_funcs: + result = result[0] + assert fv == result, errmsg + + def test_kwargs_default_flow_func(self): + G = self.H + for interface_func in interface_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2 + ) + + def test_reusing_residual(self): + G = self.G + fv = 3.0 + s, t = "x", "y" + R = build_residual_network(G, "capacity") + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + for i in range(3): + result = interface_func( + G, "x", "y", flow_func=flow_func, residual=R + ) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + +# Tests specific to one algorithm +def test_preflow_push_global_relabel_freq(): + G = nx.DiGraph() + G.add_edge(1, 2, capacity=1) + R = preflow_push(G, 1, 2, global_relabel_freq=None) + assert R.graph["flow_value"] == 1 + pytest.raises(nx.NetworkXError, preflow_push, G, 1, 2, global_relabel_freq=-1) + + +def test_preflow_push_makes_enough_space(): + # From ticket #1542 + G = nx.DiGraph() + nx.add_path(G, [0, 1, 3], capacity=1) + nx.add_path(G, [1, 2, 3], capacity=1) + R = preflow_push(G, 0, 3, value_only=False) + assert R.graph["flow_value"] == 1 + + +def test_shortest_augmenting_path_two_phase(): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=1) + nx.add_path(G, ((i, j) for j in range(p)), capacity=1) + G.add_edge((i, p - 1), "t", capacity=1) + R = shortest_augmenting_path(G, "s", "t", two_phase=True) + assert R.graph["flow_value"] == k + R = shortest_augmenting_path(G, "s", "t", two_phase=False) + assert R.graph["flow_value"] == k + + +class TestCutoff: + def test_cutoff(self): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=2) + nx.add_path(G, ((i, j) for j in range(p)), capacity=2) + G.add_edge((i, p - 1), "t", capacity=2) + R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = edmonds_karp(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + + def test_complete_graph_cutoff(self): + G = nx.complete_graph(5) + nx.set_edge_attributes(G, {(u, v): 1 for u, v in G.edges()}, "capacity") + for flow_func in [shortest_augmenting_path, edmonds_karp]: + for cutoff in [3, 2, 1]: + result = nx.maximum_flow_value( + G, 0, 4, flow_func=flow_func, cutoff=cutoff + ) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py new file mode 100644 index 0000000..c62c0a9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py @@ -0,0 +1,149 @@ +"""Maximum flow algorithms test suite on large graphs. 
+""" + +import os + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +def gen_pyramid(N): + # This graph admits a flow of value 1 for which every arc is at + # capacity (except the arcs incident to the sink which have + # infinite capacity). + G = nx.DiGraph() + + for i in range(N - 1): + cap = 1.0 / (i + 2) + for j in range(i + 1): + G.add_edge((i, j), (i + 1, j), capacity=cap) + cap = 1.0 / (i + 1) - cap + G.add_edge((i, j), (i + 1, j + 1), capacity=cap) + cap = 1.0 / (i + 2) - cap + + for j in range(N): + G.add_edge((N - 1, j), "t") + + return G + + +def read_graph(name): + dirname = os.path.dirname(__file__) + path = os.path.join(dirname, name + ".gpickle.bz2") + return nx.read_gpickle(path) + + +def validate_flows(G, s, t, soln_value, R, flow_func): + flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert soln_value == flow_value, errmsg + assert set(G) == set(flow_dict), errmsg + for u in G: + assert set(G[u]) == set(flow_dict[u]), errmsg + excess = {u: 0 for u in flow_dict} + for u in flow_dict: + for v, flow in flow_dict[u].items(): + assert flow <= G[u][v].get("capacity", float("inf")), errmsg + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -soln_value, errmsg + elif u == t: + assert exc == soln_value, errmsg + else: + assert exc == 0, errmsg + + +class TestMaxflowLargeGraph: + def test_complete_graph(self): + N = 50 + G = nx.complete_graph(N) + nx.set_edge_attributes(G, 5, "capacity") + R = build_residual_network(G, "capacity") + kwargs = dict(residual=R) + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs) + assert flow_value == 5 * (N - 1), errmsg + + def test_pyramid(self): + N = 10 + # N = 100 # this gives a graph with 5051 nodes + G = gen_pyramid(N) + R = build_residual_network(G, "capacity") + kwargs = dict(residual=R) + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, (0, 0), "t", **kwargs) + assert flow_value == pytest.approx(1.0, abs=1e-7) + + def test_gl1(self): + G = read_graph("gl1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = dict(residual=R) + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), + # flow_func) + + @pytest.mark.slow + def test_gw1(self): + G = read_graph("gw1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = dict(residual=R) + + for flow_func in flow_funcs: + validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs), flow_func) + + def test_wlm3(self): + G = read_graph("wlm3") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = dict(residual=R) + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), flow_func) + 
+ # for flow_func in flow_funcs: + # validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), + # flow_func) + + def test_preflow_push_global_relabel(self): + G = read_graph("gw1") + R = preflow_push(G, 1, len(G), global_relabel_freq=50) + assert R.graph["flow_value"] == 1202018 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_mincost.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_mincost.py new file mode 100644 index 0000000..5a8c2d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_mincost.py @@ -0,0 +1,469 @@ +import os + +import pytest + +import networkx as nx + + +class TestMinCostFlow: + def test_simple_digraph(self): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 24 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 24 + assert nx.cost_of_flow(G, H) == 24 + assert H == soln + + def test_negcycle_infcap(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_sum_demands_not_zero(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=4) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_no_flow_satisfying_demands(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_transshipment(self): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", 
weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert nx.min_cost_flow_cost(G) == 41 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 41 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 41 + assert nx.cost_of_flow(G, H) == 41 + assert H == soln + + def test_max_flow_min_cost(self): + G = nx.DiGraph() + G.add_edge("s", "a", bandwidth=6) + G.add_edge("s", "c", bandwidth=10, cost=10) + G.add_edge("a", "b", cost=6) + G.add_edge("b", "d", bandwidth=8, cost=7) + G.add_edge("c", "d", cost=10) + G.add_edge("d", "t", bandwidth=5, cost=5) + soln = { + "s": {"a": 5, "c": 0}, + "a": {"b": 5}, + "b": {"d": 5}, + "c": {"d": 0}, + "d": {"t": 5}, + "t": {}, + } + flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost") + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + G.add_edge("t", "s", cost=-100) + flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost") + G.remove_edge("t", "s") + assert flowCost == -410 + assert flow["t"]["s"] == 5 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + def test_digraph1(self): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. + G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 150 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 150 + assert H == soln + assert nx.cost_of_flow(G, H) == 150 + + def test_digraph2(self): + # Example from ticket #430 from mfrasca. Original source: + # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11. 
+ G = nx.DiGraph() + G.add_edge("s", 1, capacity=12) + G.add_edge("s", 2, capacity=6) + G.add_edge("s", 3, capacity=14) + G.add_edge(1, 2, capacity=11, weight=4) + G.add_edge(2, 3, capacity=9, weight=6) + G.add_edge(1, 4, capacity=5, weight=5) + G.add_edge(1, 5, capacity=2, weight=12) + G.add_edge(2, 5, capacity=4, weight=4) + G.add_edge(2, 6, capacity=2, weight=6) + G.add_edge(3, 6, capacity=31, weight=3) + G.add_edge(4, 5, capacity=18, weight=4) + G.add_edge(5, 6, capacity=9, weight=5) + G.add_edge(4, "t", capacity=3) + G.add_edge(5, "t", capacity=7) + G.add_edge(6, "t", capacity=22) + flow = nx.max_flow_min_cost(G, "s", "t") + soln = { + 1: {2: 6, 4: 5, 5: 1}, + 2: {3: 6, 5: 4, 6: 2}, + 3: {6: 20}, + 4: {5: 2, "t": 3}, + 5: {6: 0, "t": 7}, + 6: {"t": 22}, + "s": {1: 12, 2: 6, 3: 14}, + "t": {}, + } + assert flow == soln + + G.add_edge("t", "s", weight=-100) + flowCost, flow = nx.capacity_scaling(G) + G.remove_edge("t", "s") + assert flow["t"]["s"] == 32 + assert flowCost == -3007 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow) == 193 + + def test_digraph3(self): + """Combinatorial Optimization: Algorithms and Complexity, + Papadimitriou Steiglitz at page 140 has an example, 7.1, but that + admits multiple solutions, so I alter it a bit. From ticket #430 + by mfrasca.""" + + G = nx.DiGraph() + G.add_edge("s", "a") + G["s"]["a"].update({0: 2, 1: 4}) + G.add_edge("s", "b") + G["s"]["b"].update({0: 2, 1: 1}) + G.add_edge("a", "b") + G["a"]["b"].update({0: 5, 1: 2}) + G.add_edge("a", "t") + G["a"]["t"].update({0: 1, 1: 5}) + G.add_edge("b", "a") + G["b"]["a"].update({0: 1, 1: 3}) + G.add_edge("b", "t") + G["b"]["t"].update({0: 3, 1: 2}) + + "PS.ex.7.1: testing main function" + sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1) + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert 23 == nx.cost_of_flow(G, sol, weight=1) + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + + G.add_edge("t", "s") + G["t"]["s"].update({1: -100}) + flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1) + G.remove_edge("t", "s") + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert sol["t"]["s"] == 4 + assert flowCost == -377 + del sol["t"]["s"] + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + assert nx.cost_of_flow(G, sol, weight=1) == 23 + + def test_zero_capacity_edges(self): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 6 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 6 + assert H == soln + assert nx.cost_of_flow(G, H) == 6 + + def test_digon(self): + """Check if digons are handled properly. 
Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + assert nx.min_cost_flow_cost(G) == 2857140 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 2857140 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 2857140 + assert H == soln + assert nx.cost_of_flow(G, H) == 2857140 + + def test_deadend(self): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G) + + def test_infinite_capacity_neg_digon(self): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_finite_capacity_neg_digon(self): + """The digon should receive the maximum amount of flow it can handle. + Taken from ticket #749 by @chuongdo.""" + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1, weight=-1) + G.add_edge("b", "a", capacity=1, weight=-1) + min_cost = -2 + assert nx.min_cost_flow_cost(G) == min_cost + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {"a": {"b": 1}, "b": {"a": 1}} + assert nx.cost_of_flow(G, H) == -2 + + def test_multidigraph(self): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + def test_negative_selfloops(self): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + def test_bone_shaped(self): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G.add_node(0, demand=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G.nodes[0]["demand"] = 0 + G.add_node(1, demand=0) + G.add_edge(0, 1, weight=-float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["weight"] = 0 + G.add_edge(0, 0, weight=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G[0][0]["weight"] = 0 + G[0][1]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["capacity"] = 0 + G[0][0]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_large(self): + fname = os.path.join(os.path.dirname(__file__), "netgen-2.gpickle.bz2") + G = nx.read_gpickle(fname) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + flowCost, flowDict = nx.capacity_scaling(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py new file mode 100644 index 0000000..0c25db9 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py @@ -0,0 +1,379 @@ +import os + +import pytest + +import networkx as nx + + +@pytest.fixture +def simple_flow_graph(): + G = nx.DiGraph() + G.add_node("a", demand=0) + G.add_node("b", demand=-5) + G.add_node("c", demand=50000000) + G.add_node("d", demand=-49999995) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + return G + + +@pytest.fixture +def simple_no_flow_graph(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + return G + + +def get_flowcost_from_flowdict(G, flowDict): + """Returns flow cost calculated from flow dictionary""" + flowCost = 0 + for u in flowDict.keys(): + for v in flowDict[u].keys(): + flowCost += flowDict[u][v] * G[u][v]["weight"] + return flowCost + + +def test_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_neg_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": -inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_infinite_weight_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_edge_attributes( + G, {("a", "b"): {"weight": inf}, ("b", "d"): {"weight": inf}} + ) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_nonzero_net_demand_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_node_attributes(G, {"b": {"demand": -4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_negative_capacity_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_edge_attributes(G, {("a", "b"): {"weight": 1}, ("b", "d"): {"capacity": -9}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_no_flow_satisfying_demands(simple_no_flow_graph): + G = simple_no_flow_graph + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_sum_demands_not_zero(simple_no_flow_graph): + G = simple_no_flow_graph + nx.set_node_attributes(G, {"t": {"demand": 4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_google_or_tools_example(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3] + supplies = [20, 0, 0, -5, -15] + answer = 150 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_google_or_tools_example2(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4, 3] + end_nodes = [1, 2, 
2, 3, 4, 3, 4, 4, 2, 5] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5, 10] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3, 4] + supplies = [23, 0, 0, -5, -15, -3] + answer = 183 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_large(): + fname = os.path.join(os.path.dirname(__file__), "netgen-2.gpickle.bz2") + G = nx.read_gpickle(fname) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + + +def test_simple_digraph(): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + + +def test_negcycle_infcap(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_transshipment(): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert H == soln + + +def test_digraph1(): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. 
+ G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + + +def test_zero_capacity_edges(): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + + +def test_digon(): + """Check if digons are handled properly. Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + + +def test_deadend(): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_infinite_capacity_neg_digon(): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + +def test_multidigraph(): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + +def test_negative_selfloops(): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + +def test_bone_shaped(): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + +def test_graphs_type_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 new file mode 100644 index 0000000..8ce935a Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/utils.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/utils.py new file mode 100644 index 0000000..e1e3e6d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/flow/utils.py @@ -0,0 +1,181 @@ +""" +Utility classes and functions for network flow algorithms. +""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "CurrentEdge", + "Level", + "GlobalRelabelThreshold", + "build_residual_network", + "detect_unboundedness", + "build_flow_dict", +] + + +class CurrentEdge: + """Mechanism for iterating over out-edges incident to a node in a circular + manner. StopIteration exception is raised when wraparound occurs. + """ + + __slots__ = ("_edges", "_it", "_curr") + + def __init__(self, edges): + self._edges = edges + if self._edges: + self._rewind() + + def get(self): + return self._curr + + def move_to_next(self): + try: + self._curr = next(self._it) + except StopIteration: + self._rewind() + raise + + def _rewind(self): + self._it = iter(self._edges.items()) + self._curr = next(self._it) + + +class Level: + """Active and inactive nodes in a level.""" + + __slots__ = ("active", "inactive") + + def __init__(self): + self.active = set() + self.inactive = set() + + +class GlobalRelabelThreshold: + """Measurement of work before the global relabeling heuristic should be + applied. + """ + + def __init__(self, n, m, freq): + self._threshold = (n + m) / freq if freq else float("inf") + self._work = 0 + + def add_work(self, work): + self._work += work + + def is_reached(self): + return self._work >= self._threshold + + def clear_work(self): + self._work = 0 + + +def build_residual_network(G, capacity): + """Build a residual network and initialize a zero flow. 
+ + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + """ + if G.is_multigraph(): + raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).") + + R = nx.DiGraph() + R.add_nodes_from(G) + + inf = float("inf") + # Extract edges with positive capacities. Self loops excluded. + edge_list = [ + (u, v, attr) + for u, v, attr in G.edges(data=True) + if u != v and attr.get(capacity, inf) > 0 + ] + # Simulate infinity with three times the sum of the finite edge capacities + # or any positive value if the sum is zero. This allows the + # infinite-capacity edges to be distinguished for unboundedness detection + # and directly participate in residual capacity calculation. If the maximum + # flow is finite, these edges cannot appear in the minimum cut and thus + # guarantee correctness. Since the residual capacity of an + # infinite-capacity edge is always at least 2/3 of inf, while that of an + # finite-capacity edge is at most 1/3 of inf, if an operation moves more + # than 1/3 of inf units of flow to t, there must be an infinite-capacity + # s-t path in G. + inf = ( + 3 + * sum( + attr[capacity] + for u, v, attr in edge_list + if capacity in attr and attr[capacity] != inf + ) + or 1 + ) + if G.is_directed(): + for u, v, attr in edge_list: + r = min(attr.get(capacity, inf), inf) + if not R.has_edge(u, v): + # Both (u, v) and (v, u) must be present in the residual + # network. + R.add_edge(u, v, capacity=r) + R.add_edge(v, u, capacity=0) + else: + # The edge (u, v) was added when (v, u) was visited. + R[u][v]["capacity"] = r + else: + for u, v, attr in edge_list: + # Add a pair of edges with equal residual capacities. + r = min(attr.get(capacity, inf), inf) + R.add_edge(u, v, capacity=r) + R.add_edge(v, u, capacity=r) + + # Record the value simulating infinity. + R.graph["inf"] = inf + + return R + + +def detect_unboundedness(R, s, t): + """Detect an infinite-capacity s-t path in R.""" + q = deque([s]) + seen = {s} + inf = R.graph["inf"] + while q: + u = q.popleft() + for v, attr in R[u].items(): + if attr["capacity"] == inf and v not in seen: + if v == t: + raise nx.NetworkXUnbounded( + "Infinite capacity path, flow unbounded above." 
+ ) + seen.add(v) + q.append(v) + + +def build_flow_dict(G, R): + """Build a flow dictionary from a residual network.""" + flow_dict = {} + for u in G: + flow_dict[u] = {v: 0 for v in G[u]} + flow_dict[u].update( + (v, attr["flow"]) for v, attr in R[u].items() if attr["flow"] > 0 + ) + return flow_dict diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/graph_hashing.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/graph_hashing.py new file mode 100644 index 0000000..b6e6312 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/graph_hashing.py @@ -0,0 +1,304 @@ +""" +Functions for hashing graphs to strings. +Isomorphic graphs should be assigned identical hashes. +For now, only Weisfeiler-Lehman hashing is implemented. +""" + +from collections import Counter, defaultdict +from hashlib import blake2b + +__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"] + + +def _hash_label(label, digest_size): + return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest() + + +def _init_node_labels(G, edge_attr, node_attr): + if node_attr: + return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)} + elif edge_attr: + return {u: "" for u in G} + else: + return {u: str(deg) for u, deg in G.degree()} + + +def _neighborhood_aggregate(G, node, node_labels, edge_attr=None): + """ + Compute new labels for given node by aggregating + the labels of each node's neighbors. + """ + label_list = [] + for nbr in G.neighbors(node): + prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr]) + label_list.append(prefix + node_labels[nbr]) + return node_labels[node] + "".join(sorted(label_list)) + + +def weisfeiler_lehman_graph_hash( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """Return Weisfeiler Lehman (WL) graph hash. + + The function iteratively aggregates and hashes neighbourhoods of each node. + After each node's neighbors are hashed to obtain updated node labels, + a hashed histogram of resulting labels is returned as the final hash. + + Hashes are identical for isomorphic graphs, and there are strong guarantees + that non-isomorphic graphs will get different hashes. See [1]_ for details. + + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. + iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. + digest_size: int, default=16 + Size (in bytes) of blake2b hash digest to use for hashing node labels. + + Returns + ------- + h : string + Hexadecimal string corresponding to hash of the input graph. + + Examples + -------- + Two graphs with edge attributes that are isomorphic, except for + differences in the edge labels. + + >>> G1 = nx.Graph() + >>> G1.add_edges_from( + ... [ + ... (1, 2, {"label": "A"}), + ... (2, 3, {"label": "A"}), + ... (3, 1, {"label": "A"}), + ... (1, 4, {"label": "B"}), + ... ] + ... ) + >>> G2 = nx.Graph() + >>> G2.add_edges_from( + ... [ + ... (5, 6, {"label": "B"}), + ... 
(6, 7, {"label": "A"}), + ... (7, 5, {"label": "A"}), + ... (7, 8, {"label": "A"}), + ... ] + ... ) + + Omitting the `edge_attr` option results in identical hashes. + + >>> nx.weisfeiler_lehman_graph_hash(G1) + '7bc4dde9a09d0b94c5097b219891d81a' + >>> nx.weisfeiler_lehman_graph_hash(G2) + '7bc4dde9a09d0b94c5097b219891d81a' + + With edge labels, the graphs are no longer assigned + the same hash digest. + + >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label") + 'c653d85538bcf041d88c011f4f905f10' + >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label") + '3dcd84af1ca855d0eff3c978d88e7ec7' + + Notes + ----- + To return the WL hashes of each subgraph of a graph, use + `weisfeiler_lehman_subgraph_hashes`. + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + + See also + -------- + weisfeiler_lehman_subgraph_hashes + """ + + def weisfeiler_lehman_step(G, labels, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node. + """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + new_labels[node] = _hash_label(label, digest_size) + return new_labels + + # set initial node labels + node_labels = _init_node_labels(G, edge_attr, node_attr) + + subgraph_hash_counts = [] + for _ in range(iterations): + node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr) + counter = Counter(node_labels.values()) + # sort the counter, extend total counts + subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0])) + + # hash the final counter + return _hash_label(str(tuple(subgraph_hash_counts)), digest_size) + + +def weisfeiler_lehman_subgraph_hashes( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """ + Return a dictionary of subgraph hashes by node. + + The dictionary is keyed by node to a list of hashes in increasingly + sized induced subgraphs containing the nodes within 2*k edges + of the key node for increasing integer k until all nodes are included. + + The function iteratively aggregates and hashes neighbourhoods of each node. + This is achieved for each step by replacing for each node its label from + the previous iteration with its hashed 1-hop neighborhood aggregate. + The new node label is then appended to a list of node labels for each + node. + + To aggregate neighborhoods at each step for a node $n$, all labels of + nodes adjacent to $n$ are concatenated. If the `edge_attr` parameter is set, + labels for each neighboring node are prefixed with the value of this attribute + along the connecting edge from this neighbor to node $n$. The resulting string + is then hashed to compress this information into a fixed digest size. + + Thus, at the $i$th iteration nodes within $2i$ distance influence any given + hashed node label. We can therefore say that at depth $i$ for node $n$ + we have a hash for a subgraph induced by the $2i$-hop neighborhood of $n$. + + Can be used to create general Weisfeiler-Lehman graph kernels, or + generate features for graphs or nodes, for example to generate 'words' in a + graph as seen in the 'graph2vec' algorithm. + See [1]_ & [2]_ respectively for details.
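+ + As an editorial illustration of the aggregation above (not part of the + original docstring): a node labelled "2" whose neighbours carry labels "3" + and "2" aggregates, via `_neighborhood_aggregate`, to the string + "2" + "".join(sorted(["3", "2"])) == "223", which is then hashed to become + the node's next label.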
+ + Hashes are identical for isomorphic subgraphs and there exist strong + guarantees that non-isomorphic graphs will get different hashes. + See [1]_ for details. + + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. + iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. + digest_size: int, default=16 + Size (in bytes) of blake2b hash digest to use for hashing node labels. + The default size is 16 bytes. + + Returns + ------- + node_subgraph_hashes : dict + A dictionary with each key given by a node in G, and each value given + by the subgraph hashes in order of depth from the key node. + + Examples + -------- + Finding similar nodes in different graphs: + + >>> G1 = nx.Graph() + >>> G1.add_edges_from([ + ... (1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7) + ... ]) + >>> G2 = nx.Graph() + >>> G2.add_edges_from([ + ... (1, 3), (2, 3), (1, 6), (1, 5), (4, 6) + ... ]) + >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3, digest_size=8) + >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2, iterations=3, digest_size=8) + + Even though G1 and G2 are not isomorphic (they have different numbers of edges), + the hash sequences of depth 3 for node 1 in G1 and node 5 in G2 are similar: + + >>> g1_hashes[1] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0'] + >>> g2_hashes[5] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc'] + + The first 2 WL subgraph hashes match. From this we can conclude that it is very + likely that the 4-hop neighborhoods of these nodes are isomorphic: each + iteration aggregates 1-hop neighbourhoods, meaning hashes at depth $n$ are influenced + by every node within $2n$ hops. + + However, the 6-hop neighborhoods are not isomorphic, since their 3rd hashes do + not match. + + These nodes may be candidates to be classified together since their local topology + is similar. + + Notes + ----- + To hash the full graph when subgraph hashes are not needed, use + `weisfeiler_lehman_graph_hash` for efficiency. + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan, + Lihui Chen, Yang Liu and Shantanu Jaiswal. graph2vec: Learning + Distributed Representations of Graphs. arXiv. 2017. + https://arxiv.org/pdf/1707.05005.pdf + + See also + -------- + weisfeiler_lehman_graph_hash + """ + + def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node.
+ Appends the new hashed label to the dictionary of subgraph hashes + originating from and indexed by each node in G + """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + hashed_label = _hash_label(label, digest_size) + new_labels[node] = hashed_label + node_subgraph_hashes[node].append(hashed_label) + return new_labels + + node_labels = _init_node_labels(G, edge_attr, node_attr) + + node_subgraph_hashes = defaultdict(list) + for _ in range(iterations): + node_labels = weisfeiler_lehman_step( + G, node_labels, node_subgraph_hashes, edge_attr + ) + + return dict(node_subgraph_hashes) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/graphical.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/graphical.py new file mode 100644 index 0000000..da27e5f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/graphical.py @@ -0,0 +1,405 @@ +"""Test sequences for graphiness. +""" +import heapq + +import networkx as nx + +__all__ = [ + "is_graphical", + "is_multigraphical", + "is_pseudographical", + "is_digraphical", + "is_valid_degree_sequence_erdos_gallai", + "is_valid_degree_sequence_havel_hakimi", +] + + +def is_graphical(sequence, method="eg"): + """Returns True if sequence is a valid degree sequence. + + A degree sequence is valid if some graph can realize it. + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + method : "eg" | "hh" (default: 'eg') + The method used to validate the degree sequence. + "eg" corresponds to the Erdős-Gallai algorithm + [EG1960]_, [choudum1986]_, and + "hh" to the Havel-Hakimi algorithm + [havel1955]_, [hakimi1962]_, [CL1996]_. + + Returns + ------- + valid : bool + True if the sequence is a valid degree sequence and False if not. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> sequence = (d for n, d in G.degree()) + >>> nx.is_graphical(sequence) + True + + References + ---------- + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on + graph sequences." Bulletin of the Australian Mathematical Society, 33, + pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872 + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. 
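+ + As a quick negative check (editor's example, not in the original + docstring): any sequence with an odd degree sum is rejected, since every + edge contributes exactly 2 to the sum. + + >>> nx.is_graphical([1, 1, 1]) + False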
+ """ + if method == "eg": + valid = is_valid_degree_sequence_erdos_gallai(list(sequence)) + elif method == "hh": + valid = is_valid_degree_sequence_havel_hakimi(list(sequence)) + else: + msg = "`method` must be 'eg' or 'hh'" + raise nx.NetworkXException(msg) + return valid + + +def _basic_graphical_tests(deg_sequence): + # Sort and perform some simple tests on the sequence + deg_sequence = nx.utils.make_list_of_ints(deg_sequence) + p = len(deg_sequence) + num_degs = [0] * p + dmax, dmin, dsum, n = 0, p, 0, 0 + for d in deg_sequence: + # Reject if degree is negative or larger than the sequence length + if d < 0 or d >= p: + raise nx.NetworkXUnfeasible + # Process only the non-zero integers + elif d > 0: + dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1 + num_degs[d] += 1 + # Reject sequence if it has odd sum or is oversaturated + if dsum % 2 or dsum > n * (n - 1): + raise nx.NetworkXUnfeasible + return dmax, dmin, dsum, n, num_degs + + +def is_valid_degree_sequence_havel_hakimi(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation proceeds using the Havel-Hakimi theorem + [havel1955]_, [hakimi1962]_, [CL1996]_. + Worst-case run time is $O(s)$ where $s$ is the sum of the sequence. + + Parameters + ---------- + deg_sequence : list + A list of integers where each element specifies the degree of a node + in a graph. + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Notes + ----- + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [1]_. + + References + ---------- + .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. + """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + modstubs = [0] * (dmax + 1) + # Successively reduce degree sequence by removing the maximum degree + while n > 0: + # Retrieve the maximum degree in the sequence + while num_degs[dmax] == 0: + dmax -= 1 + # If there are not enough stubs to connect to, then the sequence is + # not graphical + if dmax > n - 1: + return False + + # Remove largest stub in list + num_degs[dmax], n = num_degs[dmax] - 1, n - 1 + # Reduce the next dmax largest stubs + mslen = 0 + k = dmax + for i in range(dmax): + while num_degs[k] == 0: + k -= 1 + num_degs[k], n = num_degs[k] - 1, n - 1 + if k > 1: + modstubs[mslen] = k - 1 + mslen += 1 + # Add back to the list any non-zero stubs that were removed + for i in range(mslen): + stub = modstubs[i] + num_degs[stub], n = num_degs[stub] + 1, n + 1 + return True + + +def is_valid_degree_sequence_erdos_gallai(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation is done using the Erdős-Gallai theorem [EG1960]_. 
+ + Parameters + ---------- + deg_sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Notes + ----- + + This implementation uses an equivalent form of the Erdős-Gallai criterion. + Worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + Specifically, a sequence d is graphical if and only if the + sum of the sequence is even and for all strong indices k in the sequence, + + .. math:: + + \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{i=k+1}^{n} \min(d_i,k) + = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j ) + + A strong index k is any index where d_k >= k and the value n_j is the + number of occurrences of j in d. The maximal strong index is called the + Durfee index. + + This particular rearrangement comes from the proof of Theorem 3 in [2]_. + + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [2]_. + + References + ---------- + .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai", + Discrete Mathematics, 265, pp. 417-420 (2003). + .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + # Perform the EG checks using the reformulation of Zverovich and Zverovich + k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0 + for dk in range(dmax, dmin - 1, -1): + if dk < k + 1: # Check if already past Durfee index + return True + if num_degs[dk] > 0: + run_size = num_degs[dk] # Process a run of identical-valued degrees + if dk < k + run_size: # Check if end of run is past Durfee index + run_size = dk - k # Adjust back to Durfee index + sum_deg += run_size * dk + for v in range(run_size): + sum_nj += num_degs[k + v] + sum_jnj += (k + v) * num_degs[k + v] + k += run_size + if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj: + return False + return True + + +def is_multigraphical(sequence): + """Returns True if some multigraph can realize the sequence. + + Parameters + ---------- + sequence : list + A list of integers + + Returns + ------- + valid : bool + True if `sequence` is a multigraphic degree sequence and False if not. + + Notes + ----- + The worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + References + ---------- + .. [1] S. L. Hakimi. "On the realizability of a set of integers as + degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506 + (1962). + """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + dsum, dmax = 0, 0 + for d in deg_sequence: + if d < 0: + return False + dsum, dmax = dsum + d, max(dmax, d) + if dsum % 2 or dsum < 2 * dmax: + return False + return True + + +def is_pseudographical(sequence): + """Returns True if some pseudograph can realize the sequence. + + Every nonnegative integer sequence with an even sum is pseudographical + (see [1]_).
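+ + For instance (editor's example): ``[1, 3, 2]`` has even sum 6 and is + therefore pseudographical, while an odd-sum sequence is not. + + >>> nx.is_pseudographical([1, 3, 2]) + True + >>> nx.is_pseudographical([1, 1, 1]) + False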
+ + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + Returns + ------- + valid : bool + True if the sequence is a pseudographic degree sequence and False if not. + + Notes + ----- + The worst-case run time is $O(n)$ where n is the length of the sequence. + + References + ---------- + .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs + and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12), + pp. 778-782 (1976). + """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0 + + +def is_digraphical(in_sequence, out_sequence): + r"""Returns True if some directed graph can realize the in- and out-degree + sequences. + + Parameters + ---------- + in_sequence : list or iterable container + A sequence of integer node in-degrees + + out_sequence : list or iterable container + A sequence of integer node out-degrees + + Returns + ------- + valid : bool + True if in and out-sequences are digraphic False if not. + + Notes + ----- + This algorithm is from Kleitman and Wang [1]_. + The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the + sum and length of the sequences respectively. + + References + ---------- + .. [1] D.J. Kleitman and D.L. Wang + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973) + """ + try: + in_deg_sequence = nx.utils.make_list_of_ints(in_sequence) + out_deg_sequence = nx.utils.make_list_of_ints(out_sequence) + except nx.NetworkXError: + return False + # Process the sequences and form two heaps to store degree pairs with + # either zero or non-zero out degrees + sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence) + maxn = max(nin, nout) + maxin = 0 + if maxn == 0: + return True + stubheap, zeroheap = [], [] + for n in range(maxn): + in_deg, out_deg = 0, 0 + if n < nout: + out_deg = out_deg_sequence[n] + if n < nin: + in_deg = in_deg_sequence[n] + if in_deg < 0 or out_deg < 0: + return False + sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg) + if in_deg > 0: + stubheap.append((-1 * out_deg, -1 * in_deg)) + elif out_deg > 0: + zeroheap.append(-1 * out_deg) + if sumin != sumout: + return False + heapq.heapify(stubheap) + heapq.heapify(zeroheap) + + modstubs = [(0, 0)] * (maxin + 1) + # Successively reduce degree sequence by removing the maximum out degree + while stubheap: + # Take the first value in the sequence with non-zero in degree + (freeout, freein) = heapq.heappop(stubheap) + freein *= -1 + if freein > len(stubheap) + len(zeroheap): + return False + + # Attach out stubs to the nodes with the most in stubs + mslen = 0 + for i in range(freein): + if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]): + stubout = heapq.heappop(zeroheap) + stubin = 0 + else: + (stubout, stubin) = heapq.heappop(stubheap) + if stubout == 0: + return False + # Check if target is now totally connected + if stubout + 1 < 0 or stubin < 0: + modstubs[mslen] = (stubout + 1, stubin) + mslen += 1 + + # Add back the nodes to the heap that still have available stubs + for i in range(mslen): + stub = modstubs[i] + if stub[1] < 0: + heapq.heappush(stubheap, stub) + else: + heapq.heappush(zeroheap, stub[0]) + if freeout < 0: + heapq.heappush(zeroheap, freeout) + return True diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/hierarchy.py 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/hierarchy.py new file mode 100644 index 0000000..4a4bc29 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/hierarchy.py @@ -0,0 +1,47 @@ +""" +Flow Hierarchy. +""" +import networkx as nx + +__all__ = ["flow_hierarchy"] + + +def flow_hierarchy(G, weight=None): + """Returns the flow hierarchy of a directed network. + + Flow hierarchy is defined as the fraction of edges not participating + in cycles in a directed graph [1]_. + + Parameters + ---------- + G : DiGraph or MultiDiGraph + A directed graph + + weight : key, optional (default=None) + Attribute to use for edge weights. If None, the weight defaults to 1. + + Returns + ------- + h : float + Flow hierarchy value + + Notes + ----- + The algorithm described in [1]_ computes the flow hierarchy through + exponentiation of the adjacency matrix. This function implements an + alternative approach that finds strongly connected components. + An edge is in a cycle if and only if it is in a strongly connected + component, which can be found in $O(m)$ time using Tarjan's algorithm. + + References + ---------- + .. [1] Luo, J.; Magee, C.L. (2011), + Detecting evolving patterns of self-organizing networks by flow + hierarchy measurement, Complexity, Volume 16 Issue 6 53-61. + DOI: 10.1002/cplx.20368 + http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf + """ + if not G.is_directed(): + raise nx.NetworkXError("G must be a digraph in flow_hierarchy") + scc = nx.strongly_connected_components(G) + return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/hybrid.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/hybrid.py new file mode 100644 index 0000000..d350fe2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/hybrid.py @@ -0,0 +1,193 @@ +""" +Provides functions for finding and testing for locally `(k, l)`-connected +graphs. + +""" +import copy + +import networkx as nx + +__all__ = ["kl_connected_subgraph", "is_kl_connected"] + + +def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False): + """Returns the maximum locally `(k, l)`-connected subgraph of `G`. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph in which to find a maximum locally `(k, l)`-connected + subgraph. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If True, this function uses an algorithm that takes slightly + more time but less memory. + + same_as_graph : bool + If True then return a tuple of the form `(H, is_same)`, + where `H` is the maximum locally `(k, l)`-connected subgraph and + `is_same` is a Boolean representing whether `G` is locally `(k, + l)`-connected (and hence, whether `H` is simply a copy of the input + graph `G`). + + Returns + ------- + NetworkX graph or two-tuple + If `same_as_graph` is True, then this function returns a + two-tuple as described above. Otherwise, it returns only the maximum + locally `(k, l)`-connected subgraph. + + See also + -------- + is_kl_connected + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu.
"The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + H = copy.deepcopy(G) # subgraph we construct by removing from G + + graphOK = True + deleted_some = True # hack to start off the while loop + while deleted_some: + deleted_some = False + # We use `for edge in list(H.edges()):` instead of + # `for edge in H.edges():` because we edit the graph `H` in + # the loop. Hence using an iterator will result in + # `RuntimeError: dictionary changed size during iteration` + for edge in list(H.edges()): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + for w in verts.copy(): + verts.update(G[w]) + G2 = G.subgraph(verts).copy() + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if prev != w: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? + except nx.NetworkXNoPath: + path = False + # No Other Paths + if accept == 0: + H.remove_edge(u, v) + deleted_some = True + if graphOK: + graphOK = False + # We looked through all edges and removed none of them. + # So, H is the maximal (k,l)-connected subgraph of G + if same_as_graph: + return (H, graphOK) + return H + + +def is_kl_connected(G, k, l, low_memory=False): + """Returns True if and only if `G` is locally `(k, l)`-connected. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph to test for local `(k, l)`-connectedness. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + Returns + ------- + bool + Whether the graph is locally `(k, l)`-connected subgraph. + + See also + -------- + kl_connected_subgraph + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + graphOK = True + for edge in G.edges(): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + [verts.update(G.neighbors(w)) for w in verts.copy()] + G2 = G.subgraph(verts) + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if w != prev: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? 
+ except nx.NetworkXNoPath: + path = False + # No Other Paths + if accept == 0: + graphOK = False + break + # return status + return graphOK diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isolate.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isolate.py new file mode 100644 index 0000000..e81e722 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isolate.py @@ -0,0 +1,103 @@ +""" +Functions for identifying isolate (degree zero) nodes. +""" + +__all__ = ["is_isolate", "isolates", "number_of_isolates"] + + +def is_isolate(G, n): + """Determines whether a node is an isolate. + + An *isolate* is a node with no neighbors (that is, with degree + zero). For directed graphs, this means no in-neighbors and no + out-neighbors. + + Parameters + ---------- + G : NetworkX graph + + n : node + A node in `G`. + + Returns + ------- + is_isolate : bool + True if and only if `n` has no neighbors. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1, 2) + >>> G.add_node(3) + >>> nx.is_isolate(G, 2) + False + >>> nx.is_isolate(G, 3) + True + """ + return G.degree(n) == 0 + + +def isolates(G): + """Iterator over isolates in the graph. + + An *isolate* is a node with no neighbors (that is, with degree + zero). For directed graphs, this means no in-neighbors and no + out-neighbors. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + iterator + An iterator over the isolates of `G`. + + Examples + -------- + To get a list of all isolates of a graph, use the :class:`list` + constructor:: + + >>> G = nx.Graph() + >>> G.add_edge(1, 2) + >>> G.add_node(3) + >>> list(nx.isolates(G)) + [3] + + To remove all isolates in the graph, first create a list of the + isolates, then use :meth:`Graph.remove_nodes_from`:: + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> list(G) + [1, 2] + + For digraphs, isolates have zero in-degree and zero out-degree:: + + >>> G = nx.DiGraph([(0, 1), (1, 2)]) + >>> G.add_node(3) + >>> list(nx.isolates(G)) + [3] + + """ + return (n for n, d in G.degree() if d == 0) + + +def number_of_isolates(G): + """Returns the number of isolates in the graph. + + An *isolate* is a node with no neighbors (that is, with degree + zero). For directed graphs, this means no in-neighbors and no + out-neighbors. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + int + The number of degree zero nodes in the graph `G`. + + """ + # TODO This can be parallelized.
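+ # Editor's sketch of the TODO above (hypothetical): the nodes could be + # split into chunks and is_isolate(G, n) summed per chunk in worker + # processes, e.g. with concurrent.futures.ProcessPoolExecutor; the + # sequential sum below is O(n) and usually fast enough.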
+ return sum(1 for v in isolates(G)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/__init__.py new file mode 100644 index 0000000..ddcedea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/__init__.py @@ -0,0 +1,6 @@ +from networkx.algorithms.isomorphism.isomorph import * +from networkx.algorithms.isomorphism.vf2userfunc import * +from networkx.algorithms.isomorphism.matchhelpers import * +from networkx.algorithms.isomorphism.temporalisomorphvf2 import * +from networkx.algorithms.isomorphism.ismags import * +from networkx.algorithms.isomorphism.tree_isomorphism import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/ismags.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/ismags.py new file mode 100644 index 0000000..bfb5eea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/ismags.py @@ -0,0 +1,1170 @@ +""" +**************** +ISMAGS Algorithm +**************** + +Provides a Python implementation of the ISMAGS algorithm. [1]_ + +It is capable of finding (subgraph) isomorphisms between two graphs, taking the +symmetry of the subgraph into account. In most cases the VF2 algorithm is +faster (at least on small graphs) than this implementation, but in some cases +there is an exponential number of isomorphisms that are symmetrically +equivalent. In that case, the ISMAGS algorithm will provide only one solution +per symmetry group. + +>>> petersen = nx.petersen_graph() +>>> ismags = nx.isomorphism.ISMAGS(petersen, petersen) +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=False)) +>>> len(isomorphisms) +120 +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=True)) +>>> answer = [{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}] +>>> answer == isomorphisms +True + +In addition, this implementation also provides an interface to find the +largest common induced subgraph [2]_ between any two graphs, again taking +symmetry into account. Given `graph` and `subgraph` the algorithm will remove +nodes from the `subgraph` until `subgraph` is isomorphic to a subgraph of +`graph`. Since only the symmetry of `subgraph` is taken into account it is +worth thinking about how you provide your graphs: + +>>> graph1 = nx.path_graph(4) +>>> graph2 = nx.star_graph(3) +>>> ismags = nx.isomorphism.ISMAGS(graph1, graph2) +>>> ismags.is_isomorphic() +False +>>> largest_common_subgraph = list(ismags.largest_common_subgraph()) +>>> answer = [{1: 0, 0: 1, 2: 2}, {2: 0, 1: 1, 3: 2}] +>>> answer == largest_common_subgraph +True +>>> ismags2 = nx.isomorphism.ISMAGS(graph2, graph1) +>>> largest_common_subgraph = list(ismags2.largest_common_subgraph()) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 0: 1, 3: 2}, +... {2: 0, 0: 1, 1: 2}, +... {2: 0, 0: 1, 3: 2}, +... {3: 0, 0: 1, 1: 2}, +... {3: 0, 0: 1, 2: 2}, +... ] +>>> answer == largest_common_subgraph +True + +However, when not taking symmetry into account, it doesn't matter: + +>>> largest_common_subgraph = list(ismags.largest_common_subgraph(symmetry=False)) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 2: 1, 0: 2}, +... {2: 0, 1: 1, 3: 2}, +... {2: 0, 3: 1, 1: 2}, +... {1: 0, 0: 1, 2: 3}, +... {1: 0, 2: 1, 0: 3}, +... {2: 0, 1: 1, 3: 3}, +... {2: 0, 3: 1, 1: 3}, +... {1: 0, 0: 2, 2: 3}, +... {1: 0, 2: 2, 0: 3}, +... {2: 0, 1: 2, 3: 3}, +... {2: 0, 3: 2, 1: 3}, +... 
] +>>> answer == largest_common_subgraph +True +>>> largest_common_subgraph = list(ismags2.largest_common_subgraph(symmetry=False)) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 0: 1, 3: 2}, +... {2: 0, 0: 1, 1: 2}, +... {2: 0, 0: 1, 3: 2}, +... {3: 0, 0: 1, 1: 2}, +... {3: 0, 0: 1, 2: 2}, +... {1: 1, 0: 2, 2: 3}, +... {1: 1, 0: 2, 3: 3}, +... {2: 1, 0: 2, 1: 3}, +... {2: 1, 0: 2, 3: 3}, +... {3: 1, 0: 2, 1: 3}, +... {3: 1, 0: 2, 2: 3}, +... ] +>>> answer == largest_common_subgraph +True + +Notes +----- + - The current implementation works for undirected graphs only. The algorithm + in general should work for directed graphs as well though. + - Node keys for both provided graphs need to be fully orderable as well as + hashable. + - Node and edge equality is assumed to be transitive: if A is equal to B, and + B is equal to C, then A is equal to C. + +References +---------- + .. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle, + M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General + Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph + Enumeration", PLoS One 9(5): e97896, 2014. + https://doi.org/10.1371/journal.pone.0097896 + .. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph +""" + +__all__ = ["ISMAGS"] + +import itertools +from collections import Counter, defaultdict +from functools import reduce, wraps + + +def are_all_equal(iterable): + """ + Returns ``True`` if and only if all elements in `iterable` are equal, and + ``False`` otherwise. + + Parameters + ---------- + iterable: collections.abc.Iterable + The container whose elements will be checked. + + Returns + ------- + bool + ``True`` iff all elements in `iterable` compare equal, ``False`` + otherwise. + """ + try: + shape = iterable.shape + except AttributeError: + pass + else: + if len(shape) > 1: + message = "The function does not work on multidimensional arrays." + raise NotImplementedError(message) from None + + iterator = iter(iterable) + first = next(iterator, None) + return all(item == first for item in iterator) + + +def make_partitions(items, test): + """ + Partitions items into sets based on the outcome of ``test(item1, item2)``. + Pairs of items for which `test` returns `True` end up in the same set. + + Parameters + ---------- + items : collections.abc.Iterable[collections.abc.Hashable] + Items to partition + test : collections.abc.Callable[collections.abc.Hashable, collections.abc.Hashable] + A function that will be called with 2 arguments, taken from items. + Should return `True` if those 2 items need to end up in the same + partition, and `False` otherwise. + + Returns + ------- + list[set] + A list of sets, with each set containing part of the items in `items`, + such that ``all(test(*pair) for pair in itertools.combinations(set, 2)) + == True`` + + Notes + ----- + The function `test` is assumed to be transitive: if ``test(a, b)`` and + ``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``. + """ + partitions = [] + for item in items: + for partition in partitions: + p_item = next(iter(partition)) + if test(item, p_item): + partition.add(item) + break + else: # No break + partitions.append({item}) + return partitions + + +def partition_to_color(partitions): + """ + Creates a dictionary that maps each item in each partition to the index of + that partition in `partitions`. + + Parameters + ---------- + partitions: collections.abc.Sequence[collections.abc.Iterable] + As returned by :func:`make_partitions`.
+ + Returns + ------- + dict + """ + colors = dict() + for color, keys in enumerate(partitions): + for key in keys: + colors[key] = color + return colors + + +def intersect(collection_of_sets): + """ + Given a collection of sets, returns the intersection of those sets. + + Parameters + ---------- + collection_of_sets: collections.abc.Collection[set] + A collection of sets. + + Returns + ------- + set + An intersection of all sets in `collection_of_sets`. Will have the same + type as the item initially taken from `collection_of_sets`. + """ + collection_of_sets = list(collection_of_sets) + first = collection_of_sets.pop() + out = reduce(set.intersection, collection_of_sets, set(first)) + return type(first)(out) + + +class ISMAGS: + """ + Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for + "Index-based Subgraph Matching Algorithm with General Symmetries". As the + name implies, it is symmetry aware and will only generate non-symmetric + isomorphisms. + + Notes + ----- + The implementation imposes additional conditions compared to the VF2 + algorithm on the graphs provided and the comparison functions + (:attr:`node_equality` and :attr:`edge_equality`): + + - Node keys in both graphs must be orderable as well as hashable. + - Equality must be transitive: if A is equal to B, and B is equal to C, + then A must be equal to C. + + Attributes + ---------- + graph: networkx.Graph + subgraph: networkx.Graph + node_equality: collections.abc.Callable + The function called to see if two nodes should be considered equal. + Its signature looks like this: + ``f(graph1: networkx.Graph, node1, graph2: networkx.Graph, node2) -> bool``. + `node1` is a node in `graph1`, and `node2` a node in `graph2`. + Constructed from the argument `node_match`. + edge_equality: collections.abc.Callable + The function called to see if two edges should be considered equal. + Its signature looks like this: + ``f(graph1: networkx.Graph, edge1, graph2: networkx.Graph, edge2) -> bool``. + `edge1` is an edge in `graph1`, and `edge2` an edge in `graph2`. + Constructed from the argument `edge_match`. + + References + ---------- + .. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle, + M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General + Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph + Enumeration", PLoS One 9(5): e97896, 2014. + https://doi.org/10.1371/journal.pone.0097896 + """ + + def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None): + """ + Parameters + ---------- + graph: networkx.Graph + subgraph: networkx.Graph + node_match: collections.abc.Callable or None + Function used to determine whether two nodes are equivalent. Its + signature should look like ``f(n1: dict, n2: dict) -> bool``, with + `n1` and `n2` node property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_node_match` and + friends. + If `None`, all nodes are considered equal. + edge_match: collections.abc.Callable or None + Function used to determine whether two edges are equivalent. Its + signature should look like ``f(e1: dict, e2: dict) -> bool``, with + `e1` and `e2` edge property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_edge_match` and + friends. + If `None`, all edges are considered equal. + cache: collections.abc.Mapping + A cache used for caching graph symmetries. + """ + # TODO: graph and subgraph setter methods that invalidate the caches.
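+ # Editor's sketch (hypothetical): such setters would only need to reset + # the cached ``*_partitions_`` and ``*_colors_`` attributes below to None + # so that the lazy properties recompute them on the next access.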
+ # TODO: allow for precomputed partitions and colors + self.graph = graph + self.subgraph = subgraph + self._symmetry_cache = cache + # Naming conventions are taken from the original paper. For your + # sanity: + # sg: subgraph + # g: graph + # e: edge(s) + # n: node(s) + # So: sgn means "subgraph nodes". + self._sgn_partitions_ = None + self._sge_partitions_ = None + + self._sgn_colors_ = None + self._sge_colors_ = None + + self._gn_partitions_ = None + self._ge_partitions_ = None + + self._gn_colors_ = None + self._ge_colors_ = None + + self._node_compat_ = None + self._edge_compat_ = None + + if node_match is None: + self.node_equality = self._node_match_maker(lambda n1, n2: True) + self._sgn_partitions_ = [set(self.subgraph.nodes)] + self._gn_partitions_ = [set(self.graph.nodes)] + self._node_compat_ = {0: 0} + else: + self.node_equality = self._node_match_maker(node_match) + if edge_match is None: + self.edge_equality = self._edge_match_maker(lambda e1, e2: True) + self._sge_partitions_ = [set(self.subgraph.edges)] + self._ge_partitions_ = [set(self.graph.edges)] + self._edge_compat_ = {0: 0} + else: + self.edge_equality = self._edge_match_maker(edge_match) + + @property + def _sgn_partitions(self): + if self._sgn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.subgraph, node1, self.subgraph, node2) + + self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch) + return self._sgn_partitions_ + + @property + def _sge_partitions(self): + if self._sge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2) + + self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch) + return self._sge_partitions_ + + @property + def _gn_partitions(self): + if self._gn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.graph, node1, self.graph, node2) + + self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch) + return self._gn_partitions_ + + @property + def _ge_partitions(self): + if self._ge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.graph, edge1, self.graph, edge2) + + self._ge_partitions_ = make_partitions(self.graph.edges, edgematch) + return self._ge_partitions_ + + @property + def _sgn_colors(self): + if self._sgn_colors_ is None: + self._sgn_colors_ = partition_to_color(self._sgn_partitions) + return self._sgn_colors_ + + @property + def _sge_colors(self): + if self._sge_colors_ is None: + self._sge_colors_ = partition_to_color(self._sge_partitions) + return self._sge_colors_ + + @property + def _gn_colors(self): + if self._gn_colors_ is None: + self._gn_colors_ = partition_to_color(self._gn_partitions) + return self._gn_colors_ + + @property + def _ge_colors(self): + if self._ge_colors_ is None: + self._ge_colors_ = partition_to_color(self._ge_partitions) + return self._ge_colors_ + + @property + def _node_compatibility(self): + if self._node_compat_ is not None: + return self._node_compat_ + self._node_compat_ = {} + for sgn_part_color, gn_part_color in itertools.product( + range(len(self._sgn_partitions)), range(len(self._gn_partitions)) + ): + sgn = next(iter(self._sgn_partitions[sgn_part_color])) + gn = next(iter(self._gn_partitions[gn_part_color])) + if self.node_equality(self.subgraph, sgn, self.graph, gn): + self._node_compat_[sgn_part_color] = gn_part_color + return self._node_compat_ + + @property + def _edge_compatibility(self): + if self._edge_compat_ is not 
None: + return self._edge_compat_ + self._edge_compat_ = {} + for sge_part_color, ge_part_color in itertools.product( + range(len(self._sge_partitions)), range(len(self._ge_partitions)) + ): + sge = next(iter(self._sge_partitions[sge_part_color])) + ge = next(iter(self._ge_partitions[ge_part_color])) + if self.edge_equality(self.subgraph, sge, self.graph, ge): + self._edge_compat_[sge_part_color] = ge_part_color + return self._edge_compat_ + + @staticmethod + def _node_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, node1, graph2, node2): + return cmp(graph1.nodes[node1], graph2.nodes[node2]) + + return comparer + + @staticmethod + def _edge_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, edge1, graph2, edge2): + return cmp(graph1.edges[edge1], graph2.edges[edge2]) + + return comparer + + def find_isomorphisms(self, symmetry=True): + """Find all subgraph isomorphisms between subgraph and graph + + Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + isomorphisms may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + elif len(self.graph) < len(self.subgraph): + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + la_candidates = self._get_lookahead_candidates() + for sgn in self.subgraph: + extra_candidates = la_candidates[sgn] + if extra_candidates: + candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)} + + if any(candidates.values()): + start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len)) + candidates[start_sgn] = (intersect(candidates[start_sgn]),) + yield from self._map_nodes(start_sgn, candidates, constraints) + else: + return + + @staticmethod + def _find_neighbor_color_count(graph, node, node_color, edge_color): + """ + For `node` in `graph`, count the number of edges of a specific color + it has to nodes of a specific color. + """ + counts = Counter() + neighbors = graph[node] + for neighbor in neighbors: + n_color = node_color[neighbor] + if (node, neighbor) in edge_color: + e_color = edge_color[node, neighbor] + else: + e_color = edge_color[neighbor, node] + counts[e_color, n_color] += 1 + return counts + + def _get_lookahead_candidates(self): + """ + Returns a mapping of {subgraph node: collection of graph nodes} for + which the graph nodes are feasible candidates for the subgraph node, as + determined by looking ahead one edge. 
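+ + In other words (editorial paraphrase of the check below): a graph node + survives as a candidate for a subgraph node only if, for every compatible + (edge colour, node colour) pair, it has at least as many such neighbours + as the subgraph node does.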
+ """ + g_counts = {} + for gn in self.graph: + g_counts[gn] = self._find_neighbor_color_count( + self.graph, gn, self._gn_colors, self._ge_colors + ) + candidates = defaultdict(set) + for sgn in self.subgraph: + sg_count = self._find_neighbor_color_count( + self.subgraph, sgn, self._sgn_colors, self._sge_colors + ) + new_sg_count = Counter() + for (sge_color, sgn_color), count in sg_count.items(): + try: + ge_color = self._edge_compatibility[sge_color] + gn_color = self._node_compatibility[sgn_color] + except KeyError: + pass + else: + new_sg_count[ge_color, gn_color] = count + + for gn, g_count in g_counts.items(): + if all(new_sg_count[x] <= g_count[x] for x in new_sg_count): + # Valid candidate + candidates[sgn].add(gn) + return candidates + + def largest_common_subgraph(self, symmetry=True): + """ + Find the largest common induced subgraphs between :attr:`subgraph` and + :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + largest common subgraphs may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + + if any(candidates.values()): + yield from self._largest_common_subgraph(candidates, constraints) + else: + return + + def analyze_symmetry(self, graph, node_partitions, edge_colors): + """ + Find a minimal set of permutations and corresponding co-sets that + describe the symmetry of `graph`, given the node and edge equalities + given by `node_partitions` and `edge_colors`, respectively. + + Parameters + ---------- + graph : networkx.Graph + The graph whose symmetry should be analyzed. + node_partitions : list of sets + A list of sets containining node keys. Node keys in the same set + are considered equivalent. Every node key in `graph` should be in + exactly one of the sets. If all nodes are equivalent, this should + be ``[set(graph.nodes)]``. + edge_colors : dict mapping edges to their colors + A dict mapping every edge in `graph` to its corresponding color. + Edges with the same color are considered equivalent. If all edges + are equivalent, this should be ``{e: 0 for e in graph.edges}``. + + + Returns + ------- + set[frozenset] + The found permutations. This is a set of frozensets of pairs of node + keys which can be exchanged without changing :attr:`subgraph`. + dict[collections.abc.Hashable, set[collections.abc.Hashable]] + The found co-sets. The co-sets is a dictionary of + ``{node key: set of node keys}``. + Every key-value pair describes which ``values`` can be interchanged + without changing nodes less than ``key``. 
+ """ + if self._symmetry_cache is not None: + key = hash( + ( + tuple(graph.nodes), + tuple(graph.edges), + tuple(map(tuple, node_partitions)), + tuple(edge_colors.items()), + ) + ) + if key in self._symmetry_cache: + return self._symmetry_cache[key] + node_partitions = list( + self._refine_node_partitions(graph, node_partitions, edge_colors) + ) + assert len(node_partitions) == 1 + node_partitions = node_partitions[0] + permutations, cosets = self._process_ordered_pair_partitions( + graph, node_partitions, node_partitions, edge_colors + ) + if self._symmetry_cache is not None: + self._symmetry_cache[key] = permutations, cosets + return permutations, cosets + + def is_isomorphic(self, symmetry=False): + """ + Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and + False otherwise. + + Returns + ------- + bool + """ + return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic( + symmetry + ) + + def subgraph_is_isomorphic(self, symmetry=False): + """ + Returns True if a subgraph of :attr:`graph` is isomorphic to + :attr:`subgraph` and False otherwise. + + Returns + ------- + bool + """ + # symmetry=False, since we only need to know whether there is any + # example; figuring out all symmetry elements probably costs more time + # than it gains. + isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None) + return isom is not None + + def isomorphisms_iter(self, symmetry=True): + """ + Does the same as :meth:`find_isomorphisms` if :attr:`graph` and + :attr:`subgraph` have the same number of nodes. + """ + if len(self.graph) == len(self.subgraph): + yield from self.subgraph_isomorphisms_iter(symmetry=symmetry) + + def subgraph_isomorphisms_iter(self, symmetry=True): + """Alternative name for :meth:`find_isomorphisms`.""" + return self.find_isomorphisms(symmetry) + + def _find_nodecolor_candidates(self): + """ + Per node in subgraph find all nodes in graph that have the same color. + """ + candidates = defaultdict(set) + for sgn in self.subgraph.nodes: + sgn_color = self._sgn_colors[sgn] + if sgn_color in self._node_compatibility: + gn_color = self._node_compatibility[sgn_color] + candidates[sgn].add(frozenset(self._gn_partitions[gn_color])) + else: + candidates[sgn].add(frozenset()) + candidates = dict(candidates) + for sgn, options in candidates.items(): + candidates[sgn] = frozenset(options) + return candidates + + @staticmethod + def _make_constraints(cosets): + """ + Turn cosets into constraints. + """ + constraints = [] + for node_i, node_ts in cosets.items(): + for node_t in node_ts: + if node_i != node_t: + # Node i must be smaller than node t. + constraints.append((node_i, node_t)) + return constraints + + @staticmethod + def _find_node_edge_color(graph, node_colors, edge_colors): + """ + For every node in graph, come up with a color that combines 1) the + color of the node, and 2) the number of edges of a color to each type + of node. 
+ """ + counts = defaultdict(lambda: defaultdict(int)) + for node1, node2 in graph.edges: + if (node1, node2) in edge_colors: + # FIXME directed graphs + ecolor = edge_colors[node1, node2] + else: + ecolor = edge_colors[node2, node1] + # Count per node how many edges it has of what color to nodes of + # what color + counts[node1][ecolor, node_colors[node2]] += 1 + counts[node2][ecolor, node_colors[node1]] += 1 + + node_edge_colors = dict() + for node in graph.nodes: + node_edge_colors[node] = node_colors[node], set(counts[node].items()) + + return node_edge_colors + + @staticmethod + def _get_permutations_by_length(items): + """ + Get all permutations of items, but only permute items with the same + length. + + >>> found = list(ISMAGS._get_permutations_by_length([[1], [2], [3, 4], [4, 5]])) + >>> answer = [ + ... (([1], [2]), ([3, 4], [4, 5])), + ... (([1], [2]), ([4, 5], [3, 4])), + ... (([2], [1]), ([3, 4], [4, 5])), + ... (([2], [1]), ([4, 5], [3, 4])), + ... ] + >>> found == answer + True + """ + by_len = defaultdict(list) + for item in items: + by_len[len(item)].append(item) + + yield from itertools.product( + *(itertools.permutations(by_len[l]) for l in sorted(by_len)) + ) + + @classmethod + def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False): + """ + Given a partition of nodes in graph, make the partitions smaller such + that all nodes in a partition have 1) the same color, and 2) the same + number of edges to specific other partitions. + """ + + def equal_color(node1, node2): + return node_edge_colors[node1] == node_edge_colors[node2] + + node_partitions = list(node_partitions) + node_colors = partition_to_color(node_partitions) + node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors) + if all( + are_all_equal(node_edge_colors[node] for node in partition) + for partition in node_partitions + ): + yield node_partitions + return + + new_partitions = [] + output = [new_partitions] + for partition in node_partitions: + if not are_all_equal(node_edge_colors[node] for node in partition): + refined = make_partitions(partition, equal_color) + if ( + branch + and len(refined) != 1 + and len({len(r) for r in refined}) != len([len(r) for r in refined]) + ): + # This is where it breaks. There are multiple new cells + # in refined with the same length, and their order + # matters. + # So option 1) Hit it with a big hammer and simply make all + # orderings. + permutations = cls._get_permutations_by_length(refined) + new_output = [] + for n_p in output: + for permutation in permutations: + new_output.append(n_p + list(permutation[0])) + output = new_output + else: + for n_p in output: + n_p.extend(sorted(refined, key=len)) + else: + for n_p in output: + n_p.append(partition) + for n_p in output: + yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch) + + def _edges_of_same_color(self, sgn1, sgn2): + """ + Returns all edges in :attr:`graph` that have the same colour as the + edge between sgn1 and sgn2 in :attr:`subgraph`. + """ + if (sgn1, sgn2) in self._sge_colors: + # FIXME directed graphs + sge_color = self._sge_colors[sgn1, sgn2] + else: + sge_color = self._sge_colors[sgn2, sgn1] + if sge_color in self._edge_compatibility: + ge_color = self._edge_compatibility[sge_color] + g_edges = self._ge_partitions[ge_color] + else: + g_edges = [] + return g_edges + + def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None): + """ + Find all subgraph isomorphisms honoring constraints. 
+ """ + if mapping is None: + mapping = {} + else: + mapping = mapping.copy() + if to_be_mapped is None: + to_be_mapped = set(self.subgraph.nodes) + + # Note, we modify candidates here. Doesn't seem to affect results, but + # remember this. + # candidates = candidates.copy() + sgn_candidates = intersect(candidates[sgn]) + candidates[sgn] = frozenset([sgn_candidates]) + for gn in sgn_candidates: + # We're going to try to map sgn to gn. + if gn in mapping.values() or sgn not in to_be_mapped: + # gn is already mapped to something + continue # pragma: no cover + + # REDUCTION and COMBINATION + mapping[sgn] = gn + # BASECASE + if to_be_mapped == set(mapping.keys()): + yield {v: k for k, v in mapping.items()} + continue + left_to_map = to_be_mapped - set(mapping.keys()) + + new_candidates = candidates.copy() + sgn_neighbours = set(self.subgraph[sgn]) + not_gn_neighbours = set(self.graph.nodes) - set(self.graph[gn]) + for sgn2 in left_to_map: + if sgn2 not in sgn_neighbours: + gn2_options = not_gn_neighbours + else: + # Get all edges to gn of the right color: + g_edges = self._edges_of_same_color(sgn, sgn2) + # FIXME directed graphs + # And all nodes involved in those which are connected to gn + gn2_options = {n for e in g_edges for n in e if gn in e} + # Node color compatibility should be taken care of by the + # initial candidate lists made by find_subgraphs + + # Add gn2_options to the right collection. Since new_candidates + # is a dict of frozensets of frozensets of node indices it's + # a bit clunky. We can't do .add, and + also doesn't work. We + # could do |, but I deem union to be clearer. + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + if (sgn, sgn2) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 > gn} + elif (sgn2, sgn) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 < gn} + else: + continue # pragma: no cover + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + # The next node is the one that is unmapped and has fewest + # candidates + # Pylint disables because it's a one-shot function. + next_sgn = min( + left_to_map, key=lambda n: min(new_candidates[n], key=len) + ) # pylint: disable=cell-var-from-loop + yield from self._map_nodes( + next_sgn, + new_candidates, + constraints, + mapping=mapping, + to_be_mapped=to_be_mapped, + ) + # Unmap sgn-gn. Strictly not necessary since it'd get overwritten + # when making a new mapping for sgn. + # del mapping[sgn] + + def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None): + """ + Find all largest common subgraphs honoring constraints. + """ + if to_be_mapped is None: + to_be_mapped = {frozenset(self.subgraph.nodes)} + + # The LCS problem is basically a repeated subgraph isomorphism problem + # with smaller and smaller subgraphs. We store the nodes that are + # "part of" the subgraph in to_be_mapped, and we make it a little + # smaller every iteration. + + # pylint disable becuase it's guarded against by default value + current_size = len( + next(iter(to_be_mapped), []) + ) # pylint: disable=stop-iteration-return + + found_iso = False + if current_size <= len(self.graph): + # There's no point in trying to find isomorphisms of + # graph >= subgraph if subgraph has more nodes than graph. + + # Try the isomorphism first with the nodes with lowest ID. So sort + # them. Those are more likely to be part of the final + # correspondence. This makes finding the first answer(s) faster. In + # theory. 
+ for nodes in sorted(to_be_mapped, key=sorted): + # Find the isomorphism between subgraph[to_be_mapped] <= graph + next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len)) + isomorphs = self._map_nodes( + next_sgn, candidates, constraints, to_be_mapped=nodes + ) + + # This is effectively `yield from isomorphs`, except that we look + # whether an item was yielded. + try: + item = next(isomorphs) + except StopIteration: + pass + else: + yield item + yield from isomorphs + found_iso = True + + # BASECASE + if found_iso or current_size == 1: + # Shrinking has no point because either 1) we end up with a smaller + # common subgraph (and we want the largest), or 2) there'll be no + # more subgraph. + return + + left_to_be_mapped = set() + for nodes in to_be_mapped: + for sgn in nodes: + # We're going to remove sgn from to_be_mapped, but subject to + # symmetry constraints. We know that for every constraint we + # have those subgraph nodes are equal. So whenever we would + # remove the lower part of a constraint, remove the higher + # instead. This is all dealth with by _remove_node. And because + # left_to_be_mapped is a set, we don't do double work. + + # And finally, make the subgraph one node smaller. + # REDUCTION + new_nodes = self._remove_node(sgn, nodes, constraints) + left_to_be_mapped.add(new_nodes) + # COMBINATION + yield from self._largest_common_subgraph( + candidates, constraints, to_be_mapped=left_to_be_mapped + ) + + @staticmethod + def _remove_node(node, nodes, constraints): + """ + Returns a new set where node has been removed from nodes, subject to + symmetry constraints. We know, that for every constraint we have + those subgraph nodes are equal. So whenever we would remove the + lower part of a constraint, remove the higher instead. + """ + while True: + for low, high in constraints: + if low == node and high in nodes: + node = high + break + else: # no break, couldn't find node in constraints + break + return frozenset(nodes - {node}) + + @staticmethod + def _find_permutations(top_partitions, bottom_partitions): + """ + Return the pairs of top/bottom partitions where the partitions are + different. Ensures that all partitions in both top and bottom + partitions have size 1. + """ + # Find permutations + permutations = set() + for top, bot in zip(top_partitions, bottom_partitions): + # top and bot have only one element + if len(top) != 1 or len(bot) != 1: + raise IndexError( + "Not all nodes are coupled. This is" + f" impossible: {top_partitions}, {bottom_partitions}" + ) + if top != bot: + permutations.add(frozenset((next(iter(top)), next(iter(bot))))) + return permutations + + @staticmethod + def _update_orbits(orbits, permutations): + """ + Update orbits based on permutations. Orbits is modified in place. + For every pair of items in permutations their respective orbits are + merged. + """ + for permutation in permutations: + node, node2 = permutation + # Find the orbits that contain node and node2, and replace the + # orbit containing node with the union + first = second = None + for idx, orbit in enumerate(orbits): + if first is not None and second is not None: + break + if node in orbit: + first = idx + if node2 in orbit: + second = idx + if first != second: + orbits[first].update(orbits[second]) + del orbits[second] + + def _couple_nodes( + self, + top_partitions, + bottom_partitions, + pair_idx, + t_node, + b_node, + graph, + edge_colors, + ): + """ + Generate new partitions from top and bottom_partitions where t_node is + coupled to b_node. 
pair_idx is the index of the partitions where t_ and + b_node can be found. + """ + t_partition = top_partitions[pair_idx] + b_partition = bottom_partitions[pair_idx] + assert t_node in t_partition and b_node in b_partition + # Couple node to node2. This means they get their own partition + new_top_partitions = [top.copy() for top in top_partitions] + new_bottom_partitions = [bot.copy() for bot in bottom_partitions] + new_t_groups = {t_node}, t_partition - {t_node} + new_b_groups = {b_node}, b_partition - {b_node} + # Replace the old partitions with the coupled ones + del new_top_partitions[pair_idx] + del new_bottom_partitions[pair_idx] + new_top_partitions[pair_idx:pair_idx] = new_t_groups + new_bottom_partitions[pair_idx:pair_idx] = new_b_groups + + new_top_partitions = self._refine_node_partitions( + graph, new_top_partitions, edge_colors + ) + new_bottom_partitions = self._refine_node_partitions( + graph, new_bottom_partitions, edge_colors, branch=True + ) + new_top_partitions = list(new_top_partitions) + assert len(new_top_partitions) == 1 + new_top_partitions = new_top_partitions[0] + for bot in new_bottom_partitions: + yield list(new_top_partitions), bot + + def _process_ordered_pair_partitions( + self, + graph, + top_partitions, + bottom_partitions, + edge_colors, + orbits=None, + cosets=None, + ): + """ + Processes ordered pair partitions as per the reference paper. Finds and + returns all permutations and cosets that leave the graph unchanged. + """ + if orbits is None: + orbits = [{node} for node in graph.nodes] + else: + # Note that we don't copy orbits when we are given one. This means + # we leak information between the recursive branches. This is + # intentional! + orbits = orbits + if cosets is None: + cosets = {} + else: + cosets = cosets.copy() + + assert all( + len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions) + ) + + # BASECASE + if all(len(top) == 1 for top in top_partitions): + # All nodes are mapped + permutations = self._find_permutations(top_partitions, bottom_partitions) + self._update_orbits(orbits, permutations) + if permutations: + return [permutations], cosets + else: + return [], cosets + + permutations = [] + unmapped_nodes = { + (node, idx) + for idx, t_partition in enumerate(top_partitions) + for node in t_partition + if len(t_partition) > 1 + } + node, pair_idx = min(unmapped_nodes) + b_partition = bottom_partitions[pair_idx] + + for node2 in sorted(b_partition): + if len(b_partition) == 1: + # Can never result in symmetry + continue + if node != node2 and any( + node in orbit and node2 in orbit for orbit in orbits + ): + # Orbit prune branch + continue + # REDUCTION + # Couple node to node2 + partitions = self._couple_nodes( + top_partitions, + bottom_partitions, + pair_idx, + node, + node2, + graph, + edge_colors, + ) + for opp in partitions: + new_top_partitions, new_bottom_partitions = opp + + new_perms, new_cosets = self._process_ordered_pair_partitions( + graph, + new_top_partitions, + new_bottom_partitions, + edge_colors, + orbits, + cosets, + ) + # COMBINATION + permutations += new_perms + cosets.update(new_cosets) + + mapped = { + k + for top, bottom in zip(top_partitions, bottom_partitions) + for k in top + if len(top) == 1 and top == bottom + } + ks = {k for k in graph.nodes if k < node} + # Have all nodes with ID < node been mapped? 
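+        # If so, the orbit of `node` found below is stored as its coset.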
+ find_coset = ks <= mapped and node not in cosets + if find_coset: + # Find the orbit that contains node + for orbit in orbits: + if node in orbit: + cosets[node] = orbit.copy() + return permutations, cosets diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorph.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorph.py new file mode 100644 index 0000000..1b9a727 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorph.py @@ -0,0 +1,233 @@ +""" +Graph isomorphism functions. +""" +import networkx as nx +from networkx.exception import NetworkXError + +__all__ = [ + "could_be_isomorphic", + "fast_could_be_isomorphic", + "faster_could_be_isomorphic", + "is_isomorphic", +] + + +def could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree, triangle, and number of cliques sequences. + """ + + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = G1.degree() + t1 = nx.triangles(G1) + c1 = nx.number_of_cliques(G1) + props1 = [[d, t1[v], c1[v]] for v, d in d1] + props1.sort() + + d2 = G2.degree() + t2 = nx.triangles(G2) + c2 = nx.number_of_cliques(G2) + props2 = [[d, t2[v], c2[v]] for v, d in d2] + props2.sort() + + if props1 != props2: + return False + + # OK... + return True + + +graph_could_be_isomorphic = could_be_isomorphic + + +def fast_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree and triangle sequences. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = G1.degree() + t1 = nx.triangles(G1) + props1 = [[d, t1[v]] for v, d in d1] + props1.sort() + + d2 = G2.degree() + t2 = nx.triangles(G2) + props2 = [[d, t2[v]] for v, d in d2] + props2.sort() + + if props1 != props2: + return False + + # OK... + return True + + +fast_graph_could_be_isomorphic = fast_could_be_isomorphic + + +def faster_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree sequences. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in G1.degree()) + d2 = sorted(d for n, d in G2.degree()) + + if d1 != d2: + return False + + # OK... + return True + + +faster_graph_could_be_isomorphic = faster_could_be_isomorphic + + +def is_isomorphic(G1, G2, node_match=None, edge_match=None): + """Returns True if the graphs G1 and G2 are isomorphic and False otherwise. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 should + be considered equal during the isomorphism test. + If node_match is not specified then node attributes are not considered. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). 
+ + That is, the function will receive the node attribute dictionaries + for n1 and n2 as inputs. + + edge_match : callable + A function that returns True if the edge attribute dictionary + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during the isomorphism test. If edge_match is + not specified then edge attributes are not considered. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. + + Notes + ----- + Uses the vf2 algorithm [1]_. + + Examples + -------- + >>> import networkx.algorithms.isomorphism as iso + + For digraphs G1 and G2, using 'weight' edge attribute (default: 1) + + >>> G1 = nx.DiGraph() + >>> G2 = nx.DiGraph() + >>> nx.add_path(G1, [1, 2, 3, 4], weight=1) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=2) + >>> em = iso.numerical_edge_match("weight", 1) + >>> nx.is_isomorphic(G1, G2) # no weights considered + True + >>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights + False + + For multidigraphs G1 and G2, using 'fill' node attribute (default: '') + + >>> G1 = nx.MultiDiGraph() + >>> G2 = nx.MultiDiGraph() + >>> G1.add_nodes_from([1, 2, 3], fill="red") + >>> G2.add_nodes_from([10, 20, 30, 40], fill="red") + >>> nx.add_path(G1, [1, 2, 3, 4], weight=3, linewidth=2.5) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=3) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, node_match=nm) + True + + For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7) + + >>> G1.add_edge(1, 2, weight=7) + 1 + >>> G2.add_edge(10, 20) + 1 + >>> em = iso.numerical_multiedge_match("weight", 7, rtol=1e-6) + >>> nx.is_isomorphic(G1, G2, edge_match=em) + True + + For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes + with default values 7 and 2.5. Also using 'fill' node attribute with + default value 'red'. + + >>> em = iso.numerical_multiedge_match(["weight", "linewidth"], [7, 2.5]) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm) + True + + See Also + -------- + numerical_node_match, numerical_edge_match, numerical_multiedge_match + categorical_node_match, categorical_edge_match, categorical_multiedge_match + + References + ---------- + .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, + "An Improved Algorithm for Matching Large Graphs", + 3rd IAPR-TC15 Workshop on Graph-based Representations in + Pattern Recognition, Cuen, pp. 149-159, 2001. + https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.5342 + """ + if G1.is_directed() and G2.is_directed(): + GM = nx.algorithms.isomorphism.DiGraphMatcher + elif (not G1.is_directed()) and (not G2.is_directed()): + GM = nx.algorithms.isomorphism.GraphMatcher + else: + raise NetworkXError("Graphs G1 and G2 are not of the same type.") + + gm = GM(G1, G2, node_match=node_match, edge_match=edge_match) + + return gm.is_isomorphic() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py new file mode 100644 index 0000000..bcd478e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py @@ -0,0 +1,1061 @@ +""" +************* +VF2 Algorithm +************* + +An implementation of VF2 algorithm for graph isomorphism testing. 
+ +The simplest interface to use this module is to call networkx.is_isomorphic(). + +Introduction +------------ + +The GraphMatcher and DiGraphMatcher are responsible for matching +graphs or directed graphs in a predetermined manner. This +usually means a check for an isomorphism, though other checks +are also possible. For example, a subgraph of one graph +can be checked for isomorphism to a second graph. + +Matching is done via syntactic feasibility. It is also possible +to check for semantic feasibility. Feasibility, then, is defined +as the logical AND of the two functions. + +To include a semantic check, the (Di)GraphMatcher class should be +subclassed, and the semantic_feasibility() function should be +redefined. By default, the semantic feasibility function always +returns True. The effect of this is that semantics are not +considered in the matching of G1 and G2. + +Examples +-------- + +Suppose G1 and G2 are isomorphic graphs. Verification is as follows: + +>>> from networkx.algorithms import isomorphism +>>> G1 = nx.path_graph(4) +>>> G2 = nx.path_graph(4) +>>> GM = isomorphism.GraphMatcher(G1, G2) +>>> GM.is_isomorphic() +True + +GM.mapping stores the isomorphism mapping from G1 to G2. + +>>> GM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + +Suppose G1 and G2 are isomorphic directed graphs. +Verification is as follows: + +>>> G1 = nx.path_graph(4, create_using=nx.DiGraph()) +>>> G2 = nx.path_graph(4, create_using=nx.DiGraph()) +>>> DiGM = isomorphism.DiGraphMatcher(G1, G2) +>>> DiGM.is_isomorphic() +True + +DiGM.mapping stores the isomorphism mapping from G1 to G2. + +>>> DiGM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + + +Subgraph Isomorphism +-------------------- +Graph theory literature can be ambiguous about the meaning of the +above statement, and we seek to clarify it now. + +In the VF2 literature, a mapping M is said to be a graph-subgraph +isomorphism iff M is an isomorphism between G2 and a subgraph of G1. +Thus, to say that G1 and G2 are graph-subgraph isomorphic is to say +that a subgraph of G1 is isomorphic to G2. + +Other literature uses the phrase 'subgraph isomorphic' as in 'G1 does +not have a subgraph isomorphic to G2'. Another use is as an in adverb +for isomorphic. Thus, to say that G1 and G2 are subgraph isomorphic +is to say that a subgraph of G1 is isomorphic to G2. + +Finally, the term 'subgraph' can have multiple meanings. In this +context, 'subgraph' always means a 'node-induced subgraph'. Edge-induced +subgraph isomorphisms are not directly supported, but one should be +able to perform the check by making use of nx.line_graph(). For +subgraphs which are not induced, the term 'monomorphism' is preferred +over 'isomorphism'. + +Let G=(N,E) be a graph with a set of nodes N and set of edges E. + +If G'=(N',E') is a subgraph, then: + N' is a subset of N + E' is a subset of E + +If G'=(N',E') is a node-induced subgraph, then: + N' is a subset of N + E' is the subset of edges in E relating nodes in N' + +If G'=(N',E') is an edge-induced subgraph, then: + N' is the subset of nodes in N related by edges in E' + E' is a subset of E + +If G'=(N',E') is a monomorphism, then: + N' is a subset of N + E' is a subset of the set of edges in E relating nodes in N' + +Note that if G' is a node-induced subgraph of G, then it is always a +subgraph monomorphism of G, but the opposite is not always true, as a +monomorphism can have fewer edges. + +References +---------- +[1] Luigi P. 
Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento, + "A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs", + IEEE Transactions on Pattern Analysis and Machine Intelligence, + vol. 26, no. 10, pp. 1367-1372, Oct., 2004. + http://ieeexplore.ieee.org/iel5/34/29305/01323804.pdf + +[2] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, "An Improved + Algorithm for Matching Large Graphs", 3rd IAPR-TC15 Workshop + on Graph-based Representations in Pattern Recognition, Cuen, + pp. 149-159, 2001. + https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.5342 + +See Also +-------- +syntactic_feasibility(), semantic_feasibility() + +Notes +----- + +The implementation handles both directed and undirected graphs as well +as multigraphs. + +In general, the subgraph isomorphism problem is NP-complete whereas the +graph isomorphism problem is most likely not NP-complete (although no +polynomial-time algorithm is known to exist). + +""" + +# This work was originally coded by Christopher Ellison +# as part of the Computational Mechanics Python (CMPy) project. +# James P. Crutchfield, principal investigator. +# Complexity Sciences Center and Physics Department, UC Davis. + +import sys + +__all__ = ["GraphMatcher", "DiGraphMatcher"] + + +class GraphMatcher: + """Implementation of VF2 algorithm for matching undirected graphs. + + Suitable for Graph and MultiGraph instances. + """ + + def __init__(self, G1, G2): + """Initialize GraphMatcher. + + Parameters + ---------- + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism. + + Examples + -------- + To create a GraphMatcher which checks for syntactic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> G1 = nx.path_graph(4) + >>> G2 = nx.path_graph(4) + >>> GM = isomorphism.GraphMatcher(G1, G2) + """ + self.G1 = G1 + self.G2 = G2 + self.G1_nodes = set(G1.nodes()) + self.G2_nodes = set(G2.nodes()) + self.G2_node_order = {n: i for i, n in enumerate(G2)} + + # Set recursion limit. + self.old_recursion_limit = sys.getrecursionlimit() + expected_max_recursion_level = len(self.G2) + if self.old_recursion_limit < 1.5 * expected_max_recursion_level: + # Give some breathing room. + sys.setrecursionlimit(int(1.5 * expected_max_recursion_level)) + + # Declare that we will be searching for a graph-graph isomorphism. + self.test = "graph" + + # Initialize state + self.initialize() + + def reset_recursion_limit(self): + """Restores the recursion limit.""" + # TODO: + # Currently, we use recursion and set the recursion level higher. + # It would be nice to restore the level, but because the + # (Di)GraphMatcher classes make use of cyclic references, garbage + # collection will never happen when we define __del__() to + # restore the recursion level. The result is a memory leak. + # So for now, we do not automatically restore the recursion level, + # and instead provide a method to do this manually. Eventually, + # we should turn this into a non-recursive implementation. + sys.setrecursionlimit(self.old_recursion_limit) + + def candidate_pairs_iter(self): + """Iterator over candidate pairs of nodes in G1 and G2.""" + + # All computations are done using the current state! + + G1_nodes = self.G1_nodes + G2_nodes = self.G2_nodes + min_key = self.G2_node_order.__getitem__ + + # First we compute the inout-terminal sets. 
+ T1_inout = [node for node in self.inout_1 if node not in self.core_1] + T2_inout = [node for node in self.inout_2 if node not in self.core_2] + + # If T1_inout and T2_inout are both nonempty. + # P(s) = T1_inout x {min T2_inout} + if T1_inout and T2_inout: + node_2 = min(T2_inout, key=min_key) + for node_1 in T1_inout: + yield node_1, node_2 + + else: + # If T1_inout and T2_inout were both empty.... + # P(s) = (N_1 - M_1) x {min (N_2 - M_2)} + # if not (T1_inout or T2_inout): # as suggested by [2], incorrect + if 1: # as inferred from [1], correct + # First we determine the candidate node for G2 + other_node = min(G2_nodes - set(self.core_2), key=min_key) + for node in self.G1: + if node not in self.core_1: + yield node, other_node + + # For all other cases, we don't have any candidate pairs. + + def initialize(self): + """Reinitializes the state of the algorithm. + + This method should be redefined if using something other than GMState. + If only subclassing GraphMatcher, a redefinition is not necessary. + + """ + + # core_1[n] contains the index of the node paired with n, which is m, + # provided n is in the mapping. + # core_2[m] contains the index of the node paired with m, which is n, + # provided m is in the mapping. + self.core_1 = {} + self.core_2 = {} + + # See the paper for definitions of M_x and T_x^{y} + + # inout_1[n] is non-zero if n is in M_1 or in T_1^{inout} + # inout_2[m] is non-zero if m is in M_2 or in T_2^{inout} + # + # The value stored is the depth of the SSR tree when the node became + # part of the corresponding set. + self.inout_1 = {} + self.inout_2 = {} + # Practically, these sets simply store the nodes in the subgraph. + + self.state = GMState(self) + + # Provide a convenient way to access the isomorphism mapping. + self.mapping = self.core_1.copy() + + def is_isomorphic(self): + """Returns True if G1 and G2 are isomorphic graphs.""" + + # Let's do two very quick checks! + # QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)? + # For now, I just copy the code. + + # Check global properties + if self.G1.order() != self.G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in self.G1.degree()) + d2 = sorted(d for n, d in self.G2.degree()) + if d1 != d2: + return False + + try: + x = next(self.isomorphisms_iter()) + return True + except StopIteration: + return False + + def isomorphisms_iter(self): + """Generator over isomorphisms between G1 and G2.""" + # Declare that we are looking for a graph-graph isomorphism. + self.test = "graph" + self.initialize() + yield from self.match() + + def match(self): + """Extends the isomorphism mapping. + + This function is called recursively to determine if a complete + isomorphism can be found between G1 and G2. It cleans up the class + variables after each recursive call. If an isomorphism is found, + we yield the mapping. + + """ + if len(self.core_1) == len(self.G2): + # Save the final mapping, otherwise garbage collection deletes it. + self.mapping = self.core_1.copy() + # The mapping is complete. + yield self.mapping + else: + for G1_node, G2_node in self.candidate_pairs_iter(): + if self.syntactic_feasibility(G1_node, G2_node): + if self.semantic_feasibility(G1_node, G2_node): + # Recursive call, adding the feasible state. 
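+                        # Creating the new state mutates the matcher's core
+                        # and terminal sets; newstate.restore() below removes
+                        # exactly the entries added at this depth.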
+                            newstate = self.state.__class__(self, G1_node, G2_node)
+                            yield from self.match()
+
+                            # restore data structures
+                            newstate.restore()
+
+    def semantic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is semantically feasible.
+
+        The semantic feasibility function should return True if it is
+        acceptable to add the candidate pair (G1_node, G2_node) to the current
+        partial isomorphism mapping. The logic should focus on semantic
+        information contained in the edge data or a formalized node class.
+
+        By acceptable, we mean that the subsequent mapping can still become a
+        complete isomorphism mapping. Thus, if adding the candidate pair
+        definitely makes it so that the subsequent mapping cannot become a
+        complete isomorphism mapping, then this function must return False.
+
+        The default semantic feasibility function always returns True. The
+        effect is that semantics are not considered in the matching of G1
+        and G2.
+
+        The semantic checks might differ based on what type of test is
+        being performed. A keyword description of the test is stored in
+        self.test. Here is a quick description of the currently implemented
+        tests::
+
+            test='graph'
+            Indicates that the graph matcher is looking for a graph-graph
+            isomorphism.
+
+            test='subgraph'
+            Indicates that the graph matcher is looking for a subgraph-graph
+            isomorphism such that a subgraph of G1 is isomorphic to G2.
+
+            test='mono'
+            Indicates that the graph matcher is looking for a subgraph-graph
+            monomorphism such that a subgraph of G1 is monomorphic to G2.
+
+        Any subclass which redefines semantic_feasibility() must maintain
+        the above form to keep the match() method functional. Implementations
+        should consider multigraphs.
+        """
+        return True
+
+    def subgraph_is_isomorphic(self):
+        """Returns True if a subgraph of G1 is isomorphic to G2."""
+        try:
+            x = next(self.subgraph_isomorphisms_iter())
+            return True
+        except StopIteration:
+            return False
+
+    def subgraph_is_monomorphic(self):
+        """Returns True if a subgraph of G1 is monomorphic to G2."""
+        try:
+            x = next(self.subgraph_monomorphisms_iter())
+            return True
+        except StopIteration:
+            return False
+
+    #    subgraph_is_isomorphic.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
+
+    def subgraph_isomorphisms_iter(self):
+        """Generator over isomorphisms between a subgraph of G1 and G2."""
+        # Declare that we are looking for graph-subgraph isomorphism.
+        self.test = "subgraph"
+        self.initialize()
+        yield from self.match()
+
+    def subgraph_monomorphisms_iter(self):
+        """Generator over monomorphisms between a subgraph of G1 and G2."""
+        # Declare that we are looking for graph-subgraph monomorphism.
+        self.test = "mono"
+        self.initialize()
+        yield from self.match()
+
+    #    subgraph_isomorphisms_iter.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
+
+    def syntactic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair
+        to the current partial isomorphism/monomorphism mapping is allowable.
+        The addition is allowable if the inclusion of the candidate pair does
+        not make it impossible for an isomorphism/monomorphism to be found.
+        """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_neighbor, we will
+        # make sure that the number of edges is checked.
We also add + # a R_self check to verify that the number of selfloops is acceptable. + # + # Users might be comparing Graph instances with MultiGraph instances. + # So the generic GraphMatcher class must work with MultiGraphs. + # Care must be taken since the value in the innermost dictionary is a + # singlet for Graph instances. For MultiGraphs, the value in the + # innermost dictionary is a list. + + ### + # Test at each step to get a return value as soon as possible. + ### + + # Look ahead 0 + + # R_self + + # The number of selfloops for G1_node must equal the number of + # self-loops for G2_node. Without this check, we would fail on + # R_neighbor at the next recursion level. But it is good to prune the + # search tree now. + + if self.test == "mono": + if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges( + G2_node, G2_node + ): + return False + else: + if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges( + G2_node, G2_node + ): + return False + + # R_neighbor + + # For each neighbor n' of n in the partial mapping, the corresponding + # node m' is a neighbor of m, and vice versa. Also, the number of + # edges must be equal. + if self.test != "mono": + for neighbor in self.G1[G1_node]: + if neighbor in self.core_1: + if not (self.core_1[neighbor] in self.G2[G2_node]): + return False + elif self.G1.number_of_edges( + neighbor, G1_node + ) != self.G2.number_of_edges(self.core_1[neighbor], G2_node): + return False + + for neighbor in self.G2[G2_node]: + if neighbor in self.core_2: + if not (self.core_2[neighbor] in self.G1[G1_node]): + return False + elif self.test == "mono": + if self.G1.number_of_edges( + self.core_2[neighbor], G1_node + ) < self.G2.number_of_edges(neighbor, G2_node): + return False + else: + if self.G1.number_of_edges( + self.core_2[neighbor], G1_node + ) != self.G2.number_of_edges(neighbor, G2_node): + return False + + if self.test != "mono": + # Look ahead 1 + + # R_terminout + # The number of neighbors of n in T_1^{inout} is equal to the + # number of neighbors of m that are in T_2^{inout}, and vice versa. + num1 = 0 + for neighbor in self.G1[G1_node]: + if (neighbor in self.inout_1) and (neighbor not in self.core_1): + num1 += 1 + num2 = 0 + for neighbor in self.G2[G2_node]: + if (neighbor in self.inout_2) and (neighbor not in self.core_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Look ahead 2 + + # R_new + + # The number of neighbors of n that are neither in the core_1 nor + # T_1^{inout} is equal to the number of neighbors of m + # that are neither in core_2 nor T_2^{inout}. + num1 = 0 + for neighbor in self.G1[G1_node]: + if neighbor not in self.inout_1: + num1 += 1 + num2 = 0 + for neighbor in self.G2[G2_node]: + if neighbor not in self.inout_2: + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Otherwise, this node pair is syntactically feasible! + return True + + +class DiGraphMatcher(GraphMatcher): + """Implementation of VF2 algorithm for matching directed graphs. + + Suitable for DiGraph and MultiDiGraph instances. + """ + + def __init__(self, G1, G2): + """Initialize DiGraphMatcher. + + G1 and G2 should be nx.Graph or nx.MultiGraph instances. 
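+        (For directed matching these are typically nx.DiGraph or
+        nx.MultiDiGraph instances; see the class docstring above.)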
+ + Examples + -------- + To create a GraphMatcher which checks for syntactic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + >>> DiGM = isomorphism.DiGraphMatcher(G1, G2) + """ + super().__init__(G1, G2) + + def candidate_pairs_iter(self): + """Iterator over candidate pairs of nodes in G1 and G2.""" + + # All computations are done using the current state! + + G1_nodes = self.G1_nodes + G2_nodes = self.G2_nodes + min_key = self.G2_node_order.__getitem__ + + # First we compute the out-terminal sets. + T1_out = [node for node in self.out_1 if node not in self.core_1] + T2_out = [node for node in self.out_2 if node not in self.core_2] + + # If T1_out and T2_out are both nonempty. + # P(s) = T1_out x {min T2_out} + if T1_out and T2_out: + node_2 = min(T2_out, key=min_key) + for node_1 in T1_out: + yield node_1, node_2 + + # If T1_out and T2_out were both empty.... + # We compute the in-terminal sets. + + # elif not (T1_out or T2_out): # as suggested by [2], incorrect + else: # as suggested by [1], correct + T1_in = [node for node in self.in_1 if node not in self.core_1] + T2_in = [node for node in self.in_2 if node not in self.core_2] + + # If T1_in and T2_in are both nonempty. + # P(s) = T1_out x {min T2_out} + if T1_in and T2_in: + node_2 = min(T2_in, key=min_key) + for node_1 in T1_in: + yield node_1, node_2 + + # If all terminal sets are empty... + # P(s) = (N_1 - M_1) x {min (N_2 - M_2)} + + # elif not (T1_in or T2_in): # as suggested by [2], incorrect + else: # as inferred from [1], correct + node_2 = min(G2_nodes - set(self.core_2), key=min_key) + for node_1 in G1_nodes: + if node_1 not in self.core_1: + yield node_1, node_2 + + # For all other cases, we don't have any candidate pairs. + + def initialize(self): + """Reinitializes the state of the algorithm. + + This method should be redefined if using something other than DiGMState. + If only subclassing GraphMatcher, a redefinition is not necessary. + """ + + # core_1[n] contains the index of the node paired with n, which is m, + # provided n is in the mapping. + # core_2[m] contains the index of the node paired with m, which is n, + # provided m is in the mapping. + self.core_1 = {} + self.core_2 = {} + + # See the paper for definitions of M_x and T_x^{y} + + # in_1[n] is non-zero if n is in M_1 or in T_1^{in} + # out_1[n] is non-zero if n is in M_1 or in T_1^{out} + # + # in_2[m] is non-zero if m is in M_2 or in T_2^{in} + # out_2[m] is non-zero if m is in M_2 or in T_2^{out} + # + # The value stored is the depth of the search tree when the node became + # part of the corresponding set. + self.in_1 = {} + self.in_2 = {} + self.out_1 = {} + self.out_2 = {} + + self.state = DiGMState(self) + + # Provide a convenient way to access the isomorphism mapping. + self.mapping = self.core_1.copy() + + def syntactic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is syntactically feasible. + + This function returns True if it is adding the candidate pair + to the current partial isomorphism/monomorphism mapping is allowable. + The addition is allowable if the inclusion of the candidate pair does + not make it impossible for an isomorphism/monomorphism to be found. + """ + + # The VF2 algorithm was designed to work with graphs having, at most, + # one edge connecting any two nodes. This is not the case when + # dealing with an MultiGraphs. 
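+        # (All edge counts below go through number_of_edges(), which also
+        # counts parallel edges, so MultiDiGraphs are covered by the same
+        # comparisons.)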
+ # + # Basically, when we test the look-ahead rules R_pred and R_succ, we + # will make sure that the number of edges are checked. We also add + # a R_self check to verify that the number of selfloops is acceptable. + + # Users might be comparing DiGraph instances with MultiDiGraph + # instances. So the generic DiGraphMatcher class must work with + # MultiDiGraphs. Care must be taken since the value in the innermost + # dictionary is a singlet for DiGraph instances. For MultiDiGraphs, + # the value in the innermost dictionary is a list. + + ### + # Test at each step to get a return value as soon as possible. + ### + + # Look ahead 0 + + # R_self + + # The number of selfloops for G1_node must equal the number of + # self-loops for G2_node. Without this check, we would fail on R_pred + # at the next recursion level. This should prune the tree even further. + if self.test == "mono": + if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges( + G2_node, G2_node + ): + return False + else: + if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges( + G2_node, G2_node + ): + return False + + # R_pred + + # For each predecessor n' of n in the partial mapping, the + # corresponding node m' is a predecessor of m, and vice versa. Also, + # the number of edges must be equal + if self.test != "mono": + for predecessor in self.G1.pred[G1_node]: + if predecessor in self.core_1: + if not (self.core_1[predecessor] in self.G2.pred[G2_node]): + return False + elif self.G1.number_of_edges( + predecessor, G1_node + ) != self.G2.number_of_edges(self.core_1[predecessor], G2_node): + return False + + for predecessor in self.G2.pred[G2_node]: + if predecessor in self.core_2: + if not (self.core_2[predecessor] in self.G1.pred[G1_node]): + return False + elif self.test == "mono": + if self.G1.number_of_edges( + self.core_2[predecessor], G1_node + ) < self.G2.number_of_edges(predecessor, G2_node): + return False + else: + if self.G1.number_of_edges( + self.core_2[predecessor], G1_node + ) != self.G2.number_of_edges(predecessor, G2_node): + return False + + # R_succ + + # For each successor n' of n in the partial mapping, the corresponding + # node m' is a successor of m, and vice versa. Also, the number of + # edges must be equal. + if self.test != "mono": + for successor in self.G1[G1_node]: + if successor in self.core_1: + if not (self.core_1[successor] in self.G2[G2_node]): + return False + elif self.G1.number_of_edges( + G1_node, successor + ) != self.G2.number_of_edges(G2_node, self.core_1[successor]): + return False + + for successor in self.G2[G2_node]: + if successor in self.core_2: + if not (self.core_2[successor] in self.G1[G1_node]): + return False + elif self.test == "mono": + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) < self.G2.number_of_edges(G2_node, successor): + return False + else: + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) != self.G2.number_of_edges(G2_node, successor): + return False + + if self.test != "mono": + + # Look ahead 1 + + # R_termin + # The number of predecessors of n that are in T_1^{in} is equal to the + # number of predecessors of m that are in T_2^{in}. 
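+            # (For the subgraph test, the equality checks below relax to
+            # num1 >= num2.)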
+ num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.in_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.in_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{in} is equal to the + # number of successors of m that are in T_2^{in}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.in_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.in_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # R_termout + + # The number of predecessors of n that are in T_1^{out} is equal to the + # number of predecessors of m that are in T_2^{out}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.out_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.out_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{out} is equal to the + # number of successors of m that are in T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.out_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.out_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Look ahead 2 + + # R_new + + # The number of predecessors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of predecessors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor not in self.in_1) and (predecessor not in self.out_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor not in self.in_2) and (predecessor not in self.out_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of successors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor not in self.in_1) and (successor not in self.out_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor not in self.in_2) and (successor not in self.out_2): + num2 += 1 + if self.test == "graph": + if not (num1 == num2): + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Otherwise, this node pair is syntactically feasible! + return True + + +class GMState: + """Internal representation of state for the GraphMatcher class. + + This class is used internally by the GraphMatcher class. It is used + only to store state specific data. 
There will be at most G2.order() of + these objects in memory at a time, due to the depth-first search + strategy employed by the VF2 algorithm. + """ + + def __init__(self, GM, G1_node=None, G2_node=None): + """Initializes GMState object. + + Pass in the GraphMatcher to which this GMState belongs and the + new node pair that will be added to the GraphMatcher's current + isomorphism mapping. + """ + self.GM = GM + + # Initialize the last stored node pair. + self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.inout_1 = {} + GM.inout_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. + GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other two vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... + if G1_node not in GM.inout_1: + GM.inout_1[G1_node] = self.depth + if G2_node not in GM.inout_2: + GM.inout_2[G2_node] = self.depth + + # Now we add every other node... + + # Updates for T_1^{inout} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1] + ) + for node in new_nodes: + if node not in GM.inout_1: + GM.inout_1[node] = self.depth + + # Updates for T_2^{inout} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2] + ) + for node in new_nodes: + if node not in GM.inout_2: + GM.inout_2[node] = self.depth + + def restore(self): + """Deletes the GMState object and restores the class variables.""" + # First we remove the node that was added from the core vectors. + # Watch out! G1_node == 0 should evaluate to True. + if self.G1_node is not None and self.G2_node is not None: + del self.GM.core_1[self.G1_node] + del self.GM.core_2[self.G2_node] + + # Now we revert the other two vectors. + # Thus, we delete all entries which have this depth level. + for vector in (self.GM.inout_1, self.GM.inout_2): + for node in list(vector.keys()): + if vector[node] == self.depth: + del vector[node] + + +class DiGMState: + """Internal representation of state for the DiGraphMatcher class. + + This class is used internally by the DiGraphMatcher class. It is used + only to store state specific data. There will be at most G2.order() of + these objects in memory at a time, due to the depth-first search + strategy employed by the VF2 algorithm. + + """ + + def __init__(self, GM, G1_node=None, G2_node=None): + """Initializes DiGMState object. + + Pass in the DiGraphMatcher to which this DiGMState belongs and the + new node pair that will be added to the GraphMatcher's current + isomorphism mapping. + """ + self.GM = GM + + # Initialize the last stored node pair. + self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.in_1 = {} + GM.in_2 = {} + GM.out_1 = {} + GM.out_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. 
+ GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other four vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... + for vector in (GM.in_1, GM.out_1): + if G1_node not in vector: + vector[G1_node] = self.depth + for vector in (GM.in_2, GM.out_2): + if G2_node not in vector: + vector[G2_node] = self.depth + + # Now we add every other node... + + # Updates for T_1^{in} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [ + predecessor + for predecessor in GM.G1.predecessors(node) + if predecessor not in GM.core_1 + ] + ) + for node in new_nodes: + if node not in GM.in_1: + GM.in_1[node] = self.depth + + # Updates for T_2^{in} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [ + predecessor + for predecessor in GM.G2.predecessors(node) + if predecessor not in GM.core_2 + ] + ) + for node in new_nodes: + if node not in GM.in_2: + GM.in_2[node] = self.depth + + # Updates for T_1^{out} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [ + successor + for successor in GM.G1.successors(node) + if successor not in GM.core_1 + ] + ) + for node in new_nodes: + if node not in GM.out_1: + GM.out_1[node] = self.depth + + # Updates for T_2^{out} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [ + successor + for successor in GM.G2.successors(node) + if successor not in GM.core_2 + ] + ) + for node in new_nodes: + if node not in GM.out_2: + GM.out_2[node] = self.depth + + def restore(self): + """Deletes the DiGMState object and restores the class variables.""" + + # First we remove the node that was added from the core vectors. + # Watch out! G1_node == 0 should evaluate to True. + if self.G1_node is not None and self.G2_node is not None: + del self.GM.core_1[self.G1_node] + del self.GM.core_2[self.G2_node] + + # Now we revert the other four vectors. + # Thus, we delete all entries which have this depth level. + for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2): + for node in list(vector.keys()): + if vector[node] == self.depth: + del vector[node] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/matchhelpers.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/matchhelpers.py new file mode 100644 index 0000000..9010e26 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/matchhelpers.py @@ -0,0 +1,355 @@ +"""Functions which help end users define customize node_match and +edge_match functions to use during isomorphism checks. +""" +import math +import types +from itertools import permutations + +__all__ = [ + "categorical_node_match", + "categorical_edge_match", + "categorical_multiedge_match", + "numerical_node_match", + "numerical_edge_match", + "numerical_multiedge_match", + "generic_node_match", + "generic_edge_match", + "generic_multiedge_match", +] + + +def copyfunc(f, name=None): + """Returns a deepcopy of a function.""" + return types.FunctionType( + f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__ + ) + + +def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08): + """Returns True if x and y are sufficiently close, elementwise. + + Parameters + ---------- + rtol : float + The relative error tolerance. + atol : float + The absolute error tolerance. 
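+
+    Examples
+    --------
+    An illustrative check with the default tolerances:
+
+    >>> allclose([1.0, 2.0], [1.0, 2.0000001])
+    True
+    >>> allclose([1.0], [1.1])
+    False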
+ + """ + # assume finite weights, see numpy.allclose() for reference + for xi, yi in zip(x, y): + if not math.isclose(xi, yi, rel_tol=rtol, abs_tol=atol): + return False + return True + + +categorical_doc = """ +Returns a comparison function for a categorical node attribute. + +The value(s) of the attr(s) must be hashable and comparable via the == +operator since they are placed into a set([]) object. If the sets from +G1 and G2 are the same, then the constructed function returns True. + +Parameters +---------- +attr : string | list + The categorical node attribute to compare, or a list of categorical + node attributes to compare. +default : value | list + The default value for the categorical node attribute, or a list of + default values for the categorical node attributes. + +Returns +------- +match : function + The customized, categorical `node_match` function. + +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.categorical_node_match("size", 1) +>>> nm = iso.categorical_node_match(["color", "size"], ["red", 2]) + +""" + + +def categorical_node_match(attr, default): + if isinstance(attr, str): + + def match(data1, data2): + return data1.get(attr, default) == data2.get(attr, default) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + return all(data1.get(attr, d) == data2.get(attr, d) for attr, d in attrs) + + return match + + +categorical_edge_match = copyfunc(categorical_node_match, "categorical_edge_match") + + +def categorical_multiedge_match(attr, default): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = {data.get(attr, default) for data in datasets1.values()} + values2 = {data.get(attr, default) for data in datasets2.values()} + return values1 == values2 + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = set() + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.add(x) + values2 = set() + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.add(x) + return values1 == values2 + + return match + + +# Docstrings for categorical functions. +categorical_node_match.__doc__ = categorical_doc +categorical_edge_match.__doc__ = categorical_doc.replace("node", "edge") +tmpdoc = categorical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("categorical_edge_match", "categorical_multiedge_match") +categorical_multiedge_match.__doc__ = tmpdoc + + +numerical_doc = """ +Returns a comparison function for a numerical node attribute. + +The value(s) of the attr(s) must be numerical and sortable. If the +sorted list of values from G1 and G2 are the same within some +tolerance, then the constructed function returns True. + +Parameters +---------- +attr : string | list + The numerical node attribute to compare, or a list of numerical + node attributes to compare. +default : value | list + The default value for the numerical node attribute, or a list of + default values for the numerical node attributes. +rtol : float + The relative error tolerance. +atol : float + The absolute error tolerance. + +Returns +------- +match : function + The customized, numerical `node_match` function. 
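+
+Notes
+-----
+Missing attributes fall back to `default`, and the comparison itself
+delegates to math.isclose with `rel_tol=rtol` and `abs_tol=atol`.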
+ +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.numerical_node_match("weight", 1.0) +>>> nm = iso.numerical_node_match(["weight", "linewidth"], [0.25, 0.5]) + +""" + + +def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(data1, data2): + return math.isclose( + data1.get(attr, default), + data2.get(attr, default), + rel_tol=rtol, + abs_tol=atol, + ) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + values1 = [data1.get(attr, d) for attr, d in attrs] + values2 = [data2.get(attr, d) for attr, d in attrs] + return allclose(values1, values2, rtol=rtol, atol=atol) + + return match + + +numerical_edge_match = copyfunc(numerical_node_match, "numerical_edge_match") + + +def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = sorted(data.get(attr, default) for data in datasets1.values()) + values2 = sorted(data.get(attr, default) for data in datasets2.values()) + return allclose(values1, values2, rtol=rtol, atol=atol) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = [] + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.append(x) + values2 = [] + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.append(x) + values1.sort() + values2.sort() + for xi, yi in zip(values1, values2): + if not allclose(xi, yi, rtol=rtol, atol=atol): + return False + else: + return True + + return match + + +# Docstrings for numerical functions. +numerical_node_match.__doc__ = numerical_doc +numerical_edge_match.__doc__ = numerical_doc.replace("node", "edge") +tmpdoc = numerical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("numerical_edge_match", "numerical_multiedge_match") +numerical_multiedge_match.__doc__ = tmpdoc + + +generic_doc = """ +Returns a comparison function for a generic attribute. + +The value(s) of the attr(s) are compared using the specified +operators. If all the attributes are equal, then the constructed +function returns True. + +Parameters +---------- +attr : string | list + The node attribute to compare, or a list of node attributes + to compare. +default : value | list + The default value for the node attribute, or a list of + default values for the node attributes. +op : callable | list + The operator to use when comparing attribute values, or a list + of operators to use when comparing values for each attribute. + +Returns +------- +match : function + The customized, generic `node_match` function. 
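+
+Notes
+-----
+A minimal sketch of a constructed matcher in use; the "color" attribute
+name is only an example:
+
+>>> from operator import eq
+>>> from networkx.algorithms.isomorphism import generic_node_match
+>>> nm = generic_node_match("color", "red", eq)
+>>> nm({"color": "blue"}, {"color": "blue"})
+True
+>>> nm({"color": "blue"}, {})  # the right-hand side falls back to "red"
+False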
+ +Examples +-------- +>>> from operator import eq +>>> from math import isclose +>>> from networkx.algorithms.isomorphism import generic_node_match +>>> nm = generic_node_match("weight", 1.0, isclose) +>>> nm = generic_node_match("color", "red", eq) +>>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq]) + +""" + + +def generic_node_match(attr, default, op): + if isinstance(attr, str): + + def match(data1, data2): + return op(data1.get(attr, default), data2.get(attr, default)) + + else: + attrs = list(zip(attr, default, op)) # Python 3 + + def match(data1, data2): + for attr, d, operator in attrs: + if not operator(data1.get(attr, d), data2.get(attr, d)): + return False + else: + return True + + return match + + +generic_edge_match = copyfunc(generic_node_match, "generic_edge_match") + + +def generic_multiedge_match(attr, default, op): + """Returns a comparison function for a generic attribute. + + The value(s) of the attr(s) are compared using the specified + operators. If all the attributes are equal, then the constructed + function returns True. Potentially, the constructed edge_match + function can be slow since it must verify that no isomorphism + exists between the multiedges before it returns False. + + Parameters + ---------- + attr : string | list + The edge attribute to compare, or a list of node attributes + to compare. + default : value | list + The default value for the edge attribute, or a list of + default values for the dgeattributes. + op : callable | list + The operator to use when comparing attribute values, or a list + of operators to use when comparing values for each attribute. + + Returns + ------- + match : function + The customized, generic `edge_match` function. + + Examples + -------- + >>> from operator import eq + >>> from math import isclose + >>> from networkx.algorithms.isomorphism import generic_node_match + >>> nm = generic_node_match("weight", 1.0, isclose) + >>> nm = generic_node_match("color", "red", eq) + >>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq]) + ... + + """ + + # This is slow, but generic. + # We must test every possible isomorphism between the edges. + if isinstance(attr, str): + attr = [attr] + default = [default] + op = [op] + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = [] + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.append(x) + values2 = [] + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.append(x) + for vals2 in permutations(values2): + for xi, yi in zip(values1, vals2): + if not all(map(lambda x, y, z: z(x, y), xi, yi, op)): + # This is not an isomorphism, go to next permutation. + break + else: + # Then we found an isomorphism. + return True + else: + # Then there are no isomorphisms between the multiedges. + return False + + return match + + +# Docstrings for numerical functions. 
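The edge- and node-match factories above are easiest to follow with a small usage sketch. The following is an editor's illustration only (not part of the diff; the graphs and attribute values are made up) of how the categorical and numerical helpers plug into nx.is_isomorphic:

    import networkx as nx
    import networkx.algorithms.isomorphism as iso

    G1 = nx.Graph()
    G1.add_edge("a", "b", color="red", weight=1.0)
    G2 = nx.Graph()
    G2.add_edge("x", "y", color="red", weight=1.0 + 1e-9)

    # Categorical: attribute values must compare equal exactly.
    cm = iso.categorical_edge_match("color", "")
    # Numerical: values may differ within rel_tol / abs_tol.
    nm = iso.numerical_edge_match("weight", 1.0)

    nx.is_isomorphic(G1, G2, edge_match=cm)  # True, both edges are "red"
    nx.is_isomorphic(G1, G2, edge_match=nm)  # True, weights agree within tolerance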
+generic_node_match.__doc__ = generic_doc +generic_edge_match.__doc__ = generic_doc.replace("node", "edge") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py new file mode 100644 index 0000000..853e7cd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py @@ -0,0 +1,308 @@ +""" +***************************** +Time-respecting VF2 Algorithm +***************************** + +An extension of the VF2 algorithm for time-respecting graph isomorphism +testing in temporal graphs. + +A temporal graph is one in which edges contain a datetime attribute, +denoting when interaction occurred between the incident nodes. A +time-respecting subgraph of a temporal graph is a subgraph such that +all interactions incident to a node occurred within a time threshold, +delta, of each other. A directed time-respecting subgraph has the +added constraint that incoming interactions to a node must precede +outgoing interactions from the same node - this enforces a sense of +directed flow. + +Introduction +------------ + +The TimeRespectingGraphMatcher and TimeRespectingDiGraphMatcher +extend the GraphMatcher and DiGraphMatcher classes, respectively, +to include temporal constraints on matches. This is achieved through +a semantic check, via the semantic_feasibility() function. + +As well as including G1 (the graph in which to seek embeddings) and +G2 (the subgraph structure of interest), the name of the temporal +attribute on the edges and the time threshold, delta, must be supplied +as arguments to the matching constructors. + +A delta of zero is the strictest temporal constraint on the match - +only embeddings in which all interactions occur at the same time will +be returned. A delta of one day will allow embeddings in which +adjacent interactions occur up to a day apart. + +Examples +-------- + +Examples will be provided when the datetime type has been incorporated. + + +Temporal Subgraph Isomorphism +----------------------------- + +A brief discussion of the somewhat diverse current literature will be +included here. + +References +---------- + +[1] Redmond, U. and Cunningham, P. Temporal subgraph isomorphism. In: +The 2013 IEEE/ACM International Conference on Advances in Social +Networks Analysis and Mining (ASONAM). Niagara Falls, Canada; 2013: +pages 1451 - 1452. [65] + +For a discussion of the literature on temporal networks: + +[3] P. Holme and J. Saramaki. Temporal networks. Physics Reports, +519(3):97–125, 2012. + +Notes +----- + +Handles directed and undirected graphs and graphs with parallel edges. + +""" + +import networkx as nx + +from .isomorphvf2 import DiGraphMatcher, GraphMatcher + +__all__ = ["TimeRespectingGraphMatcher", "TimeRespectingDiGraphMatcher"] + + +class TimeRespectingGraphMatcher(GraphMatcher): + def __init__(self, G1, G2, temporal_attribute_name, delta): + """Initialize TimeRespectingGraphMatcher. + + G1 and G2 should be nx.Graph or nx.MultiGraph instances. + + Examples + -------- + To create a TimeRespectingGraphMatcher which checks for + syntactic and semantic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> from datetime import timedelta + >>> G1 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) + + >>> G2 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) + + >>> GM = isomorphism.TimeRespectingGraphMatcher( + ... G1, G2, "date", timedelta(days=1) + ... 
) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def one_hop(self, Gx, Gx_node, neighbors): + """ + Edges one hop out from a node in the mapping should be + time-respecting with respect to each other. + """ + dates = [] + for n in neighbors: + if isinstance(Gx, nx.Graph): # Graph G[u][v] returns the data dictionary. + dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edges between node pair. + dates.append(edge[self.temporal_attribute_name]) + if any(x is None for x in dates): + raise ValueError("Datetime not supplied for at least one edge.") + return not dates or max(dates) - min(dates) <= self.delta + + def two_hop(self, Gx, core_x, Gx_node, neighbors): + """ + Paths of length 2 from Gx_node should be time-respecting. + """ + return all( + self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) + for v in neighbors + ) + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically + feasible. + + Any subclass which redefines semantic_feasibility() must + maintain the self.tests if needed, to keep the match() method + functional. Implementations should consider multigraphs. + """ + neighbors = [n for n in self.G1[G1_node] if n in self.core_1] + if not self.one_hop(self.G1, G1_node, neighbors): # Fail fast on first node. + return False + if not self.two_hop(self.G1, self.core_1, G1_node, neighbors): + return False + # Otherwise, this node is semantically feasible! + return True + + +class TimeRespectingDiGraphMatcher(DiGraphMatcher): + def __init__(self, G1, G2, temporal_attribute_name, delta): + """Initialize TimeRespectingDiGraphMatcher. + + G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances. + + Examples + -------- + To create a TimeRespectingDiGraphMatcher which checks for + syntactic and semantic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> from datetime import timedelta + >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> GM = isomorphism.TimeRespectingDiGraphMatcher( + ... G1, G2, "date", timedelta(days=1) + ... ) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def get_pred_dates(self, Gx, Gx_node, core_x, pred): + """ + Get the dates of edges from predecessors. + """ + pred_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in pred: + pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in pred: + for edge in Gx[n][ + Gx_node + ].values(): # Iterates all edge data between node pair. + pred_dates.append(edge[self.temporal_attribute_name]) + return pred_dates + + def get_succ_dates(self, Gx, Gx_node, core_x, succ): + """ + Get the dates of edges to successors. + """ + succ_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in succ: + succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in succ: + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edge data between node pair. 
+ succ_dates.append(edge[self.temporal_attribute_name]) + return succ_dates + + def one_hop(self, Gx, Gx_node, core_x, pred, succ): + """ + The ego node. + """ + pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred) + succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ) + return self.test_one(pred_dates, succ_dates) and self.test_two( + pred_dates, succ_dates + ) + + def two_hop_pred(self, Gx, Gx_node, core_x, pred): + """ + The predeccessors of the ego node. + """ + return all( + self.one_hop( + Gx, + p, + core_x, + self.preds(Gx, core_x, p), + self.succs(Gx, core_x, p, Gx_node), + ) + for p in pred + ) + + def two_hop_succ(self, Gx, Gx_node, core_x, succ): + """ + The successors of the ego node. + """ + return all( + self.one_hop( + Gx, + s, + core_x, + self.preds(Gx, core_x, s, Gx_node), + self.succs(Gx, core_x, s), + ) + for s in succ + ) + + def preds(self, Gx, core_x, v, Gx_node=None): + pred = [n for n in Gx.predecessors(v) if n in core_x] + if Gx_node: + pred.append(Gx_node) + return pred + + def succs(self, Gx, core_x, v, Gx_node=None): + succ = [n for n in Gx.successors(v) if n in core_x] + if Gx_node: + succ.append(Gx_node) + return succ + + def test_one(self, pred_dates, succ_dates): + """ + Edges one hop out from Gx_node in the mapping should be + time-respecting with respect to each other, regardless of + direction. + """ + time_respecting = True + dates = pred_dates + succ_dates + + if any(x is None for x in dates): + raise ValueError("Date or datetime not supplied for at least one edge.") + + dates.sort() # Small to large. + if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta): + time_respecting = False + return time_respecting + + def test_two(self, pred_dates, succ_dates): + """ + Edges from a dual Gx_node in the mapping should be ordered in + a time-respecting manner. + """ + time_respecting = True + pred_dates.sort() + succ_dates.sort() + # First out before last in; negative of the necessary condition for time-respect. + if ( + 0 < len(succ_dates) + and 0 < len(pred_dates) + and succ_dates[0] < pred_dates[-1] + ): + time_respecting = False + return time_respecting + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically + feasible. + + Any subclass which redefines semantic_feasibility() must + maintain the self.tests if needed, to keep the match() method + functional. Implementations should consider multigraphs. + """ + pred, succ = ( + [n for n in self.G1.predecessors(G1_node) if n in self.core_1], + [n for n in self.G1.successors(G1_node) if n in self.core_1], + ) + if not self.one_hop( + self.G1, G1_node, self.core_1, pred, succ + ): # Fail fast on first node. + return False + if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred): + return False + if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ): + return False + # Otherwise, this node is semantically feasible! 
+ return True diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 new file mode 100644 index 0000000..dac54f0 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 new file mode 100644 index 0000000..6c6af68 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 new file mode 100644 index 0000000..60c3a3c Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 new file mode 100644 index 0000000..0236872 Binary files /dev/null and b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 differ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py new file mode 100644 index 0000000..6fa4ab9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py @@ -0,0 +1,327 @@ +""" + Tests for ISMAGS isomorphism algorithm. +""" + +import pytest + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +def _matches_to_sets(matches): + """ + Helper function to facilitate comparing collections of dictionaries in + which order does not matter. + """ + return set(map(lambda m: frozenset(m.items()), matches)) + + +class TestSelfIsomorphism: + data = [ + ( + [ + (0, dict(name="a")), + (1, dict(name="a")), + (2, dict(name="b")), + (3, dict(name="b")), + (4, dict(name="a")), + (5, dict(name="a")), + ], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + ), + (range(1, 5), [(1, 2), (2, 4), (4, 3), (3, 1)]), + ( + [], + [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 0), + (0, 6), + (6, 7), + (2, 8), + (8, 9), + (4, 10), + (10, 11), + ], + ), + ([], [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 6)]), + ] + + def test_self_isomorphism(self): + """ + For some small, symmetric graphs, make sure that 1) they are isomorphic + to themselves, and 2) that only the identity mapping is found. 
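Referring back to the TimeRespectingGraphMatcher introduced in temporalisomorphvf2.py above (whose module docstring defers examples), a minimal usage sketch might look like the following. This is an editor's illustration; the "date" attribute name, the timestamps and the one-day delta are assumptions, not part of the diff:

    from datetime import datetime, timedelta
    import networkx as nx
    from networkx.algorithms import isomorphism

    G1 = nx.Graph()
    G1.add_edge(1, 2, date=datetime(2021, 1, 1, 0))
    G1.add_edge(2, 3, date=datetime(2021, 1, 1, 12))

    G2 = nx.Graph()
    G2.add_edge("a", "b", date=datetime(2021, 1, 1, 0))
    G2.add_edge("b", "c", date=datetime(2021, 1, 1, 6))

    # Embeddings are only reported when the interactions incident to a
    # mapped node fall within delta of each other.
    GM = isomorphism.TimeRespectingGraphMatcher(G1, G2, "date", timedelta(days=1))
    GM.subgraph_is_isomorphic()  # True for these timestamps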
+ """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + def test_edgecase_self_isomorphism(self): + """ + This edgecase is one of the cases in which it is hard to find all + symmetry elements. + """ + graph = nx.Graph() + nx.add_path(graph, range(5)) + graph.add_edges_from([(2, 5), (5, 6)]) + + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + graph = nx.relabel_nodes(graph, {0: 0, 1: 1, 2: 2, 3: 3, 4: 6, 5: 4, 6: 5}) + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + def test_directed_self_isomorphism(self): + """ + For some small, directed, symmetric graphs, make sure that 1) they are + isomorphic to themselves, and 2) that only the identity mapping is + found. + """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + +class TestSubgraphIsomorphism: + def test_isomorphism(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(4)) + + g2 = nx.Graph() + nx.add_cycle(g2, range(4)) + g2.add_edges_from([(n, m) for n, m in zip(g2, range(4, 8))]) + ismags = iso.ISMAGS(g2, g1) + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in g1.nodes} + ] + + def test_isomorphism2(self): + g1 = nx.Graph() + nx.add_path(g1, range(3)) + + g2 = g1.copy() + g2.add_edge(1, 3) + + ismags = iso.ISMAGS(g2, g1) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [ + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 3: 2}, + {2: 0, 1: 1, 3: 2}, + ] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [ + {0: 2, 1: 1, 2: 0}, + {0: 2, 1: 1, 3: 0}, + {2: 2, 1: 1, 3: 0}, + ] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_nodes(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.nodes[1]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, node_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{0: 2, 1: 1, 2: 0}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_edges(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.edges[1, 2]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, edge_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 
2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{1: 2, 0: 0, 2: 1}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + +class TestWikipediaExample: + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. + # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.ISMAGS(g1, g2) + assert gm.is_isomorphic() + + +class TestLargestCommonSubgraph: + def test_mcis(self): + # Example graphs from DOI: 10.1002/spe.588 + graph1 = nx.Graph() + graph1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (4, 5)]) + graph1.nodes[1]["color"] = 0 + + graph2 = nx.Graph() + graph2.add_edges_from( + [(1, 2), (2, 3), (2, 4), (3, 4), (3, 5), (5, 6), (5, 7), (6, 7)] + ) + graph2.nodes[1]["color"] = 1 + graph2.nodes[6]["color"] = 2 + graph2.nodes[7]["color"] = 2 + + ismags = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {2: 4, 3: 2, 4: 3, 5: 5}] + ) + assert expected == found_mcis + + ismags = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + # Same answer, but reversed. + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {4: 2, 2: 3, 3: 4, 5: 5}] + ) + assert expected == found_mcis + + def test_symmetry_mcis(self): + graph1 = nx.Graph() + nx.add_path(graph1, range(4)) + + graph2 = nx.Graph() + nx.add_path(graph2, range(3)) + graph2.add_edge(1, 3) + + # Only the symmetry of graph2 is taken into account here. + ismags1 = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags1.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags1.largest_common_subgraph()) + expected = _matches_to_sets([{0: 0, 1: 1, 2: 2}, {1: 0, 3: 2, 2: 1}]) + assert expected == found_mcis + + # Only the symmetry of graph1 is taken into account here. 
+ ismags2 = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags2.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags2.largest_common_subgraph()) + expected = _matches_to_sets( + [ + {3: 2, 0: 0, 1: 1}, + {2: 0, 0: 2, 1: 1}, + {3: 0, 0: 2, 1: 1}, + {3: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + {2: 0, 3: 2, 1: 1}, + ] + ) + + assert expected == found_mcis + + found_mcis1 = _matches_to_sets(ismags1.largest_common_subgraph(False)) + found_mcis2 = ismags2.largest_common_subgraph(False) + found_mcis2 = [{v: k for k, v in d.items()} for d in found_mcis2] + found_mcis2 = _matches_to_sets(found_mcis2) + + expected = _matches_to_sets( + [ + {3: 2, 1: 3, 2: 1}, + {2: 0, 0: 2, 1: 1}, + {1: 2, 3: 3, 2: 1}, + {3: 0, 1: 3, 2: 1}, + {0: 2, 2: 3, 1: 1}, + {3: 0, 1: 2, 2: 1}, + {2: 0, 0: 3, 1: 1}, + {0: 0, 2: 3, 1: 1}, + {1: 0, 3: 3, 2: 1}, + {1: 0, 3: 2, 2: 1}, + {0: 3, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + ] + ) + assert expected == found_mcis1 + assert expected == found_mcis2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py new file mode 100644 index 0000000..c669040 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py @@ -0,0 +1,40 @@ +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +class TestIsomorph: + @classmethod + def setup_class(cls): + cls.G1 = nx.Graph() + cls.G2 = nx.Graph() + cls.G3 = nx.Graph() + cls.G4 = nx.Graph() + cls.G5 = nx.Graph() + cls.G6 = nx.Graph() + cls.G1.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 3]]) + cls.G2.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50]]) + cls.G3.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 5]]) + cls.G4.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 4]]) + cls.G5.add_edges_from([[1, 2], [1, 3]]) + cls.G6.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50], [20, 50]]) + + def test_could_be_isomorphic(self): + assert iso.could_be_isomorphic(self.G1, self.G2) + assert iso.could_be_isomorphic(self.G1, self.G3) + assert not iso.could_be_isomorphic(self.G1, self.G4) + assert iso.could_be_isomorphic(self.G3, self.G2) + assert not iso.could_be_isomorphic(self.G1, self.G6) + + def test_fast_could_be_isomorphic(self): + assert iso.fast_could_be_isomorphic(self.G3, self.G2) + assert not iso.fast_could_be_isomorphic(self.G3, self.G5) + assert not iso.fast_could_be_isomorphic(self.G1, self.G6) + + def test_faster_could_be_isomorphic(self): + assert iso.faster_could_be_isomorphic(self.G3, self.G2) + assert not iso.faster_could_be_isomorphic(self.G3, self.G5) + assert not iso.faster_could_be_isomorphic(self.G1, self.G6) + + def test_is_isomorphic(self): + assert iso.is_isomorphic(self.G1, self.G2) + assert not iso.is_isomorphic(self.G1, self.G4) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py new file mode 100644 index 0000000..5d3f41b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py @@ -0,0 +1,403 @@ +""" + Tests for VF2 isomorphism algorithm. 
+""" + +import os +import random +import struct + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +class TestWikipediaExample: + # Source: https://en.wikipedia.org/wiki/Graph_isomorphism + + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. + # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.GraphMatcher(g1, g2) + assert gm.is_isomorphic() + # Just testing some cases + assert gm.subgraph_is_monomorphic() + + mapping = sorted(gm.mapping.items()) + + # this mapping is only one of the possibilies + # so this test needs to be reconsidered + # isomap = [('a', 1), ('b', 6), ('c', 3), ('d', 8), + # ('g', 2), ('h', 5), ('i', 4), ('j', 7)] + # assert_equal(mapping, isomap) + + def test_subgraph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + g3 = g2.subgraph([1, 2, 3, 4]) + gm = iso.GraphMatcher(g1, g3) + assert gm.subgraph_is_isomorphic() + + def test_subgraph_mono(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from([[1, 2], [2, 3], [3, 4]]) + gm = iso.GraphMatcher(g1, g2) + assert gm.subgraph_is_monomorphic() + + +class TestVF2GraphDB: + # https://web.archive.org/web/20090303210205/http://amalfi.dis.unina.it/graph/db/ + + @staticmethod + def create_graph(filename): + """Creates a Graph instance from the filename.""" + + # The file is assumed to be in the format from the VF2 graph database. + # Each file is composed of 16-bit numbers (unsigned short int). + # So we will want to read 2 bytes at a time. 
+ + # We can read the number as follows: + # number = struct.unpack(' 0 + assert check_isomorphism(t1, t2, isomorphism) + + +# run positive_single_tree over all the +# non-isomorphic trees for k from 4 to maxk +# k = 4 is the first level that has more than 1 non-isomorphic tree +# k = 13 takes about 2.86 seconds to run on my laptop +# larger values run slow down significantly +# as the number of trees grows rapidly +def test_positive(maxk=14): + + print("positive test") + + for k in range(2, maxk + 1): + start_time = time.time() + trial = 0 + for t in nx.nonisomorphic_trees(k): + positive_single_tree(t) + trial += 1 + print(k, trial, time.time() - start_time) + + +# test the trivial case of a single node in each tree +# note that nonisomorphic_trees doesn't work for k = 1 +def test_trivial(): + + print("trivial test") + + # back to an undirected graph + t1 = nx.Graph() + t1.add_node("a") + root1 = "a" + + t2 = nx.Graph() + t2.add_node("n") + root2 = "n" + + isomorphism = rooted_tree_isomorphism(t1, root1, t2, root2) + + assert isomorphism == [("a", "n")] + + assert check_isomorphism(t1, t2, isomorphism) + + +# test another trivial case where the two graphs have +# different numbers of nodes +def test_trivial_2(): + + print("trivial test 2") + + edges_1 = [("a", "b"), ("a", "c")] + + edges_2 = [("v", "y")] + + t1 = nx.Graph() + t1.add_edges_from(edges_1) + + t2 = nx.Graph() + t2.add_edges_from(edges_2) + + isomorphism = tree_isomorphism(t1, t2) + + # they cannot be isomorphic, + # since they have different numbers of nodes + assert isomorphism == [] + + +# the function nonisomorphic_trees generates all the non-isomorphic +# trees of a given size. Take each pair of these and verify that +# they are not isomorphic +# k = 4 is the first level that has more than 1 non-isomorphic tree +# k = 11 takes about 4.76 seconds to run on my laptop +# larger values run slow down significantly +# as the number of trees grows rapidly +def test_negative(maxk=11): + + print("negative test") + + for k in range(4, maxk + 1): + test_trees = list(nx.nonisomorphic_trees(k)) + start_time = time.time() + trial = 0 + for i in range(len(test_trees) - 1): + for j in range(i + 1, len(test_trees)): + trial += 1 + assert tree_isomorphism(test_trees[i], test_trees[j]) == [] + print(k, trial, time.time() - start_time) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py new file mode 100644 index 0000000..5a22d41 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py @@ -0,0 +1,201 @@ +""" + Tests for VF2 isomorphism algorithm for weighted graphs. 
+""" + +import math +from operator import eq + +import networkx as nx +import networkx.algorithms.isomorphism as iso + + +def test_simple(): + # 16 simple tests + w = "weight" + edges = [(0, 0, 1), (0, 0, 1.5), (0, 1, 2), (1, 0, 3)] + for g1 in [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]: + + g1.add_weighted_edges_from(edges) + g2 = g1.subgraph(g1.nodes()) + if g1.is_multigraph(): + em = iso.numerical_multiedge_match("weight", 1) + else: + em = iso.numerical_edge_match("weight", 1) + assert nx.is_isomorphic(g1, g2, edge_match=em) + + for mod1, mod2 in [(False, True), (True, False), (True, True)]: + # mod1 tests a regular edge + # mod2 tests a selfloop + if g2.is_multigraph(): + if mod1: + data1 = {0: {"weight": 10}} + if mod2: + data2 = {0: {"weight": 1}, 1: {"weight": 2.5}} + else: + if mod1: + data1 = {"weight": 10} + if mod2: + data2 = {"weight": 2.5} + + g2 = g1.subgraph(g1.nodes()).copy() + if mod1: + if not g1.is_directed(): + g2._adj[1][0] = data1 + g2._adj[0][1] = data1 + else: + g2._succ[1][0] = data1 + g2._pred[0][1] = data1 + if mod2: + if not g1.is_directed(): + g2._adj[0][0] = data2 + else: + g2._succ[0][0] = data2 + g2._pred[0][0] = data2 + + assert not nx.is_isomorphic(g1, g2, edge_match=em) + + +def test_weightkey(): + g1 = nx.DiGraph() + g2 = nx.DiGraph() + + g1.add_edge("A", "B", weight=1) + g2.add_edge("C", "D", weight=0) + + assert nx.is_isomorphic(g1, g2) + em = iso.numerical_edge_match("nonexistent attribute", 1) + assert nx.is_isomorphic(g1, g2, edge_match=em) + em = iso.numerical_edge_match("weight", 1) + assert not nx.is_isomorphic(g1, g2, edge_match=em) + + g2 = nx.DiGraph() + g2.add_edge("C", "D") + assert nx.is_isomorphic(g1, g2, edge_match=em) + + +class TestNodeMatch_Graph: + def setup_method(self): + self.g1 = nx.Graph() + self.g2 = nx.Graph() + self.build() + + def build(self): + self.nm = iso.categorical_node_match("color", "") + self.em = iso.numerical_edge_match("weight", 1) + + self.g1.add_node("A", color="red") + self.g2.add_node("C", color="blue") + + self.g1.add_edge("A", "B", weight=1) + self.g2.add_edge("C", "D", weight=1) + + def test_noweight_nocolor(self): + assert nx.is_isomorphic(self.g1, self.g2) + + def test_color1(self): + assert not nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) + + def test_color2(self): + self.g1.nodes["A"]["color"] = "blue" + assert nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) + + def test_weight1(self): + assert nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_weight2(self): + self.g1.add_edge("A", "B", weight=2) + assert not nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_colorsandweights1(self): + iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) + assert not iso + + def test_colorsandweights2(self): + self.g1.nodes["A"]["color"] = "blue" + iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) + assert iso + + def test_colorsandweights3(self): + # make the weights disagree + self.g1.add_edge("A", "B", weight=2) + assert not nx.is_isomorphic( + self.g1, self.g2, node_match=self.nm, edge_match=self.em + ) + + +class TestEdgeMatch_MultiGraph: + def setup_method(self): + self.g1 = nx.MultiGraph() + self.g2 = nx.MultiGraph() + self.GM = iso.MultiGraphMatcher + self.build() + + def build(self): + g1 = self.g1 + g2 = self.g2 + + # We will assume integer weights only. 
+ g1.add_edge("A", "B", color="green", weight=0, size=0.5) + g1.add_edge("A", "B", color="red", weight=1, size=0.35) + g1.add_edge("A", "B", color="red", weight=2, size=0.65) + + g2.add_edge("C", "D", color="green", weight=1, size=0.5) + g2.add_edge("C", "D", color="red", weight=0, size=0.45) + g2.add_edge("C", "D", color="red", weight=2, size=0.65) + + if g1.is_multigraph(): + self.em = iso.numerical_multiedge_match("weight", 1) + self.emc = iso.categorical_multiedge_match("color", "") + self.emcm = iso.categorical_multiedge_match(["color", "weight"], ["", 1]) + self.emg1 = iso.generic_multiedge_match("color", "red", eq) + self.emg2 = iso.generic_multiedge_match( + ["color", "weight", "size"], + ["red", 1, 0.5], + [eq, eq, math.isclose], + ) + else: + self.em = iso.numerical_edge_match("weight", 1) + self.emc = iso.categorical_edge_match("color", "") + self.emcm = iso.categorical_edge_match(["color", "weight"], ["", 1]) + self.emg1 = iso.generic_multiedge_match("color", "red", eq) + self.emg2 = iso.generic_edge_match( + ["color", "weight", "size"], + ["red", 1, 0.5], + [eq, eq, math.isclose], + ) + + def test_weights_only(self): + assert nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_colors_only(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emc) + assert gm.is_isomorphic() + + def test_colorsandweights(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emcm) + assert not gm.is_isomorphic() + + def test_generic1(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emg1) + assert gm.is_isomorphic() + + def test_generic2(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emg2) + assert not gm.is_isomorphic() + + +class TestEdgeMatch_DiGraph(TestNodeMatch_Graph): + def setup_method(self): + TestNodeMatch_Graph.setup_method(self) + self.g1 = nx.DiGraph() + self.g2 = nx.DiGraph() + self.build() + + +class TestEdgeMatch_MultiDiGraph(TestEdgeMatch_MultiGraph): + def setup_method(self): + TestEdgeMatch_MultiGraph.setup_method(self) + self.g1 = nx.MultiDiGraph() + self.g2 = nx.MultiDiGraph() + self.GM = iso.MultiDiGraphMatcher + self.build() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py new file mode 100644 index 0000000..7e13d02 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py @@ -0,0 +1,279 @@ +""" +An algorithm for finding if two undirected trees are isomorphic, +and if so returns an isomorphism between the two sets of nodes. + +This algorithm uses a routine to tell if two rooted trees (trees with a +specified root node) are isomorphic, which may be independently useful. + +This implements an algorithm from: +The Design and Analysis of Computer Algorithms +by Aho, Hopcroft, and Ullman +Addison-Wesley Publishing 1974 +Example 3.2 pp. 84-86. 
+ +A more understandable version of this algorithm is described in: +Homework Assignment 5 +McGill University SOCS 308-250B, Winter 2002 +by Matthew Suderman +http://crypto.cs.mcgill.ca/~crepeau/CS250/2004/HW5+.pdf +""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = ["rooted_tree_isomorphism", "tree_isomorphism"] + + +def root_trees(t1, root1, t2, root2): + """Create a single digraph dT of free trees t1 and t2 + # with roots root1 and root2 respectively + # rename the nodes with consecutive integers + # so that all nodes get a unique name between both trees + + # our new "fake" root node is 0 + # t1 is numbers from 1 ... n + # t2 is numbered from n+1 to 2n + """ + + dT = nx.DiGraph() + + newroot1 = 1 # left root will be 1 + newroot2 = nx.number_of_nodes(t1) + 1 # right will be n+1 + + # may be overlap in node names here so need separate maps + # given the old name, what is the new + namemap1 = {root1: newroot1} + namemap2 = {root2: newroot2} + + # add an edge from our new root to root1 and root2 + dT.add_edge(0, namemap1[root1]) + dT.add_edge(0, namemap2[root2]) + + for i, (v1, v2) in enumerate(nx.bfs_edges(t1, root1)): + namemap1[v2] = i + namemap1[root1] + 1 + dT.add_edge(namemap1[v1], namemap1[v2]) + + for i, (v1, v2) in enumerate(nx.bfs_edges(t2, root2)): + namemap2[v2] = i + namemap2[root2] + 1 + dT.add_edge(namemap2[v1], namemap2[v2]) + + # now we really want the inverse of namemap1 and namemap2 + # giving the old name given the new + # since the values of namemap1 and namemap2 are unique + # there won't be collisions + namemap = {} + for old, new in namemap1.items(): + namemap[new] = old + for old, new in namemap2.items(): + namemap[new] = old + + return (dT, namemap, newroot1, newroot2) + + +# figure out the level of each node, with 0 at root +def assign_levels(G, root): + level = {} + level[root] = 0 + for (v1, v2) in nx.bfs_edges(G, root): + level[v2] = level[v1] + 1 + + return level + + +# now group the nodes at each level +def group_by_levels(levels): + L = {} + for (n, lev) in levels.items(): + if lev not in L: + L[lev] = [] + L[lev].append(n) + + return L + + +# now lets get the isomorphism by walking the ordered_children +def generate_isomorphism(v, w, M, ordered_children): + # make sure tree1 comes first + assert v < w + M.append((v, w)) + for i, (x, y) in enumerate(zip(ordered_children[v], ordered_children[w])): + generate_isomorphism(x, y, M, ordered_children) + + +def rooted_tree_isomorphism(t1, root1, t2, root2): + """ + Given two rooted trees `t1` and `t2`, + with roots `root1` and `root2` respectivly + this routine will determine if they are isomorphic. + + These trees may be either directed or undirected, + but if they are directed, all edges should flow from the root. + + It returns the isomorphism, a mapping of the nodes of `t1` onto the nodes + of `t2`, such that two trees are then identical. + + Note that two trees may have more than one isomorphism, and this + routine just returns one valid mapping. + + Parameters + ---------- + `t1` : NetworkX graph + One of the trees being compared + + `root1` : a node of `t1` which is the root of the tree + + `t2` : undirected NetworkX graph + The other tree being compared + + `root2` : a node of `t2` which is the root of the tree + + This is a subroutine used to implement `tree_isomorphism`, but will + be somewhat faster if you already have rooted trees. 
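A quick illustrative call (an editor's sketch, not part of the original docstring; the trees, roots and node labels are made up):

    import networkx as nx
    from networkx.algorithms.isomorphism import rooted_tree_isomorphism

    t1 = nx.Graph([(0, 1), (0, 2)])
    t2 = nx.Graph([("a", "b"), ("a", "c")])
    # One valid mapping, e.g. [(0, 'a'), (1, 'b'), (2, 'c')]; pair order may vary.
    mapping = rooted_tree_isomorphism(t1, 0, t2, "a")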
+ + Returns + ------- + isomorphism : list + A list of pairs in which the left element is a node in `t1` + and the right element is a node in `t2`. The pairs are in + arbitrary order. If the nodes in one tree is mapped to the names in + the other, then trees will be identical. Note that an isomorphism + will not necessarily be unique. + + If `t1` and `t2` are not isomorphic, then it returns the empty list. + """ + + assert nx.is_tree(t1) + assert nx.is_tree(t2) + + # get the rooted tree formed by combining them + # with unique names + (dT, namemap, newroot1, newroot2) = root_trees(t1, root1, t2, root2) + + # compute the distance from the root, with 0 for our + levels = assign_levels(dT, 0) + + # height + h = max(levels.values()) + + # collect nodes into a dict by level + L = group_by_levels(levels) + + # each node has a label, initially set to 0 + label = {v: 0 for v in dT} + # and also ordered_labels and ordered_children + # which will store ordered tuples + ordered_labels = {v: () for v in dT} + ordered_children = {v: () for v in dT} + + # nothing to do on last level so start on h-1 + # also nothing to do for our fake level 0, so skip that + for i in range(h - 1, 0, -1): + # update the ordered_labels and ordered_childen + # for any children + for v in L[i]: + # nothing to do if no children + if dT.out_degree(v) > 0: + # get all the pairs of labels and nodes of children + # and sort by labels + s = sorted((label[u], u) for u in dT.successors(v)) + + # invert to give a list of two tuples + # the sorted labels, and the corresponding children + ordered_labels[v], ordered_children[v] = list(zip(*s)) + + # now collect and sort the sorted ordered_labels + # for all nodes in L[i], carrying along the node + forlabel = sorted((ordered_labels[v], v) for v in L[i]) + + # now assign labels to these nodes, according to the sorted order + # starting from 0, where idential ordered_labels get the same label + current = 0 + for i, (ol, v) in enumerate(forlabel): + # advance to next label if not 0, and different from previous + if (i != 0) and (ol != forlabel[i - 1][0]): + current += 1 + label[v] = current + + # they are isomorphic if the labels of newroot1 and newroot2 are 0 + isomorphism = [] + if label[newroot1] == 0 and label[newroot2] == 0: + generate_isomorphism(newroot1, newroot2, isomorphism, ordered_children) + + # get the mapping back in terms of the old names + # return in sorted order for neatness + isomorphism = [(namemap[u], namemap[v]) for (u, v) in isomorphism] + + return isomorphism + + +@not_implemented_for("directed", "multigraph") +def tree_isomorphism(t1, t2): + """ + Given two undirected (or free) trees `t1` and `t2`, + this routine will determine if they are isomorphic. + It returns the isomorphism, a mapping of the nodes of `t1` onto the nodes + of `t2`, such that two trees are then identical. + + Note that two trees may have more than one isomorphism, and this + routine just returns one valid mapping. + + Parameters + ---------- + t1 : undirected NetworkX graph + One of the trees being compared + + t2 : undirected NetworkX graph + The other tree being compared + + Returns + ------- + isomorphism : list + A list of pairs in which the left element is a node in `t1` + and the right element is a node in `t2`. The pairs are in + arbitrary order. If the nodes in one tree is mapped to the names in + the other, then trees will be identical. Note that an isomorphism + will not necessarily be unique. + + If `t1` and `t2` are not isomorphic, then it returns the empty list. 
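An illustrative sketch (an editor's addition, not part of the original docstring; the graphs and labels are arbitrary):

    import networkx as nx
    from networkx.algorithms.isomorphism import tree_isomorphism

    t1 = nx.path_graph(4)                    # 0 - 1 - 2 - 3
    t2 = nx.relabel_nodes(t1, {0: "a", 1: "b", 2: "c", 3: "d"})
    mapping = tree_isomorphism(t1, t2)       # a list of (t1 node, t2 node) pairs
    tree_isomorphism(t1, nx.star_graph(3))   # different degree sequence -> []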
+ + Notes + ----- + This runs in O(n*log(n)) time for trees with n nodes. + """ + + assert nx.is_tree(t1) + assert nx.is_tree(t2) + + # To be isomrophic, t1 and t2 must have the same number of nodes. + if nx.number_of_nodes(t1) != nx.number_of_nodes(t2): + return [] + + # Another shortcut is that the sorted degree sequences need to be the same. + degree_sequence1 = sorted(d for (n, d) in t1.degree()) + degree_sequence2 = sorted(d for (n, d) in t2.degree()) + + if degree_sequence1 != degree_sequence2: + return [] + + # A tree can have either 1 or 2 centers. + # If the number doesn't match then t1 and t2 are not isomorphic. + center1 = nx.center(t1) + center2 = nx.center(t2) + + if len(center1) != len(center2): + return [] + + # If there is only 1 center in each, then use it. + if len(center1) == 1: + return rooted_tree_isomorphism(t1, center1[0], t2, center2[0]) + + # If there both have 2 centers, then try the first for t1 + # with the first for t2. + attemps = rooted_tree_isomorphism(t1, center1[0], t2, center2[0]) + + # If that worked we're done. + if len(attemps) > 0: + return attemps + + # Otherwise, try center1[0] with the center2[1], and see if that works + return rooted_tree_isomorphism(t1, center1[0], t2, center2[1]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py new file mode 100644 index 0000000..349129e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py @@ -0,0 +1,197 @@ +""" + Module to simplify the specification of user-defined equality functions for + node and edge attributes during isomorphism checks. + + During the construction of an isomorphism, the algorithm considers two + candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then + compared with respect to properties involving n1 and n2, and if the outcome + is good, then the candidate nodes are considered isomorphic. NetworkX + provides a simple mechanism for users to extend the comparisons to include + node and edge attributes. + + Node attributes are handled by the node_match keyword. When considering + n1 and n2, the algorithm passes their node attribute dictionaries to + node_match, and if it returns False, then n1 and n2 cannot be + considered to be isomorphic. + + Edge attributes are handled by the edge_match keyword. When considering + n1 and n2, the algorithm must verify that outgoing edges from n1 are + commensurate with the outgoing edges for n2. If the graph is directed, + then a similar check is also performed for incoming edges. + + Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from + G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge + between (n1, v1) and only one edge between (n2, v2). Those edge attribute + dictionaries are passed to edge_match, and if it returns False, then + n1 and n2 cannot be considered isomorphic. For multigraphs and + multidigraphs, there can be multiple edges between (n1, v1) and also + multiple edges between (n2, v2). Now, there must exist an isomorphism + from "all the edges between (n1, v1)" to "all the edges between (n2, v2)". + So, all of the edge attribute dictionaries are passed to edge_match, and + it must determine if there is an isomorphism between the two sets of edges. +""" + +from . 
import isomorphvf2 as vf2 + +__all__ = ["GraphMatcher", "DiGraphMatcher", "MultiGraphMatcher", "MultiDiGraphMatcher"] + + +def _semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + # Make sure the nodes match + if self.node_match is not None: + nm = self.node_match(self.G1.nodes[G1_node], self.G2.nodes[G2_node]) + if not nm: + return False + + # Make sure the edges match + if self.edge_match is not None: + + # Cached lookups + G1nbrs = self.G1_adj[G1_node] + G2nbrs = self.G2_adj[G2_node] + core_1 = self.core_1 + edge_match = self.edge_match + + for neighbor in G1nbrs: + # G1_node is not in core_1, so we must handle R_self separately + if neighbor == G1_node: + if G2_node in G2nbrs and not edge_match( + G1nbrs[G1_node], G2nbrs[G2_node] + ): + return False + elif neighbor in core_1: + G2_nbr = core_1[neighbor] + if G2_nbr in G2nbrs and not edge_match( + G1nbrs[neighbor], G2nbrs[G2_nbr] + ): + return False + # syntactic check has already verified that neighbors are symmetric + + return True + + +class GraphMatcher(vf2.GraphMatcher): + """VF2 isomorphism checker for undirected graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2: graph + The graphs to be tested. + + node_match: callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match: callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.GraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + semantic_feasibility = _semantic_feasibility + + +class DiGraphMatcher(vf2.DiGraphMatcher): + """VF2 isomorphism checker for directed graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2 : graph + The graphs to be tested. + + node_match : callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match : callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. 
The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.DiGraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + + # Test node_match and also test edge_match on successors + feasible = _semantic_feasibility(self, G1_node, G2_node) + if not feasible: + return False + + # Test edge_match on predecessors + self.G1_adj = self.G1.pred + self.G2_adj = self.G2.pred + feasible = _semantic_feasibility(self, G1_node, G2_node) + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + return feasible + + +# The "semantics" of edge_match are different for multi(di)graphs, but +# the implementation is the same. So, technically we do not need to +# provide "multi" versions, but we do so to match NetworkX's base classes. + + +class MultiGraphMatcher(GraphMatcher): + """VF2 isomorphism checker for undirected multigraphs.""" + + pass + + +class MultiDiGraphMatcher(DiGraphMatcher): + """VF2 isomorphism checker for directed multigraphs.""" + + pass diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/__init__.py new file mode 100644 index 0000000..6009f00 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/__init__.py @@ -0,0 +1,2 @@ +from networkx.algorithms.link_analysis.hits_alg import * +from networkx.algorithms.link_analysis.pagerank_alg import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/hits_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/hits_alg.py new file mode 100644 index 0000000..2deb3f4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/hits_alg.py @@ -0,0 +1,399 @@ +"""Hubs and authorities analysis of graph structure. +""" +import networkx as nx + +__all__ = ["hits", "hits_numpy", "hits_scipy", "authority_matrix", "hub_matrix"] + + +def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + max_iter : integer, optional + Maximum number of iterations in power method. + + tol : float, optional + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of each node for power method iteration. + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. 
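The mutual reinforcement these values come from can be sketched in a few lines of NumPy (an editor's illustration of the idea, not the library's implementation; the iteration count and normalization are arbitrary choices):

    import numpy as np
    import networkx as nx

    G = nx.path_graph(4)
    A = nx.to_numpy_array(G)
    h = np.ones(len(G))
    for _ in range(100):                 # power iteration
        a = A.T @ h                      # authorities collect incoming endorsements
        h = A @ a                        # hubs are scored by what they point at
        a, h = a / a.sum(), h / h.sum()  # keep the vectors normalized
    # a and h now approximate the authorities/hubs returned by nx.hits(G)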
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> h, a = nx.hits(G) + + Notes + ----- + The eigenvector calculation is done by the power iteration method + and has no guarantee of convergence. The iteration will stop + after max_iter iterations or an error tolerance of + number_of_nodes(G)*tol has been reached. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-32, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. + """ + import numpy as np + import scipy as sp + import scipy.sparse.linalg # call as sp.sparse.linalg + + if len(G) == 0: + return {}, {} + A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) + + if nstart is None: + _, _, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol) + else: + nstart = np.array(list(nstart.values())) + _, _, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) + + a = vt.flatten().real + h = A @ a + if normalized: + h /= h.sum() + a /= a.sum() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities + + +def _hits_python(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): + if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)): + raise Exception("hits() not defined for graphs with multiedges.") + if len(G) == 0: + return {}, {} + # choose fixed starting vector if not given + if nstart is None: + h = dict.fromkeys(G, 1.0 / G.number_of_nodes()) + else: + h = nstart + # normalize starting vector + s = 1.0 / sum(h.values()) + for k in h: + h[k] *= s + for _ in range(max_iter): # power iteration: make up to max_iter iterations + hlast = h + h = dict.fromkeys(hlast.keys(), 0) + a = dict.fromkeys(hlast.keys(), 0) + # this "matrix multiply" looks odd because it is + # doing a left multiply a^T=hlast^T*G + for n in h: + for nbr in G[n]: + a[nbr] += hlast[n] * G[n][nbr].get("weight", 1) + # now multiply h=Ga + for n in h: + for nbr in G[n]: + h[n] += a[nbr] * G[n][nbr].get("weight", 1) + # normalize vector + s = 1.0 / max(h.values()) + for n in h: + h[n] *= s + # normalize vector + s = 1.0 / max(a.values()) + for n in a: + a[n] *= s + # check convergence, l1 norm + err = sum(abs(h[n] - hlast[n]) for n in h) + if err < tol: + break + else: + raise nx.PowerIterationFailedConvergence(max_iter) + if normalized: + s = 1.0 / sum(a.values()) + for n in a: + a[n] *= s + s = 1.0 / sum(h.values()) + for n in h: + h[n] *= s + return h, a + + +def authority_matrix(G, nodelist=None): + """Returns the HITS authority matrix. + + .. deprecated:: 2.6 + """ + import warnings + + msg = ( + "\nauthority_matrix is deprecated as of version 2.6 and will be removed " + "in version 3.0.\n" + "The authority matrix can be computed by::\n" + " >>> M = nx.to_numpy_array(G, nodelist=nodelist)\n" + " >>> M.T @ M" + ) + warnings.warn(msg, DeprecationWarning) + M = nx.to_numpy_array(G, nodelist=nodelist) + return M.T @ M + + +def hub_matrix(G, nodelist=None): + """Returns the HITS hub matrix. + + .. 
deprecated:: 2.6
+    """
+    import warnings
+
+    msg = (
+        "\nhub_matrix is deprecated as of version 2.6 and will be removed "
+        "in version 3.0.\n"
+        "The hub matrix can be computed by::\n"
+        "    >>> M = nx.to_numpy_array(G, nodelist=nodelist)\n"
+        "    >>> M @ M.T"
+    )
+    warnings.warn(msg, DeprecationWarning)
+    M = nx.to_numpy_array(G, nodelist=nodelist)
+    return M @ M.T
+
+
+def hits_numpy(G, normalized=True):
+    """Returns HITS hubs and authorities values for nodes.
+
+    .. deprecated:: 2.6
+
+       hits_numpy is deprecated and will be removed in networkx 3.0.
+
+    The HITS algorithm computes two numbers for a node.
+    Authorities estimates the node value based on the incoming links.
+    Hubs estimates the node value based on outgoing links.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool (default=True)
+       Normalize results by the sum of all of the values.
+
+    Returns
+    -------
+    (hubs,authorities) : two-tuple of dictionaries
+       Two dictionaries keyed by node containing the hub and authority
+       values.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+
+    The `hubs` and `authorities` are given by the eigenvectors corresponding to the
+    maximum eigenvalues of the hubs_matrix and the authority_matrix, respectively.
+
+    The ``hubs`` and ``authority`` matrices are computed from the adjacency
+    matrix:
+
+    >>> adj_ary = nx.to_numpy_array(G)
+    >>> hubs_matrix = adj_ary @ adj_ary.T
+    >>> authority_matrix = adj_ary.T @ adj_ary
+
+    `hits_numpy` maps the eigenvector corresponding to the maximum eigenvalue
+    of the respective matrices to the nodes in `G`:
+
+    >>> hubs, authority = nx.hits_numpy(G)
+
+    Notes
+    -----
+    The eigenvector calculation uses NumPy's interface to LAPACK.
+
+    The HITS algorithm was designed for directed graphs but this
+    algorithm does not check if the input graph is directed and will
+    execute on undirected graphs.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Jon Kleinberg,
+       Authoritative sources in a hyperlinked environment
+       Journal of the ACM 46 (5): 604-632, 1999.
+       doi:10.1145/324133.324140.
+       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
+    """
+    import warnings
+
+    import numpy as np
+
+    warnings.warn(
+        (
+            "networkx.hits_numpy is deprecated and will be removed "
+            "in NetworkX 3.0, use networkx.hits instead."
+        ),
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    if len(G) == 0:
+        return {}, {}
+    adj_ary = nx.to_numpy_array(G)
+    # Hub matrix
+    H = adj_ary @ adj_ary.T
+    e, ev = np.linalg.eig(H)
+    h = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
+    # Authority matrix
+    A = adj_ary.T @ adj_ary
+    e, ev = np.linalg.eig(A)
+    a = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
+    if normalized:
+        h /= h.sum()
+        a /= a.sum()
+    else:
+        h /= h.max()
+        a /= a.max()
+    hubs = dict(zip(G, map(float, h)))
+    authorities = dict(zip(G, map(float, a)))
+    return hubs, authorities
+
+
+def hits_scipy(G, max_iter=100, tol=1.0e-6, nstart=None, normalized=True):
+    """Returns HITS hubs and authorities values for nodes.
+
+    .. deprecated:: 2.6
+
+       hits_scipy is deprecated and will be removed in networkx 3.0
+
+    The HITS algorithm computes two numbers for a node.
+    Authorities estimates the node value based on the incoming links.
+    Hubs estimates the node value based on outgoing links.
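+
+    In matrix terms, each power-iteration step updates the authority vector
+    from the hubs and vice versa, roughly ``a ~ A.T @ h`` followed by
+    ``h ~ A @ a`` with rescaling. A minimal sketch of one such step
+    (illustrative only, not the exact implementation below):
+
+    >>> import numpy as np
+    >>> A = nx.to_numpy_array(nx.path_graph(4))
+    >>> h = np.ones(len(A)) / len(A)
+    >>> a = A.T @ h
+    >>> a /= a.max()
+    >>> h = A @ a
+    >>> h /= h.max()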
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    max_iter : integer, optional
+      Maximum number of iterations in power method.
+
+    tol : float, optional
+      Error tolerance used to check convergence in power method iteration.
+
+    nstart : dictionary, optional
+      Starting value of each node for power method iteration.
+
+    normalized : bool (default=True)
+       Normalize results by the sum of all of the values.
+
+    Returns
+    -------
+    (hubs,authorities) : two-tuple of dictionaries
+       Two dictionaries keyed by node containing the hub and authority
+       values.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> h, a = nx.hits(G)
+
+    Notes
+    -----
+    This implementation uses SciPy sparse matrices.
+
+    The eigenvector calculation is done by the power iteration method
+    and has no guarantee of convergence. The iteration will stop
+    after max_iter iterations or an error tolerance of
+    number_of_nodes(G)*tol has been reached.
+
+    The HITS algorithm was designed for directed graphs but this
+    algorithm does not check if the input graph is directed and will
+    execute on undirected graphs.
+
+    Raises
+    ------
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Jon Kleinberg,
+       Authoritative sources in a hyperlinked environment
+       Journal of the ACM 46 (5): 604-632, 1999.
+       doi:10.1145/324133.324140.
+       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
+    """
+    import warnings
+
+    import numpy as np
+
+    warnings.warn(
+        (
+            "networkx.hits_scipy is deprecated and will be removed "
+            "in NetworkX 3.0, use networkx.hits instead."
+        ),
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    if len(G) == 0:
+        return {}, {}
+    A = nx.to_scipy_sparse_array(G, nodelist=list(G))
+    (n, _) = A.shape  # should be square
+    ATA = A.T @ A  # authority matrix
+    # choose fixed starting vector if not given
+    if nstart is None:
+        x = np.ones((n, 1)) / n
+    else:
+        x = np.array([nstart.get(n, 0) for n in list(G)], dtype=float)
+        x /= x.sum()
+
+    # power iteration on authority matrix
+    i = 0
+    while True:
+        xlast = x
+        x = ATA @ x
+        x /= x.max()
+        # check convergence, l1 norm
+        err = np.absolute(x - xlast).sum()
+        if err < tol:
+            break
+        if i > max_iter:
+            raise nx.PowerIterationFailedConvergence(max_iter)
+        i += 1
+
+    a = x.flatten()
+    h = A @ a
+    if normalized:
+        h /= h.sum()
+        a /= a.sum()
+    hubs = dict(zip(G, map(float, h)))
+    authorities = dict(zip(G, map(float, a)))
+    return hubs, authorities
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py
new file mode 100644
index 0000000..ece444c
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py
@@ -0,0 +1,508 @@
+"""PageRank analysis of graph structure. """
+from warnings import warn
+
+import networkx as nx
+
+__all__ = ["pagerank", "pagerank_numpy", "pagerank_scipy", "google_matrix"]
+
+
+def pagerank(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    """Returns the PageRank of the nodes in the graph.
+
+    PageRank computes a ranking of the nodes in the graph G based on
+    the structure of the incoming links. 
It was originally designed as
+    an algorithm to rank web pages.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float, optional
+      Damping parameter for PageRank, default=0.85.
+
+    personalization: dict, optional
+      The "personalization vector" consisting of a dictionary with a
+      key some subset of graph nodes and personalization value each of those.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    max_iter : integer, optional
+      Maximum number of iterations in power method eigenvalue solver.
+
+    tol : float, optional
+      Error tolerance used to check convergence in power method solver.
+
+    nstart : dictionary, optional
+      Starting value of PageRank iteration for each node.
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes under google_matrix). It may be common to have the
+      dangling dict to be the same as the personalization dict.
+
+
+    Returns
+    -------
+    pagerank : dictionary
+       Dictionary of nodes with PageRank as value
+
+    Examples
+    --------
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> pr = nx.pagerank(G, alpha=0.9)
+
+    Notes
+    -----
+    The eigenvector calculation is done by the power iteration method
+    and has no guarantee of convergence. The iteration will stop after
+    an error tolerance of ``len(G) * tol`` has been reached. If the
+    number of iterations exceeds `max_iter`, a
+    :exc:`networkx.exception.PowerIterationFailedConvergence` exception
+    is raised.
+
+    The PageRank algorithm was designed for directed graphs but this
+    algorithm does not check if the input graph is directed and will
+    execute on undirected graphs by converting each edge in the
+    directed graph to two edges.
+
+    See Also
+    --------
+    pagerank_numpy, pagerank_scipy, google_matrix
+
+    Raises
+    ------
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+       The PageRank citation ranking: Bringing order to the Web. 
1999
+       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+
+    """
+    return pagerank_scipy(
+        G, alpha, personalization, max_iter, tol, nstart, weight, dangling
+    )
+
+
+def _pagerank_python(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    if len(G) == 0:
+        return {}
+
+    D = G.to_directed()
+
+    # Create a copy in (right) stochastic form
+    W = nx.stochastic_graph(D, weight=weight)
+    N = W.number_of_nodes()
+
+    # Choose fixed starting vector if not given
+    if nstart is None:
+        x = dict.fromkeys(W, 1.0 / N)
+    else:
+        # Normalized nstart vector
+        s = sum(nstart.values())
+        x = {k: v / s for k, v in nstart.items()}
+
+    if personalization is None:
+        # Assign uniform personalization vector if not given
+        p = dict.fromkeys(W, 1.0 / N)
+    else:
+        s = sum(personalization.values())
+        p = {k: v / s for k, v in personalization.items()}
+
+    if dangling is None:
+        # Use personalization vector if dangling vector not specified
+        dangling_weights = p
+    else:
+        s = sum(dangling.values())
+        dangling_weights = {k: v / s for k, v in dangling.items()}
+    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
+
+    # power iteration: make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast.keys(), 0)
+        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
+        for n in x:
+            # this matrix multiply looks odd because it is
+            # doing a left multiply x^T=xlast^T*W
+            for _, nbr, wt in W.edges(n, data=weight):
+                x[nbr] += alpha * xlast[n] * wt
+            x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
+        # check convergence, l1 norm
+        err = sum(abs(x[n] - xlast[n]) for n in x)
+        if err < N * tol:
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+def google_matrix(
+    G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None
+):
+    """Returns the Google matrix of the graph.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float
+      The damping factor.
+
+    personalization: dict, optional
+      The "personalization vector" consisting of a dictionary with a
+      key some subset of graph nodes and personalization value each of those.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    nodelist : list, optional
+      The rows and columns are ordered according to the nodes in nodelist.
+      If nodelist is None, then the ordering is produced by G.nodes().
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes below). It may be common to have the dangling dict to
+      be the same as the personalization dict.
+
+    Returns
+    -------
+    A : NumPy matrix
+       Google matrix of the graph
+
+    Notes
+    -----
+    The matrix returned represents the transition matrix that describes the
+    Markov chain used in PageRank. 
For PageRank to converge to a unique
+    solution (i.e., a unique stationary distribution in a Markov chain), the
+    transition matrix must be irreducible. In other words, it must be that
+    there exists a path between every pair of nodes in the graph, or else there
+    is the potential of "rank sinks."
+
+    This implementation works with Multi(Di)Graphs. For multigraphs the
+    weight between two nodes is set to be the sum of all edge weights
+    between those nodes.
+
+    See Also
+    --------
+    pagerank, pagerank_numpy, pagerank_scipy
+    """
+    # TODO: Remove this warning in version 3.0
+    import warnings
+
+    import numpy as np
+
+    warnings.warn(
+        "google_matrix will return an np.ndarray instead of a np.matrix in\n"
+        "NetworkX version 3.0.",
+        FutureWarning,
+        stacklevel=2,
+    )
+
+    if nodelist is None:
+        nodelist = list(G)
+
+    A = nx.to_numpy_array(G, nodelist=nodelist, weight=weight)
+    N = len(G)
+    if N == 0:
+        # TODO: Remove np.asmatrix wrapper in version 3.0
+        return np.asmatrix(A)
+
+    # Personalization vector
+    if personalization is None:
+        p = np.repeat(1.0 / N, N)
+    else:
+        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)
+        if p.sum() == 0:
+            raise ZeroDivisionError
+        p /= p.sum()
+
+    # Dangling nodes
+    if dangling is None:
+        dangling_weights = p
+    else:
+        # Convert the dangling dictionary into an array in nodelist order
+        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
+        dangling_weights /= dangling_weights.sum()
+    dangling_nodes = np.where(A.sum(axis=1) == 0)[0]
+
+    # Assign dangling_weights to any dangling nodes (nodes with no out links)
+    A[dangling_nodes] = dangling_weights
+
+    A /= A.sum(axis=1)[:, np.newaxis]  # Normalize rows to sum to 1
+
+    # TODO: Remove np.asmatrix wrapper in version 3.0
+    return np.asmatrix(alpha * A + (1 - alpha) * p)
+
+
+def pagerank_numpy(G, alpha=0.85, personalization=None, weight="weight", dangling=None):
+    """Returns the PageRank of the nodes in the graph.
+
+    PageRank computes a ranking of the nodes in the graph G based on
+    the structure of the incoming links. It was originally designed as
+    an algorithm to rank web pages.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float, optional
+      Damping parameter for PageRank, default=0.85.
+
+    personalization: dict, optional
+      The "personalization vector" consisting of a dictionary with a
+      key some subset of graph nodes and personalization value each of those.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes under google_matrix). It may be common to have the
+      dangling dict to be the same as the personalization dict.
+
+    Returns
+    -------
+    pagerank : dictionary
+       Dictionary of nodes with PageRank as value.
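+
+    The result is, up to normalization, the principal left eigenvector of
+    the Google matrix. A minimal sketch of that relationship (illustrative
+    only, mirroring the implementation below):
+
+    >>> import numpy as np
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> M = np.asarray(nx.google_matrix(G, alpha=0.9))
+    >>> w, v = np.linalg.eig(M.T)
+    >>> pr = np.abs(v[:, np.argmax(w.real)].real)
+    >>> pr /= pr.sum()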
+
+    Examples
+    --------
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> pr = nx.pagerank_numpy(G, alpha=0.9)
+
+    Notes
+    -----
+    The eigenvector calculation uses NumPy's interface to the LAPACK
+    eigenvalue solvers. This will be the fastest and most accurate
+    for small graphs.
+
+    This implementation works with Multi(Di)Graphs. For multigraphs the
+    weight between two nodes is set to be the sum of all edge weights
+    between those nodes.
+
+    See Also
+    --------
+    pagerank, pagerank_scipy, google_matrix
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+       The PageRank citation ranking: Bringing order to the Web. 1999
+       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+    """
+    msg = "networkx.pagerank_numpy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead."
+    warn(msg, DeprecationWarning, stacklevel=2)
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    M = google_matrix(
+        G, alpha, personalization=personalization, weight=weight, dangling=dangling
+    )
+    # use numpy LAPACK solver
+    eigenvalues, eigenvectors = np.linalg.eig(M.T)
+    ind = np.argmax(eigenvalues)
+    # eigenvector of largest eigenvalue is at ind, normalized
+    largest = np.array(eigenvectors[:, ind]).flatten().real
+    norm = largest.sum()
+    return dict(zip(G, map(float, largest / norm)))
+
+
+def pagerank_scipy(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    """Returns the PageRank of the nodes in the graph.
+
+    PageRank computes a ranking of the nodes in the graph G based on
+    the structure of the incoming links. It was originally designed as
+    an algorithm to rank web pages.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float, optional
+      Damping parameter for PageRank, default=0.85.
+
+    personalization: dict, optional
+      The "personalization vector" consisting of a dictionary with a
+      key some subset of graph nodes and personalization value each of those.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    max_iter : integer, optional
+      Maximum number of iterations in power method eigenvalue solver.
+
+    tol : float, optional
+      Error tolerance used to check convergence in power method solver.
+
+    nstart : dictionary, optional
+      Starting value of PageRank iteration for each node.
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes under google_matrix). It may be common to have the
+      dangling dict to be the same as the personalization dict.
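+
+      For example, to route all of the dangling mass to a single node
+      (values here are purely illustrative):
+
+      >>> D = nx.DiGraph([(1, 2), (3, 1)])  # node 2 has no out-edges
+      >>> pr = nx.pagerank_scipy(D, dangling={1: 1.0})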
+ + Returns + ------- + pagerank : dictionary + Dictionary of nodes with PageRank as value + + Examples + -------- + >>> G = nx.DiGraph(nx.path_graph(4)) + >>> pr = nx.pagerank_scipy(G, alpha=0.9) + + Notes + ----- + The eigenvector calculation uses power iteration with a SciPy + sparse matrix representation. + + This implementation works with Multi(Di)Graphs. For multigraphs the + weight between two nodes is set to be the sum of all edge weights + between those nodes. + + See Also + -------- + pagerank, pagerank_numpy, google_matrix + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry, + The PageRank citation ranking: Bringing order to the Web. 1999 + http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf + """ + msg = "networkx.pagerank_scipy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead." + warn(msg, DeprecationWarning, stacklevel=2) + import numpy as np + import scipy as sp + import scipy.sparse # call as sp.sparse + + N = len(G) + if N == 0: + return {} + + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + S = A.sum(axis=1) + S[S != 0] = 1.0 / S[S != 0] + # TODO: csr_array + Q = sp.sparse.csr_array(sp.sparse.spdiags(S.T, 0, *A.shape)) + A = Q @ A + + # initial vector + if nstart is None: + x = np.repeat(1.0 / N, N) + else: + x = np.array([nstart.get(n, 0) for n in nodelist], dtype=float) + x /= x.sum() + + # Personalization vector + if personalization is None: + p = np.repeat(1.0 / N, N) + else: + p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float) + if p.sum() == 0: + raise ZeroDivisionError + p /= p.sum() + # Dangling nodes + if dangling is None: + dangling_weights = p + else: + # Convert the dangling dictionary into an array in nodelist order + dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float) + dangling_weights /= dangling_weights.sum() + is_dangling = np.where(S == 0)[0] + + # power iteration: make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = alpha * (x @ A + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p + # check convergence, l1 norm + err = np.absolute(x - xlast).sum() + if err < N * tol: + return dict(zip(nodelist, map(float, x))) + raise nx.PowerIterationFailedConvergence(max_iter) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py new file mode 100644 index 0000000..df5f0da --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py @@ -0,0 +1,89 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") +import scipy.sparse # call as sp.sparse + +from networkx.algorithms.link_analysis.hits_alg import _hits_python + +# Example 
from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." http://citeseer.ist.psu.edu/713792.html + + +class TestHITS: + @classmethod + def setup_class(cls): + + G = nx.DiGraph() + + edges = [(1, 3), (1, 5), (2, 1), (3, 5), (5, 4), (5, 3), (6, 5)] + + G.add_edges_from(edges, weight=1) + cls.G = G + cls.G.a = dict( + zip(sorted(G), [0.000000, 0.000000, 0.366025, 0.133975, 0.500000, 0.000000]) + ) + cls.G.h = dict( + zip(sorted(G), [0.366025, 0.000000, 0.211325, 0.000000, 0.211325, 0.211325]) + ) + + def test_hits_numpy(self): + G = self.G + h, a = nx.hits_numpy(G) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + @pytest.mark.parametrize("hits_alg", (nx.hits, nx.hits_scipy, _hits_python)) + def test_hits(self, hits_alg): + G = self.G + h, a = hits_alg(G, tol=1.0e-08) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + nstart = {i: 1.0 / 2 for i in G} + h, a = hits_alg(G, nstart=nstart) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.hits(G) == ({}, {}) + assert nx.hits_numpy(G) == ({}, {}) + assert _hits_python(G) == ({}, {}) + assert nx.hits_scipy(G) == ({}, {}) + assert nx.authority_matrix(G).shape == (0, 0) + assert nx.hub_matrix(G).shape == (0, 0) + + def test_hits_not_convergent(self): + G = nx.path_graph(50) + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.hits_scipy(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.hits_scipy(G, max_iter=0) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=0) + with pytest.raises(ValueError): + nx.hits(G, max_iter=0) + with pytest.raises(sp.sparse.linalg.ArpackNoConvergence): + nx.hits(G, max_iter=1) + + +@pytest.mark.parametrize("hits_alg", (nx.hits_numpy, nx.hits_scipy)) +def test_deprecation_warnings(hits_alg): + """Make sure deprecation warnings are raised. + + To be removed when deprecations expire. + """ + G = nx.DiGraph(nx.path_graph(4)) + with pytest.warns(DeprecationWarning): + hits_alg(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py new file mode 100644 index 0000000..4c9722f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py @@ -0,0 +1,220 @@ +import random + +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +from networkx.algorithms.link_analysis.pagerank_alg import _pagerank_python + +# Example from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." 
http://citeseer.ist.psu.edu/713792.html + + +class TestPageRank: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + # 2 is a dangling node + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ] + G.add_edges_from(edges) + cls.G = G + cls.G.pagerank = dict( + zip( + sorted(G), + [ + 0.03721197, + 0.05395735, + 0.04150565, + 0.37508082, + 0.20599833, + 0.28624589, + ], + ) + ) + cls.dangling_node_index = 1 + cls.dangling_edges = {1: 2, 2: 3, 3: 0, 4: 0, 5: 0, 6: 0} + cls.G.dangling_pagerank = dict( + zip( + sorted(G), + [0.10844518, 0.18618601, 0.0710892, 0.2683668, 0.15919783, 0.20671497], + ) + ) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank(self, alg): + G = self.G + p = alg(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + nstart = {n: random.random() for n in G} + p = alg(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank_max_iter(self, alg): + with pytest.raises(nx.PowerIterationFailedConvergence): + alg(self.G, max_iter=0) + + def test_numpy_pagerank(self): + G = self.G + p = nx.pagerank_numpy(G, alpha=0.9) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + def test_google_matrix(self): + G = self.G + M = nx.google_matrix(G, alpha=0.9, nodelist=sorted(G)) + _, ev = np.linalg.eig(M.T) + p = np.array(ev[:, 0] / ev[:, 0].sum())[:, 0] + for (a, b) in zip(p, self.G.pagerank.values()): + assert a == pytest.approx(b, abs=1e-7) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.pagerank_numpy)) + def test_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {0: 1, 1: 1, 2: 4, 3: 4} + answer = { + 0: 0.23246732615667579, + 1: 0.23246732615667579, + 2: 0.267532673843324, + 3: 0.2675326738433241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.google_matrix)) + def test_zero_personalization_vector(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 0} + pytest.raises(ZeroDivisionError, alg, G, personalization=personalize) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_one_nonzero_personalization_value(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_incomplete_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + def test_dangling_matrix(self): + """ + Tests that the google_matrix doesn't change except for the dangling + nodes. 
+ """ + G = self.G + dangling = self.dangling_edges + dangling_sum = sum(dangling.values()) + M1 = nx.google_matrix(G, personalization=dangling) + M2 = nx.google_matrix(G, personalization=dangling, dangling=dangling) + for i in range(len(G)): + for j in range(len(G)): + if i == self.dangling_node_index and (j + 1) in dangling: + assert M2[i, j] == pytest.approx( + dangling[j + 1] / dangling_sum, abs=1e-4 + ) + else: + assert M2[i, j] == pytest.approx(M1[i, j], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.pagerank_numpy)) + def test_dangling_pagerank(self, alg): + pr = alg(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.pagerank(G) == {} + assert _pagerank_python(G) == {} + assert nx.pagerank_numpy(G) == {} + assert nx.google_matrix(G).shape == (0, 0) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_multigraph(self, alg): + G = nx.MultiGraph() + G.add_edges_from([(1, 2), (1, 2), (1, 2), (2, 3), (2, 3), ("3", 3), ("3", 3)]) + answer = { + 1: 0.21066048614468322, + 2: 0.3395308825985378, + 3: 0.28933951385531687, + "3": 0.16046911740146227, + } + p = alg(G) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + +class TestPageRankScipy(TestPageRank): + def test_scipy_pagerank(self): + G = self.G + p = nx.pagerank_scipy(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + personalize = {n: random.random() for n in G} + p = nx.pagerank_scipy(G, alpha=0.9, tol=1.0e-08, personalization=personalize) + + nstart = {n: random.random() for n in G} + p = nx.pagerank_scipy(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + def test_scipy_pagerank_max_iter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.pagerank_scipy(self.G, max_iter=0) + + def test_dangling_scipy_pagerank(self): + pr = nx.pagerank_scipy(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty_scipy(self): + G = nx.Graph() + assert nx.pagerank_scipy(G) == {} + + +@pytest.mark.parametrize("pagerank_alg", (nx.pagerank_numpy, nx.pagerank_scipy)) +def test_deprecation_warnings(pagerank_alg): + """Make sure deprecation warnings are raised. + + To be removed when deprecations expire. + """ + G = nx.DiGraph(nx.path_graph(4)) + with pytest.warns(DeprecationWarning): + pagerank_alg(G, alpha=0.9) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/link_prediction.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_prediction.py new file mode 100644 index 0000000..de29e24 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/link_prediction.py @@ -0,0 +1,596 @@ +""" +Link prediction algorithms. +""" + + +from math import log + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "resource_allocation_index", + "jaccard_coefficient", + "adamic_adar_index", + "preferential_attachment", + "cn_soundarajan_hopcroft", + "ra_index_soundarajan_hopcroft", + "within_inter_cluster", + "common_neighbor_centrality", +] + + +def _apply_prediction(G, func, ebunch=None): + """Applies the given function to each edge in the specified iterable + of edges. + + `G` is an instance of :class:`networkx.Graph`. 
+ + `func` is a function on two inputs, each of which is a node in the + graph. The function can return anything, but it should return a + value representing a prediction of the likelihood of a "link" + joining the two nodes. + + `ebunch` is an iterable of pairs of nodes. If not specified, all + non-edges in the graph `G` will be used. + + """ + if ebunch is None: + ebunch = nx.non_edges(G) + return ((u, v, func(u, v)) for u, v in ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def resource_allocation_index(G, ebunch=None): + r"""Compute the resource allocation index of all node pairs in ebunch. + + Resource allocation index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Resource allocation index will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all non-existent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their resource allocation index. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.75000000 + (2, 3) -> 0.75000000 + + References + ---------- + .. [1] T. Zhou, L. Lu, Y.-C. Zhang. + Predicting missing links via local information. + Eur. Phys. J. B 71 (2009) 623. + https://arxiv.org/pdf/0901.0553.pdf + """ + + def predict(u, v): + return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def jaccard_coefficient(G, ebunch=None): + r"""Compute the Jaccard coefficient of all node pairs in ebunch. + + Jaccard coefficient of nodes `u` and `v` is defined as + + .. math:: + + \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Jaccard coefficient will be computed for each pair of nodes + given in the iterable. The pairs must be given as 2-tuples + (u, v) where u and v are nodes in the graph. If ebunch is None + then all non-existent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Jaccard coefficient. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.60000000 + (2, 3) -> 0.60000000 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). 
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
+    """
+
+    def predict(u, v):
+        union_size = len(set(G[u]) | set(G[v]))
+        if union_size == 0:
+            return 0
+        return len(list(nx.common_neighbors(G, u, v))) / union_size
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def adamic_adar_index(G, ebunch=None):
+    r"""Compute the Adamic-Adar index of all node pairs in ebunch.
+
+    Adamic-Adar index of `u` and `v` is defined as
+
+    .. math::
+
+        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.
+    This index leads to zero-division for nodes only connected via self-loops.
+    It is intended to be used when no self-loops are present.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        Adamic-Adar index will be computed for each pair of nodes given
+        in the iterable. The pairs must be given as 2-tuples (u, v)
+        where u and v are nodes in the graph. If ebunch is None then all
+        non-existent edges in the graph will be used.
+        Default value: None.
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their Adamic-Adar index.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p:.8f}")
+    (0, 1) -> 2.16404256
+    (2, 3) -> 2.16404256
+
+    References
+    ----------
+    .. [1] D. Liben-Nowell, J. Kleinberg.
+           The Link Prediction Problem for Social Networks (2004).
+           http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
+    """
+
+    def predict(u, v):
+        return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v))
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def common_neighbor_centrality(G, ebunch=None, alpha=0.8):
+    r"""Return the CCPA score for each pair of nodes.
+
+    Compute the Common Neighbor and Centrality based Parameterized Algorithm (CCPA)
+    score of all node pairs in ebunch.
+
+    CCPA score of `u` and `v` is defined as
+
+    .. math::
+
+        \alpha \cdot (|\Gamma (u){\cap }^{}\Gamma (v)|)+(1-\alpha )\cdot \frac{N}{{d}_{uv}}
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the
+    set of neighbors of $v$, $\alpha$ is a parameter that varies between [0,1],
+    $N$ denotes the total number of nodes in the graph and ${d}_{uv}$ denotes the
+    shortest distance between $u$ and $v$.
+
+    This algorithm is based on two vital properties of nodes, namely the number
+    of common neighbors and their centrality. Common neighbor refers to the common
+    nodes between two nodes. Centrality refers to the prestige that a node enjoys
+    in a network.
+
+    .. seealso::
+
+        :func:`common_neighbors`
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        The CCPA score will be computed for each pair of
+        nodes given in the iterable. The pairs must be given as
+        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
+        is None then all non-existent edges in the graph will be used.
+        Default value: None.
+
+    alpha : float, optional (default = 0.8)
+        Parameter controlling the relative weight of the common-neighbor
+        term against the centrality (inverse-distance) term. Values for
+        alpha should normally be between 0 and 1. 
The default is 0.8 because the authors of the original paper
+        reported the best performance at that value across their datasets.
+        Default value: 0.8
+
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their Common Neighbor and Centrality based
+        Parameterized Algorithm (CCPA) score.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.common_neighbor_centrality(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p}")
+    (0, 1) -> 3.4000000000000004
+    (2, 3) -> 3.4000000000000004
+
+    References
+    ----------
+    .. [1] Ahmad, I., Akhtar, M.U., Noor, S. et al.
+           Missing Link Prediction using Common Neighbor and Centrality based Parameterized Algorithm.
+           Sci Rep 10, 364 (2020).
+           https://doi.org/10.1038/s41598-019-57304-y
+    """
+
+    # When alpha == 1, the CCPA score simplifies to the number of common neighbors.
+    if alpha == 1:
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self links are not supported")
+
+            return sum(1 for _ in nx.common_neighbors(G, u, v))
+
+    else:
+        spl = dict(nx.shortest_path_length(G))
+        inf = float("inf")
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self links are not supported")
+            path_len = spl[u].get(v, inf)
+
+            return alpha * sum(1 for _ in nx.common_neighbors(G, u, v)) + (
+                1 - alpha
+            ) * (G.number_of_nodes() / path_len)
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def preferential_attachment(G, ebunch=None):
+    r"""Compute the preferential attachment score of all node pairs in ebunch.
+
+    Preferential attachment score of `u` and `v` is defined as
+
+    .. math::
+
+        |\Gamma(u)| |\Gamma(v)|
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        Preferential attachment score will be computed for each pair of
+        nodes given in the iterable. The pairs must be given as
+        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
+        is None then all non-existent edges in the graph will be used.
+        Default value: None.
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their preferential attachment score.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p}")
+    (0, 1) -> 16
+    (2, 3) -> 16
+
+    References
+    ----------
+    .. [1] D. Liben-Nowell, J. Kleinberg.
+           The Link Prediction Problem for Social Networks (2004).
+           http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
+    """
+
+    def predict(u, v):
+        return G.degree(u) * G.degree(v)
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def cn_soundarajan_hopcroft(G, ebunch=None, community="community"):
+    r"""Count the number of common neighbors of all node pairs in ebunch
+        using community information.
+
+    For two nodes $u$ and $v$, this function computes the number of
+    common neighbors plus a bonus of one for each common neighbor
+    belonging to the same community as $u$ and $v$. Mathematically,
+
+    .. 
math:: + + |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w) + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + non-existent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 0 + >>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 2) -> 2 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + cnbors = list(nx.common_neighbors(G, u, v)) + neighbors = ( + sum(_community(G, w, community) == Cu for w in cnbors) if Cu == Cv else 0 + ) + return len(cnbors) + neighbors + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Compute the resource allocation index of all node pairs in + ebunch using community information. + + For two nodes $u$ and $v$, this function computes the resource + allocation index considering only common neighbors belonging to the + same community as $u$ and $v$. Mathematically, + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|} + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + non-existent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. 
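+
+        Pairs whose endpoints lie in different communities score 0; only
+        common neighbors inside the shared community contribute, each
+        adding ``1 / |\Gamma(w)|``.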
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 1 + >>> G.nodes[3]["community"] = 0 + >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 3) -> 0.50000000 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = nx.common_neighbors(G, u, v) + return sum(1 / G.degree(w) for w in cnbors if _community(G, w, community) == Cu) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"): + """Compute the ratio of within- and inter-cluster common neighbors + of all node pairs in ebunch. + + For two nodes `u` and `v`, if a common neighbor `w` belongs to the + same community as them, `w` is considered as within-cluster common + neighbor of `u` and `v`. Otherwise, it is considered as + inter-cluster common neighbor of `u` and `v`. The ratio between the + size of the set of within- and inter-cluster common neighbors is + defined as the WIC measure. [1]_ + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The WIC measure will be computed for each pair of nodes given in + the iterable. The pairs must be given as 2-tuples (u, v) where + u and v are nodes in the graph. If ebunch is None then all + non-existent edges in the graph will be used. + Default value: None. + + delta : float, optional (default = 0.001) + Value to prevent division by zero in case there is no + inter-cluster common neighbor between two nodes. See [1]_ for + details. Default value: 0.001. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their WIC measure. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 1 + >>> G.nodes[2]["community"] = 0 + >>> G.nodes[3]["community"] = 0 + >>> G.nodes[4]["community"] = 0 + >>> preds = nx.within_inter_cluster(G, [(0, 4)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.99800200 + >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.33333333 + + References + ---------- + .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes. + Link prediction in complex networks based on cluster information. 
+ In Proceedings of the 21st Brazilian conference on Advances in + Artificial Intelligence (SBIA'12) + https://doi.org/10.1007/978-3-642-34459-6_10 + """ + if delta <= 0: + raise nx.NetworkXAlgorithmError("Delta must be greater than zero") + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = set(nx.common_neighbors(G, u, v)) + within = {w for w in cnbors if _community(G, w, community) == Cu} + inter = cnbors - within + return len(within) / (len(inter) + delta) + + return _apply_prediction(G, predict, ebunch) + + +def _community(G, u, community): + """Get the community of the given node.""" + node_u = G.nodes[u] + try: + return node_u[community] + except KeyError as err: + raise nx.NetworkXAlgorithmError("No community information") from err diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/lowest_common_ancestors.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/lowest_common_ancestors.py new file mode 100644 index 0000000..fa5c9db --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/lowest_common_ancestors.py @@ -0,0 +1,365 @@ +"""Algorithms for finding the lowest common ancestor of trees and DAGs.""" +from collections import defaultdict +from collections.abc import Mapping, Set +from itertools import chain, count + +import networkx as nx +from networkx.utils import UnionFind, arbitrary_element, not_implemented_for + +__all__ = [ + "all_pairs_lowest_common_ancestor", + "tree_all_pairs_lowest_common_ancestor", + "lowest_common_ancestor", +] + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None): + r"""Yield the lowest common ancestor for sets of pairs in a tree. + + Parameters + ---------- + G : NetworkX directed graph (must be a tree) + + root : node, optional (default: None) + The root of the subtree to operate on. + If None, assume the entire graph has exactly one source and use that. + + pairs : iterable or iterator of pairs of nodes, optional (default: None) + The pairs of interest. If None, Defaults to all pairs of nodes + under `root` that have a lowest common ancestor. + + Returns + ------- + lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes + in `pairs` and `lca` is their lowest common ancestor. + + Notes + ----- + Only defined on non-null trees represented with directed edges from + parents to children. Uses Tarjan's off-line lowest-common-ancestors + algorithm. Runs in time $O(4 \times (V + E + P))$ time, where 4 is the largest + value of the inverse Ackermann function likely to ever come up in actual + use, and $P$ is the number of pairs requested (or $V^2$ if all are needed). + + Tarjan, R. E. (1979), "Applications of path compression on balanced trees", + Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161. + + See Also + -------- + all_pairs_lowest_common_ancestor: similar routine for general DAGs + lowest_common_ancestor: just a single pair for general DAGs + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + elif None in G: + raise nx.NetworkXError("None is not a valid node.") + + # Index pairs of interest for efficient lookup from either side. + if pairs is not None: + pair_dict = defaultdict(set) + # See note on all_pairs_lowest_common_ancestor. 
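+        # Materializing `pairs` into a set lets us check membership in both
+        # orientations below without consuming a one-shot iterator twice.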
+ if not isinstance(pairs, (Mapping, Set)): + pairs = set(pairs) + for u, v in pairs: + for n in (u, v): + if n not in G: + msg = f"The node {str(n)} is not in the digraph." + raise nx.NodeNotFound(msg) + pair_dict[u].add(v) + pair_dict[v].add(u) + + # If root is not specified, find the exactly one node with in degree 0 and + # use it. Raise an error if none are found, or more than one is. Also check + # for any nodes with in degree larger than 1, which would imply G is not a + # tree. + if root is None: + for n, deg in G.in_degree: + if deg == 0: + if root is not None: + msg = "No root specified and tree has multiple sources." + raise nx.NetworkXError(msg) + root = n + elif deg > 1: + msg = "Tree LCA only defined on trees; use DAG routine." + raise nx.NetworkXError(msg) + if root is None: + raise nx.NetworkXError("Graph contains a cycle.") + + # Iterative implementation of Tarjan's offline lca algorithm + # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition) + uf = UnionFind() + ancestors = {} + for node in G: + ancestors[node] = uf[node] + + colors = defaultdict(bool) + for node in nx.dfs_postorder_nodes(G, root): + colors[node] = True + for v in pair_dict[node] if pairs is not None else G: + if colors[v]: + # If the user requested both directions of a pair, give it. + # Otherwise, just give one. + if pairs is not None and (node, v) in pairs: + yield (node, v), ancestors[uf[v]] + if pairs is None or (v, node) in pairs: + yield (v, node), ancestors[uf[v]] + if node != root: + parent = arbitrary_element(G.pred[node]) + uf.union(parent, node) + ancestors[uf[parent]] = parent + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def lowest_common_ancestor(G, node1, node2, default=None): + """Compute the lowest common ancestor of the given pair of nodes. + + Parameters + ---------- + G : NetworkX directed graph + + node1, node2 : nodes in the graph. + + default : object + Returned if no common ancestor between `node1` and `node2` + + Returns + ------- + The lowest common ancestor of node1 and node2, + or default if they have no common ancestors. + + Notes + ----- + Only defined on non-null directed acyclic graphs. + Takes n log(n) time in the size of the graph. + See `all_pairs_lowest_common_ancestor` when you have + more than one pair of nodes of interest. + + See Also + -------- + tree_all_pairs_lowest_common_ancestor + all_pairs_lowest_common_ancestor + """ + ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) + if ans: + assert len(ans) == 1 + return ans[0][1] + else: + return default + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def all_pairs_lowest_common_ancestor(G, pairs=None): + """Compute the lowest common ancestor for pairs of nodes. + + Parameters + ---------- + G : NetworkX directed graph + + pairs : iterable of pairs of nodes, optional (default: all pairs) + The pairs of nodes of interest. + If None, will find the LCA of all pairs of nodes. + + Returns + ------- + An iterator over ((node1, node2), lca) where (node1, node2) are + the pairs specified and lca is a lowest common ancestor of the pair. + Note that for the default of all pairs in G, we consider + unordered pairs, e.g. you will not get both (b, a) and (a, b). + + Notes + ----- + Only defined on non-null directed acyclic graphs. + + Uses the $O(n^3)$ ancestor-list algorithm from: + M. A. Bender, M. Farach-Colton, G. Pemmasani, S. Skiena, P. Sumazin. + "Lowest common ancestors in trees and directed acyclic graphs." 
+ Journal of Algorithms, 57(2): 75-94, 2005. + + See Also + -------- + tree_all_pairs_lowest_common_ancestor + lowest_common_ancestor + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("LCA only defined on directed acyclic graphs.") + elif len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + elif None in G: + raise nx.NetworkXError("None is not a valid node.") + + # The copy isn't ideal, neither is the switch-on-type, but without it users + # passing an iterable will encounter confusing errors, and itertools.tee + # does not appear to handle builtin types efficiently (IE, it materializes + # another buffer rather than just creating listoperators at the same + # offset). The Python documentation notes use of tee is unadvised when one + # is consumed before the other. + # + # This will always produce correct results and avoid unnecessary + # copies in many common cases. + # + if not isinstance(pairs, (Mapping, Set)) and pairs is not None: + pairs = set(pairs) + + # Convert G into a dag with a single root by adding a node with edges to + # all sources iff necessary. + sources = [n for n, deg in G.in_degree if deg == 0] + if len(sources) == 1: + root = sources[0] + super_root = None + else: + G = G.copy() + # find unused node + root = -1 + while root in G: + root -= 1 + # use that as the super_root below all sources + super_root = root + for source in sources: + G.add_edge(root, source) + + # Start by computing a spanning tree, and the DAG of all edges not in it. + # We will then use the tree lca algorithm on the spanning tree, and use + # the DAG to figure out the set of tree queries necessary. + spanning_tree = nx.dfs_tree(G, root) + dag = nx.DiGraph( + (u, v) + for u, v in G.edges + if u not in spanning_tree or v not in spanning_tree[u] + ) + + # Ensure that both the dag and the spanning tree contains all nodes in G, + # even nodes that are disconnected in the dag. + spanning_tree.add_nodes_from(G) + dag.add_nodes_from(G) + + counter = count() + + # Necessary to handle graphs consisting of a single node and no edges. + root_distance = {root: next(counter)} + + for edge in nx.bfs_edges(spanning_tree, root): + for node in edge: + if node not in root_distance: + root_distance[node] = next(counter) + + # Index the position of all nodes in the Euler tour so we can efficiently + # sort lists and merge in tour order. + euler_tour_pos = {} + for node in nx.depth_first_search.dfs_preorder_nodes(G, root): + if node not in euler_tour_pos: + euler_tour_pos[node] = next(counter) + + # Generate the set of all nodes of interest in the pairs. + pairset = set() + if pairs is not None: + pairset = set(chain.from_iterable(pairs)) + + for n in pairset: + if n not in G: + msg = f"The node {str(n)} is not in the digraph." + raise nx.NodeNotFound(msg) + + # Generate the transitive closure over the dag (not G) of all nodes, and + # sort each node's closure set by order of first appearance in the Euler + # tour. + ancestors = {} + for v in dag: + if pairs is None or v in pairset: + my_ancestors = nx.ancestors(G, v) + my_ancestors.add(v) + ancestors[v] = sorted(my_ancestors, key=euler_tour_pos.get) + + def _compute_dag_lca_from_tree_values(tree_lca, dry_run): + """Iterate through the in-order merge for each pair of interest. + + We do this to answer the user's query, but it is also used to + avoid generating unnecessary tree entries when the user only + needs some pairs. 
+ """ + for (node1, node2) in pairs if pairs is not None else tree_lca: + best_root_distance = None + best = None + + indices = [0, 0] + ancestors_by_index = [ancestors[node1], ancestors[node2]] + + def get_next_in_merged_lists(indices): + """Returns index of the list containing the next item + + Next order refers to the merged order. + Index can be 0 or 1 (or None if exhausted). + """ + index1, index2 = indices + if index1 >= len(ancestors[node1]) and index2 >= len(ancestors[node2]): + return None + elif index1 >= len(ancestors[node1]): + return 1 + elif index2 >= len(ancestors[node2]): + return 0 + elif ( + euler_tour_pos[ancestors[node1][index1]] + < euler_tour_pos[ancestors[node2][index2]] + ): + return 0 + else: + return 1 + + # Find the LCA by iterating through the in-order merge of the two + # nodes of interests' ancestor sets. In principle, we need to + # consider all pairs in the Cartesian product of the ancestor sets, + # but by the restricted min range query reduction we are guaranteed + # that one of the pairs of interest is adjacent in the merged list + # iff one came from each list. + i = get_next_in_merged_lists(indices) + cur = ancestors_by_index[i][indices[i]], i + while i is not None: + prev = cur + indices[i] += 1 + i = get_next_in_merged_lists(indices) + if i is not None: + cur = ancestors_by_index[i][indices[i]], i + + # Two adjacent entries must not be from the same list + # in order for their tree LCA to be considered. + if cur[1] != prev[1]: + tree_node1, tree_node2 = prev[0], cur[0] + if (tree_node1, tree_node2) in tree_lca: + ans = tree_lca[tree_node1, tree_node2] + else: + ans = tree_lca[tree_node2, tree_node1] + if not dry_run and ( + best is None or root_distance[ans] > best_root_distance + ): + best_root_distance = root_distance[ans] + best = ans + + # If the LCA is super_root, there is no LCA in the user's graph. + if not dry_run and (super_root is None or best != super_root): + yield (node1, node2), best + + # Generate the spanning tree lca for all pairs. This doesn't make sense to + # do incrementally since we are using a linear time offline algorithm for + # tree lca. + if pairs is None: + # We want all pairs so we'll need the entire tree. + tree_lca = dict(tree_all_pairs_lowest_common_ancestor(spanning_tree, root)) + else: + # We only need the merged adjacent pairs by seeing which queries the + # algorithm needs then generating them in a single pass. + tree_lca = defaultdict(int) + for _ in _compute_dag_lca_from_tree_values(tree_lca, True): + pass + + # Replace the bogus default tree values with the real ones. + for (pair, lca) in tree_all_pairs_lowest_common_ancestor( + spanning_tree, root, tree_lca + ): + tree_lca[pair] = lca + + # All precomputations complete. Now we just need to give the user the pairs + # they asked for, or all pairs if they want them all. 
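# --- Editor's aside (illustrative only, not part of the committed diff): a minimal
# --- sketch of the DAG entry points whose machinery is walked through above
# --- (super-root, spanning tree, Euler-tour ordering), assuming networkx is importable.
import networkx as nx

# A small DAG with two sources (0 and 1), so a super-root is added internally.
D = nx.DiGraph([(0, 2), (1, 2), (0, 3), (2, 4), (3, 4)])
print(nx.lowest_common_ancestor(D, 2, 3))  # -> 0
print(dict(nx.all_pairs_lowest_common_ancestor(D, pairs=[(2, 3), (2, 4)])))
# -> {(2, 3): 0, (2, 4): 2}   (a node counts as an ancestor of itself)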
+ return _compute_dag_lca_from_tree_values(tree_lca, False) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/matching.py new file mode 100644 index 0000000..a4e7e9c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/matching.py @@ -0,0 +1,1123 @@ +"""Functions for computing and verifying matchings in a graph.""" +from collections import Counter +from itertools import combinations, repeat + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "is_matching", + "is_maximal_matching", + "is_perfect_matching", + "max_weight_matching", + "min_weight_matching", + "maximal_matching", +] + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def maximal_matching(G): + r"""Find a maximal matching in the graph. + + A matching is a subset of edges in which no node occurs more than once. + A maximal matching cannot add more edges and still be a matching. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]) + >>> sorted(nx.maximal_matching(G)) + [(1, 2), (3, 5)] + + Notes + ----- + The algorithm greedily selects a maximal matching M of the graph G + (i.e. no superset of M exists). It runs in $O(|E|)$ time. + """ + matching = set() + nodes = set() + for edge in G.edges(): + # If the edge isn't covered, add it to the matching + # then remove neighborhood of u and v from consideration. + u, v = edge + if u not in nodes and v not in nodes and u != v: + matching.add(edge) + nodes.update(edge) + return matching + + +def matching_dict_to_set(matching): + """Converts matching dict format to matching set format + + Converts a dictionary representing a matching (as returned by + :func:`max_weight_matching`) to a set representing a matching (as + returned by :func:`maximal_matching`). + + In the definition of maximal matching adopted by NetworkX, + self-loops are not allowed, so the provided dictionary is expected + to never have any mapping from a key to itself. However, the + dictionary is expected to have mirrored key/value pairs, for + example, key ``u`` with value ``v`` and key ``v`` with value ``u``. + + """ + edges = set() + for edge in matching.items(): + u, v = edge + if (v, u) in edges or edge in edges: + continue + if u == v: + raise nx.NetworkXError(f"Selfloops cannot appear in matchings {edge}") + edges.add(edge) + return edges + + +def is_matching(G, matching): + """Return True if ``matching`` is a valid matching of ``G`` + + A *matching* in a graph is a set of edges in which no two distinct + edges share a common endpoint. Each node is incident to at most one + edge in the matching. The edges are said to be independent. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid matching + in the graph. + + Raises + ------ + NetworkXError + If the proposed matching has an edge to a node not in G. + Or if the matching is not a collection of 2-tuple edges. 
+ + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]) + >>> nx.is_maximal_matching(G, {1: 3, 2: 4}) # using dict to represent matching + True + + >>> nx.is_matching(G, {(1, 3), (2, 4)}) # using set to represent matching + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + return True + + +def is_maximal_matching(G, matching): + """Return True if ``matching`` is a maximal matching of ``G`` + + A *maximal matching* in a graph is a matching in which adding any + edge would cause the set to no longer be a valid matching. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid maximal + matching in the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> nx.is_maximal_matching(G, {(1, 2), (3, 4)}) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + # If the given set is not a matching, then it is not a maximal matching. + edges = set() + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + edges.add(edge) + edges.add((v, u)) + # A matching is maximal if adding any new edge from G to it + # causes the resulting set to match some node twice. + # Be careful to check for adding selfloops + for u, v in G.edges: + if (u, v) not in edges: + # could add edge (u, v) to edges and have a bigger matching + if u not in nodes and v not in nodes and u != v: + return False + return True + + +def is_perfect_matching(G, matching): + """Return True if ``matching`` is a perfect matching for ``G`` + + A *perfect matching* in a graph is a matching in which exactly one edge + is incident upon each vertex. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid perfect + matching in the graph. 
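# --- Editor's aside (illustrative only, not part of the committed diff): a minimal
# --- sketch contrasting the greedy maximal matching with a maximum-cardinality
# --- matching (max_weight_matching is defined further down in this file), plus the
# --- validity checks documented above. Assumes networkx is importable.
import networkx as nx

# Edge insertion order matters for the greedy routine: it grabs (1, 2) first,
# which blocks both remaining edges.
G = nx.Graph([(1, 2), (0, 1), (2, 3)])
print(nx.maximal_matching(G))                               # -> {(1, 2)}
print(len(nx.max_weight_matching(G, maxcardinality=True)))  # -> 2
print(nx.is_maximal_matching(G, {(1, 2)}))                  # -> True (no edge can be added)
print(nx.is_perfect_matching(G, {(1, 2)}))                  # -> False (nodes 0 and 3 unmatched)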
+ + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5), (4, 6)]) + >>> my_match = {1: 2, 3: 5, 4: 6} + >>> nx.is_perfect_matching(G, my_match) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + return len(nodes) == len(G) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def min_weight_matching(G, maxcardinality=None, weight="weight"): + """Computing a minimum-weight maximal matching of G. + + Use the maximum-weight algorithm with edge weights subtracted + from the maximum weight of all edges. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. + + This method replaces the edge weights with 1 plus the maximum edge weight + minus the original edge weight. + + new_weight = (max_weight + 1) - edge_weight + + then runs :func:`max_weight_matching` with the new weights. + The max weight matching with these new weights corresponds + to the min weight matching using the original weights. + Adding 1 to the max edge weight keeps all edge weights positive + and as integers if they started as integers. + + You might worry that adding 1 to each weight would make the algorithm + favor matchings with more edges. But we use the parameter + `maxcardinality=True` in `max_weight_matching` to ensure that the + number of edges in the competing matchings are the same and thus + the optimum does not change due to changes in the number of edges. + + Read the documentation of `max_weight_matching` for more information. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + maxcardinality: bool + .. deprecated:: 2.8 + The `maxcardinality` parameter will be removed in v3.0. + It doesn't make sense to set it to False when looking for + a min weight matching because then we just return no edges. + + If maxcardinality is True, compute the maximum-cardinality matching + with minimum weight among all maximum-cardinality matchings. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + Returns + ------- + matching : set + A minimal weight matching of the graph. + + See Also + -------- + max_weight_matching + """ + if maxcardinality not in (True, None): + raise nx.NetworkXError( + "The argument maxcardinality does not make sense " + "in the context of minimum weight matchings." + "It is deprecated and will be removed in v3.0." 
+ ) + if len(G.edges) == 0: + return max_weight_matching(G, maxcardinality=True, weight=weight) + G_edges = G.edges(data=weight, default=1) + max_weight = 1 + max(w for _, _, w in G_edges) + InvG = nx.Graph() + edges = ((u, v, max_weight - w) for u, v, w in G_edges) + InvG.add_weighted_edges_from(edges, weight=weight) + return max_weight_matching(InvG, maxcardinality=True, weight=weight) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +def max_weight_matching(G, maxcardinality=False, weight="weight"): + """Compute a maximum-weighted matching of G. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + maxcardinality: bool, optional (default=False) + If maxcardinality is True, compute the maximum-cardinality matching + with maximum weight among all maximum-cardinality matchings. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)] + >>> G.add_weighted_edges_from(edges) + >>> sorted(nx.max_weight_matching(G)) + [(2, 4), (5, 3)] + + Notes + ----- + If G has edges with weight attributes the edge data are used as + weight values else the weights are assumed to be 1. + + This function takes time O(number_of_nodes ** 3). + + If all edge weights are integers, the algorithm uses only integer + computations. If floating point weights are used, the algorithm + could return a slightly suboptimal matching due to numeric + precision errors. + + This method is based on the "blossom" method for finding augmenting + paths and the "primal-dual" method for finding a matching of maximum + weight, both methods invented by Jack Edmonds [1]_. + + Bipartite graphs can also be matched using the functions present in + :mod:`networkx.algorithms.bipartite.matching`. + + References + ---------- + .. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs", + Zvi Galil, ACM Computing Surveys, 1986. + """ + # + # The algorithm is taken from "Efficient Algorithms for Finding Maximum + # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986. + # It is based on the "blossom" method for finding augmenting paths and + # the "primal-dual" method for finding a matching of maximum weight, both + # methods invented by Jack Edmonds. + # + # A C program for maximum weight matching by Ed Rothberg was used + # extensively to validate this new code. + # + # Many terms used in the code comments are explained in the paper + # by Galil. You will probably need the paper to make sense of this code. + # + + class NoNode: + """Dummy value which is different from any node.""" + + pass + + class Blossom: + """Representation of a non-trivial blossom or sub-blossom.""" + + __slots__ = ["childs", "edges", "mybestedges"] + + # b.childs is an ordered list of b's sub-blossoms, starting with + # the base and going round the blossom. + + # b.edges is the list of b's connecting edges, such that + # b.edges[i] = (v, w) where v is a vertex in b.childs[i] + # and w is a vertex in b.childs[wrap(i+1)]. 
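# --- Editor's aside (illustrative only, not part of the committed diff): a minimal
# --- sketch of the weight inversion performed by min_weight_matching (defined above)
# --- before it delegates to max_weight_matching, reusing the sample weights from the
# --- max_weight_matching docstring. Assumes networkx is importable.
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)])
# Maximum total weight picks the heavy edges (per the docstring above).
print(sorted(nx.max_weight_matching(G)))  # -> [(2, 4), (5, 3)]
# Minimum total weight among maximum-cardinality matchings picks (2, 3) and (4, 5),
# total weight 1 + 3 = 4; tuple orientation in the returned set may vary.
print(nx.min_weight_matching(G))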
+ + # If b is a top-level S-blossom, + # b.mybestedges is a list of least-slack edges to neighbouring + # S-blossoms, or None if no such list has been computed yet. + # This is used for efficient computation of delta3. + + # Generate the blossom's leaf vertices. + def leaves(self): + for t in self.childs: + if isinstance(t, Blossom): + yield from t.leaves() + else: + yield t + + # Get a list of vertices. + gnodes = list(G) + if not gnodes: + return set() # don't bother with empty graphs + + # Find the maximum edge weight. + maxweight = 0 + allinteger = True + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i != j and wt > maxweight: + maxweight = wt + allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long")) + + # If v is a matched vertex, mate[v] is its partner vertex. + # If v is a single vertex, v does not occur as a key in mate. + # Initially all vertices are single; updated during augmentation. + mate = {} + + # If b is a top-level blossom, + # label.get(b) is None if b is unlabeled (free), + # 1 if b is an S-blossom, + # 2 if b is a T-blossom. + # The label of a vertex is found by looking at the label of its top-level + # containing blossom. + # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable + # from an S-vertex outside the blossom. + # Labels are assigned during a stage and reset after each augmentation. + label = {} + + # If b is a labeled top-level blossom, + # labeledge[b] = (v, w) is the edge through which b obtained its label + # such that w is a vertex in b, or None if b's base vertex is single. + # If w is a vertex inside a T-blossom and label[w] == 2, + # labeledge[w] = (v, w) is an edge through which w is reachable from + # outside the blossom. + labeledge = {} + + # If v is a vertex, inblossom[v] is the top-level blossom to which v + # belongs. + # If v is a top-level vertex, inblossom[v] == v since v is itself + # a (trivial) top-level blossom. + # Initially all vertices are top-level trivial blossoms. + inblossom = dict(zip(gnodes, gnodes)) + + # If b is a sub-blossom, + # blossomparent[b] is its immediate parent (sub-)blossom. + # If b is a top-level blossom, blossomparent[b] is None. + blossomparent = dict(zip(gnodes, repeat(None))) + + # If b is a (sub-)blossom, + # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom). + blossombase = dict(zip(gnodes, gnodes)) + + # If w is a free vertex (or an unreached vertex inside a T-blossom), + # bestedge[w] = (v, w) is the least-slack edge from an S-vertex, + # or None if there is no such edge. + # If b is a (possibly trivial) top-level S-blossom, + # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom + # (v inside b), or None if there is no such edge. + # This is used for efficient computation of delta2 and delta3. + bestedge = {} + + # If v is a vertex, + # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual + # optimization problem (if all edge weights are integers, multiplication + # by two ensures that all values remain integers throughout the algorithm). + # Initially, u(v) = maxweight / 2. + dualvar = dict(zip(gnodes, repeat(maxweight))) + + # If b is a non-trivial blossom, + # blossomdual[b] = z(b) where z(b) is b's variable in the dual + # optimization problem. + blossomdual = {} + + # If (v, w) in allowedge or (w, v) in allowedg, then the edge + # (v, w) is known to have zero slack in the optimization problem; + # otherwise the edge may or may not have zero slack. 
+ allowedge = {} + + # Queue of newly discovered S-vertices. + queue = [] + + # Return 2 * slack of edge (v, w) (does not work inside blossoms). + def slack(v, w): + return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1) + + # Assign label t to the top-level blossom containing vertex w, + # coming through an edge from vertex v. + def assignLabel(w, t, v): + b = inblossom[w] + assert label.get(w) is None and label.get(b) is None + label[w] = label[b] = t + if v is not None: + labeledge[w] = labeledge[b] = (v, w) + else: + labeledge[w] = labeledge[b] = None + bestedge[w] = bestedge[b] = None + if t == 1: + # b became an S-vertex/blossom; add it(s vertices) to the queue. + if isinstance(b, Blossom): + queue.extend(b.leaves()) + else: + queue.append(b) + elif t == 2: + # b became a T-vertex/blossom; assign label S to its mate. + # (If b is a non-trivial blossom, its base is the only vertex + # with an external mate.) + base = blossombase[b] + assignLabel(mate[base], 1, base) + + # Trace back from vertices v and w to discover either a new blossom + # or an augmenting path. Return the base vertex of the new blossom, + # or NoNode if an augmenting path was found. + def scanBlossom(v, w): + # Trace back from v and w, placing breadcrumbs as we go. + path = [] + base = NoNode + while v is not NoNode: + # Look for a breadcrumb in v's blossom or put a new breadcrumb. + b = inblossom[v] + if label[b] & 4: + base = blossombase[b] + break + assert label[b] == 1 + path.append(b) + label[b] = 5 + # Trace one step back. + if labeledge[b] is None: + # The base of blossom b is single; stop tracing this path. + assert blossombase[b] not in mate + v = NoNode + else: + assert labeledge[b][0] == mate[blossombase[b]] + v = labeledge[b][0] + b = inblossom[v] + assert label[b] == 2 + # b is a T-blossom; trace one more step back. + v = labeledge[b][0] + # Swap v and w so that we alternate between both paths. + if w is not NoNode: + v, w = w, v + # Remove breadcrumbs. + for b in path: + label[b] = 1 + # Return base vertex, if we found one. + return base + + # Construct a new blossom with given base, through S-vertices v and w. + # Label the new blossom as S; set its dual variable to zero; + # relabel its T-vertices to S and add them to the queue. + def addBlossom(base, v, w): + bb = inblossom[base] + bv = inblossom[v] + bw = inblossom[w] + # Create blossom. + b = Blossom() + blossombase[b] = base + blossomparent[b] = None + blossomparent[bb] = b + # Make list of sub-blossoms and their interconnecting edge endpoints. + b.childs = path = [] + b.edges = edgs = [(v, w)] + # Trace back from v to base. + while bv != bb: + # Add bv to the new blossom. + blossomparent[bv] = b + path.append(bv) + edgs.append(labeledge[bv]) + assert label[bv] == 2 or ( + label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]] + ) + # Trace one step back. + v = labeledge[bv][0] + bv = inblossom[v] + # Add base sub-blossom; reverse lists. + path.append(bb) + path.reverse() + edgs.reverse() + # Trace back from w to base. + while bw != bb: + # Add bw to the new blossom. + blossomparent[bw] = b + path.append(bw) + edgs.append((labeledge[bw][1], labeledge[bw][0])) + assert label[bw] == 2 or ( + label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]] + ) + # Trace one step back. + w = labeledge[bw][0] + bw = inblossom[w] + # Set label to S. + assert label[bb] == 1 + label[b] = 1 + labeledge[b] = labeledge[bb] + # Set dual variable to zero. + blossomdual[b] = 0 + # Relabel vertices. 
+ for v in b.leaves(): + if label[inblossom[v]] == 2: + # This T-vertex now turns into an S-vertex because it becomes + # part of an S-blossom; add it to the queue. + queue.append(v) + inblossom[v] = b + # Compute b.mybestedges. + bestedgeto = {} + for bv in path: + if isinstance(bv, Blossom): + if bv.mybestedges is not None: + # Walk this subblossom's least-slack edges. + nblist = bv.mybestedges + # The sub-blossom won't need this data again. + bv.mybestedges = None + else: + # This subblossom does not have a list of least-slack + # edges; get the information from the vertices. + nblist = [ + (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w + ] + else: + nblist = [(bv, w) for w in G.neighbors(bv) if bv != w] + for k in nblist: + (i, j) = k + if inblossom[j] == b: + i, j = j, i + bj = inblossom[j] + if ( + bj != b + and label.get(bj) == 1 + and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj])) + ): + bestedgeto[bj] = k + # Forget about least-slack edge of the subblossom. + bestedge[bv] = None + b.mybestedges = list(bestedgeto.values()) + # Select bestedge[b]. + mybestedge = None + bestedge[b] = None + for k in b.mybestedges: + kslack = slack(*k) + if mybestedge is None or kslack < mybestslack: + mybestedge = k + mybestslack = kslack + bestedge[b] = mybestedge + + # Expand the given top-level blossom. + def expandBlossom(b, endstage): + # Convert sub-blossoms into top-level blossoms. + for s in b.childs: + blossomparent[s] = None + if isinstance(s, Blossom): + if endstage and blossomdual[s] == 0: + # Recursively expand this sub-blossom. + expandBlossom(s, endstage) + else: + for v in s.leaves(): + inblossom[v] = s + else: + inblossom[s] = s + # If we expand a T-blossom during a stage, its sub-blossoms must be + # relabeled. + if (not endstage) and label.get(b) == 2: + # Start at the sub-blossom through which the expanding + # blossom obtained its label, and relabel sub-blossoms untili + # we reach the base. + # Figure out through which sub-blossom the expanding blossom + # obtained its label initially. + entrychild = inblossom[labeledge[b][1]] + # Decide in which direction we will go round the blossom. + j = b.childs.index(entrychild) + if j & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + v, w = labeledge[b] + while j != 0: + # Relabel the T-sub-blossom. + if jstep == 1: + p, q = b.edges[j] + else: + q, p = b.edges[j - 1] + label[w] = None + label[q] = None + assignLabel(w, 2, v) + # Step to the next S-sub-blossom and note its forward edge. + allowedge[(p, q)] = allowedge[(q, p)] = True + j += jstep + if jstep == 1: + v, w = b.edges[j] + else: + w, v = b.edges[j - 1] + # Step to the next T-sub-blossom. + allowedge[(v, w)] = allowedge[(w, v)] = True + j += jstep + # Relabel the base T-sub-blossom WITHOUT stepping through to + # its mate (so don't call assignLabel). + bw = b.childs[j] + label[w] = label[bw] = 2 + labeledge[w] = labeledge[bw] = (v, w) + bestedge[bw] = None + # Continue along the blossom until we get back to entrychild. + j += jstep + while b.childs[j] != entrychild: + # Examine the vertices of the sub-blossom to see whether + # it is reachable from a neighbouring S-vertex outside the + # expanding blossom. + bv = b.childs[j] + if label.get(bv) == 1: + # This sub-blossom just got label S through one of its + # neighbours; leave it be. 
+ j += jstep + continue + if isinstance(bv, Blossom): + for v in bv.leaves(): + if label.get(v): + break + else: + v = bv + # If the sub-blossom contains a reachable vertex, assign + # label T to the sub-blossom. + if label.get(v): + assert label[v] == 2 + assert inblossom[v] == bv + label[v] = None + label[mate[blossombase[bv]]] = None + assignLabel(v, 2, labeledge[v][0]) + j += jstep + # Remove the expanded blossom entirely. + label.pop(b, None) + labeledge.pop(b, None) + bestedge.pop(b, None) + del blossomparent[b] + del blossombase[b] + del blossomdual[b] + + # Swap matched/unmatched edges over an alternating path through blossom b + # between vertex v and the base vertex. Keep blossom bookkeeping + # consistent. + def augmentBlossom(b, v): + # Bubble up through the blossom tree from vertex v to an immediate + # sub-blossom of b. + t = v + while blossomparent[t] != b: + t = blossomparent[t] + # Recursively deal with the first sub-blossom. + if isinstance(t, Blossom): + augmentBlossom(t, v) + # Decide in which direction we will go round the blossom. + i = j = b.childs.index(t) + if i & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + while j != 0: + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if jstep == 1: + w, x = b.edges[j] + else: + x, w = b.edges[j - 1] + if isinstance(t, Blossom): + augmentBlossom(t, w) + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if isinstance(t, Blossom): + augmentBlossom(t, x) + # Match the edge connecting those sub-blossoms. + mate[w] = x + mate[x] = w + # Rotate the list of sub-blossoms to put the new base at the front. + b.childs = b.childs[i:] + b.childs[:i] + b.edges = b.edges[i:] + b.edges[:i] + blossombase[b] = blossombase[b.childs[0]] + assert blossombase[b] == v + + # Swap matched/unmatched edges over an alternating path between two + # single vertices. The augmenting path runs through S-vertices v and w. + def augmentMatching(v, w): + for (s, j) in ((v, w), (w, v)): + # Match vertex s to vertex j. Then trace back from s + # until we find a single vertex, swapping matched and unmatched + # edges as we go. + while 1: + bs = inblossom[s] + assert label[bs] == 1 + assert (labeledge[bs] is None and blossombase[bs] not in mate) or ( + labeledge[bs][0] == mate[blossombase[bs]] + ) + # Augment through the S-blossom from s to base. + if isinstance(bs, Blossom): + augmentBlossom(bs, s) + # Update mate[s] + mate[s] = j + # Trace one step back. + if labeledge[bs] is None: + # Reached single vertex; stop. + break + t = labeledge[bs][0] + bt = inblossom[t] + assert label[bt] == 2 + # Trace one more step back. + s, j = labeledge[bt] + # Augment through the T-blossom from j to base. + assert blossombase[bt] == t + if isinstance(bt, Blossom): + augmentBlossom(bt, j) + # Update mate[j] + mate[j] = s + + # Verify that the optimum solution has been reached. + def verifyOptimum(): + if maxcardinality: + # Vertices may have negative dual; + # find a constant non-negative number to add to all vertex duals. + vdualoffset = max(0, -min(dualvar.values())) + else: + vdualoffset = 0 + # 0. all dual variables are non-negative + assert min(dualvar.values()) + vdualoffset >= 0 + assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0 + # 0. all edges have non-negative slack and + # 1. 
all matched edges have zero slack; + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i == j: + continue # ignore self-loops + s = dualvar[i] + dualvar[j] - 2 * wt + iblossoms = [i] + jblossoms = [j] + while blossomparent[iblossoms[-1]] is not None: + iblossoms.append(blossomparent[iblossoms[-1]]) + while blossomparent[jblossoms[-1]] is not None: + jblossoms.append(blossomparent[jblossoms[-1]]) + iblossoms.reverse() + jblossoms.reverse() + for (bi, bj) in zip(iblossoms, jblossoms): + if bi != bj: + break + s += 2 * blossomdual[bi] + assert s >= 0 + if mate.get(i) == j or mate.get(j) == i: + assert mate[i] == j and mate[j] == i + assert s == 0 + # 2. all single vertices have zero dual value; + for v in gnodes: + assert (v in mate) or dualvar[v] + vdualoffset == 0 + # 3. all blossoms with positive dual value are full. + for b in blossomdual: + if blossomdual[b] > 0: + assert len(b.edges) % 2 == 1 + for (i, j) in b.edges[1::2]: + assert mate[i] == j and mate[j] == i + # Ok. + + # Main loop: continue until no further improvement is possible. + while 1: + + # Each iteration of this loop is a "stage". + # A stage finds an augmenting path and uses that to improve + # the matching. + + # Remove labels from top-level blossoms/vertices. + label.clear() + labeledge.clear() + + # Forget all about least-slack edges. + bestedge.clear() + for b in blossomdual: + b.mybestedges = None + + # Loss of labeling means that we can not be sure that currently + # allowable edges remain allowable throughout this stage. + allowedge.clear() + + # Make queue empty. + queue[:] = [] + + # Label single blossoms/vertices with S and put them in the queue. + for v in gnodes: + if (v not in mate) and label.get(inblossom[v]) is None: + assignLabel(v, 1, None) + + # Loop until we succeed in augmenting the matching. + augmented = 0 + while 1: + + # Each iteration of this loop is a "substage". + # A substage tries to find an augmenting path; + # if found, the path is used to improve the matching and + # the stage ends. If there is no augmenting path, the + # primal-dual method is used to pump some slack out of + # the dual variables. + + # Continue labeling until all vertices which are reachable + # through an alternating path have got a label. + while queue and not augmented: + + # Take an S vertex from the queue. + v = queue.pop() + assert label[inblossom[v]] == 1 + + # Scan its neighbours: + for w in G.neighbors(v): + if w == v: + continue # ignore self-loops + # w is a neighbour to v + bv = inblossom[v] + bw = inblossom[w] + if bv == bw: + # this edge is internal to a blossom; ignore it + continue + if (v, w) not in allowedge: + kslack = slack(v, w) + if kslack <= 0: + # edge k has zero slack => it is allowable + allowedge[(v, w)] = allowedge[(w, v)] = True + if (v, w) in allowedge: + if label.get(bw) is None: + # (C1) w is a free vertex; + # label w with T and label its mate with S (R12). + assignLabel(w, 2, v) + elif label.get(bw) == 1: + # (C2) w is an S-vertex (not in the same blossom); + # follow back-links to discover either an + # augmenting path or a new blossom. + base = scanBlossom(v, w) + if base is not NoNode: + # Found a new blossom; add it to the blossom + # bookkeeping and turn it into an S-blossom. + addBlossom(base, v, w) + else: + # Found an augmenting path; augment the + # matching and end this stage. 
+ augmentMatching(v, w) + augmented = 1 + break + elif label.get(w) is None: + # w is inside a T-blossom, but w itself has not + # yet been reached from outside the blossom; + # mark it as reached (we need this to relabel + # during T-blossom expansion). + assert label[bw] == 2 + label[w] = 2 + labeledge[w] = (v, w) + elif label.get(bw) == 1: + # keep track of the least-slack non-allowable edge to + # a different S-blossom. + if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]): + bestedge[bv] = (v, w) + elif label.get(w) is None: + # w is a free vertex (or an unreached vertex inside + # a T-blossom) but we can not reach it yet; + # keep track of the least-slack edge that reaches w. + if bestedge.get(w) is None or kslack < slack(*bestedge[w]): + bestedge[w] = (v, w) + + if augmented: + break + + # There is no augmenting path under these constraints; + # compute delta and reduce slack in the optimization problem. + # (Note that our vertex dual variables, edge slacks and delta's + # are pre-multiplied by two.) + deltatype = -1 + delta = deltaedge = deltablossom = None + + # Compute delta1: the minimum value of any vertex dual. + if not maxcardinality: + deltatype = 1 + delta = min(dualvar.values()) + + # Compute delta2: the minimum slack on any edge between + # an S-vertex and a free vertex. + for v in G.nodes(): + if label.get(inblossom[v]) is None and bestedge.get(v) is not None: + d = slack(*bestedge[v]) + if deltatype == -1 or d < delta: + delta = d + deltatype = 2 + deltaedge = bestedge[v] + + # Compute delta3: half the minimum slack on any edge between + # a pair of S-blossoms. + for b in blossomparent: + if ( + blossomparent[b] is None + and label.get(b) == 1 + and bestedge.get(b) is not None + ): + kslack = slack(*bestedge[b]) + if allinteger: + assert (kslack % 2) == 0 + d = kslack // 2 + else: + d = kslack / 2.0 + if deltatype == -1 or d < delta: + delta = d + deltatype = 3 + deltaedge = bestedge[b] + + # Compute delta4: minimum z variable of any T-blossom. + for b in blossomdual: + if ( + blossomparent[b] is None + and label.get(b) == 2 + and (deltatype == -1 or blossomdual[b] < delta) + ): + delta = blossomdual[b] + deltatype = 4 + deltablossom = b + + if deltatype == -1: + # No further improvement possible; max-cardinality optimum + # reached. Do a final delta update to make the optimum + # verifyable. + assert maxcardinality + deltatype = 1 + delta = max(0, min(dualvar.values())) + + # Update dual variables according to delta. + for v in gnodes: + if label.get(inblossom[v]) == 1: + # S-vertex: 2*u = 2*u - 2*delta + dualvar[v] -= delta + elif label.get(inblossom[v]) == 2: + # T-vertex: 2*u = 2*u + 2*delta + dualvar[v] += delta + for b in blossomdual: + if blossomparent[b] is None: + if label.get(b) == 1: + # top-level S-blossom: z = z + 2*delta + blossomdual[b] += delta + elif label.get(b) == 2: + # top-level T-blossom: z = z - 2*delta + blossomdual[b] -= delta + + # Take action at the point where minimum delta occurred. + if deltatype == 1: + # No further improvement possible; optimum reached. + break + elif deltatype == 2: + # Use the least-slack edge to continue the search. + (v, w) = deltaedge + assert label[inblossom[v]] == 1 + allowedge[(v, w)] = allowedge[(w, v)] = True + queue.append(v) + elif deltatype == 3: + # Use the least-slack edge to continue the search. + (v, w) = deltaedge + allowedge[(v, w)] = allowedge[(w, v)] = True + assert label[inblossom[v]] == 1 + queue.append(v) + elif deltatype == 4: + # Expand the least-z blossom. 
+ expandBlossom(deltablossom, False) + + # End of a this substage. + + # Paranoia check that the matching is symmetric. + for v in mate: + assert mate[mate[v]] == v + + # Stop when no more augmenting path can be found. + if not augmented: + break + + # End of a stage; expand all S-blossoms which have zero dual. + for b in list(blossomdual.keys()): + if b not in blossomdual: + continue # already expanded + if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0: + expandBlossom(b, True) + + # Verify that we reached the optimum solution (only for integer weights). + if allinteger: + verifyOptimum() + + return matching_dict_to_set(mate) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/__init__.py new file mode 100644 index 0000000..cf15ddb --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/__init__.py @@ -0,0 +1,27 @@ +""" +Subpackages related to graph-minor problems. + +In graph theory, an undirected graph H is called a minor of the graph G if H +can be formed from G by deleting edges and vertices and by contracting edges +[1]_. + +References +---------- +.. [1] https://en.wikipedia.org/wiki/Graph_minor +""" + +from networkx.algorithms.minors.contraction import ( + contracted_edge, + contracted_nodes, + equivalence_classes, + identified_nodes, + quotient_graph, +) + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/contraction.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/contraction.py new file mode 100644 index 0000000..72a0d70 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/contraction.py @@ -0,0 +1,600 @@ +"""Provides functions for computing minors of a graph.""" +from itertools import chain, combinations, permutations, product + +import networkx as nx +from networkx import density +from networkx.exception import NetworkXException +from networkx.utils import arbitrary_element + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] + +chaini = chain.from_iterable + + +def equivalence_classes(iterable, relation): + """Returns equivalence classes of `relation` when applied to `iterable`. + + The equivalence classes, or blocks, consist of objects from `iterable` + which are all equivalent. They are defined to be equivalent if the + `relation` function returns `True` when passed any two objects from that + class, and `False` otherwise. To define an equivalence relation the + function must be reflexive, symmetric and transitive. + + Parameters + ---------- + iterable : list, tuple, or set + An iterable of elements/nodes. + + relation : function + A Boolean-valued function that implements an equivalence relation + (reflexive, symmetric, transitive binary relation) on the elements + of `iterable` - it must take two elements and return `True` if + they are related, or `False` if not. + + Returns + ------- + set of frozensets + A set of frozensets representing the partition induced by the equivalence + relation function `relation` on the elements of `iterable`. Each + member set in the return set represents an equivalence class, or + block, of the partition. + + Duplicate elements will be ignored so it makes the most sense for + `iterable` to be a :class:`set`. 
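# --- Editor's aside (illustrative only, not part of the committed diff):
# --- equivalence_classes works on any iterable, not just graph nodes. A minimal
# --- sketch grouping strings by first letter (a reflexive, symmetric, transitive
# --- relation); the import path follows the package __init__ added above.
from networkx.algorithms.minors import equivalence_classes

words = ["ant", "apple", "bat", "bee", "cat"]
print(equivalence_classes(words, lambda x, y: x[0] == y[0]))
# -> {frozenset({'ant', 'apple'}), frozenset({'bat', 'bee'}), frozenset({'cat'})}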
+ + Notes + ----- + This function does not check that `relation` represents an equivalence + relation. You can check that your equivalence classes provide a partition + using `is_partition`. + + Examples + -------- + Let `X` be the set of integers from `0` to `9`, and consider an equivalence + relation `R` on `X` of congruence modulo `3`: this means that two integers + `x` and `y` in `X` are equivalent under `R` if they leave the same + remainder when divided by `3`, i.e. `(x - y) mod 3 = 0`. + + The equivalence classes of this relation are `{0, 3, 6, 9}`, `{1, 4, 7}`, + `{2, 5, 8}`: `0`, `3`, `6`, `9` are all divisible by `3` and leave zero + remainder; `1`, `4`, `7` leave remainder `1`; while `2`, `5` and `8` leave + remainder `2`. We can see this by calling `equivalence_classes` with + `X` and a function implementation of `R`. + + >>> X = set(range(10)) + >>> def mod3(x, y): return (x - y) % 3 == 0 + >>> equivalence_classes(X, mod3) # doctest: +SKIP + {frozenset({1, 4, 7}), frozenset({8, 2, 5}), frozenset({0, 9, 3, 6})} + """ + # For simplicity of implementation, we initialize the return value as a + # list of lists, then convert it to a set of sets at the end of the + # function. + blocks = [] + # Determine the equivalence class for each element of the iterable. + for y in iterable: + # Each element y must be in *exactly one* equivalence class. + # + # Each block is guaranteed to be non-empty + for block in blocks: + x = arbitrary_element(block) + if relation(x, y): + block.append(y) + break + else: + # If the element y is not part of any known equivalence class, it + # must be in its own, so we create a new singleton equivalence + # class for it. + blocks.append([y]) + return {frozenset(block) for block in blocks} + + +def quotient_graph( + G, + partition, + edge_relation=None, + node_data=None, + edge_data=None, + relabel=False, + create_using=None, +): + """Returns the quotient graph of `G` under the specified equivalence + relation on nodes. + + Parameters + ---------- + G : NetworkX graph + The graph for which to return the quotient graph with the + specified node relation. + + partition : function, or dict or list of lists, tuples or sets + If a function, this function must represent an equivalence + relation on the nodes of `G`. It must take two arguments *u* + and *v* and return True exactly when *u* and *v* are in the + same equivalence class. The equivalence classes form the nodes + in the returned graph. + + If a dict of lists/tuples/sets, the keys can be any meaningful + block labels, but the values must be the block lists/tuples/sets + (one list/tuple/set per block), and the blocks must form a valid + partition of the nodes of the graph. That is, each node must be + in exactly one block of the partition. + + If a list of sets, the list must form a valid partition of + the nodes of the graph. That is, each node must be in exactly + one block of the partition. + + edge_relation : Boolean function with two arguments + This function must represent an edge relation on the *blocks* of + the `partition` of `G`. It must take two arguments, *B* and *C*, + each one a set of nodes, and return True exactly when there should be + an edge joining block *B* to block *C* in the returned graph. + + If `edge_relation` is not specified, it is assumed to be the + following relation. Block *B* is related to block *C* if and + only if some node in *B* is adjacent to some node in *C*, + according to the edge set of `G`. 
+ + edge_data : function + This function takes two arguments, *B* and *C*, each one a set + of nodes, and must return a dictionary representing the edge + data attributes to set on the edge joining *B* and *C*, should + there be an edge joining *B* and *C* in the quotient graph (if + no such edge occurs in the quotient graph as determined by + `edge_relation`, then the output of this function is ignored). + + If the quotient graph would be a multigraph, this function is + not applied, since the edge data from each edge in the graph + `G` appears in the edges of the quotient graph. + + node_data : function + This function takes one argument, *B*, a set of nodes in `G`, + and must return a dictionary representing the node data + attributes to set on the node representing *B* in the quotient graph. + If None, the following node attributes will be set: + + * 'graph', the subgraph of the graph `G` that this block + represents, + * 'nnodes', the number of nodes in this block, + * 'nedges', the number of edges within this block, + * 'density', the density of the subgraph of `G` that this + block represents. + + relabel : bool + If True, relabel the nodes of the quotient graph to be + nonnegative integers. Otherwise, the nodes are identified with + :class:`frozenset` instances representing the blocks given in + `partition`. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + The quotient graph of `G` under the equivalence relation + specified by `partition`. If the partition were given as a + list of :class:`set` instances and `relabel` is False, + each node will be a :class:`frozenset` corresponding to the same + :class:`set`. + + Raises + ------ + NetworkXException + If the given partition is not a valid partition of the nodes of + `G`. + + Examples + -------- + The quotient graph of the complete bipartite graph under the "same + neighbors" equivalence relation is `K_2`. Under this relation, two nodes + are equivalent if they are not adjacent but have the same neighbor set. + + >>> G = nx.complete_bipartite_graph(2, 3) + >>> same_neighbors = lambda u, v: ( + ... u not in G[v] and v not in G[u] and G[u] == G[v] + ... ) + >>> Q = nx.quotient_graph(G, same_neighbors) + >>> K2 = nx.complete_graph(2) + >>> nx.is_isomorphic(Q, K2) + True + + The quotient graph of a directed graph under the "same strongly connected + component" equivalence relation is the condensation of the graph (see + :func:`condensation`). This example comes from the Wikipedia article + *`Strongly connected component`_*. + + >>> G = nx.DiGraph() + >>> edges = [ + ... "ab", + ... "be", + ... "bf", + ... "bc", + ... "cg", + ... "cd", + ... "dc", + ... "dh", + ... "ea", + ... "ef", + ... "fg", + ... "gf", + ... "hd", + ... "hf", + ... ] + >>> G.add_edges_from(tuple(x) for x in edges) + >>> components = list(nx.strongly_connected_components(G)) + >>> sorted(sorted(component) for component in components) + [['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']] + >>> + >>> C = nx.condensation(G, components) + >>> component_of = C.graph["mapping"] + >>> same_component = lambda u, v: component_of[u] == component_of[v] + >>> Q = nx.quotient_graph(G, same_component) + >>> nx.is_isomorphic(C, Q) + True + + Node identification can be represented as the quotient of a graph under the + equivalence relation that places the two nodes in one block and each other + node in its own singleton block. 
+ + >>> K24 = nx.complete_bipartite_graph(2, 4) + >>> K34 = nx.complete_bipartite_graph(3, 4) + >>> C = nx.contracted_nodes(K34, 1, 2) + >>> nodes = {1, 2} + >>> is_contracted = lambda u, v: u in nodes and v in nodes + >>> Q = nx.quotient_graph(K34, is_contracted) + >>> nx.is_isomorphic(Q, C) + True + >>> nx.is_isomorphic(Q, K24) + True + + The blockmodeling technique described in [1]_ can be implemented as a + quotient graph. + + >>> G = nx.path_graph(6) + >>> partition = [{0, 1}, {2, 3}, {4, 5}] + >>> M = nx.quotient_graph(G, partition, relabel=True) + >>> list(M.edges()) + [(0, 1), (1, 2)] + + Here is the sample example but using partition as a dict of block sets. + + >>> G = nx.path_graph(6) + >>> partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}} + >>> M = nx.quotient_graph(G, partition, relabel=True) + >>> list(M.edges()) + [(0, 1), (1, 2)] + + Partitions can be represented in various ways: + :: + + (0) a list/tuple/set of block lists/tuples/sets + (1) a dict with block labels as keys and blocks lists/tuples/sets as values + (2) a dict with block lists/tuples/sets as keys and block labels as values + (3) a function from nodes in the original iterable to block labels + (4) an equivalence relation function on the target iterable + + As `quotient_graph` is designed to accept partitions represented as (0), (1) or + (4) only, the `equivalence_classes` function can be used to get the partitions + in the right form, in order to call `quotient_graph`. + + .. _Strongly connected component: https://en.wikipedia.org/wiki/Strongly_connected_component + + References + ---------- + .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj. + *Generalized Blockmodeling*. + Cambridge University Press, 2004. + + """ + # If the user provided an equivalence relation as a function to compute + # the blocks of the partition on the nodes of G induced by the + # equivalence relation. + if callable(partition): + # equivalence_classes always return partition of whole G. + partition = equivalence_classes(G, partition) + if not nx.community.is_partition(G, partition): + raise nx.NetworkXException( + "Input `partition` is not an equivalence relation for nodes of G" + ) + return _quotient_graph( + G, partition, edge_relation, node_data, edge_data, relabel, create_using + ) + + # If the partition is a dict, it is assumed to be one where the keys are + # user-defined block labels, and values are block lists, tuples or sets. + if isinstance(partition, dict): + partition = [block for block in partition.values()] + + # If the user provided partition as a collection of sets. Then we + # need to check if partition covers all of G nodes. If the answer + # is 'No' then we need to prepare suitable subgraph view. + partition_nodes = set().union(*partition) + if len(partition_nodes) != len(G): + G = G.subgraph(partition_nodes) + # Each node in the graph/subgraph must be in exactly one block. 
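# --- Editor's aside (illustrative only, not part of the committed diff): a minimal
# --- sketch of the callable-partition path handled above (quotient_graph calls
# --- equivalence_classes internally when `partition` is a function). Assumes
# --- networkx is importable.
import networkx as nx

# Identify nodes of a 6-node path that are congruent mod 3; the blocks
# {0, 3}, {1, 4}, {2, 5} form a triangle in the quotient.
G = nx.path_graph(6)
Q = nx.quotient_graph(G, lambda u, v: (u - v) % 3 == 0, relabel=True)
print(sorted(Q.edges()))  # -> [(0, 1), (0, 2), (1, 2)]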
+ if not nx.community.is_partition(G, partition): + raise NetworkXException("each node must be in exactly one part of `partition`") + return _quotient_graph( + G, partition, edge_relation, node_data, edge_data, relabel, create_using + ) + + +def _quotient_graph( + G, + partition, + edge_relation=None, + node_data=None, + edge_data=None, + relabel=False, + create_using=None, +): + """Construct the quotient graph assuming input has been checked""" + if create_using is None: + H = G.__class__() + else: + H = nx.empty_graph(0, create_using) + # By default set some basic information about the subgraph that each block + # represents on the nodes in the quotient graph. + if node_data is None: + + def node_data(b): + S = G.subgraph(b) + return dict( + graph=S, nnodes=len(S), nedges=S.number_of_edges(), density=density(S) + ) + + # Each block of the partition becomes a node in the quotient graph. + partition = [frozenset(b) for b in partition] + H.add_nodes_from((b, node_data(b)) for b in partition) + # By default, the edge relation is the relation defined as follows. B is + # adjacent to C if a node in B is adjacent to a node in C, according to the + # edge set of G. + # + # This is not a particularly efficient implementation of this relation: + # there are O(n^2) pairs to check and each check may require O(log n) time + # (to check set membership). This can certainly be parallelized. + if edge_relation is None: + + def edge_relation(b, c): + return any(v in G[u] for u, v in product(b, c)) + + # By default, sum the weights of the edges joining pairs of nodes across + # blocks to get the weight of the edge joining those two blocks. + if edge_data is None: + + def edge_data(b, c): + edgedata = ( + d + for u, v, d in G.edges(b | c, data=True) + if (u in b and v in c) or (u in c and v in b) + ) + return {"weight": sum(d.get("weight", 1) for d in edgedata)} + + block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2) + # In a multigraph, add one edge in the quotient graph for each edge + # in the original graph. + if H.is_multigraph(): + edges = chaini( + ( + (b, c, G.get_edge_data(u, v, default={})) + for u, v in product(b, c) + if v in G[u] + ) + for b, c in block_pairs + if edge_relation(b, c) + ) + # In a simple graph, apply the edge data function to each pair of + # blocks to determine the edge data attributes to apply to each edge + # in the quotient graph. + else: + edges = ( + (b, c, edge_data(b, c)) for (b, c) in block_pairs if edge_relation(b, c) + ) + H.add_edges_from(edges) + # If requested by the user, relabel the nodes to be integers, + # numbered in increasing order from zero in the same order as the + # iteration order of `partition`. + if relabel: + # Can't use nx.convert_node_labels_to_integers() here since we + # want the order of iteration to be the same for backward + # compatibility with the nx.blockmodel() function. + labels = {b: i for i, b in enumerate(partition)} + H = nx.relabel_nodes(H, labels) + return H + + +def contracted_nodes(G, u, v, self_loops=True, copy=True): + """Returns the graph that results from contracting `u` and `v`. + + Node contraction identifies the two nodes as a single node incident to any + edge that was incident to the original two nodes. + + Parameters + ---------- + G : NetworkX graph + The graph whose nodes will be contracted. + + u, v : nodes + Must be nodes in `G`. + + self_loops : Boolean + If this is True, any edges joining `u` and `v` in `G` become + self-loops on the new node in the returned graph. 
+ + copy : Boolean + If this is True (default True), make a copy of + `G` and return that instead of directly changing `G`. + + + Returns + ------- + Networkx graph + If Copy is True, + A new graph object of the same type as `G` (leaving `G` unmodified) + with `u` and `v` identified in a single node. The right node `v` + will be merged into the node `u`, so only `u` will appear in the + returned graph. + If copy is False, + Modifies `G` with `u` and `v` identified in a single node. + The right node `v` will be merged into the node `u`, so + only `u` will appear in the returned graph. + + Notes + ----- + For multigraphs, the edge keys for the realigned edges may + not be the same as the edge keys for the old edges. This is + natural because edge keys are unique only within each pair of nodes. + + For non-multigraphs where `u` and `v` are adjacent to a third node + `w`, the edge (`v`, `w`) will be contracted into the edge (`u`, + `w`) with its attributes stored into a "contraction" attribute. + + This function is also available as `identified_nodes`. + + Examples + -------- + Contracting two nonadjacent nodes of the cycle graph on four nodes `C_4` + yields the path graph (ignoring parallel edges): + + >>> G = nx.cycle_graph(4) + >>> M = nx.contracted_nodes(G, 1, 3) + >>> P3 = nx.path_graph(3) + >>> nx.is_isomorphic(M, P3) + True + + >>> G = nx.MultiGraph(P3) + >>> M = nx.contracted_nodes(G, 0, 2) + >>> M.edges + MultiEdgeView([(0, 1, 0), (0, 1, 1)]) + + >>> G = nx.Graph([(1, 2), (2, 2)]) + >>> H = nx.contracted_nodes(G, 1, 2, self_loops=False) + >>> list(H.nodes()) + [1] + >>> list(H.edges()) + [(1, 1)] + + See Also + -------- + contracted_edge + quotient_graph + + """ + # Copying has significant overhead and can be disabled if needed + if copy: + H = G.copy() + else: + H = G + + # edge code uses G.edges(v) instead of G.adj[v] to handle multiedges + if H.is_directed(): + edges_to_remap = chain(G.in_edges(v, data=True), G.out_edges(v, data=True)) + else: + edges_to_remap = G.edges(v, data=True) + + # If the H=G, the generators change as H changes + # This makes the edges_to_remap independent of H + if not copy: + edges_to_remap = list(edges_to_remap) + + v_data = H.nodes[v] + H.remove_node(v) + + for (prev_w, prev_x, d) in edges_to_remap: + w = prev_w if prev_w != v else u + x = prev_x if prev_x != v else u + + if ({prev_w, prev_x} == {u, v}) and not self_loops: + continue + + if not H.has_edge(w, x) or G.is_multigraph(): + H.add_edge(w, x, **d) + else: + if "contraction" in H.edges[(w, x)]: + H.edges[(w, x)]["contraction"][(prev_w, prev_x)] = d + else: + H.edges[(w, x)]["contraction"] = {(prev_w, prev_x): d} + + if "contraction" in H.nodes[u]: + H.nodes[u]["contraction"][v] = v_data + else: + H.nodes[u]["contraction"] = {v: v_data} + return H + + +identified_nodes = contracted_nodes + + +def contracted_edge(G, edge, self_loops=True, copy=True): + """Returns the graph that results from contracting the specified edge. + + Edge contraction identifies the two endpoints of the edge as a single node + incident to any edge that was incident to the original two nodes. A graph + that results from edge contraction is called a *minor* of the original + graph. + + Parameters + ---------- + G : NetworkX graph + The graph whose edge will be contracted. + + edge : tuple + Must be a pair of nodes in `G`. + + self_loops : Boolean + If this is True, any edges (including `edge`) joining the + endpoints of `edge` in `G` become self-loops on the new node in the + returned graph. 
+ + copy : Boolean (default True) + If this is True, a the contraction will be performed on a copy of `G`, + otherwise the contraction will happen in place. + + Returns + ------- + Networkx graph + A new graph object of the same type as `G` (leaving `G` unmodified) + with endpoints of `edge` identified in a single node. The right node + of `edge` will be merged into the left one, so only the left one will + appear in the returned graph. + + Raises + ------ + ValueError + If `edge` is not an edge in `G`. + + Examples + -------- + Attempting to contract two nonadjacent nodes yields an error: + + >>> G = nx.cycle_graph(4) + >>> nx.contracted_edge(G, (1, 3)) + Traceback (most recent call last): + ... + ValueError: Edge (1, 3) does not exist in graph G; cannot contract it + + Contracting two adjacent nodes in the cycle graph on *n* nodes yields the + cycle graph on *n - 1* nodes: + + >>> C5 = nx.cycle_graph(5) + >>> C4 = nx.cycle_graph(4) + >>> M = nx.contracted_edge(C5, (0, 1), self_loops=False) + >>> nx.is_isomorphic(M, C4) + True + + See also + -------- + contracted_nodes + quotient_graph + + """ + u, v = edge[:2] + if not G.has_edge(u, v): + raise ValueError(f"Edge {edge} does not exist in graph G; cannot contract it") + return contracted_nodes(G, u, v, self_loops=self_loops, copy=copy) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/tests/test_contraction.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/tests/test_contraction.py new file mode 100644 index 0000000..1dc4978 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/minors/tests/test_contraction.py @@ -0,0 +1,422 @@ +"""Unit tests for the :mod:`networkx.algorithms.minors.contraction` module.""" +import pytest + +import networkx as nx +from networkx.utils import arbitrary_element, edges_equal, nodes_equal + + +class TestQuotient: + """Unit tests for computing quotient graphs.""" + + def test_quotient_graph_complete_multipartite(self): + """Tests that the quotient graph of the complete *n*-partite graph + under the "same neighbors" node relation is the complete graph on *n* + nodes. + + """ + G = nx.complete_multipartite_graph(2, 3, 4) + # Two nodes are equivalent if they are not adjacent but have the same + # neighbor set. + + def same_neighbors(u, v): + return u not in G[v] and v not in G[u] and G[u] == G[v] + + expected = nx.complete_graph(3) + actual = nx.quotient_graph(G, same_neighbors) + # It won't take too long to run a graph isomorphism algorithm on such + # small graphs. + assert nx.is_isomorphic(expected, actual) + + def test_quotient_graph_complete_bipartite(self): + """Tests that the quotient graph of the complete bipartite graph under + the "same neighbors" node relation is `K_2`. + + """ + G = nx.complete_bipartite_graph(2, 3) + # Two nodes are equivalent if they are not adjacent but have the same + # neighbor set. + + def same_neighbors(u, v): + return u not in G[v] and v not in G[u] and G[u] == G[v] + + expected = nx.complete_graph(2) + actual = nx.quotient_graph(G, same_neighbors) + # It won't take too long to run a graph isomorphism algorithm on such + # small graphs. + assert nx.is_isomorphic(expected, actual) + + def test_quotient_graph_edge_relation(self): + """Tests for specifying an alternate edge relation for the quotient + graph. 
+ + """ + G = nx.path_graph(5) + + def identity(u, v): + return u == v + + def same_parity(b, c): + return arbitrary_element(b) % 2 == arbitrary_element(c) % 2 + + actual = nx.quotient_graph(G, identity, same_parity) + expected = nx.Graph() + expected.add_edges_from([(0, 2), (0, 4), (2, 4)]) + expected.add_edge(1, 3) + assert nx.is_isomorphic(actual, expected) + + def test_condensation_as_quotient(self): + """This tests that the condensation of a graph can be viewed as the + quotient graph under the "in the same connected component" equivalence + relation. + + """ + # This example graph comes from the file `test_strongly_connected.py`. + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + C = nx.condensation(G, scc) + component_of = C.graph["mapping"] + # Two nodes are equivalent if they are in the same connected component. + + def same_component(u, v): + return component_of[u] == component_of[v] + + Q = nx.quotient_graph(G, same_component) + assert nx.is_isomorphic(C, Q) + + def test_path(self): + G = nx.path_graph(6) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_path__partition_provided_as_dict_of_lists(self): + G = nx.path_graph(6) + partition = {0: [0, 1], 2: [2, 3], 4: [4, 5]} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_path__partition_provided_as_dict_of_tuples(self): + G = nx.path_graph(6) + partition = {0: (0, 1), 2: (2, 3), 4: (4, 5)} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_path__partition_provided_as_dict_of_sets(self): + G = nx.path_graph(6) + partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_multigraph_path(self): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_directed_path(self): + G = nx.DiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + def 
test_directed_multigraph_path(self): + G = nx.MultiDiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + def test_overlapping_blocks(self): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(6) + partition = [{0, 1, 2}, {2, 3}, {4, 5}] + nx.quotient_graph(G, partition) + + def test_weighted_path(self): + G = nx.path_graph(6) + for i in range(5): + G[i][i + 1]["weight"] = i + 1 + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + assert M[0][1]["weight"] == 2 + assert M[1][2]["weight"] == 4 + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + def test_barbell(self): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + def test_barbell_plus(self): + G = nx.barbell_graph(3, 0) + # Add an extra edge joining the bells. + G.add_edge(0, 5) + partition = [{0, 1, 2}, {3, 4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + assert M[0][1]["weight"] == 2 + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + def test_blockmodel(self): + G = nx.path_graph(6) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + def test_multigraph_blockmodel(self): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, create_using=nx.MultiGraph(), relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + def test_quotient_graph_incomplete_partition(self): + G = nx.path_graph(6) + partition = [] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), []) + assert edges_equal(H.edges(), []) + + partition = [[0, 1], [2, 3], [5]] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), [0, 1, 2]) + assert edges_equal(H.edges(), [(0, 1)]) + + +class TestContraction: + """Unit tests for node and edge contraction functions.""" + + def test_undirected_node_contraction(self): + """Tests for node contraction in an undirected graph.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes(G, 0, 1) + expected = nx.cycle_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + def test_directed_node_contraction(self): + """Tests for node contraction in a directed graph.""" + G = nx.DiGraph(nx.cycle_graph(4)) + actual = nx.contracted_nodes(G, 0, 1) + expected = 
nx.DiGraph(nx.cycle_graph(3)) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + def test_undirected_node_contraction_no_copy(self): + """Tests for node contraction in an undirected graph + by making changes in place.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes(G, 0, 1, copy=False) + expected = nx.cycle_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, G) + assert nx.is_isomorphic(actual, expected) + + def test_directed_node_contraction_no_copy(self): + """Tests for node contraction in a directed graph + by making changes in place.""" + G = nx.DiGraph(nx.cycle_graph(4)) + actual = nx.contracted_nodes(G, 0, 1, copy=False) + expected = nx.DiGraph(nx.cycle_graph(3)) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, G) + assert nx.is_isomorphic(actual, expected) + + def test_create_multigraph(self): + """Tests that using a MultiGraph creates multiple edges.""" + G = nx.path_graph(3, create_using=nx.MultiGraph()) + G.add_edge(0, 1) + G.add_edge(0, 0) + G.add_edge(0, 2) + actual = nx.contracted_nodes(G, 0, 2) + expected = nx.MultiGraph() + expected.add_edge(0, 1) + expected.add_edge(0, 1) + expected.add_edge(0, 1) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert edges_equal(actual.edges, expected.edges) + + def test_multigraph_keys(self): + """Tests that multiedge keys are reset in new graph.""" + G = nx.path_graph(3, create_using=nx.MultiGraph()) + G.add_edge(0, 1, 5) + G.add_edge(0, 0, 0) + G.add_edge(0, 2, 5) + actual = nx.contracted_nodes(G, 0, 2) + expected = nx.MultiGraph() + expected.add_edge(0, 1, 0) + expected.add_edge(0, 1, 5) + expected.add_edge(0, 1, 2) # keyed as 2 b/c 2 edges already in G + expected.add_edge(0, 0, 0) + expected.add_edge(0, 0, 1) # this comes from (0, 2, 5) + assert edges_equal(actual.edges, expected.edges) + + def test_node_attributes(self): + """Tests that node contraction preserves node attributes.""" + G = nx.cycle_graph(4) + # Add some data to the two nodes being contracted. + G.nodes[0]["foo"] = "bar" + G.nodes[1]["baz"] = "xyzzy" + actual = nx.contracted_nodes(G, 0, 1) + # We expect that contracting the nodes 0 and 1 in C_4 yields K_3, but + # with nodes labeled 0, 2, and 3, and with a self-loop on 0. 
+ expected = nx.complete_graph(3) + expected = nx.relabel_nodes(expected, {1: 2, 2: 3}) + expected.add_edge(0, 0) + cdict = {1: {"baz": "xyzzy"}} + expected.nodes[0].update(dict(foo="bar", contraction=cdict)) + assert nx.is_isomorphic(actual, expected) + assert actual.nodes == expected.nodes + + def test_edge_attributes(self): + """Tests that node contraction preserves edge attributes.""" + # Shape: src1 --> dest <-- src2 + G = nx.DiGraph([("src1", "dest"), ("src2", "dest")]) + G["src1"]["dest"]["value"] = "src1-->dest" + G["src2"]["dest"]["value"] = "src2-->dest" + H = nx.MultiDiGraph(G) + + G = nx.contracted_nodes(G, "src1", "src2") # New Shape: src1 --> dest + assert G.edges[("src1", "dest")]["value"] == "src1-->dest" + assert ( + G.edges[("src1", "dest")]["contraction"][("src2", "dest")]["value"] + == "src2-->dest" + ) + + H = nx.contracted_nodes(H, "src1", "src2") # New Shape: src1 -(x2)-> dest + assert len(H.edges(("src1", "dest"))) == 2 + + def test_without_self_loops(self): + """Tests for node contraction without preserving self-loops.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes(G, 0, 1, self_loops=False) + expected = nx.complete_graph(3) + assert nx.is_isomorphic(actual, expected) + + def test_contract_selfloop_graph(self): + """Tests for node contraction when nodes have selfloops.""" + G = nx.cycle_graph(4) + G.add_edge(0, 0) + actual = nx.contracted_nodes(G, 0, 1) + expected = nx.complete_graph([0, 2, 3]) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert edges_equal(actual.edges, expected.edges) + actual = nx.contracted_nodes(G, 1, 0) + expected = nx.complete_graph([1, 2, 3]) + expected.add_edge(1, 1) + expected.add_edge(1, 1) + assert edges_equal(actual.edges, expected.edges) + + def test_undirected_edge_contraction(self): + """Tests for edge contraction in an undirected graph.""" + G = nx.cycle_graph(4) + actual = nx.contracted_edge(G, (0, 1)) + expected = nx.complete_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + def test_multigraph_edge_contraction(self): + """Tests for edge contraction in a multigraph""" + G = nx.cycle_graph(4) + actual = nx.contracted_edge(G, (0, 1, 0)) + expected = nx.complete_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + def test_nonexistent_edge(self): + """Tests that attempting to contract a non-existent edge raises an + exception. + + """ + with pytest.raises(ValueError): + G = nx.cycle_graph(4) + nx.contracted_edge(G, (0, 2)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/mis.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/mis.py new file mode 100644 index 0000000..540e9a0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/mis.py @@ -0,0 +1,76 @@ +""" +Algorithm to find a maximal (not maximum) independent set. + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["maximal_independent_set"] + + +@py_random_state(2) +@not_implemented_for("directed") +def maximal_independent_set(G, nodes=None, seed=None): + """Returns a random maximal independent set guaranteed to contain + a given set of nodes. + + An independent set is a set of nodes such that the subgraph + of G induced by these nodes contains no edges. A maximal + independent set is an independent set such that it is not possible + to add a new node and still get an independent set. 
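A minimal sketch of the maximal-versus-maximum distinction, assuming a standard NetworkX session: in a star graph the hub alone is already maximal, while the leaves form the (larger) maximum independent set.

    >>> import networkx as nx
    >>> G = nx.star_graph(3)                        # hub 0 joined to leaves 1, 2, 3
    >>> nx.maximal_independent_set(G, [0])          # the hub alone; no leaf can be added
    [0]
    >>> sorted(nx.maximal_independent_set(G, [1]))  # the leaves are the maximum independent set
    [1, 2, 3]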
+ + Parameters + ---------- + G : NetworkX graph + + nodes : list or iterable + Nodes that must be part of the independent set. This set of nodes + must be independent. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + indep_nodes : list + List of nodes that are part of a maximal independent set. + + Raises + ------ + NetworkXUnfeasible + If the nodes in the provided list are not part of the graph or + do not form an independent set, an exception is raised. + + NetworkXNotImplemented + If `G` is directed. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.maximal_independent_set(G) # doctest: +SKIP + [4, 0, 2] + >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP + [1, 3] + + Notes + ----- + This algorithm does not solve the maximum independent set problem. + + """ + if not nodes: + nodes = {seed.choice(list(G))} + else: + nodes = set(nodes) + if not nodes.issubset(G): + raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G") + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) + if set.intersection(neighbors, nodes): + raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G") + indep_nodes = list(nodes) + available_nodes = set(G.nodes()).difference(neighbors.union(nodes)) + while available_nodes: + node = seed.choice(list(available_nodes)) + indep_nodes.append(node) + available_nodes.difference_update(list(G.adj[node]) + [node]) + return indep_nodes diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/moral.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/moral.py new file mode 100644 index 0000000..028160e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/moral.py @@ -0,0 +1,57 @@ +r"""Function for computing the moral graph of a directed graph.""" + +import itertools + +from networkx.utils import not_implemented_for + +__all__ = ["moral_graph"] + + +@not_implemented_for("undirected") +def moral_graph(G): + r"""Return the Moral Graph + + Returns the moralized graph of a given directed graph. + + Parameters + ---------- + G : NetworkX graph + Directed graph + + Returns + ------- + H : NetworkX graph + The undirected moralized graph of G + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (2, 5), (3, 4), (4, 3)]) + >>> G_moral = nx.moral_graph(G) + >>> G_moral.edges() + EdgeView([(1, 2), (2, 3), (2, 5), (2, 4), (3, 4)]) + + Notes + ----- + A moral graph is an undirected graph H = (V, E) generated from a + directed Graph, where if a node has more than one parent node, edges + between these parent nodes are inserted and all directed edges become + undirected. + + https://en.wikipedia.org/wiki/Moral_graph + + References + ---------- + .. [1] Wray L. Buntine. 1995. Chain graphs for learning. 
+ In Proceedings of the Eleventh conference on Uncertainty + in artificial intelligence (UAI'95) + """ + H = G.to_undirected() + for preds in G.pred.values(): + predecessors_combinations = itertools.combinations(preds, r=2) + H.add_edges_from(predecessors_combinations) + return H diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/__init__.py new file mode 100644 index 0000000..23fa264 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/__init__.py @@ -0,0 +1,52 @@ +""" This module provides the functions for node classification problem. + +The functions in this module are not imported +into the top level `networkx` namespace. +You can access these functions by importing +the `networkx.algorithms.node_classification` modules, +then accessing the functions as attributes of `node_classification`. +For example: + + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> node_classification.harmonic_function(G) + ['A', 'A', 'B', 'B'] + +""" + + +def __getattr__(name): + if name in ("hmn", "lgc"): + import warnings + import importlib + + fn_name = ( + "harmonic_function" if name == "hmn" else "local_and_global_consistency" + ) + msg = ( + f"The {name} module is deprecated and will be removed in version 3.0.\n" + f"Access `{fn_name}` directly from `node_classification`:\n\n" + " from networkx.algorithms import node_classification\n" + f" node_classification.{fn_name}\n" + ) + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + return importlib.import_module( + f".{name}", "networkx.algorithms.node_classification" + ) + if name == "harmonic_function": + from .hmn import harmonic_function + + return harmonic_function + if name == "local_and_global_consistency": + from .lgc import local_and_global_consistency + + return local_and_global_consistency + raise AttributeError(f"module {__name__} has no attribute {name}") + + +def __dir__(): + return ["harmonic_function", "local_and_global_consistency"] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/hmn.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/hmn.py new file mode 100644 index 0000000..727ee36 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/hmn.py @@ -0,0 +1,88 @@ +"""Function for computing Harmonic function algorithm by Zhu et al. + +References +---------- +Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August). +Semi-supervised learning using gaussian fields and harmonic functions. +In ICML (Vol. 3, pp. 912-919). +""" +import networkx as nx +from networkx.algorithms.node_classification.utils import _get_label_info +from networkx.utils.decorators import not_implemented_for + +__all__ = ["harmonic_function"] + + +@not_implemented_for("directed") +def harmonic_function(G, max_iter=30, label_name="label"): + """Node classification by Harmonic function + + Parameters + ---------- + G : NetworkX Graph + max_iter : int + maximum number of iterations allowed + label_name : string + name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. 
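For reference, the propagation loop in the implementation that follows applies the update $F \leftarrow P F + B$, where $X$ is the adjacency matrix, $D$ the diagonal degree matrix, $P = D^{-1} X$ with the rows of labelled nodes zeroed out, and $B$ one-hot encodes the known labels; after `max_iter` sweeps each node is assigned the class with the largest score in its row of $F$.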
+ + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.harmonic_function(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August). + Semi-supervised learning using gaussian fields and harmonic functions. + In ICML (Vol. 3, pp. 912-919). + """ + import numpy as np + import scipy as sp + import scipy.sparse # call as sp.sparse + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." + ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)) + P = (D @ X).tolil() + P[labels[:, 0]] = 0 # labels[:, 0] indicates IDs of labeled nodes + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/lgc.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/lgc.py new file mode 100644 index 0000000..5324470 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/lgc.py @@ -0,0 +1,89 @@ +"""Function for computing Local and global consistency algorithm by Zhou et al. + +References +---------- +Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004). +Learning with local and global consistency. +Advances in neural information processing systems, 16(16), 321-328. +""" +import networkx as nx +from networkx.algorithms.node_classification.utils import _get_label_info +from networkx.utils.decorators import not_implemented_for + +__all__ = ["local_and_global_consistency"] + + +@not_implemented_for("directed") +def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"): + """Node classification by Local and Global Consistency + + Parameters + ---------- + G : NetworkX Graph + alpha : float + Clamping factor + max_iter : int + Maximum number of iterations allowed + label_name : string + Name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. + + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.local_and_global_consistency(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004). + Learning with local and global consistency. 
+ Advances in neural information processing systems, 16(16), 321-328. + """ + import numpy as np + import scipy as sp + import scipy.sparse # call as sp.sparse + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." + ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0))) + P = alpha * ((D2 @ X) @ D2) + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 - alpha + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/utils.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/utils.py new file mode 100644 index 0000000..f7d7ac2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/node_classification/utils.py @@ -0,0 +1,34 @@ +def _get_label_info(G, label_name): + """Get and return information of labels from the input graph + + Parameters + ---------- + G : Network X graph + label_name : string + Name of the target label + + Returns + ---------- + labels : numpy array, shape = [n_labeled_samples, 2] + Array of pairs of labeled node ID and label ID + label_dict : numpy array, shape = [n_classes] + Array of labels + i-th element contains the label corresponding label ID `i` + """ + import numpy as np + + labels = [] + label_to_id = {} + lid = 0 + for i, n in enumerate(G.nodes(data=True)): + if label_name in n[1]: + label = n[1][label_name] + if label not in label_to_id: + label_to_id[label] = lid + lid += 1 + labels.append([i, label_to_id[label]]) + labels = np.array(labels) + label_dict = np.array( + [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])] + ) + return (labels, label_dict) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/non_randomness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/non_randomness.py new file mode 100644 index 0000000..e6953db --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/non_randomness.py @@ -0,0 +1,95 @@ +r""" Computation of graph non-randomness +""" + +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["non_randomness"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def non_randomness(G, k=None, weight="weight"): + """Compute the non-randomness of graph G. + + The first returned value nr is the sum of non-randomness values of all + edges within the graph (where the non-randomness of an edge tends to be + small when the two nodes linked by that edge are from two different + communities). + + The second computed value nr_rd is a relative measure that indicates + to what extent graph G is different from random graphs in terms + of probability. When it is close to 0, the graph tends to be more + likely generated by an Erdos Renyi model. + + Parameters + ---------- + G : NetworkX graph + Graph must be symmetric, connected, and without self-loops. + + k : int + The number of communities in G. 
+ If k is not set, the function will use a default community + detection algorithm to set it. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1, i.e., the graph is + binary. + + Returns + ------- + non-randomness : (float, float) tuple + Non-randomness, Relative non-randomness w.r.t. + Erdos Renyi random graphs. + + Raises + ------ + NetworkXException + if the input graph is not connected. + NetworkXError + if the input graph contains self-loops. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nr, nr_rd = nx.non_randomness(G, 2) + >>> nr, nr_rd = nx.non_randomness(G, 2, 'weight') + + Notes + ----- + This computes Eq. (4.4) and (4.5) in Ref. [1]_. + + If a weight field is passed, this algorithm will use the eigenvalues + of the weighted adjacency matrix to compute Eq. (4.4) and (4.5). + + References + ---------- + .. [1] Xiaowei Ying and Xintao Wu, + On Randomness Measures for Social Networks, + SIAM International Conference on Data Mining. 2009 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if len(list(nx.selfloop_edges(G))) > 0: + raise nx.NetworkXError("Graph must not contain self-loops") + + if k is None: + k = len(tuple(nx.community.label_propagation_communities(G))) + + # eq. 4.4 + eigenvalues = np.linalg.eigvals(nx.to_numpy_array(G, weight=weight)) + nr = np.real(np.sum(eigenvalues[:k])) + + n = G.number_of_nodes() + m = G.number_of_edges() + p = (2 * k * m) / (n * (n - k)) + + # eq. 4.5 + nr_rd = (nr - ((n - 2 * k) * p + k)) / math.sqrt(2 * k * p * (1 - p)) + + return nr, nr_rd diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/__init__.py new file mode 100644 index 0000000..0ebc6ab --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.operators.all import * +from networkx.algorithms.operators.binary import * +from networkx.algorithms.operators.product import * +from networkx.algorithms.operators.unary import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/all.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/all.py new file mode 100644 index 0000000..7d7c19c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/all.py @@ -0,0 +1,253 @@ +"""Operations on many graphs. +""" +from itertools import zip_longest + +import networkx as nx + +__all__ = ["union_all", "compose_all", "disjoint_union_all", "intersection_all"] + + +def union_all(graphs, rename=(None,)): + """Returns the union of all graphs. + + The graphs must be disjoint, otherwise an exception is raised. + + Parameters + ---------- + graphs : list of graphs + List of NetworkX graphs + + rename : bool , default=(None, None) + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : a graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + Notes + ----- + To force a disjoint union with node relabeling, use + disjoint_union_all(G,H) or convert_node_labels_to integers(). + + Graph, edge, and node attributes are propagated to the union graph. 
+ If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. + + See Also + -------- + union + disjoint_union_all + """ + # collect the graphs in case an iterator was passed + graphs = list(graphs) + + if not graphs: + raise ValueError("cannot apply union_all to an empty list") + + U = graphs[0] + + if any(G.is_multigraph() != U.is_multigraph() for G in graphs): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + + # rename graph to obtain disjoint node labels + def add_prefix(graph, prefix): + if prefix is None: + return graph + + def label(x): + if isinstance(x, str): + name = prefix + x + else: + name = prefix + repr(x) + return name + + return nx.relabel_nodes(graph, label) + + graphs = [add_prefix(G, name) for G, name in zip_longest(graphs, rename)] + + if sum(len(G) for G in graphs) != len(set().union(*graphs)): + raise nx.NetworkXError( + "The node sets of the graphs are not disjoint.", + "Use appropriate rename" + "=(G1prefix,G2prefix,...,GNprefix)" + "or use disjoint_union(G1,G2,...,GN).", + ) + + # Union is the same type as first graph + R = U.__class__() + + # add graph attributes, later attributes take precedent over earlier ones + for G in graphs: + R.graph.update(G.graph) + + # add nodes and attributes + for G in graphs: + R.add_nodes_from(G.nodes(data=True)) + + if U.is_multigraph(): + for G in graphs: + R.add_edges_from(G.edges(keys=True, data=True)) + else: + for G in graphs: + R.add_edges_from(G.edges(data=True)) + + return R + + +def disjoint_union_all(graphs): + """Returns the disjoint union of all graphs. + + This operation forces distinct integer node labels starting with 0 + for the first graph in the list and numbering consecutively. + + Parameters + ---------- + graphs : list + List of NetworkX graphs + + Returns + ------- + U : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + Notes + ----- + It is recommended that the graphs be either all directed or all undirected. + + Graph, edge, and node attributes are propagated to the union graph. + If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. + """ + graphs = list(graphs) + + if not graphs: + raise ValueError("cannot apply disjoint_union_all to an empty list") + + first_labels = [0] + for G in graphs[:-1]: + first_labels.append(len(G) + first_labels[-1]) + + relabeled = [ + nx.convert_node_labels_to_integers(G, first_label=first_label) + for G, first_label in zip(graphs, first_labels) + ] + R = union_all(relabeled) + for G in graphs: + R.graph.update(G.graph) + return R + + +def compose_all(graphs): + """Returns the composition of all graphs. + + Composition is the simple union of the node sets and edge sets. + The node sets of the supplied graphs need not be disjoint. + + Parameters + ---------- + graphs : list + List of NetworkX graphs + + Returns + ------- + C : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + Notes + ----- + It is recommended that the supplied graphs be either all directed or all + undirected. + + Graph, edge, and node attributes are propagated to the union graph. + If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. 
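A minimal sketch of how these operators treat overlapping node sets, assuming a standard NetworkX session: `compose_all` merges shared nodes, `disjoint_union_all` relabels to keep every input separate, and `union_all` would raise `NetworkXError` on the same inputs because the node sets are not disjoint.

    >>> import networkx as nx
    >>> G1, G2 = nx.path_graph(2), nx.path_graph(3)    # node sets overlap on {0, 1}
    >>> sorted(nx.compose_all([G1, G2]).nodes)         # shared nodes are merged
    [0, 1, 2]
    >>> sorted(nx.disjoint_union_all([G1, G2]).nodes)  # nodes relabelled consecutively
    [0, 1, 2, 3, 4]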
+ """ + graphs = list(graphs) + + if not graphs: + raise ValueError("cannot apply compose_all to an empty list") + + U = graphs[0] + + if any(G.is_multigraph() != U.is_multigraph() for G in graphs): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + + R = U.__class__() + # add graph attributes, H attributes take precedent over G attributes + for G in graphs: + R.graph.update(G.graph) + + for G in graphs: + R.add_nodes_from(G.nodes(data=True)) + + if U.is_multigraph(): + for G in graphs: + R.add_edges_from(G.edges(keys=True, data=True)) + else: + for G in graphs: + R.add_edges_from(G.edges(data=True)) + return R + + +def intersection_all(graphs): + """Returns a new graph that contains only the nodes and the edges that exist in + all graphs. + + Parameters + ---------- + graphs : list + List of NetworkX graphs + + Returns + ------- + R : A new graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. + """ + graphs = list(graphs) + + if not graphs: + raise ValueError("cannot apply intersection_all to an empty list") + + U = graphs[0] + + if any(G.is_multigraph() != U.is_multigraph() for G in graphs): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + + # create new graph + node_intersection = set.intersection(*[set(G.nodes) for G in graphs]) + R = U.__class__() + R.add_nodes_from(node_intersection) + + if U.is_multigraph(): + edge_sets = [set(G.edges(keys=True)) for G in graphs] + else: + edge_sets = [set(G.edges()) for G in graphs] + + edge_intersection = set.intersection(*edge_sets) + R.add_edges_from(edge_intersection) + + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/binary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/binary.py new file mode 100644 index 0000000..fcd5933 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/binary.py @@ -0,0 +1,424 @@ +""" +Operations on graphs including union, intersection, difference. +""" +import networkx as nx + +__all__ = [ + "union", + "compose", + "disjoint_union", + "intersection", + "difference", + "symmetric_difference", + "full_join", +] + + +def union(G, H, rename=(None, None), name=None): + """Return the union of graphs G and H. + + Graphs G and H must be disjoint after the renaming takes place, + otherwise an exception is raised. + + Parameters + ---------- + G,H : graph + A NetworkX graph + + rename : tuple , default=(None, None) + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + name : string + Specify the name for the union graph + + .. deprecated:: 2.7 + This is deprecated and will be removed in version v3.0. + + Returns + ------- + U : A union graph with the same type as G. + + Notes + ----- + To force a disjoint union with node relabeling, use + disjoint_union(G,H) or convert_node_labels_to integers(). + + Graph, edge, and node attributes are propagated from G and H + to the union graph. If a graph attribute is present in both + G and H the value from H is used. 
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 1), (0, 3), (1, 3), (1, 2)]) + >>> U = nx.union(G, H, rename=("G", "H")) + >>> U.nodes + NodeView(('G0', 'G1', 'G2', 'H0', 'H1', 'H3', 'H2')) + >>> U.edges + EdgeView([('G0', 'G1'), ('G0', 'G2'), ('G1', 'G2'), ('H0', 'H1'), ('H0', 'H3'), ('H1', 'H3'), ('H1', 'H2')]) + + See Also + -------- + disjoint_union + """ + if name is not None: + import warnings + + warnings.warn( + "name parameter is deprecated and will be removed in version 3.0", + DeprecationWarning, + stacklevel=2, + ) + + return nx.union_all([G, H], rename) + + +def disjoint_union(G, H): + """Return the disjoint union of graphs G and H. + + This algorithm forces distinct integer node labels. + + Parameters + ---------- + G,H : graph + A NetworkX graph + + Returns + ------- + U : A union graph with the same type as G. + + Notes + ----- + A new graph is created, of the same class as G. It is recommended + that G and H be either both directed or both undirected. + + The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are + relabeled len(G) to len(G)+len(H)-1. + + Graph, edge, and node attributes are propagated from G and H + to the union graph. If a graph attribute is present in both + G and H the value from H is used. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> G.nodes[0]["key1"] = 5 + >>> H.nodes[0]["key2"] = 10 + >>> U = nx.disjoint_union(G, H) + >>> U.nodes(data=True) + NodeDataView({0: {'key1': 5}, 1: {}, 2: {}, 3: {'key2': 10}, 4: {}, 5: {}, 6: {}}) + >>> U.edges + EdgeView([(0, 1), (0, 2), (1, 2), (3, 4), (4, 6), (5, 6)]) + """ + return nx.disjoint_union_all([G, H]) + + +def intersection(G, H): + """Returns a new graph that contains only the nodes and the edges that exist in + both G and H. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H can have different node sets but must be both graphs or both multigraphs. + + Raises + ------ + NetworkXError + If one is a MultiGraph and the other one is a graph. + + Returns + ------- + GH : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. If you want a new graph of the intersection of G and H + with the attributes (including edge data) from G use remove_nodes_from() + as follows + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n not in H) + >>> R.remove_edges_from(e for e in G.edges if e not in H.edges) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> R = nx.intersection(G, H) + >>> R.nodes + NodeView((0, 1, 2)) + >>> R.edges + EdgeView([(1, 2)]) + """ + return nx.intersection_all([G, H]) + + +def difference(G, H): + """Returns a new graph that contains the edges that exist in G but not in H. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. 
If you want a new graph of the difference of G and H with + the attributes (including edge data) from G use remove_nodes_from() + as follows: + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n in H) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)]) + >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)]) + >>> R = nx.difference(G, H) + >>> R.nodes + NodeView((0, 1, 2, 3)) + >>> R.edges + EdgeView([(0, 2), (1, 3)]) + """ + # create new graph + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError("G and H must both be graphs or multigraphs.") + R = nx.create_empty_copy(G) + + if set(G) != set(H): + raise nx.NetworkXError("Node sets of graphs not equal") + + if G.is_multigraph(): + edges = G.edges(keys=True) + else: + edges = G.edges() + for e in edges: + if not H.has_edge(*e): + R.add_edge(*e) + return R + + +def symmetric_difference(G, H): + """Returns new graph with edges that exist in either G or H but not both. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)]) + >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)]) + >>> R = nx.symmetric_difference(G, H) + >>> R.nodes + NodeView((0, 1, 2, 3)) + >>> R.edges + EdgeView([(0, 2), (0, 3), (1, 3)]) + """ + # create new graph + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError("G and H must both be graphs or multigraphs.") + R = nx.create_empty_copy(G) + + if set(G) != set(H): + raise nx.NetworkXError("Node sets of graphs not equal") + + gnodes = set(G) # set of nodes in G + hnodes = set(H) # set of nodes in H + nodes = gnodes.symmetric_difference(hnodes) + R.add_nodes_from(nodes) + + if G.is_multigraph(): + edges = G.edges(keys=True) + else: + edges = G.edges() + # we could copy the data here but then this function doesn't + # match intersection and difference + for e in edges: + if not H.has_edge(*e): + R.add_edge(*e) + + if H.is_multigraph(): + edges = H.edges(keys=True) + else: + edges = H.edges() + for e in edges: + if not G.has_edge(*e): + R.add_edge(*e) + return R + + +def compose(G, H): + """Returns a new graph of G composed with H. + + Composition is the simple union of the node sets and edge sets. + The node sets of G and H do not need to be disjoint. + + Parameters + ---------- + G, H : graph + A NetworkX graph + + Returns + ------- + C: A new graph with the same type as G + + Notes + ----- + It is recommended that G and H be either both directed or both undirected. + Attributes from H take precedent over attributes from G. + + For MultiGraphs, the edges are identified by incident nodes AND edge-key. + This can cause surprises (i.e., edge `(1, 2)` may or may not be the same + in two graphs) if you use MultiGraph without keeping track of edge keys. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> H = nx.Graph([(0, 1), (1, 2)]) + >>> R = nx.compose(G, H) + >>> R.nodes + NodeView((0, 1, 2)) + >>> R.edges + EdgeView([(0, 1), (0, 2), (1, 2)]) + + By default, the attributes from `H` take precedent over attributes from `G`. 
+ If you prefer another way of combining attributes, you can update them after the compose operation: + + >>> G = nx.Graph([(0, 1, {'weight': 2.0}), (3, 0, {'weight': 100.0})]) + >>> H = nx.Graph([(0, 1, {'weight': 10.0}), (1, 2, {'weight': -1.0})]) + >>> nx.set_node_attributes(G, {0: 'dark', 1: 'light', 3: 'black'}, name='color') + >>> nx.set_node_attributes(H, {0: 'green', 1: 'orange', 2: 'yellow'}, name='color') + >>> GcomposeH = nx.compose(G, H) + + Normally, color attribute values of nodes of GcomposeH come from H. We can workaround this as follows: + + >>> node_data = {n: G.nodes[n]['color'] + " " + H.nodes[n]['color'] for n in G.nodes & H.nodes} + >>> nx.set_node_attributes(GcomposeH, node_data, 'color') + >>> print(GcomposeH.nodes[0]['color']) + dark green + + >>> print(GcomposeH.nodes[3]['color']) + black + + Similarly, we can update edge attributes after the compose operation in a way we prefer: + + >>> edge_data = {e: G.edges[e]['weight'] * H.edges[e]['weight'] for e in G.edges & H.edges} + >>> nx.set_edge_attributes(GcomposeH, edge_data, 'weight') + >>> print(GcomposeH.edges[(0, 1)]['weight']) + 20.0 + + >>> print(GcomposeH.edges[(3, 0)]['weight']) + 100.0 + """ + return nx.compose_all([G, H]) + + +def full_join(G, H, rename=(None, None)): + """Returns the full join of graphs G and H. + + Full join is the union of G and H in which all edges between + G and H are added. + The node sets of G and H must be disjoint, + otherwise an exception is raised. + + Parameters + ---------- + G, H : graph + A NetworkX graph + + rename : tuple , default=(None, None) + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : The full join graph with the same type as G. + + Notes + ----- + It is recommended that G and H be either both directed or both undirected. + + If G is directed, then edges from G to H are added as well as from H to G. + + Note that full_join() does not produce parallel edges for MultiGraphs. + + The full join operation of graphs G and H is the same as getting + their complement, performing a disjoint union, and finally getting + the complement of the resulting graph. + + Graph, edge, and node attributes are propagated from G and H + to the union graph. If a graph attribute is present in both + G and H the value from H is used. 
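The complement / disjoint-union identity stated in the Notes can be checked directly; a small sketch, assuming a standard NetworkX session:

    >>> import networkx as nx
    >>> G = nx.Graph([(0, 1), (0, 2)])
    >>> H = nx.Graph([(3, 4)])
    >>> direct = nx.full_join(G, H)
    >>> via_complements = nx.complement(nx.disjoint_union(nx.complement(G), nx.complement(H)))
    >>> nx.is_isomorphic(direct, via_complements)
    True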
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> H = nx.Graph([(3, 4)]) + >>> R = nx.full_join(G, H, rename=("G", "H")) + >>> R.nodes + NodeView(('G0', 'G1', 'G2', 'H3', 'H4')) + >>> R.edges + EdgeView([('G0', 'G1'), ('G0', 'G2'), ('G0', 'H3'), ('G0', 'H4'), ('G1', 'H3'), ('G1', 'H4'), ('G2', 'H3'), ('G2', 'H4'), ('H3', 'H4')]) + + See Also + -------- + union + disjoint_union + """ + R = union(G, H, rename) + + def add_prefix(graph, prefix): + if prefix is None: + return graph + + def label(x): + if isinstance(x, str): + name = prefix + x + else: + name = prefix + repr(x) + return name + + return nx.relabel_nodes(graph, label) + + G = add_prefix(G, rename[0]) + H = add_prefix(H, rename[1]) + + for i in G: + for j in H: + R.add_edge(i, j) + if R.is_directed(): + for i in H: + for j in G: + R.add_edge(i, j) + + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/product.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/product.py new file mode 100644 index 0000000..4c56bbe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/product.py @@ -0,0 +1,461 @@ +""" +Graph products. +""" +from itertools import product + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "tensor_product", + "cartesian_product", + "lexicographic_product", + "strong_product", + "power", + "rooted_product", +] + + +def _dict_product(d1, d2): + return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)} + + +# Generators for producting graph products +def _node_product(G, H): + for u, v in product(G, H): + yield ((u, v), _dict_product(G.nodes[u], H.nodes[v])) + + +def _directed_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), (j, k), _dict_product(c, d) + + +def _undirected_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), (j, k), _dict_product(c, d) + + +def _edges_cross_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + yield (u, x), (v, x), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + if H.is_multigraph(): + yield (u, x), (v, x), None, d + else: + yield 
(u, x), (v, x), d + + +def _nodes_cross_edges(G, H): + if H.is_multigraph(): + for x in G: + for u, v, k, d in H.edges(data=True, keys=True): + yield (x, u), (x, v), k, d + else: + for x in G: + for u, v, d in H.edges(data=True): + if G.is_multigraph(): + yield (x, u), (x, v), None, d + else: + yield (x, u), (x, v), d + + +def _edges_cross_nodes_and_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + for y in H: + yield (u, x), (v, y), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + for y in H: + if H.is_multigraph(): + yield (u, x), (v, y), None, d + else: + yield (u, x), (v, y), d + + +def _init_product_graph(G, H): + if not G.is_directed() == H.is_directed(): + msg = "G and H must be both directed or both undirected" + raise nx.NetworkXError(msg) + if G.is_multigraph() or H.is_multigraph(): + GH = nx.MultiGraph() + else: + GH = nx.Graph() + if G.is_directed(): + GH = GH.to_directed() + return GH + + +def tensor_product(G, H): + r"""Returns the tensor product of G and H. + + The tensor product $P$ of the graphs $G$ and $H$ has a node set that + is the tensor product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if $(u,x)$ is an edge in $G$ + and $(v,y)$ is an edge in $H$. + + Tensor product is sometimes also referred to as the categorical product, + direct product, cardinal product or conjunction. + + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The tensor product of G and H. P will be a multi-graph if either G + or H is a multi-graph, will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. + + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.tensor_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_directed_edges_cross_edges(G, H)) + if not GH.is_directed(): + GH.add_edges_from(_undirected_edges_cross_edges(G, H)) + return GH + + +def cartesian_product(G, H): + r"""Returns the Cartesian product of G and H. + + The Cartesian product $P$ of the graphs $G$ and $H$ has a node set that + is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v),(x,y))$ if and only if either $u$ is equal to $x$ + and both $v$ and $y$ are adjacent in $H$ or if $v$ is equal to $y$ and + both $u$ and $x$ are adjacent in $G$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.cartesian_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_edges_cross_nodes(G, H)) + GH.add_edges_from(_nodes_cross_edges(G, H)) + return GH + + +def lexicographic_product(G, H): + r"""Returns the lexicographic product of G and H. + + The lexicographical product $P$ of the graphs $G$ and $H$ has a node set + that is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if $(u,v)$ is an edge in $G$ + or $u==v$ and $(x,y)$ is an edge in $H$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. + + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.lexicographic_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + # Edges in G regardless of H designation + GH.add_edges_from(_edges_cross_nodes_and_nodes(G, H)) + # For each x in G, only if there is an edge in H + GH.add_edges_from(_nodes_cross_edges(G, H)) + return GH + + +def strong_product(G, H): + r"""Returns the strong product of G and H. + + The strong product $P$ of the graphs $G$ and $H$ has a node set that + is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if + $u==v$ and $(x,y)$ is an edge in $H$, or + $x==y$ and $(u,v)$ is an edge in $G$, or + $(u,v)$ is an edge in $G$ and $(x,y)$ is an edge in $H$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. 
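+
+    As a small illustration of this definition (a sketch using only
+    public NetworkX calls): the strong product of two single edges is the
+    complete graph on four nodes.
+
+    >>> K2 = nx.complete_graph(2)
+    >>> nx.is_isomorphic(nx.strong_product(K2, K2), nx.complete_graph(4))
+    True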
+ + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.strong_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_nodes_cross_edges(G, H)) + GH.add_edges_from(_edges_cross_nodes(G, H)) + GH.add_edges_from(_directed_edges_cross_edges(G, H)) + if not GH.is_directed(): + GH.add_edges_from(_undirected_edges_cross_edges(G, H)) + return GH + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def power(G, k): + """Returns the specified power of a graph. + + The $k$th power of a simple graph $G$, denoted $G^k$, is a + graph on the same set of nodes in which two distinct nodes $u$ and + $v$ are adjacent in $G^k$ if and only if the shortest path + distance between $u$ and $v$ in $G$ is at most $k$. + + Parameters + ---------- + G : graph + A NetworkX simple graph object. + + k : positive integer + The power to which to raise the graph `G`. + + Returns + ------- + NetworkX simple graph + `G` to the power `k`. + + Raises + ------ + ValueError + If the exponent `k` is not positive. + + NetworkXNotImplemented + If `G` is not a simple graph. + + Examples + -------- + The number of edges will never decrease when taking successive + powers: + + >>> G = nx.path_graph(4) + >>> list(nx.power(G, 2).edges) + [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)] + >>> list(nx.power(G, 3).edges) + [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)] + + The `k`th power of a cycle graph on *n* nodes is the complete graph + on *n* nodes, if `k` is at least ``n // 2``: + + >>> G = nx.cycle_graph(5) + >>> H = nx.complete_graph(5) + >>> nx.is_isomorphic(nx.power(G, 2), H) + True + >>> G = nx.cycle_graph(8) + >>> H = nx.complete_graph(8) + >>> nx.is_isomorphic(nx.power(G, 4), H) + True + + References + ---------- + .. [1] J. A. Bondy, U. S. R. Murty, *Graph Theory*. Springer, 2008. + + Notes + ----- + This definition of "power graph" comes from Exercise 3.1.6 of + *Graph Theory* by Bondy and Murty [1]_. + + """ + if k <= 0: + raise ValueError("k must be a positive integer") + H = nx.Graph() + H.add_nodes_from(G) + # update BFS code to ignore self loops. + for n in G: + seen = {} # level (number of hops) when seen in BFS + level = 1 # the current level + nextlevel = G[n] + while nextlevel: + thislevel = nextlevel # advance to next level + nextlevel = {} # and start a new list (fringe) + for v in thislevel: + if v == n: # avoid self loop + continue + if v not in seen: + seen[v] = level # set the level of vertex v + nextlevel.update(G[v]) # add neighbors of v + if k <= level: + break + level += 1 + H.add_edges_from((n, nbr) for nbr in seen) + return H + + +@not_implemented_for("multigraph") +def rooted_product(G, H, root): + """Return the rooted product of graphs G and H rooted at root in H. + + A new graph is constructed representing the rooted product of + the inputted graphs, G and H, with a root in H. + A rooted product duplicates H for each nodes in G with the root + of H corresponding to the node in G. Nodes are renamed as the direct + product of G and H. The result is a subgraph of the cartesian product. 
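+
+    Since each copy of H meets G only at its root, the node and edge
+    counts follow directly (a small illustrative check; the star
+    generator is just one convenient choice of H):
+
+    >>> G, H = nx.path_graph(3), nx.star_graph(2)
+    >>> R = nx.rooted_product(G, H, 0)
+    >>> len(R) == len(G) * len(H)
+    True
+    >>> R.size() == G.size() + len(G) * H.size()
+    True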
+ + Parameters + ---------- + G,H : graph + A NetworkX graph + root : node + A node in H + + Returns + ------- + R : The rooted product of G and H with a specified root in H + + Notes + ----- + The nodes of R are the Cartesian Product of the nodes of G and H. + The nodes of G and H are not relabeled. + """ + if root not in H: + raise nx.NetworkXError("root must be a vertex in H") + + R = nx.Graph() + R.add_nodes_from(product(G, H)) + + R.add_edges_from(((e[0], root), (e[1], root)) for e in G.edges()) + R.add_edges_from(((g, e[0]), (g, e[1])) for g in G for e in H.edges()) + + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_all.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_all.py new file mode 100644 index 0000000..e09791b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_all.py @@ -0,0 +1,300 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_union_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + j = g.copy() + j.graph["name"] = "j" + j.graph["attr"] = "attr" + j.nodes[0]["x"] = 7 + + ghj = nx.union_all([g, h, j], rename=("g", "h", "j")) + assert set(ghj.nodes()) == {"h0", "h1", "g0", "g1", "j0", "j1"} + for n in ghj: + graph, node = n + assert ghj.nodes[n] == eval(graph).nodes[int(node)] + + assert ghj.graph["attr"] == "attr" + assert ghj.graph["name"] == "j" # j graph attributes take precendent + + +def test_intersection_all(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4]) + R.add_edge(2, 3) + R.add_edge(4, 1) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_all_different_node_sets(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 6, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(6, 7) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4, 8, 9]) + R.add_edge(2, 3) + R.add_edge(4, 1) + R.add_edge(8, 9) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_attributes_different_node_sets(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + g.add_node(2) + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == 
set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_all_multigraph_attributes_different_node_sets(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(1, 2, key=1) + g.add_edge(1, 2, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_union_all_and_compose_all(): + K3 = nx.complete_graph(3) + P3 = nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union_all([G1, G2]) + H = nx.compose_all([G1, G2]) + assert edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", "1") + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union_all([H, G1], rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union_all([H, G2], rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose_all([G, G]) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union_all([G2, G2], rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union_all([G, G]) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union_all([G1, G2]) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G2 = nx.DiGraph() + G2.add_edge(1, 2) + G3 = nx.DiGraph() + G3.add_edge(11, 22) + G4 = nx.union_all([G1, G2, G3], rename=("G1", "G2", "G3")) + assert sorted(G4.nodes()) == ["G1A", "G1B", "G21", "G22", "G311", "G322"] + + +def test_union_all_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union_all([G, H]) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_input_output(): + l = [nx.Graph([(1, 2)]), nx.Graph([(3, 4)])] + U = nx.disjoint_union_all(l) + assert len(l) == 2 + C = nx.compose_all(l) + assert len(l) == 2 + l = [nx.Graph([(1, 2)]), nx.Graph([(1, 2)])] + R = nx.intersection_all(l) + assert len(l) == 2 + + +def test_mixed_type_union(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.union_all([G, H, I]) + + +def test_mixed_type_disjoint_union(): + with 
pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.disjoint_union_all([G, H, I]) + + +def test_mixed_type_intersection(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.intersection_all([G, H, I]) + + +def test_mixed_type_compose(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.compose_all([G, H, I]) + + +def test_empty_union(): + with pytest.raises(ValueError): + nx.union_all([]) + + +def test_empty_disjoint_union(): + with pytest.raises(ValueError): + nx.disjoint_union_all([]) + + +def test_empty_compose_all(): + with pytest.raises(ValueError): + nx.compose_all([]) + + +def test_empty_intersection_all(): + with pytest.raises(ValueError): + nx.intersection_all([]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_binary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_binary.py new file mode 100644 index 0000000..f11e159 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_binary.py @@ -0,0 +1,448 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_union_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.union(g, h, rename=("g", "h")) + assert set(gh.nodes()) == {"h0", "h1", "g0", "g1"} + for n in gh: + graph, node = n + assert gh.nodes[n] == eval(graph).nodes[int(node)] + + assert gh.graph["attr"] == "attr" + assert gh.graph["name"] == "h" # h graph attributes take precendent + + +def test_intersection(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + I = nx.intersection(G, H) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_node_sets_different(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4, 5, 6]) + H.add_edge(2, 3) + H.add_edge(3, 4) + H.add_edge(5, 6) + I = nx.intersection(G, H) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + gh = nx.intersection(g, h) + + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_attributes_node_sets_different(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_node(2, x=3) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + h.remove_node(2) + + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) 
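+    # key 0 is the only edge key present in both g and h, so it is the
+    # sole edge that survives the intersection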
+ assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_multigraph_attributes_node_set_different(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(0, 2, key=2) + g.add_edge(0, 2, key=1) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_difference(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(3, 4)] + D = nx.symmetric_difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2), (3, 4)] + + +def test_difference2(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + H.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + H.add_edge(1, 2) + G.add_edge(2, 3) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(2, 3)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [] + H.add_edge(3, 4) + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(3, 4)] + + +def test_difference_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [] + + +def test_difference_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1), (0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 1), (0, 1, 2)] + + +def test_difference_raise(): + G = nx.path_graph(4) + H = nx.path_graph(3) + pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + + +def test_symmetric_difference_multigraph(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.symmetric_difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == 3 * [(0, 1)] + assert sorted(sorted(e) for e in gh.edges(keys=True)) == [ + [0, 1, 1], + [0, 1, 2], + [0, 1, 3], + ] + + +def test_union_and_compose(): + K3 = nx.complete_graph(3) + P3 = nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union(G1, G2) + H = nx.compose(G1, G2) + assert 
edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", 1) + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union(H, G1, rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union(H, G2, rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose(G, G) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union(G2, G2, rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union(G, G) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union(G1, G2) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([(1, {"a1": 1})]) + H.add_nodes_from([(1, {"b1": 1})]) + R = nx.compose(G, H) + assert R.nodes == {1: {"a1": 1, "b1": 1}} + + +def test_union_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_disjoint_union_multigraph(): + G = nx.MultiGraph() + G.add_edge(0, 1, key=0) + G.add_edge(0, 1, key=1) + H = nx.MultiGraph() + H.add_edge(2, 3, key=0) + H.add_edge(2, 3, key=1) + GH = nx.disjoint_union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_compose_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + H.add_edge(1, 2, key=2) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_full_join_graph(): + # Simple Graphs + G = nx.Graph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.Graph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename graphs with string-like nodes + G = nx.Graph() + G.add_node("a") + G.add_edge("b", "c") + H = nx.Graph() + H.add_edge("d", "e") + + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"ga", "gb", "gc", "hd", "he"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # DiGraphs + G = nx.DiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.DiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert 
len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # DiGraphs Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def test_full_join_multigraph(): + # MultiGraphs + G = nx.MultiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiDiGraphs + G = nx.MultiDiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiDiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # MultiDiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def test_mixed_type_union(): + G = nx.Graph() + H = nx.MultiGraph() + pytest.raises(nx.NetworkXError, nx.union, G, H) + pytest.raises(nx.NetworkXError, nx.disjoint_union, G, H) + pytest.raises(nx.NetworkXError, nx.intersection, G, H) + pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + pytest.raises(nx.NetworkXError, nx.compose, G, H) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_product.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_product.py new file mode 100644 index 0000000..fb97756 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_product.py @@ -0,0 +1,427 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_tensor_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.tensor_product(nx.DiGraph(), nx.Graph()) + + +def test_tensor_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.tensor_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
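+    # ("v.v." = vice versa: the second half of this block repeats the
+    # checks with the argument order swapped)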
+ G = nx.tensor_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_tensor_product_size(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_tensor_product_combinations(): + # basic smoke test, more realistic tests would be useful + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + G = nx.tensor_product(nx.DiGraph(P5), nx.DiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + +def test_tensor_product_classic_result(): + K2 = nx.complete_graph(2) + G = nx.petersen_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.desargues_graph()) + + G = nx.cycle_graph(5) + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cycle_graph(10)) + + G = nx.tetrahedral_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + +def test_tensor_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.tensor_product(G, H) + + for (u_G, u_H) in GH.nodes(): + for (v_G, v_H) in GH.nodes(): + if H.has_edge(u_H, v_H) and G.has_edge(u_G, v_G): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_cartesian_product_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.cartesian_product(G, H) + assert set(GH) == {(1, 3), (2, 3), (2, 4), (1, 4)} + assert {(frozenset([u, v]), k) for u, v, k in GH.edges(keys=True)} == { + (frozenset([u, v]), k) + for u, v, k in [ + ((1, 3), (2, 3), 0), + ((1, 3), (2, 3), 1), + ((1, 3), (1, 4), 0), + ((1, 3), (1, 4), 1), + ((2, 3), (2, 4), 0), + ((2, 3), (2, 4), 1), + ((2, 4), (1, 4), 0), + ((2, 4), (1, 4), 1), + ] + } + + +def test_cartesian_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.cartesian_product(nx.DiGraph(), nx.Graph()) + + +def test_cartesian_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.cartesian_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.cartesian_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_cartesian_product_size(): + # order(GXH)=order(G)*order(H) + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.cartesian_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + assert nx.number_of_edges(G) == nx.number_of_edges(P5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(P5) + G = nx.cartesian_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + assert nx.number_of_edges(G) == nx.number_of_edges(K5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(K5) + + +def test_cartesian_product_classic(): + # test some classic product graphs + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + # cube = 2-path X 2-path + G = nx.cartesian_product(P2, P2) + G = nx.cartesian_product(P2, G) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + # 3x3 grid + G = nx.cartesian_product(P3, P3) + assert nx.is_isomorphic(G, nx.grid_2d_graph(3, 3)) + + +def test_cartesian_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.cartesian_product(G, H) + + for (u_G, u_H) in GH.nodes(): + for (v_G, v_H) in GH.nodes(): + if (u_G == v_G and H.has_edge(u_H, v_H)) or ( + u_H == v_H and G.has_edge(u_G, v_G) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_lexicographic_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.lexicographic_product(nx.DiGraph(), nx.Graph()) + + +def test_lexicographic_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.lexicographic_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.lexicographic_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_lexicographic_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_lexicographic_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for lexicographic product + + +def test_lexicographic_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.lexicographic_product(G, H) + + for (u_G, u_H) in GH.nodes(): + for (v_G, v_H) in GH.nodes(): + if G.has_edge(u_G, v_G) or (u_G == v_G and H.has_edge(u_H, v_H)): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_strong_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.strong_product(nx.DiGraph(), nx.Graph()) + + +def test_strong_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.strong_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.strong_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_strong_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_strong_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for strong product + + +def test_strong_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.strong_product(G, H) + + for (u_G, u_H) in GH.nodes(): + for (v_G, v_H) in GH.nodes(): + if ( + (u_G == v_G and H.has_edge(u_H, v_H)) + or (u_H == v_H and G.has_edge(u_G, v_G)) + or (G.has_edge(u_G, v_G) and H.has_edge(u_H, v_H)) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_graph_power_raises(): + with pytest.raises(nx.NetworkXNotImplemented): + nx.power(nx.MultiDiGraph(), 2) + + +def test_graph_power(): + # wikipedia example for graph power + G = nx.cycle_graph(7) + G.add_edge(6, 7) + G.add_edge(7, 8) + G.add_edge(8, 9) + G.add_edge(9, 2) + H = nx.power(G, 2) + + assert edges_equal( + list(H.edges()), + [ + (0, 1), + (0, 2), + (0, 5), + (0, 6), + (0, 7), + (1, 9), + (1, 2), + (1, 3), + (1, 6), + (2, 3), + (2, 4), + (2, 8), + (2, 9), + (3, 4), + (3, 5), + (3, 9), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (6, 7), + (6, 8), + (7, 8), + (7, 9), + (8, 9), + ], + ) + + +def test_graph_power_negative(): + with pytest.raises(ValueError): + nx.power(nx.Graph(), -1) + + +def test_rooted_product_raises(): + with pytest.raises(nx.NetworkXError): + nx.rooted_product(nx.Graph(), nx.path_graph(2), 10) + + +def test_rooted_product(): + G = nx.cycle_graph(5) + H = nx.Graph() + H.add_edges_from([("a", "b"), ("b", "c"), ("b", "d")]) + R = nx.rooted_product(G, H, "a") + assert len(R) == len(G) * len(H) + assert R.size() == G.size() + len(G) * H.size() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_unary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_unary.py new file mode 100644 index 0000000..d68e55c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/tests/test_unary.py @@ -0,0 +1,55 @@ +import pytest + +import networkx as nx + + +def test_complement(): + null = nx.null_graph() + empty1 = nx.empty_graph(1) + empty10 = nx.empty_graph(10) + K3 = 
nx.complete_graph(3) + K5 = nx.complete_graph(5) + K10 = nx.complete_graph(10) + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + P10 = nx.path_graph(10) + # complement of the complete graph is empty + + G = nx.complement(K3) + assert nx.is_isomorphic(G, nx.empty_graph(3)) + G = nx.complement(K5) + assert nx.is_isomorphic(G, nx.empty_graph(5)) + # for any G, G=complement(complement(G)) + P3cc = nx.complement(nx.complement(P3)) + assert nx.is_isomorphic(P3, P3cc) + nullcc = nx.complement(nx.complement(null)) + assert nx.is_isomorphic(null, nullcc) + b = nx.bull_graph() + bcc = nx.complement(nx.complement(b)) + assert nx.is_isomorphic(b, bcc) + + +def test_complement_2(): + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G1C = nx.complement(G1) + assert sorted(G1C.edges()) == [ + ("B", "A"), + ("B", "C"), + ("B", "D"), + ("C", "A"), + ("C", "B"), + ("C", "D"), + ("D", "A"), + ("D", "B"), + ("D", "C"), + ] + + +def test_reverse1(): + # Other tests for reverse are done by the DiGraph and MultiDigraph. + G1 = nx.Graph() + pytest.raises(nx.NetworkXError, nx.reverse, G1) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/unary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/unary.py new file mode 100644 index 0000000..f522956 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/operators/unary.py @@ -0,0 +1,74 @@ +"""Unary operations on graphs""" +import networkx as nx + +__all__ = ["complement", "reverse"] + + +def complement(G): + """Returns the graph complement of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + GC : A new graph. + + Notes + ----- + Note that `complement` does not create self-loops and also + does not produce parallel edges for MultiGraphs. + + Graph, node, and edge data are not propagated to the new graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_complement = nx.complement(G) + >>> G_complement.edges() # This shows the edges of the complemented graph + EdgeView([(1, 4), (1, 5), (2, 4), (2, 5), (4, 5)]) + + """ + R = G.__class__() + R.add_nodes_from(G) + R.add_edges_from( + ((n, n2) for n, nbrs in G.adjacency() for n2 in G if n2 not in nbrs if n != n2) + ) + return R + + +def reverse(G, copy=True): + """Returns the reverse directed graph of G. + + Parameters + ---------- + G : directed graph + A NetworkX directed graph + copy : bool + If True, then a new graph is returned. If False, then the graph is + reversed in place. + + Returns + ------- + H : directed graph + The reversed G. + + Raises + ------ + NetworkXError + If graph is undirected. 
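+
+    Notes
+    -----
+    With ``copy=False`` the reversal is returned as a read-only view of
+    `G` rather than an independent copy (a minimal sketch):
+
+    >>> H = nx.reverse(nx.DiGraph([(1, 2)]), copy=False)
+    >>> list(H.edges)
+    [(2, 1)]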
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_reversed = nx.reverse(G) + >>> G_reversed.edges() + OutEdgeView([(2, 1), (3, 1), (3, 2), (4, 3), (5, 3)]) + + """ + if not G.is_directed(): + raise nx.NetworkXError("Cannot reverse an undirected graph.") + else: + return G.reverse(copy=copy) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/planar_drawing.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/planar_drawing.py new file mode 100644 index 0000000..acbef23 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/planar_drawing.py @@ -0,0 +1,464 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["combinatorial_embedding_to_pos"] + + +def combinatorial_embedding_to_pos(embedding, fully_triangulate=False): + """Assigns every node a (x, y) position based on the given embedding + + The algorithm iteratively inserts nodes of the input graph in a certain + order and rearranges previously inserted nodes so that the planar drawing + stays valid. This is done efficiently by only maintaining relative + positions during the node placements and calculating the absolute positions + at the end. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + This defines the order of the edges + + fully_triangulate : bool + If set to True the algorithm adds edges to a copy of the input + embedding and makes it chordal. + + Returns + ------- + pos : dict + Maps each node to a tuple that defines the (x, y) position + + References + ---------- + .. [1] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + if len(embedding.nodes()) < 4: + # Position the node in any triangle + default_positions = [(0, 0), (2, 0), (1, 1)] + pos = {} + for i, v in enumerate(embedding.nodes()): + pos[v] = default_positions[i] + return pos + + embedding, outer_face = triangulate_embedding(embedding, fully_triangulate) + + # The following dicts map a node to another node + # If a node is not in the key set it means that the node is not yet in G_k + # If a node maps to None then the corresponding subtree does not exist + left_t_child = {} + right_t_child = {} + + # The following dicts map a node to an integer + delta_x = {} + y_coordinate = {} + + node_list = get_canonical_ordering(embedding, outer_face) + + # 1. 
Phase: Compute relative positions + + # Initialization + v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0] + + delta_x[v1] = 0 + y_coordinate[v1] = 0 + right_t_child[v1] = v3 + left_t_child[v1] = None + + delta_x[v2] = 1 + y_coordinate[v2] = 0 + right_t_child[v2] = None + left_t_child[v2] = None + + delta_x[v3] = 1 + y_coordinate[v3] = 1 + right_t_child[v3] = v2 + left_t_child[v3] = None + + for k in range(3, len(node_list)): + vk, contour_neighbors = node_list[k] + wp = contour_neighbors[0] + wp1 = contour_neighbors[1] + wq = contour_neighbors[-1] + wq1 = contour_neighbors[-2] + adds_mult_tri = len(contour_neighbors) > 2 + + # Stretch gaps: + delta_x[wp1] += 1 + delta_x[wq] += 1 + + delta_x_wp_wq = sum(delta_x[x] for x in contour_neighbors[1:]) + + # Adjust offsets + delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + delta_x[wq] = delta_x_wp_wq - delta_x[vk] + if adds_mult_tri: + delta_x[wp1] -= delta_x[vk] + + # Install v_k: + right_t_child[wp] = vk + right_t_child[vk] = wq + if adds_mult_tri: + left_t_child[vk] = wp1 + right_t_child[wq1] = None + else: + left_t_child[vk] = None + + # 2. Phase: Set absolute positions + pos = dict() + pos[v1] = (0, y_coordinate[v1]) + remaining_nodes = [v1] + while remaining_nodes: + parent_node = remaining_nodes.pop() + + # Calculate position for left child + set_position( + parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + # Calculate position for right child + set_position( + parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + return pos + + +def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos): + """Helper method to calculate the absolute position of nodes.""" + child = tree[parent] + parent_node_x = pos[parent][0] + if child is not None: + # Calculate pos of child + child_x = parent_node_x + delta_x[child] + pos[child] = (child_x, y_coordinate[child]) + # Remember to calculate pos of its children + remaining_nodes.append(child) + + +def get_canonical_ordering(embedding, outer_face): + """Returns a canonical ordering of the nodes + + The canonical ordering of nodes (v1, ..., vn) must fulfill the following + conditions: + (See Lemma 1 in [2]_) + + - For the subgraph G_k of the input graph induced by v1, ..., vk it holds: + - 2-connected + - internally triangulated + - the edge (v1, v2) is part of the outer face + - For a node v(k+1) the following holds: + - The node v(k+1) is part of the outer face of G_k + - It has at least two neighbors in G_k + - All neighbors of v(k+1) in G_k lie consecutively on the outer face of + G_k (excluding the edge (v1, v2)). + + The algorithm used here starts with G_n (containing all nodes). It first + selects the nodes v1 and v2. And then tries to find the order of the other + nodes by checking which node can be removed in order to fulfill the + conditions mentioned above. This is done by calculating the number of + chords of nodes on the outer face. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The embedding must be triangulated + outer_face : list + The nodes on the outer face of the graph + + Returns + ------- + ordering : list + A list of tuples `(vk, wp_wq)`. Here `vk` is the node at this position + in the canonical ordering. The element `wp_wq` is a list of nodes that + make up the outer face of G_k. + + References + ---------- + .. [1] Steven Chaplick. 
+ Canonical Orders of Planar Graphs and (some of) Their Applications 2015 + https://wuecampus2.uni-wuerzburg.de/moodle/pluginfile.php/545727/mod_resource/content/0/vg-ss15-vl03-canonical-orders-druckversion.pdf + .. [2] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + v1 = outer_face[0] + v2 = outer_face[1] + chords = defaultdict(int) # Maps nodes to the number of their chords + marked_nodes = set() + ready_to_pick = set(outer_face) + + # Initialize outer_face_ccw_nbr (do not include v1 -> v2) + outer_face_ccw_nbr = {} + prev_nbr = v2 + for idx in range(2, len(outer_face)): + outer_face_ccw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + outer_face_ccw_nbr[prev_nbr] = v1 + + # Initialize outer_face_cw_nbr (do not include v2 -> v1) + outer_face_cw_nbr = {} + prev_nbr = v1 + for idx in range(len(outer_face) - 1, 0, -1): + outer_face_cw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + + def is_outer_face_nbr(x, y): + if x not in outer_face_ccw_nbr: + return outer_face_cw_nbr[x] == y + if x not in outer_face_cw_nbr: + return outer_face_ccw_nbr[x] == y + return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y + + def is_on_outer_face(x): + return x not in marked_nodes and (x in outer_face_ccw_nbr.keys() or x == v1) + + # Initialize number of chords + for v in outer_face: + for nbr in embedding.neighbors_cw_order(v): + if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr): + chords[v] += 1 + ready_to_pick.discard(v) + + # Initialize canonical_ordering + canonical_ordering = [None] * len(embedding.nodes()) # type: list + canonical_ordering[0] = (v1, []) + canonical_ordering[1] = (v2, []) + ready_to_pick.discard(v1) + ready_to_pick.discard(v2) + + for k in range(len(embedding.nodes()) - 1, 1, -1): + # 1. 
Pick v from ready_to_pick + v = ready_to_pick.pop() + marked_nodes.add(v) + + # v has exactly two neighbors on the outer face (wp and wq) + wp = None + wq = None + # Iterate over neighbors of v to find wp and wq + nbr_iterator = iter(embedding.neighbors_cw_order(v)) + while True: + nbr = next(nbr_iterator) + if nbr in marked_nodes: + # Only consider nodes that are not yet removed + continue + if is_on_outer_face(nbr): + # nbr is either wp or wq + if nbr == v1: + wp = v1 + elif nbr == v2: + wq = v2 + else: + if outer_face_cw_nbr[nbr] == v: + # nbr is wp + wp = nbr + else: + # nbr is wq + wq = nbr + if wp is not None and wq is not None: + # We don't need to iterate any further + break + + # Obtain new nodes on outer face (neighbors of v from wp to wq) + wp_wq = [wp] + nbr = wp + while nbr != wq: + # Get next neighbor (clockwise on the outer face) + next_nbr = embedding[v][nbr]["ccw"] + wp_wq.append(next_nbr) + # Update outer face + outer_face_cw_nbr[nbr] = next_nbr + outer_face_ccw_nbr[next_nbr] = nbr + # Move to next neighbor of v + nbr = next_nbr + + if len(wp_wq) == 2: + # There was a chord between wp and wq, decrease number of chords + chords[wp] -= 1 + if chords[wp] == 0: + ready_to_pick.add(wp) + chords[wq] -= 1 + if chords[wq] == 0: + ready_to_pick.add(wq) + else: + # Update all chords involving w_(p+1) to w_(q-1) + new_face_nodes = set(wp_wq[1:-1]) + for w in new_face_nodes: + # If we do not find a chord for w later we can pick it next + ready_to_pick.add(w) + for nbr in embedding.neighbors_cw_order(w): + if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr): + # There is a chord involving w + chords[w] += 1 + ready_to_pick.discard(w) + if nbr not in new_face_nodes: + # Also increase chord for the neighbor + # We only iterator over new_face_nodes + chords[nbr] += 1 + ready_to_pick.discard(nbr) + # Set the canonical ordering node and the list of contour neighbors + canonical_ordering[k] = (v, wp_wq) + + return canonical_ordering + + +def triangulate_face(embedding, v1, v2): + """Triangulates the face given by half edge (v, w) + + Parameters + ---------- + embedding : nx.PlanarEmbedding + v1 : node + The half-edge (v1, v2) belongs to the face that gets triangulated + v2 : node + """ + _, v3 = embedding.next_face_half_edge(v1, v2) + _, v4 = embedding.next_face_half_edge(v2, v3) + if v1 == v2 or v1 == v3: + # The component has less than 3 nodes + return + while v1 != v4: + # Add edge if not already present on other side + if embedding.has_edge(v1, v3): + # Cannot triangulate at this position + v1, v2, v3 = v2, v3, v4 + else: + # Add edge for triangulation + embedding.add_half_edge_cw(v1, v3, v2) + embedding.add_half_edge_ccw(v3, v1, v2) + v1, v2, v3 = v1, v3, v4 + # Get next node + _, v4 = embedding.next_face_half_edge(v2, v3) + + +def triangulate_embedding(embedding, fully_triangulate=True): + """Triangulates the embedding. + + Traverses faces of the embedding and adds edges to a copy of the + embedding to triangulate it. + The method also ensures that the resulting graph is 2-connected by adding + edges if the same vertex is contained twice on a path around a face. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The input graph must contain at least 3 nodes. + + fully_triangulate : bool + If set to False the face with the most nodes is chooses as outer face. + This outer face does not get triangulated. 
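+
+        For instance (an illustrative sketch), fully triangulating any
+        embedding with at least three nodes leaves an outer face of
+        exactly three nodes:
+
+        >>> _, emb = nx.check_planarity(nx.cycle_graph(4))
+        >>> T, outer = triangulate_embedding(emb, fully_triangulate=True)
+        >>> len(outer)
+        3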
+ + Returns + ------- + (embedding, outer_face) : (nx.PlanarEmbedding, list) tuple + The element `embedding` is a new embedding containing all edges from + the input embedding and the additional edges to triangulate the graph. + The element `outer_face` is a list of nodes that lie on the outer face. + If the graph is fully triangulated these are three arbitrary connected + nodes. + + """ + if len(embedding.nodes) <= 1: + return embedding, list(embedding.nodes) + embedding = nx.PlanarEmbedding(embedding) + + # Get a list with a node for each connected component + component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)] + + # 1. Make graph a single component (add edge between components) + for i in range(len(component_nodes) - 1): + v1 = component_nodes[i] + v2 = component_nodes[i + 1] + embedding.connect_components(v1, v2) + + # 2. Calculate faces, ensure 2-connectedness and determine outer face + outer_face = [] # A face with the most number of nodes + face_list = [] + edges_visited = set() # Used to keep track of already visited faces + for v in embedding.nodes(): + for w in embedding.neighbors_cw_order(v): + new_face = make_bi_connected(embedding, v, w, edges_visited) + if new_face: + # Found a new face + face_list.append(new_face) + if len(new_face) > len(outer_face): + # The face is a candidate to be the outer face + outer_face = new_face + + # 3. Triangulate (internal) faces + for face in face_list: + if face is not outer_face or fully_triangulate: + # Triangulate this face + triangulate_face(embedding, face[0], face[1]) + + if fully_triangulate: + v1 = outer_face[0] + v2 = outer_face[1] + v3 = embedding[v2][v1]["ccw"] + outer_face = [v1, v2, v3] + + return embedding, outer_face + + +def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted): + """Triangulate a face and make it 2-connected + + This method also adds all edges on the face to `edges_counted`. 
+ + Parameters + ---------- + embedding: nx.PlanarEmbedding + The embedding that defines the faces + starting_node : node + A node on the face + outgoing_node : node + A node such that the half edge (starting_node, outgoing_node) belongs + to the face + edges_counted: set + Set of all half-edges that belong to a face that have been visited + + Returns + ------- + face_nodes: list + A list of all nodes at the border of this face + """ + + # Check if the face has already been calculated + if (starting_node, outgoing_node) in edges_counted: + # This face was already counted + return [] + edges_counted.add((starting_node, outgoing_node)) + + # Add all edges to edges_counted which have this face to their left + v1 = starting_node + v2 = outgoing_node + face_list = [starting_node] # List of nodes around the face + face_set = set(face_list) # Set for faster queries + _, v3 = embedding.next_face_half_edge(v1, v2) + + # Move the nodes v1, v2, v3 around the face: + while v2 != starting_node or v3 != outgoing_node: + if v1 == v2: + raise nx.NetworkXException("Invalid half-edge") + # cycle is not completed yet + if v2 in face_set: + # v2 encountered twice: Add edge to ensure 2-connectedness + embedding.add_half_edge_cw(v1, v3, v2) + embedding.add_half_edge_ccw(v3, v1, v2) + edges_counted.add((v2, v3)) + edges_counted.add((v3, v1)) + v2 = v1 + else: + face_set.add(v2) + face_list.append(v2) + + # set next edge + v1 = v2 + v2, v3 = embedding.next_face_half_edge(v2, v3) + + # remember that this edge has been counted + edges_counted.add((v1, v2)) + + return face_list diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/planarity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/planarity.py new file mode 100644 index 0000000..bcde0f3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/planarity.py @@ -0,0 +1,1174 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["check_planarity", "is_planar", "PlanarEmbedding"] + + +def is_planar(G): + """Returns True if and only if `G` is planar. + + A graph is *planar* iff it can be drawn in a plane without + any edge intersections. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the graph is planar. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> nx.is_planar(G) + True + >>> nx.is_planar(nx.complete_graph(5)) + False + + See Also + -------- + check_planarity : + Check if graph is planar *and* return a `PlanarEmbedding` instance if True. + """ + + return check_planarity(G, counterexample=False)[0] + + +def check_planarity(G, counterexample=False): + """Check if a graph is planar and return a counterexample or an embedding. + + A graph is planar iff it can be drawn in a plane without + any edge intersections. + + Parameters + ---------- + G : NetworkX graph + counterexample : bool + A Kuratowski subgraph (to proof non planarity) is only returned if set + to true. + + Returns + ------- + (is_planar, certificate) : (bool, NetworkX graph) tuple + is_planar is true if the graph is planar. + If the graph is planar `certificate` is a PlanarEmbedding + otherwise it is a Kuratowski subgraph. 
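+
+    When `G` is not planar and `counterexample` is set, the certificate
+    is a Kuratowski subgraph instead (a short sketch; K5 is its own
+    Kuratowski subgraph):
+
+    >>> is_planar, K = nx.check_planarity(nx.complete_graph(5), counterexample=True)
+    >>> is_planar
+    False
+    >>> K.number_of_edges()
+    10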
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2)])
+    >>> is_planar, P = nx.check_planarity(G)
+    >>> print(is_planar)
+    True
+
+    When `G` is planar, a `PlanarEmbedding` instance is returned:
+
+    >>> P.get_data()
+    {0: [1, 2], 1: [0], 2: [0]}
+
+    Notes
+    -----
+    A (combinatorial) embedding consists of cyclic orderings of the incident
+    edges at each vertex. Given such an embedding there are multiple approaches
+    discussed in the literature for drawing the graph (subject to various
+    constraints, e.g. integer coordinates), see e.g. [2].
+
+    The planarity check algorithm and the extraction of the combinatorial
+    embedding are based on the Left-Right Planarity Test [1].
+
+    A counterexample is only generated if the corresponding parameter is set,
+    because the complexity of the counterexample generation is higher.
+
+    See Also
+    --------
+    is_planar :
+        Check for planarity without creating a `PlanarEmbedding` or counterexample.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes:
+        The Left-Right Planarity Test
+        2009
+        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
+    .. [2] Takao Nishizeki, Md Saidur Rahman:
+        Planar graph drawing
+        Lecture Notes Series on Computing: Volume 12
+        2004
+    """
+
+    planarity_state = LRPlanarity(G)
+    embedding = planarity_state.lr_planarity()
+    if embedding is None:
+        # graph is not planar
+        if counterexample:
+            return False, get_counterexample(G)
+        else:
+            return False, None
+    else:
+        # graph is planar
+        return True, embedding
+
+
+def check_planarity_recursive(G, counterexample=False):
+    """Recursive version of :meth:`check_planarity`."""
+    planarity_state = LRPlanarity(G)
+    embedding = planarity_state.lr_planarity_recursive()
+    if embedding is None:
+        # graph is not planar
+        if counterexample:
+            return False, get_counterexample_recursive(G)
+        else:
+            return False, None
+    else:
+        # graph is planar
+        return True, embedding
+
+
+def get_counterexample(G):
+    """Obtains a Kuratowski subgraph.
+
+    Raises nx.NetworkXException if G is planar.
+
+    The function removes edges such that the graph remains non-planar.
+    At some point the removal of any edge would make the graph planar.
+    The resulting subgraph must be a Kuratowski subgraph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    subgraph : NetworkX graph
+        A Kuratowski subgraph that proves that G is not planar.
+
+    """
+    # copy graph
+    G = nx.Graph(G)
+
+    if check_planarity(G)[0]:
+        raise nx.NetworkXException("G is planar - no counter example.")
+
+    # find Kuratowski subgraph
+    subgraph = nx.Graph()
+    for u in G:
+        nbrs = list(G[u])
+        for v in nbrs:
+            G.remove_edge(u, v)
+            if check_planarity(G)[0]:
+                G.add_edge(u, v)
+                subgraph.add_edge(u, v)
+
+    return subgraph
+
+
+def get_counterexample_recursive(G):
+    """Recursive version of :meth:`get_counterexample`."""
+
+    # copy graph
+    G = nx.Graph(G)
+
+    if check_planarity_recursive(G)[0]:
+        raise nx.NetworkXException("G is planar - no counter example.")
+
+    # find Kuratowski subgraph
+    subgraph = nx.Graph()
+    for u in G:
+        nbrs = list(G[u])
+        for v in nbrs:
+            G.remove_edge(u, v)
+            if check_planarity_recursive(G)[0]:
+                G.add_edge(u, v)
+                subgraph.add_edge(u, v)
+
+    return subgraph
+
+
+class Interval:
+    """Represents a set of return edges.
+
+    All return edges in an interval induce the same constraint on the contained
+    edges, which means that all edges must either have a left orientation or
+    all edges must have a right orientation.
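+
+    An interval with ``low`` and ``high`` both ``None`` is empty and places
+    no constraint on any edge.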
+ """ + + def __init__(self, low=None, high=None): + self.low = low + self.high = high + + def empty(self): + """Check if the interval is empty""" + return self.low is None and self.high is None + + def copy(self): + """Returns a copy of this interval""" + return Interval(self.low, self.high) + + def conflicting(self, b, planarity_state): + """Returns True if interval I conflicts with edge b""" + return ( + not self.empty() + and planarity_state.lowpt[self.high] > planarity_state.lowpt[b] + ) + + +class ConflictPair: + """Represents a different constraint between two intervals. + + The edges in the left interval must have a different orientation than + the one in the right interval. + """ + + def __init__(self, left=Interval(), right=Interval()): + self.left = left + self.right = right + + def swap(self): + """Swap left and right intervals""" + temp = self.left + self.left = self.right + self.right = temp + + def lowest(self, planarity_state): + """Returns the lowest lowpoint of a conflict pair""" + if self.left.empty(): + return planarity_state.lowpt[self.right.low] + if self.right.empty(): + return planarity_state.lowpt[self.left.low] + return min( + planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low] + ) + + +def top_of_stack(l): + """Returns the element on top of the stack.""" + if not l: + return None + return l[-1] + + +class LRPlanarity: + """A class to maintain the state during planarity check.""" + + __slots__ = [ + "G", + "roots", + "height", + "lowpt", + "lowpt2", + "nesting_depth", + "parent_edge", + "DG", + "adjs", + "ordered_adjs", + "ref", + "side", + "S", + "stack_bottom", + "lowpt_edge", + "left_ref", + "right_ref", + "embedding", + ] + + def __init__(self, G): + # copy G without adding self-loops + self.G = nx.Graph() + self.G.add_nodes_from(G.nodes) + for e in G.edges: + if e[0] != e[1]: + self.G.add_edge(e[0], e[1]) + + self.roots = [] + + # distance from tree root + self.height = defaultdict(lambda: None) + + self.lowpt = {} # height of lowest return point of an edge + self.lowpt2 = {} # height of second lowest return point + self.nesting_depth = {} # for nesting order + + # None -> missing edge + self.parent_edge = defaultdict(lambda: None) + + # oriented DFS graph + self.DG = nx.DiGraph() + self.DG.add_nodes_from(G.nodes) + + self.adjs = {} + self.ordered_adjs = {} + + self.ref = defaultdict(lambda: None) + self.side = defaultdict(lambda: 1) + + # stack of conflict pairs + self.S = [] + self.stack_bottom = {} + self.lowpt_edge = {} + + self.left_ref = {} + self.right_ref = {} + + self.embedding = PlanarEmbedding() + + def lr_planarity(self): + """Execute the LR planarity test. + + Returns + ------- + embedding : dict + If the graph is planar an embedding is returned. Otherwise None. 
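+
+        Notes
+        -----
+        A planar graph satisfies $|E| \leq 3|V| - 6$ (for $|V| \geq 3$), so
+        denser graphs are rejected by the edge-count check before the main
+        test runs.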
+ """ + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # make adjacency lists for dfs + for v in self.G: + self.adjs[v] = list(self.G[v]) + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation(v) + + # Free no longer used variables + self.G = None + self.lowpt2 = None + self.adjs = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing(v): + return None + + # Free no longer used variables + self.height = None + self.lowpt = None + self.S = None + self.stack_bottom = None + self.lowpt_edge = None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # Free no longer used variables + self.DG = None + self.nesting_depth = None + self.ref = None + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding(v) + + # Free no longer used variables + self.roots = None + self.parent_edge = None + self.ordered_adjs = None + self.left_ref = None + self.right_ref = None + self.side = None + + return self.embedding + + def lr_planarity_recursive(self): + """Recursive version of :meth:`lr_planarity`.""" + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation_recursive(v) + + # Free no longer used variable + self.G = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing_recursive(v): + return None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding_recursive(v) + + return self.embedding + + def dfs_orientation(self, v): + """Orient the graph by DFS, compute lowpoints and nesting order.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + + for w in self.adjs[v][ind[v] :]: + vw = (v, w) + + if not skip_init[vw]: + if (v, w) in 
self.DG.edges or (w, v) in self.DG.edges: + ind[v] += 1 + continue # the edge was already oriented + + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[vw] = True # don't redo this block + break # handle next node in dfs_stack (i.e. w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + ind[v] += 1 + + def dfs_orientation_recursive(self, v): + """Recursive version of :meth:`dfs_orientation`.""" + e = self.parent_edge[v] + for w in self.G[v]: + if (v, w) in self.DG.edges or (w, v) in self.DG.edges: + continue # the edge was already oriented + vw = (v, w) + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + self.dfs_orientation_recursive(w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + def dfs_testing(self, v): + """Test for LR partition.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + # to indicate whether to skip the final block after the for loop + skip_final = False + + for w in self.ordered_adjs[v][ind[v] :]: + ei = (v, w) + + if not skip_init[ei]: + self.stack_bottom[ei] = top_of_stack(self.S) + + if ei == self.parent_edge[w]: # tree edge + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[ei] = True # don't redo this block + skip_final = True # skip final work after breaking + break # handle next node in dfs_stack (i.e. 
w) + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + ind[v] += 1 + + if not skip_final: + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + + return True + + def dfs_testing_recursive(self, v): + """Recursive version of :meth:`dfs_testing`.""" + e = self.parent_edge[v] + for w in self.ordered_adjs[v]: + ei = (v, w) + self.stack_bottom[ei] = top_of_stack(self.S) + if ei == self.parent_edge[w]: # tree edge + if not self.dfs_testing_recursive(w): + return False + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + return True + + def add_constraints(self, ei, e): + P = ConflictPair() + # merge return edges of e_i into P.right + while True: + Q = self.S.pop() + if not Q.left.empty(): + Q.swap() + if not Q.left.empty(): # not planar + return False + if self.lowpt[Q.right.low] > self.lowpt[e]: + # merge intervals + if P.right.empty(): # topmost interval + P.right = Q.right.copy() + else: + self.ref[P.right.low] = Q.right.high + P.right.low = Q.right.low + else: # align + self.ref[Q.right.low] = self.lowpt_edge[e] + if top_of_stack(self.S) == self.stack_bottom[ei]: + break + # merge conflicting return edges of e_1,...,e_i-1 into P.L + while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack( + self.S + ).right.conflicting(ei, self): + Q = self.S.pop() + if Q.right.conflicting(ei, self): + Q.swap() + if Q.right.conflicting(ei, self): # not planar + return False + # merge interval below lowpt(e_i) into P.R + self.ref[P.right.low] = Q.right.high + if Q.right.low is not None: + P.right.low = Q.right.low + + if P.left.empty(): # topmost interval + P.left = Q.left.copy() + else: + self.ref[P.left.low] = Q.left.high + P.left.low = Q.left.low + + if not (P.left.empty() and P.right.empty()): + self.S.append(P) + return True + + def remove_back_edges(self, e): + u = e[0] + # trim back edges ending at parent u + # drop entire conflict pairs + while self.S and top_of_stack(self.S).lowest(self) == self.height[u]: + P = self.S.pop() + if P.left.low is not None: + self.side[P.left.low] = -1 + + if self.S: # one more conflict pair to consider + P = self.S.pop() + # trim left interval + while P.left.high is not None and P.left.high[1] == u: + P.left.high = self.ref[P.left.high] + if P.left.high is None and P.left.low is not None: + # just emptied + self.ref[P.left.low] = P.right.low + self.side[P.left.low] = -1 + P.left.low = None + # trim right interval + while P.right.high is not None and P.right.high[1] == u: + P.right.high = self.ref[P.right.high] + if P.right.high is None and P.right.low is not None: + # just emptied + self.ref[P.right.low] = P.left.low + self.side[P.right.low] = -1 + P.right.low = None + self.S.append(P) + + # side of e is side of a highest return edge + if 
self.lowpt[e] < self.height[u]:  # e has return edge
+            hl = top_of_stack(self.S).left.high
+            hr = top_of_stack(self.S).right.high
+
+            if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]):
+                self.ref[e] = hl
+            else:
+                self.ref[e] = hr
+
+    def dfs_embedding(self, v):
+        """Completes the embedding."""
+        # the recursion stack
+        dfs_stack = [v]
+        # index of next edge to handle in adjacency list of each node
+        ind = defaultdict(lambda: 0)
+
+        while dfs_stack:
+            v = dfs_stack.pop()
+
+            for w in self.ordered_adjs[v][ind[v] :]:
+                ind[v] += 1
+                ei = (v, w)
+
+                if ei == self.parent_edge[w]:  # tree edge
+                    self.embedding.add_half_edge_first(w, v)
+                    self.left_ref[v] = w
+                    self.right_ref[v] = w
+
+                    dfs_stack.append(v)  # revisit v after finishing w
+                    dfs_stack.append(w)  # visit w next
+                    break  # handle next node in dfs_stack (i.e. w)
+                else:  # back edge
+                    if self.side[ei] == 1:
+                        self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
+                    else:
+                        self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
+                        self.left_ref[w] = v
+
+    def dfs_embedding_recursive(self, v):
+        """Recursive version of :meth:`dfs_embedding`."""
+        for w in self.ordered_adjs[v]:
+            ei = (v, w)
+            if ei == self.parent_edge[w]:  # tree edge
+                self.embedding.add_half_edge_first(w, v)
+                self.left_ref[v] = w
+                self.right_ref[v] = w
+                self.dfs_embedding_recursive(w)
+            else:  # back edge
+                if self.side[ei] == 1:
+                    # place v directly after right_ref[w] in embed. list of w
+                    self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
+                else:
+                    # place v directly before left_ref[w] in embed. list of w
+                    self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
+                    self.left_ref[w] = v
+
+    def sign(self, e):
+        """Resolve the relative side of an edge to the absolute side."""
+        # the recursion stack
+        dfs_stack = [e]
+        # dict to remember reference edges
+        old_ref = defaultdict(lambda: None)
+
+        while dfs_stack:
+            e = dfs_stack.pop()
+
+            if self.ref[e] is not None:
+                dfs_stack.append(e)  # revisit e after finishing self.ref[e]
+                dfs_stack.append(self.ref[e])  # visit self.ref[e] next
+                old_ref[e] = self.ref[e]  # remember value of self.ref[e]
+                self.ref[e] = None
+            else:
+                self.side[e] *= self.side[old_ref[e]]
+
+        return self.side[e]
+
+    def sign_recursive(self, e):
+        """Recursive version of :meth:`sign`."""
+        if self.ref[e] is not None:
+            self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
+            self.ref[e] = None
+        return self.side[e]
+
+
+class PlanarEmbedding(nx.DiGraph):
+    """Represents a planar graph with its planar embedding.
+
+    The planar embedding is given by a `combinatorial embedding
+    <https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
+
+    .. note:: `check_planarity` is the preferred way to check if a graph is planar.
+
+    **Neighbor ordering:**
+
+    In comparison to a usual graph structure, the embedding also stores the
+    order of all neighbors for every vertex.
+    The order of the neighbors can be given in clockwise (cw) direction or
+    counterclockwise (ccw) direction. This order is stored as edge attributes
+    in the underlying directed graph. For the edge (u, v) the edge attribute
+    'cw' is set to the neighbor of u that follows immediately after v in
+    clockwise direction.
+
+    In order for a PlanarEmbedding to be valid it must fulfill multiple
+    conditions. It is possible to check if these conditions are fulfilled with
+    the method :meth:`check_structure`.
+    The conditions are:
+
+    * Edges must go in both directions (because the edge attributes differ)
+    * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a
+      correct planar embedding.
+    * A node with non-zero degree must have a node attribute 'first_nbr'.
+
+    As long as a PlanarEmbedding is invalid only the following methods should
+    be called:
+
+    * :meth:`add_half_edge_ccw`
+    * :meth:`add_half_edge_cw`
+    * :meth:`connect_components`
+    * :meth:`add_half_edge_first`
+
+    Even though the graph is a subclass of nx.DiGraph, it can still be used
+    for algorithms that require undirected graphs, because the method
+    :meth:`is_directed` is overridden. This is possible because a valid
+    PlanarEmbedding must have edges in both directions.
+
+    **Half edges:**
+
+    In methods like `add_half_edge_ccw` the term "half-edge" is used, a term
+    from `doubly connected edge lists
+    <https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used
+    to emphasize that the edge is only in one direction and there exists
+    another half-edge in the opposite direction.
+    While conventional edges always have two faces (including the outer face)
+    next to them, it is possible to assign each half-edge *exactly one* face.
+    For a half-edge (u, v) that is oriented such that u is below v, the face
+    that belongs to (u, v) is to the right of this half-edge.
+
+    See Also
+    --------
+    is_planar :
+        Preferred way to check if an existing graph is planar.
+
+    check_planarity :
+        A convenient way to create a `PlanarEmbedding`. If not planar,
+        it returns a subgraph that shows this.
+
+    Examples
+    --------
+
+    Create an embedding of a star graph (compare `nx.star_graph(3)`):
+
+    >>> G = nx.PlanarEmbedding()
+    >>> G.add_half_edge_cw(0, 1, None)
+    >>> G.add_half_edge_cw(0, 2, 1)
+    >>> G.add_half_edge_cw(0, 3, 2)
+    >>> G.add_half_edge_cw(1, 0, None)
+    >>> G.add_half_edge_cw(2, 0, None)
+    >>> G.add_half_edge_cw(3, 0, None)
+
+    Alternatively the same embedding can also be defined in counterclockwise
+    orientation. The following results in exactly the same PlanarEmbedding:
+
+    >>> G = nx.PlanarEmbedding()
+    >>> G.add_half_edge_ccw(0, 1, None)
+    >>> G.add_half_edge_ccw(0, 3, 1)
+    >>> G.add_half_edge_ccw(0, 2, 3)
+    >>> G.add_half_edge_ccw(1, 0, None)
+    >>> G.add_half_edge_ccw(2, 0, None)
+    >>> G.add_half_edge_ccw(3, 0, None)
+
+    After creating a graph, it is possible to validate that the PlanarEmbedding
+    object is correct:
+
+    >>> G.check_structure()
+
+    """
+
+    def get_data(self):
+        """Converts the adjacency structure into a more readable structure.
+
+        Returns
+        -------
+        embedding : dict
+            A dict mapping all nodes to a list of neighbors sorted in
+            clockwise order.
+
+        See Also
+        --------
+        set_data
+
+        """
+        embedding = dict()
+        for v in self:
+            embedding[v] = list(self.neighbors_cw_order(v))
+        return embedding
+
+    def set_data(self, data):
+        """Inserts edges according to given sorted neighbor list.
+
+        The input format is the same as the output format of get_data().
+
+        Parameters
+        ----------
+        data : dict
+            A dict mapping all nodes to a list of neighbors sorted in
+            clockwise order.
+
+        See Also
+        --------
+        get_data
+
+        """
+        for v in data:
+            for w in reversed(data[v]):
+                self.add_half_edge_first(v, w)
+
+    def neighbors_cw_order(self, v):
+        """Generator for the neighbors of v in clockwise order.
+
+        Parameters
+        ----------
+        v : node
+
+        Yields
+        ------
+        node
+
+        """
+        if len(self[v]) == 0:
+            # v has no neighbors
+            return
+        start_node = self.nodes[v]["first_nbr"]
+        yield start_node
+        current_node = self[v][start_node]["cw"]
+        while start_node != current_node:
+            yield current_node
+            current_node = self[v][current_node]["cw"]
+
+    def check_structure(self):
+        """Runs without exceptions if this object is valid.
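+
+        The planarity part of the check verifies Euler's formula,
+        $|V| - |E| + |F| = 2$, on every connected component.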
+ + Checks that the following properties are fulfilled: + + * Edges go in both directions (because the edge attributes differ). + * Every edge has a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding. + * A node with a degree larger than 0 has a node attribute 'first_nbr'. + + Running this method verifies that the underlying Graph must be planar. + + Raises + ------ + NetworkXException + This exception is raised with a short explanation if the + PlanarEmbedding is invalid. + """ + # Check fundamental structure + for v in self: + try: + sorted_nbrs = set(self.neighbors_cw_order(v)) + except KeyError as err: + msg = f"Bad embedding. Missing orientation for a neighbor of {v}" + raise nx.NetworkXException(msg) from err + + unsorted_nbrs = set(self[v]) + if sorted_nbrs != unsorted_nbrs: + msg = "Bad embedding. Edge orientations not set correctly." + raise nx.NetworkXException(msg) + for w in self[v]: + # Check if opposite half-edge exists + if not self.has_edge(w, v): + msg = "Bad embedding. Opposite half-edge is missing." + raise nx.NetworkXException(msg) + + # Check planarity + counted_half_edges = set() + for component in nx.connected_components(self): + if len(component) == 1: + # Don't need to check single node component + continue + num_nodes = len(component) + num_half_edges = 0 + num_faces = 0 + for v in component: + for w in self.neighbors_cw_order(v): + num_half_edges += 1 + if (v, w) not in counted_half_edges: + # We encountered a new face + num_faces += 1 + # Mark all half-edges belonging to this face + self.traverse_face(v, w, counted_half_edges) + num_edges = num_half_edges // 2 # num_half_edges is even + if num_nodes - num_edges + num_faces != 2: + # The result does not match Euler's formula + msg = "Bad embedding. The graph does not match Euler's formula" + raise nx.NetworkXException(msg) + + def add_half_edge_ccw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added counter clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. + + See Also + -------- + add_half_edge_cw + connect_components + add_half_edge_first + + """ + if reference_neighbor is None: + # The start node has no neighbors + self.add_edge(start_node, end_node) # Add edge to graph + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + else: + ccw_reference = self[start_node][reference_neighbor]["ccw"] + self.add_half_edge_cw(start_node, end_node, ccw_reference) + + if reference_neighbor == self.nodes[start_node].get("first_nbr", None): + # Update first neighbor + self.nodes[start_node]["first_nbr"] = end_node + + def add_half_edge_cw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. 
+ + See Also + -------- + add_half_edge_ccw + connect_components + add_half_edge_first + """ + self.add_edge(start_node, end_node) # Add edge to graph + + if reference_neighbor is None: + # The start node has no neighbors + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + return + + if reference_neighbor not in self[start_node]: + raise nx.NetworkXException( + "Cannot add edge. Reference neighbor does not exist" + ) + + # Get half-edge at the other side + cw_reference = self[start_node][reference_neighbor]["cw"] + # Alter half-edge data structures + self[start_node][reference_neighbor]["cw"] = end_node + self[start_node][end_node]["cw"] = cw_reference + self[start_node][cw_reference]["ccw"] = end_node + self[start_node][end_node]["ccw"] = reference_neighbor + + def connect_components(self, v, w): + """Adds half-edges for (v, w) and (w, v) at some position. + + This method should only be called if v and w are in different + components, or it might break the embedding. + This especially means that if `connect_components(v, w)` + is called it is not allowed to call `connect_components(w, v)` + afterwards. The neighbor orientations in both directions are + all set correctly after the first call. + + Parameters + ---------- + v : node + w : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + add_half_edge_first + """ + self.add_half_edge_first(v, w) + self.add_half_edge_first(w, v) + + def add_half_edge_first(self, start_node, end_node): + """The added half-edge is inserted at the first position in the order. + + Parameters + ---------- + start_node : node + end_node : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + connect_components + """ + if start_node in self and "first_nbr" in self.nodes[start_node]: + reference = self.nodes[start_node]["first_nbr"] + else: + reference = None + self.add_half_edge_ccw(start_node, end_node, reference) + + def next_face_half_edge(self, v, w): + """Returns the following half-edge left of a face. + + Parameters + ---------- + v : node + w : node + + Returns + ------- + half-edge : tuple + """ + new_node = self[w][v]["ccw"] + return w, new_node + + def traverse_face(self, v, w, mark_half_edges=None): + """Returns nodes on the face that belong to the half-edge (v, w). + + The face that is traversed lies to the right of the half-edge (in an + orientation where v is below w). + + Optionally it is possible to pass a set to which all encountered half + edges are added. Before calling this method, this set must not include + any half-edges that belong to the face. + + Parameters + ---------- + v : node + Start node of half-edge. + w : node + End node of half-edge. + mark_half_edges: set, optional + Set to which all encountered half-edges are added. + + Returns + ------- + face : list + A list of nodes that lie on this face. + """ + if mark_half_edges is None: + mark_half_edges = set() + + face_nodes = [v] + mark_half_edges.add((v, w)) + prev_node = v + cur_node = w + # Last half-edge is (incoming_node, v) + incoming_node = self[v][w]["cw"] + + while cur_node != v or prev_node != incoming_node: + face_nodes.append(cur_node) + prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node) + if (prev_node, cur_node) in mark_half_edges: + raise nx.NetworkXException("Bad planar embedding. 
Impossible face.") + mark_half_edges.add((prev_node, cur_node)) + + return face_nodes + + def is_directed(self): + """A valid PlanarEmbedding is undirected. + + All reverse edges are contained, i.e. for every existing + half-edge (v, w) the half-edge in the opposite direction (w, v) is also + contained. + """ + return False diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/polynomials.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/polynomials.py new file mode 100644 index 0000000..35c0166 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/polynomials.py @@ -0,0 +1,290 @@ +"""Provides algorithms supporting the computation of graph polynomials. + +Graph polynomials are polynomial-valued graph invariants that encode a wide +variety of structural information. Examples include the Tutte polynomial, +chromatic polynomial, characteristic polynomial, and matching polynomial. An +extensive treatment is provided in [1]_. + +.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials" +""" +from collections import deque + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["tutte_polynomial", "chromatic_polynomial"] + + +@not_implemented_for("directed") +def tutte_polynomial(G): + r"""Returns the Tutte polynomial of `G` + + This function computes the Tutte polynomial via an iterative version of + the deletion-contraction algorithm. + + The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in + two variables. It encodes a wide array of information related to the + edge-connectivity of a graph; "Many problems about graphs can be reduced to + problems of finding and evaluating the Tutte polynomial at certain values" [1]_. + In fact, every deletion-contraction-expressible feature of a graph is a + specialization of the Tutte polynomial [2]_ (see Notes for examples). + + There are several equivalent definitions; here are three: + + Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the + number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of + `G`, and `c(A)` the number of connected components of the graph with vertex + set `V` and edge set `A` [3]_: + + .. math:: + + T_G(x, y) = \sum_{A \in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)} + + Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning + tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict + linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of + $E \setminus T \cup {e}$. An edge `e` is internally active with respect to + `T` and `L` if `e` is the least edge in `B_e` according to the linear order + `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges + in $E \setminus T$ that are internally active with respect to `T` and `L`. + Let `P_e` be the unique path in $T \cup {e}$ whose source and target vertex + are the same. An edge `e` is externally active with respect to `T` and `L` + if `e` is the least edge in `P_e` according to the linear order `L`. The + external activity of `T` (denoted `e(T)`) is the number of edges in + $E \setminus T$ that are externally active with respect to `T` and `L`. + Then [4]_ [5]_: + + .. 
math:: + + T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)} + + Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e` + the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained + from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`, + and `l(G)` the number of self-loops of `G`: + + .. math:: + T_G(x, y) = \begin{cases} + x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\ + T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop} + \end{cases} + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + instance of `sympy.core.add.Add` + A Sympy expression representing the Tutte polynomial for `G`. + + Examples + -------- + >>> C = nx.cycle_graph(5) + >>> nx.tutte_polynomial(C) + x**4 + x**3 + x**2 + x + y + + >>> D = nx.diamond_graph() + >>> nx.tutte_polynomial(D) + x**3 + 2*x**2 + 2*x*y + x + y**2 + y + + Notes + ----- + Some specializations of the Tutte polynomial: + + - `T_G(1, 1)` counts the number of spanning trees of `G` + - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G` + - `T_G(2, 1)` counts the number of spanning forests in `G` + - `T_G(0, 2)` counts the number of strong orientations of `G` + - `T_G(2, 0)` counts the number of acyclic orientations of `G` + + Edge contraction is defined and deletion-contraction is introduced in [6]_. + Combinatorial meaning of the coefficients is introduced in [7]_. + Universality, properties, and applications are discussed in [8]_. + + Practically, up-front computation of the Tutte polynomial may be useful when + users wish to repeatedly calculate edge-connectivity-related information + about one or more graphs. + + References + ---------- + .. [1] M. Brandt, + "The Tutte Polynomial." + Talking About Combinatorial Objects Seminar, 2015 + https://math.berkeley.edu/~brandtm/talks/tutte.pdf + .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto, + "Computing the Tutte polynomial in vertex-exponential time" + 49th Annual IEEE Symposium on Foundations of Computer Science, 2008 + https://ieeexplore.ieee.org/abstract/document/4691000 + .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials," p. 14 + .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials," p. 46 + .. [5] A. Nešetril, J. Goodall, + "Graph invariants, homomorphisms, and the Tutte polynomial" + https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf + .. [6] D. B. West, + "Introduction to Graph Theory," p. 84 + .. [7] G. Coutinho, + "A brief introduction to the Tutte polynomial" + Structural Analysis of Complex Networks, 2011 + https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf + .. [8] J. A. Ellis-Monaghan, C. 
Merino, + "Graph polynomials and their applications I: The Tutte polynomial" + Structural Analysis of Complex Networks, 2011 + https://arxiv.org/pdf/0803.3079.pdf + """ + import sympy + + x = sympy.Symbol("x") + y = sympy.Symbol("y") + stack = deque() + stack.append(nx.MultiGraph(G)) + + polynomial = 0 + while stack: + G = stack.pop() + bridges = set(nx.bridges(G)) + + e = None + for i in G.edges: + if (i[0], i[1]) not in bridges and i[0] != i[1]: + e = i + break + if not e: + loops = list(nx.selfloop_edges(G, keys=True)) + polynomial += x ** len(bridges) * y ** len(loops) + else: + # deletion-contraction + C = nx.contracted_edge(G, e, self_loops=True) + C.remove_edge(e[0], e[0]) + G.remove_edge(*e) + stack.append(G) + stack.append(C) + return sympy.simplify(polynomial) + + +@not_implemented_for("directed") +def chromatic_polynomial(G): + r"""Returns the chromatic polynomial of `G` + + This function computes the chromatic polynomial via an iterative version of + the deletion-contraction algorithm. + + The chromatic polynomial `X_G(x)` is a fundamental graph polynomial + invariant in one variable. Evaluating `X_G(k)` for an natural number `k` + enumerates the proper k-colorings of `G`. + + There are several equivalent definitions; here are three: + + Def 1 (explicit formula): + For `G` an undirected graph, `c(G)` the number of connected components of + `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with + edge set `S` [1]_: + + .. math:: + + X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))} + + + Def 2 (interpolating polynomial): + For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`, + and `k_i` the number of distinct ways to color the vertices of `G` with `i` + unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the + unique Lagrange interpolating polynomial of degree `n(G)` through the points + `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_. + + + Def 3 (chromatic recurrence): + For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting + edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)` + the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_: + + .. math:: + X_G(x) = \begin{cases} + x^{n(G)}, & \text{if $e(G)=0$} \\ + X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$} + \end{cases} + + This formulation is also known as the Fundamental Reduction Theorem [4]_. + + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + instance of `sympy.core.add.Add` + A Sympy expression representing the chromatic polynomial for `G`. + + Examples + -------- + >>> C = nx.cycle_graph(5) + >>> nx.chromatic_polynomial(C) + x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x + + >>> G = nx.complete_graph(4) + >>> nx.chromatic_polynomial(G) + x**4 - 6*x**3 + 11*x**2 - 6*x + + Notes + ----- + Interpretation of the coefficients is discussed in [5]_. Several special + cases are listed in [2]_. + + The chromatic polynomial is a specialization of the Tutte polynomial; in + particular, `X_G(x) = `T_G(x, 0)` [6]_. + + The chromatic polynomial may take negative arguments, though evaluations + may not have chromatic interpretations. For instance, `X_G(-1)` enumerates + the acyclic orientations of `G` [7]_. + + References + ---------- + .. [1] D. B. West, + "Introduction to Graph Theory," p. 222 + .. [2] E. W. Weisstein + "Chromatic Polynomial" + MathWorld--A Wolfram Web Resource + https://mathworld.wolfram.com/ChromaticPolynomial.html + .. [3] D. B. 
West, + "Introduction to Graph Theory," p. 221 + .. [4] J. Zhang, J. Goodall, + "An Introduction to Chromatic Polynomials" + https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf + .. [5] R. C. Read, + "An Introduction to Chromatic Polynomials" + Journal of Combinatorial Theory, 1968 + https://math.berkeley.edu/~mrklug/ReadChromatic.pdf + .. [6] W. T. Tutte, + "Graph-polynomials" + Advances in Applied Mathematics, 2004 + https://www.sciencedirect.com/science/article/pii/S0196885803000411 + .. [7] R. P. Stanley, + "Acyclic orientations of graphs" + Discrete Mathematics, 2006 + https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf + """ + import sympy + + x = sympy.Symbol("x") + stack = deque() + stack.append(nx.MultiGraph(G, contraction_idx=0)) + + polynomial = 0 + while stack: + G = stack.pop() + edges = list(G.edges) + if not edges: + polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G) + else: + e = edges[0] + C = nx.contracted_edge(G, e, self_loops=True) + C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1 + C.remove_edge(e[0], e[0]) + G.remove_edge(*e) + stack.append(G) + stack.append(C) + return polynomial diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/reciprocity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/reciprocity.py new file mode 100644 index 0000000..1b7761b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/reciprocity.py @@ -0,0 +1,94 @@ +"""Algorithms to calculate reciprocity in a directed graph.""" +from networkx import NetworkXError + +from ..utils import not_implemented_for + +__all__ = ["reciprocity", "overall_reciprocity"] + + +@not_implemented_for("undirected", "multigraph") +def reciprocity(G, nodes=None): + r"""Compute the reciprocity in a directed graph. + + The reciprocity of a directed graph is defined as the ratio + of the number of edges pointing in both directions to the total + number of edges in the graph. + Formally, $r = |{(u,v) \in G|(v,u) \in G}| / |{(u,v) \in G}|$. + + The reciprocity of a single node u is defined similarly, + it is the ratio of the number of edges in both directions to + the total number of edges attached to node u. + + Parameters + ---------- + G : graph + A networkx directed graph + nodes : container of nodes, optional (default=whole graph) + Compute reciprocity for nodes in this container. + + Returns + ------- + out : dictionary + Reciprocity keyed by node label. + + Notes + ----- + The reciprocity is not defined for isolated nodes. + In such cases this function will return None. + + """ + # If `nodes` is not specified, calculate the reciprocity of the graph. + if nodes is None: + return overall_reciprocity(G) + + # If `nodes` represents a single node in the graph, return only its + # reciprocity. + if nodes in G: + reciprocity = next(_reciprocity_iter(G, nodes))[1] + if reciprocity is None: + raise NetworkXError("Not defined for isolated nodes.") + else: + return reciprocity + + # Otherwise, `nodes` represents an iterable of nodes, so return a + # dictionary mapping node to its reciprocity. + return dict(_reciprocity_iter(G, nodes)) + + +def _reciprocity_iter(G, nodes): + """Return an iterator of (node, reciprocity).""" + n = G.nbunch_iter(nodes) + for node in n: + pred = set(G.predecessors(node)) + succ = set(G.successors(node)) + overlap = pred & succ + n_total = len(pred) + len(succ) + + # Reciprocity is not defined for isolated nodes. + # Return None. 
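+        # Illustrative example: pred = {a, b} and succ = {b, c} give
+        # overlap = {b} and n_total = 4, hence reciprocity 2 * 1 / 4 = 0.5.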
+ if n_total == 0: + yield (node, None) + else: + reciprocity = 2 * len(overlap) / n_total + yield (node, reciprocity) + + +@not_implemented_for("undirected", "multigraph") +def overall_reciprocity(G): + """Compute the reciprocity for the whole graph. + + See the doc of reciprocity for the definition. + + Parameters + ---------- + G : graph + A networkx graph + + """ + n_all_edge = G.number_of_edges() + n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2 + + if n_all_edge == 0: + raise NetworkXError("Not defined for empty graphs") + + return n_overlap_edge / n_all_edge diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/regular.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/regular.py new file mode 100644 index 0000000..3f76d40 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/regular.py @@ -0,0 +1,190 @@ +"""Functions for computing and verifying regular graphs.""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["is_regular", "is_k_regular", "k_factor"] + + +def is_regular(G): + """Determines whether the graph ``G`` is a regular graph. + + A regular graph is a graph where each vertex has the same degree. A + regular digraph is a graph where the indegree and outdegree of each + vertex are equal. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the given graph or digraph is regular. + + """ + n1 = nx.utils.arbitrary_element(G) + if not G.is_directed(): + d1 = G.degree(n1) + return all(d1 == d for _, d in G.degree) + else: + d_in = G.in_degree(n1) + in_regular = all(d_in == d for _, d in G.in_degree) + d_out = G.out_degree(n1) + out_regular = all(d_out == d for _, d in G.out_degree) + return in_regular and out_regular + + +@not_implemented_for("directed") +def is_k_regular(G, k): + """Determines whether the graph ``G`` is a k-regular graph. + + A k-regular graph is a graph where each vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the given graph is k-regular. + + """ + return all(d == k for n, d in G.degree) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def k_factor(G, k, matching_weight="weight"): + """Compute a k-factor of G + + A k-factor of a graph is a spanning k-regular subgraph. + A spanning k-regular subgraph of G is a subgraph that contains + each vertex of G and a subset of the edges of G such that each + vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + matching_weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + Used for finding the max-weighted perfect matching. + If key not found, uses 1 as weight. + + Returns + ------- + G2 : NetworkX graph + A k-factor of G + + References + ---------- + .. [1] "An algorithm for computing simple k-factors.", + Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport, + Information processing letters, 2009. 
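+
+    Examples
+    --------
+    An illustrative sketch (not part of the original test suite): a 2-factor
+    of the complete graph on four nodes is a spanning cycle, so every node
+    of the returned subgraph has degree 2.
+
+    >>> G = nx.complete_graph(4)
+    >>> F = nx.k_factor(G, 2)
+    >>> sorted(d for _, d in F.degree())
+    [2, 2, 2, 2]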
+ """ + + from networkx.algorithms.matching import is_perfect_matching, max_weight_matching + + class LargeKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.g = g + self.k = k + self.degree = degree + + self.outer_vertices = [(node, x) for x in range(degree)] + self.core_vertices = [(node, x + degree) for x in range(degree - k)] + + def replace_node(self): + adj_view = self.g[self.original] + neighbors = list(adj_view.keys()) + edge_attrs = list(adj_view.values()) + for (outer, neighbor, edge_attrs) in zip( + self.outer_vertices, neighbors, edge_attrs + ): + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for outer in self.outer_vertices: + self.g.add_edge(core, outer) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in list(adj_view.items()): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + g.remove_nodes_from(self.outer_vertices) + g.remove_nodes_from(self.core_vertices) + + class SmallKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.k = k + self.degree = degree + self.g = g + + self.outer_vertices = [(node, x) for x in range(degree)] + self.inner_vertices = [(node, x + degree) for x in range(degree)] + self.core_vertices = [(node, x + 2 * degree) for x in range(k)] + + def replace_node(self): + adj_view = self.g[self.original] + for (outer, inner, (neighbor, edge_attrs)) in zip( + self.outer_vertices, self.inner_vertices, list(adj_view.items()) + ): + self.g.add_edge(outer, inner) + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for inner in self.inner_vertices: + self.g.add_edge(core, inner) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in adj_view.items(): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + self.g.remove_nodes_from(self.outer_vertices) + self.g.remove_nodes_from(self.inner_vertices) + self.g.remove_nodes_from(self.core_vertices) + + # Step 1 + if any(d < k for _, d in G.degree): + raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k") + g = G.copy() + + # Step 2 + gadgets = [] + for node, degree in list(g.degree): + if k < degree / 2.0: + gadget = SmallKGadget(k, degree, node, g) + else: + gadget = LargeKGadget(k, degree, node, g) + gadget.replace_node() + gadgets.append(gadget) + + # Step 3 + matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight) + + # Step 4 + if not is_perfect_matching(g, matching): + raise nx.NetworkXUnfeasible( + "Cannot find k-factor because no perfect matching exists" + ) + + for edge in g.edges(): + if edge not in matching and (edge[1], edge[0]) not in matching: + g.remove_edge(edge[0], edge[1]) + + for gadget in gadgets: + gadget.restore_node() + + return g diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/richclub.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/richclub.py new file mode 100644 index 0000000..599d899 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/richclub.py @@ -0,0 +1,120 @@ +"""Functions for computing rich-club coefficients.""" + +from itertools import accumulate + +import networkx as nx +from 
networkx.utils import not_implemented_for + +__all__ = ["rich_club_coefficient"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def rich_club_coefficient(G, normalized=True, Q=100, seed=None): + r"""Returns the rich-club coefficient of the graph `G`. + + For each degree *k*, the *rich-club coefficient* is the ratio of the + number of actual to the number of potential edges for nodes with + degree greater than *k*: + + .. math:: + + \phi(k) = \frac{2 E_k}{N_k (N_k - 1)} + + where `N_k` is the number of nodes with degree larger than *k*, and + `E_k` is the number of edges among those nodes. + + Parameters + ---------- + G : NetworkX graph + Undirected graph with neither parallel edges nor self-loops. + normalized : bool (optional) + Normalize using randomized network as in [1]_ + Q : float (optional, default=100) + If `normalized` is True, perform `Q * m` double-edge + swaps, where `m` is the number of edges in `G`, to use as a + null-model for normalization. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + rc : dictionary + A dictionary, keyed by degree, with rich-club coefficient values. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42) + >>> rc[0] + 0.4 + + Notes + ----- + The rich club definition and algorithm are found in [1]_. This + algorithm ignores any edge weights and is not defined for directed + graphs or graphs with parallel edges or self loops. + + Estimates for appropriate values of `Q` are found in [2]_. + + References + ---------- + .. [1] Julian J. McAuley, Luciano da Fontoura Costa, + and Tibério S. Caetano, + "The rich-club phenomenon across complex network hierarchies", + Applied Physics Letters Vol 91 Issue 8, August 2007. + https://arxiv.org/abs/physics/0701290 + .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon, + "Uniform generation of random graphs with arbitrary degree + sequences", 2006. https://arxiv.org/abs/cond-mat/0312028 + """ + if nx.number_of_selfloops(G) > 0: + raise Exception( + "rich_club_coefficient is not implemented for " "graphs with self loops." + ) + rc = _compute_rc(G) + if normalized: + # make R a copy of G, randomize with Q*|E| double edge swaps + # and use rich_club coefficient of R to normalize + R = G.copy() + E = R.number_of_edges() + nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed) + rcran = _compute_rc(R) + rc = {k: v / rcran[k] for k, v in rc.items()} + return rc + + +def _compute_rc(G): + """Returns the rich-club coefficient for each degree in the graph + `G`. + + `G` is an undirected graph without multiedges. + + Returns a dictionary mapping degree to rich-club coefficient for + that degree. + + """ + deghist = nx.degree_histogram(G) + total = sum(deghist) + # Compute the number of nodes with degree greater than `k`, for each + # degree `k` (omitting the last entry, which is zero). + nks = (total - cs for cs in accumulate(deghist) if total - cs > 1) + # Create a sorted list of pairs of edge endpoint degrees. + # + # The list is sorted in reverse order so that we can pop from the + # right side of the list later, instead of popping from the left + # side of the list, which would have a linear time cost. 
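+    # Illustrative example: deghist == [0, 2, 2] (two nodes of degree 1 and
+    # two of degree 2) gives cumulative sums 0, 2, 4, so `nks` yields 4 and
+    # 2: the number of nodes of degree greater than 0 and greater than 1.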
+ edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True) + ek = G.number_of_edges() + k1, k2 = edge_degrees.pop() + rc = {} + for d, nk in enumerate(nks): + while k1 <= d: + if len(edge_degrees) == 0: + ek = 0 + break + k1, k2 = edge_degrees.pop() + ek -= 1 + rc[d] = 2 * ek / (nk * (nk - 1)) + return rc diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/__init__.py new file mode 100644 index 0000000..eb0d91c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.shortest_paths.generic import * +from networkx.algorithms.shortest_paths.unweighted import * +from networkx.algorithms.shortest_paths.weighted import * +from networkx.algorithms.shortest_paths.astar import * +from networkx.algorithms.shortest_paths.dense import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/astar.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/astar.py new file mode 100644 index 0000000..5d5a847 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/astar.py @@ -0,0 +1,199 @@ +"""Shortest paths and path lengths using the A* ("A star") algorithm. +""" +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function + +__all__ = ["astar_path", "astar_path_length"] + + +def astar_path(G, source, target, heuristic=None, weight="weight"): + """Returns a list of nodes in a shortest path between source and target + using the A* ("A-star") algorithm. + + There may be more than one shortest path. This returns only one. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + heuristic : function + A function to evaluate the estimate of the distance + from the a node to the target. The function takes + two nodes arguments and must return a number. + If the heuristic is inadmissible (if it might + overestimate the cost of reaching the goal from a node), + the result may not be a shortest path. + The algorithm does not support updating heuristic + values for the same node due to caching the first + heuristic calculation per node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.astar_path(G, 0, 4)) + [0, 1, 2, 3, 4] + >>> G = nx.grid_graph(dim=[3, 3]) # nodes are two-tuples (x,y) + >>> nx.set_edge_attributes(G, {e: e[1][0] * 2 for e in G.edges()}, "cost") + >>> def dist(a, b): + ... (x1, y1) = a + ... (x2, y2) = b + ... 
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 + >>> print(nx.astar_path(G, (0, 0), (2, 2), heuristic=dist, weight="cost")) + [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)] + + + See Also + -------- + shortest_path, dijkstra_path + + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + if heuristic is None: + # The default heuristic is h=0 - same as Dijkstra's algorithm + def heuristic(u, v): + return 0 + + push = heappush + pop = heappop + weight = _weight_function(G, weight) + + # The queue stores priority, node, cost to reach, and parent. + # Uses Python heapq to keep in priority order. + # Add a counter to the queue to prevent the underlying heap from + # attempting to compare the nodes themselves. The hash breaks ties in the + # priority and is guaranteed unique for all nodes in the graph. + c = count() + queue = [(0, next(c), source, 0, None)] + + # Maps enqueued nodes to distance of discovered paths and the + # computed heuristics to target. We avoid computing the heuristics + # more than once and inserting the node into the queue too many times. + enqueued = {} + # Maps explored nodes to parent closest to the source. + explored = {} + + while queue: + # Pop the smallest item from queue. + _, __, curnode, dist, parent = pop(queue) + + if curnode == target: + path = [curnode] + node = parent + while node is not None: + path.append(node) + node = explored[node] + path.reverse() + return path + + if curnode in explored: + # Do not override the parent of starting node + if explored[curnode] is None: + continue + + # Skip bad paths that were enqueued before finding a better one + qcost, h = enqueued[curnode] + if qcost < dist: + continue + + explored[curnode] = parent + + for neighbor, w in G[curnode].items(): + ncost = dist + weight(curnode, neighbor, w) + if neighbor in enqueued: + qcost, h = enqueued[neighbor] + # if qcost <= ncost, a less costly path from the + # neighbor to the source was already determined. + # Therefore, we won't attempt to push this neighbor + # to the queue + if qcost <= ncost: + continue + else: + h = heuristic(neighbor, target) + enqueued[neighbor] = ncost, h + push(queue, (ncost + h, next(c), neighbor, ncost, curnode)) + + raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") + + +def astar_path_length(G, source, target, heuristic=None, weight="weight"): + """Returns the length of the shortest path between source and target using + the A* ("A-star") algorithm. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + heuristic : function + A function to evaluate the estimate of the distance + from the a node to the target. The function takes + two nodes arguments and must return a number. + If the heuristic is inadmissible (if it might + overestimate the cost of reaching the goal from a node), + the result may not be a shortest path. + The algorithm does not support updating heuristic + values for the same node due to caching the first + heuristic calculation per node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + See Also + -------- + astar_path + + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + weight = _weight_function(G, weight) + path = astar_path(G, source, target, heuristic, weight) + return sum(weight(u, v, G[u][v]) for u, v in zip(path[:-1], path[1:])) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/dense.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/dense.py new file mode 100644 index 0000000..8965171 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/dense.py @@ -0,0 +1,233 @@ +"""Floyd-Warshall algorithm for shortest paths. +""" +import networkx as nx + +__all__ = [ + "floyd_warshall", + "floyd_warshall_predecessor_and_distance", + "reconstruct_path", + "floyd_warshall_numpy", +] + + +def floyd_warshall_numpy(G, nodelist=None, weight="weight"): + """Find all-pairs shortest path lengths using Floyd's algorithm. + + This algorithm for finding shortest paths takes advantage of + matrix representations of a graph and works well for dense + graphs where all-pairs shortest path lengths are desired. + The results are returned as a NumPy array, distance[i, j], + where i and j are the indexes of two nodes in nodelist. + The entry distance[i, j] is the distance along a shortest + path from i to j. If no path exists the distance is Inf. + + Parameters + ---------- + G : NetworkX graph + + nodelist : list, optional (default=G.nodes) + The rows and columns are ordered by the nodes in nodelist. + If nodelist is None then the ordering is produced by G.nodes. + Nodelist should include all nodes in G. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + + Returns + ------- + distance : 2D numpy.ndarray + A numpy array of shortest path distances between nodes. + If there is no path between two nodes the value is Inf. + + Notes + ----- + Floyd's algorithm is appropriate for finding shortest paths in + dense graphs or graphs with negative weights when Dijkstra's + algorithm fails. This algorithm can still fail if there are negative + cycles. It has running time $O(n^3)$ with running space of $O(n^2)$. + + Raises + ------ + NetworkXError + If nodelist is not a list of the nodes in G. + """ + import numpy as np + + if nodelist is not None: + if not (len(nodelist) == len(G) == len(set(nodelist))): + raise nx.NetworkXError( + "nodelist must contain every node in G with no repeats." + "If you wanted a subgraph of G use G.subgraph(nodelist)" + ) + + # To handle cases when an edge has weight=0, we must make sure that + # nonedges are not given the value 0 as well. + A = nx.to_numpy_array( + G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf + ) + n, m = A.shape + np.fill_diagonal(A, 0) # diagonal elements should be zero + for i in range(n): + # The second term has the same shape as A due to broadcasting + A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis]) + return A + + +def floyd_warshall_predecessor_and_distance(G, weight="weight"): + """Find all-pairs shortest path lengths using Floyd's algorithm. 
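A short sketch (editor's example, not part of this diff) of how the array returned by `floyd_warshall_numpy` above is indexed: rows and columns follow `nodelist`, and unreachable pairs come back as `inf`. The three-node digraph is an illustrative assumption.

```python
# Editor's sketch: reading distances out of the dense Floyd-Warshall matrix.
import networkx as nx
import numpy as np

G = nx.DiGraph()
G.add_weighted_edges_from([("a", "b", 2), ("b", "c", 3), ("a", "c", 10)])
nodelist = ["a", "b", "c"]          # fixes the row/column order
D = nx.floyd_warshall_numpy(G, nodelist=nodelist)

assert D[0, 2] == 5                 # a -> b -> c beats the direct weight-10 edge
assert np.isinf(D[2, 0])            # no edges lead back towards "a"
```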
+ + Parameters + ---------- + G : NetworkX graph + + weight: string, optional (default= 'weight') + Edge data key corresponding to the edge weight. + + Returns + ------- + predecessor,distance : dictionaries + Dictionaries, keyed by source and target, of predecessors and distances + in the shortest path. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... [ + ... ("s", "u", 10), + ... ("s", "x", 5), + ... ("u", "v", 1), + ... ("u", "x", 2), + ... ("v", "y", 1), + ... ("x", "u", 3), + ... ("x", "v", 5), + ... ("x", "y", 2), + ... ("y", "s", 7), + ... ("y", "v", 6), + ... ] + ... ) + >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G) + >>> print(nx.reconstruct_path("s", "v", predecessors)) + ['s', 'x', 'u', 'v'] + + Notes + ----- + Floyd's algorithm is appropriate for finding shortest paths + in dense graphs or graphs with negative weights when Dijkstra's algorithm + fails. This algorithm can still fail if there are negative cycles. + It has running time $O(n^3)$ with running space of $O(n^2)$. + + See Also + -------- + floyd_warshall + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + """ + from collections import defaultdict + + # dictionary-of-dictionaries representation for dist and pred + # use some defaultdict magick here + # for dist the default is the floating point inf value + dist = defaultdict(lambda: defaultdict(lambda: float("inf"))) + for u in G: + dist[u][u] = 0 + pred = defaultdict(dict) + # initialize path distance dictionary to be the adjacency matrix + # also set the distance to self to 0 (zero diagonal) + undirected = not G.is_directed() + for u, v, d in G.edges(data=True): + e_weight = d.get(weight, 1.0) + dist[u][v] = min(e_weight, dist[u][v]) + pred[u][v] = u + if undirected: + dist[v][u] = min(e_weight, dist[v][u]) + pred[v][u] = v + for w in G: + dist_w = dist[w] # save recomputation + for u in G: + dist_u = dist[u] # save recomputation + for v in G: + d = dist_u[w] + dist_w[v] + if dist_u[v] > d: + dist_u[v] = d + pred[u][v] = pred[w][v] + return dict(pred), dict(dist) + + +def reconstruct_path(source, target, predecessors): + """Reconstruct a path from source to target using the predecessors + dict as returned by floyd_warshall_predecessor_and_distance + + Parameters + ---------- + source : node + Starting node for path + + target : node + Ending node for path + + predecessors: dictionary + Dictionary, keyed by source and target, of predecessors in the + shortest path, as returned by floyd_warshall_predecessor_and_distance + + Returns + ------- + path : list + A list of nodes containing the shortest path from source to target + + If source and target are the same, an empty list is returned + + Notes + ----- + This function is meant to give more applicability to the + floyd_warshall_predecessor_and_distance function + + See Also + -------- + floyd_warshall_predecessor_and_distance + """ + if source == target: + return [] + prev = predecessors[source] + curr = prev[target] + path = [target, curr] + while curr != source: + curr = prev[curr] + path.append(curr) + return list(reversed(path)) + + +def floyd_warshall(G, weight="weight"): + """Find all-pairs shortest path lengths using Floyd's algorithm. + + Parameters + ---------- + G : NetworkX graph + + weight: string, optional (default= 'weight') + Edge data key corresponding to the edge weight. + + + Returns + ------- + distance : dict + A dictionary, keyed by source and target, of shortest paths distances + between nodes. 
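A minimal sketch (editor's example, not part of this diff) tying `floyd_warshall_predecessor_and_distance` to `reconstruct_path`, both defined above; the four-node digraph is an illustrative assumption.

```python
# Editor's sketch: recover an explicit path from the predecessor dict.
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from(
    [("s", "x", 5), ("x", "u", 3), ("u", "v", 1), ("s", "v", 20)]
)
pred, dist = nx.floyd_warshall_predecessor_and_distance(G)

assert dist["s"]["v"] == 9                          # 5 + 3 + 1 beats the direct 20
assert nx.reconstruct_path("s", "v", pred) == ["s", "x", "u", "v"]
assert nx.reconstruct_path("s", "s", pred) == []    # same source and target
```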
+ + Notes + ----- + Floyd's algorithm is appropriate for finding shortest paths + in dense graphs or graphs with negative weights when Dijkstra's algorithm + fails. This algorithm can still fail if there are negative cycles. + It has running time $O(n^3)$ with running space of $O(n^2)$. + + See Also + -------- + floyd_warshall_predecessor_and_distance + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + """ + # could make this its own function to reduce memory costs + return floyd_warshall_predecessor_and_distance(G, weight=weight)[1] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/generic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/generic.py new file mode 100644 index 0000000..129f741 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/generic.py @@ -0,0 +1,568 @@ +""" +Compute the shortest paths and path lengths between nodes in the graph. + +These algorithms work with undirected and directed graphs. + +""" + +import networkx as nx + +__all__ = [ + "shortest_path", + "all_shortest_paths", + "shortest_path_length", + "average_shortest_path_length", + "has_path", +] + + +def has_path(G, source, target): + """Returns *True* if *G* has a path from *source* to *target*. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + """ + try: + nx.shortest_path(G, source, target) + except nx.NetworkXNoPath: + return False + return True + + +def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"): + """Compute shortest paths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Starting node for path. If not specified, compute shortest + paths for each possible starting node. + + target : node, optional + Ending node for path. If not specified, compute shortest + paths to all possible nodes. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + path: list or dictionary + All returned paths include both the source and target in the path. + + If the source and target are both specified, return a single list + of nodes in a shortest path from the source to the target. + + If only the source is specified, return a dictionary keyed by + targets with a list of nodes in a shortest path from the source + to one of the targets. + + If only the target is specified, return a dictionary keyed by + sources with a list of nodes in a shortest path from one of the + sources to the target. + + If neither the source nor target are specified return a dictionary + of dictionaries with path[source][target]=[list of nodes in path]. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. 
+ + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.shortest_path(G, source=0, target=4)) + [0, 1, 2, 3, 4] + >>> p = nx.shortest_path(G, source=0) # target not specified + >>> p[3] # shortest path from source=0 to target=3 + [0, 1, 2, 3] + >>> p = nx.shortest_path(G, target=4) # source not specified + >>> p[1] # shortest path from source=1 to target=4 + [1, 2, 3, 4] + >>> p = nx.shortest_path(G) # source, target not specified + >>> p[2][4] # shortest path from source=2 to target=4 + [2, 3, 4] + + Notes + ----- + There may be more than one shortest path between a source and target. + This returns only one of them. + + See Also + -------- + all_pairs_shortest_path + all_pairs_dijkstra_path + all_pairs_bellman_ford_path + single_source_shortest_path + single_source_dijkstra_path + single_source_bellman_ford_path + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + # Find paths between all pairs. + if method == "unweighted": + paths = dict(nx.all_pairs_shortest_path(G)) + elif method == "dijkstra": + paths = dict(nx.all_pairs_dijkstra_path(G, weight=weight)) + else: # method == 'bellman-ford': + paths = dict(nx.all_pairs_bellman_ford_path(G, weight=weight)) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + paths = nx.single_source_shortest_path(G, target) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, target, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, target, weight=weight) + # Now flip the paths so they go from a source to the target. + for target in paths: + paths[target] = list(reversed(paths[target])) + else: + if target is None: + # Find paths to all nodes accessible from the source. + if method == "unweighted": + paths = nx.single_source_shortest_path(G, source) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, source, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, source, weight=weight) + else: + # Find shortest source-target path. + if method == "unweighted": + paths = nx.bidirectional_shortest_path(G, source, target) + elif method == "dijkstra": + _, paths = nx.bidirectional_dijkstra(G, source, target, weight) + else: # method == 'bellman-ford': + paths = nx.bellman_ford_path(G, source, target, weight) + return paths + + +def shortest_path_length(G, source=None, target=None, weight=None, method="dijkstra"): + """Compute shortest path lengths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Starting node for path. + If not specified, compute shortest path lengths using all nodes as + source nodes. + + target : node, optional + Ending node for path. + If not specified, compute shortest path lengths using all nodes as + target nodes. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. 
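A small sketch (editor's example, not part of this diff) of the `method` dispatch implemented above: with a negative edge weight Dijkstra's assumptions are violated, so `method="bellman-ford"` is the safe choice. The graph mirrors the negative-weight fixture used by the test suite later in this diff.

```python
# Editor's sketch: Bellman-Ford finds the true shortest path despite a
# negative edge, where Dijkstra's method may not.
import networkx as nx

G = nx.DiGraph()
G.add_edge(0, 1, weight=1)
G.add_edge(0, 2, weight=3)
G.add_edge(1, 3, weight=1)
G.add_edge(2, 3, weight=-2)

# Total weight 3 + (-2) = 1 beats 1 + 1 = 2.
assert nx.shortest_path(G, 0, 3, weight="weight", method="bellman-ford") == [0, 2, 3]
```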
The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path length. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + length: int or iterator + If the source and target are both specified, return the length of + the shortest path from the source to the target. + + If only the source is specified, return a dict keyed by target + to the shortest path length from the source to that target. + + If only the target is specified, return a dict keyed by source + to the shortest path length from that source to the target. + + If neither the source nor target are specified, return an iterator + over (source, dictionary) where dictionary is keyed by target to + shortest path length from source to that target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.shortest_path_length(G, source=0, target=4) + 4 + >>> p = nx.shortest_path_length(G, source=0) # target not specified + >>> p[4] + 4 + >>> p = nx.shortest_path_length(G, target=4) # source not specified + >>> p[0] + 4 + >>> p = dict(nx.shortest_path_length(G)) # source,target not specified + >>> p[0][4] + 4 + + Notes + ----- + The length of the path is always 1 less than the number of nodes involved + in the path since the length measures the number of edges followed. + + For digraphs this returns the shortest directed path length. To find path + lengths in the reverse direction use G.reverse(copy=False) first to flip + the edge orientation. + + See Also + -------- + all_pairs_shortest_path_length + all_pairs_dijkstra_path_length + all_pairs_bellman_ford_path_length + single_source_shortest_path_length + single_source_dijkstra_path_length + single_source_bellman_ford_path_length + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + # Find paths between all pairs. + if method == "unweighted": + paths = nx.all_pairs_shortest_path_length(G) + elif method == "dijkstra": + paths = nx.all_pairs_dijkstra_path_length(G, weight=weight) + else: # method == 'bellman-ford': + paths = nx.all_pairs_bellman_ford_path_length(G, weight=weight) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + path_length = nx.single_source_shortest_path_length + paths = path_length(G, target) + elif method == "dijkstra": + path_length = nx.single_source_dijkstra_path_length + paths = path_length(G, target, weight=weight) + else: # method == 'bellman-ford': + path_length = nx.single_source_bellman_ford_path_length + paths = path_length(G, target, weight=weight) + else: + if target is None: + # Find paths to all nodes accessible from the source. 
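A quick sketch (editor's example, not part of this diff) of the target-only branch of `shortest_path_length`: on a directed graph the function reverses the edges internally, so the result is a dict keyed by source node with lengths towards the target. The four-node path is an illustrative assumption.

```python
# Editor's sketch: target-only call returns lengths keyed by source.
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
lengths = nx.shortest_path_length(G, target=3)

assert lengths == {3: 0, 2: 1, 1: 2, 0: 3}
```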
+ if method == "unweighted": + paths = nx.single_source_shortest_path_length(G, source) + elif method == "dijkstra": + path_length = nx.single_source_dijkstra_path_length + paths = path_length(G, source, weight=weight) + else: # method == 'bellman-ford': + path_length = nx.single_source_bellman_ford_path_length + paths = path_length(G, source, weight=weight) + else: + # Find shortest source-target path. + if method == "unweighted": + p = nx.bidirectional_shortest_path(G, source, target) + paths = len(p) - 1 + elif method == "dijkstra": + paths = nx.dijkstra_path_length(G, source, target, weight) + else: # method == 'bellman-ford': + paths = nx.bellman_ford_path_length(G, source, target, weight) + return paths + + +def average_shortest_path_length(G, weight=None, method=None): + r"""Returns the average shortest path length. + + The average shortest path length is + + .. math:: + + a =\sum_{s,t \in V} \frac{d(s, t)}{n(n-1)} + + where `V` is the set of nodes in `G`, + `d(s, t)` is the shortest path from `s` to `t`, + and `n` is the number of nodes in `G`. + + Parameters + ---------- + G : NetworkX graph + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'unweighted' or 'djikstra') + The algorithm to use to compute the path lengths. + Supported options are 'unweighted', 'dijkstra', 'bellman-ford', + 'floyd-warshall' and 'floyd-warshall-numpy'. + Other method values produce a ValueError. + The default method is 'unweighted' if `weight` is None, + otherwise the default method is 'dijkstra'. + + Raises + ------ + NetworkXPointlessConcept + If `G` is the null graph (that is, the graph on zero nodes). + + NetworkXError + If `G` is not connected (or not weakly connected, in the case + of a directed graph). + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.average_shortest_path_length(G) + 2.0 + + For disconnected graphs, you can compute the average shortest path + length for each component + + >>> G = nx.Graph([(1, 2), (3, 4)]) + >>> for C in (G.subgraph(c).copy() for c in nx.connected_components(G)): + ... print(nx.average_shortest_path_length(C)) + 1.0 + 1.0 + + """ + single_source_methods = ["unweighted", "dijkstra", "bellman-ford"] + all_pairs_methods = ["floyd-warshall", "floyd-warshall-numpy"] + supported_methods = single_source_methods + all_pairs_methods + + if method is None: + method = "unweighted" if weight is None else "dijkstra" + if method not in supported_methods: + raise ValueError(f"method not supported: {method}") + + n = len(G) + # For the special case of the null graph, raise an exception, since + # there are no paths in the null graph. + if n == 0: + msg = ( + "the null graph has no paths, thus there is no average" + "shortest path length" + ) + raise nx.NetworkXPointlessConcept(msg) + # For the special case of the trivial graph, return zero immediately. + if n == 1: + return 0 + # Shortest path length is undefined if the graph is disconnected. 
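A minimal sketch (editor's example, not part of this diff) of the per-component workaround suggested in the `average_shortest_path_length` docstring above, since a disconnected graph raises `NetworkXError`. The two-component graph is an illustrative assumption.

```python
# Editor's sketch: average shortest path length per connected component.
import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (4, 5)])
averages = sorted(
    nx.average_shortest_path_length(G.subgraph(c).copy())
    for c in nx.connected_components(G)
)
# {4, 5} gives 1.0; the 3-node path {1, 2, 3} gives 8 / 6 = 4 / 3.
assert averages == [1.0, 4 / 3]
```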
+ if G.is_directed() and not nx.is_weakly_connected(G): + raise nx.NetworkXError("Graph is not weakly connected.") + if not G.is_directed() and not nx.is_connected(G): + raise nx.NetworkXError("Graph is not connected.") + + # Compute all-pairs shortest paths. + def path_length(v): + if method == "unweighted": + return nx.single_source_shortest_path_length(G, v) + elif method == "dijkstra": + return nx.single_source_dijkstra_path_length(G, v, weight=weight) + elif method == "bellman-ford": + return nx.single_source_bellman_ford_path_length(G, v, weight=weight) + + if method in single_source_methods: + # Sum the distances for each (ordered) pair of source and target node. + s = sum(l for u in G for l in path_length(u).values()) + else: + if method == "floyd-warshall": + all_pairs = nx.floyd_warshall(G, weight=weight) + s = sum(sum(t.values()) for t in all_pairs.values()) + elif method == "floyd-warshall-numpy": + s = nx.floyd_warshall_numpy(G, weight=weight).sum() + return s / (n * (n - 1)) + + +def all_shortest_paths(G, source, target, weight=None, method="dijkstra"): + """Compute all shortest simple paths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + target : node + Ending node for path. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + NetworkXNoPath + If `target` cannot be reached from `source`. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2]) + >>> nx.add_path(G, [0, 10, 2]) + >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)]) + [[0, 1, 2], [0, 10, 2]] + + Notes + ----- + There may be many shortest paths between the source and target. If G + contains zero-weight cycles, this function will not produce all shortest + paths because doing so would produce infinitely many paths of unbounded + length -- instead, we only produce the shortest simple paths. 
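A short sketch (editor's example, not part of this diff) of why `all_shortest_paths` can enumerate ties: in the unweighted case it builds paths from `nx.predecessor`, whose value lists record *every* tied predecessor of a node. The diamond graph is an illustrative assumption, matching the docstring example above.

```python
# Editor's sketch: the predecessor lists encode all tied shortest routes.
import networkx as nx

G = nx.Graph()
nx.add_path(G, [0, 1, 2])
nx.add_path(G, [0, 10, 2])

pred = nx.predecessor(G, 0)
assert sorted(pred[2]) == [1, 10]   # node 2 is reached two equally short ways
assert sorted(nx.all_shortest_paths(G, 0, 2)) == [[0, 1, 2], [0, 10, 2]]
```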
+ + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + """ + method = "unweighted" if weight is None else method + if method == "unweighted": + pred = nx.predecessor(G, source) + elif method == "dijkstra": + pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight) + elif method == "bellman-ford": + pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight) + else: + raise ValueError(f"method not supported: {method}") + + return _build_paths_from_predecessors({source}, target, pred) + + +def _build_paths_from_predecessors(sources, target, pred): + """Compute all simple paths to target, given the predecessors found in + pred, terminating when any source in sources is found. + + Parameters + ---------- + sources : set + Starting nodes for path. + + target : node + Ending node for path. + + pred : dict + A dictionary of predecessor lists, keyed by node + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + NetworkXNoPath + If `target` cannot be reached from `source`. + + Notes + ----- + There may be many paths between the sources and target. If there are + cycles among the predecessors, this function will not produce all + possible paths because doing so would produce infinitely many paths + of unbounded length -- instead, we only produce simple paths. + + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + all_shortest_paths + bellman_ford_path + """ + if target not in pred: + raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") + + seen = {target} + stack = [[target, 0]] + top = 0 + while top >= 0: + node, i = stack[top] + if node in sources: + yield [p for p, n in reversed(stack[: top + 1])] + if len(pred[node]) > i: + stack[top][1] = i + 1 + next = pred[node][i] + if next in seen: + continue + else: + seen.add(next) + top += 1 + if top == len(stack): + stack.append([next, 0]) + else: + stack[top][:] = [next, 0] + else: + seen.discard(node) + top -= 1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py new file mode 100644 index 0000000..e622502 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py @@ -0,0 +1,177 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +class TestAStar: + @classmethod + def setup_class(cls): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + cls.XG = nx.DiGraph() + cls.XG.add_weighted_edges_from(edges) + + def test_multiple_optimal_paths(self): + """Tests that A* algorithm finds any of multiple optimal paths""" + heuristic_values = {"a": 1.35, "b": 1.18, "c": 0.67, "d": 0} + + def h(u, v): + return heuristic_values[u] + + graph = nx.Graph() + points = ["a", "b", "c", "d"] + edges = [("a", "b", 0.18), ("a", "c", 0.68), ("b", "c", 0.50), ("c", "d", 0.67)] + + graph.add_nodes_from(points) + graph.add_weighted_edges_from(edges) + + path1 = ["a", "c", "d"] + 
path2 = ["a", "b", "c", "d"] + assert nx.astar_path(graph, "a", "d", h) in (path1, path2) + + def test_astar_directed(self): + assert nx.astar_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v") == 9 + + def test_astar_multigraph(self): + G = nx.MultiDiGraph(self.XG) + G.add_weighted_edges_from((u, v, 1000) for (u, v) in list(G.edges())) + assert nx.astar_path(G, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 9 + + def test_astar_undirected(self): + GG = self.XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + GG["y"]["v"]["weight"] = 2 + assert nx.astar_path(GG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(GG, "s", "v") == 8 + + def test_astar_directed2(self): + XG2 = nx.DiGraph() + edges = [ + (1, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 3, 1), + (1, 3, 50), + (1, 2, 100), + (2, 3, 100), + ] + XG2.add_weighted_edges_from(edges) + assert nx.astar_path(XG2, 1, 3) == [1, 4, 5, 6, 3] + + def test_astar_undirected2(self): + XG3 = nx.Graph() + edges = [(0, 1, 2), (1, 2, 12), (2, 3, 1), (3, 4, 5), (4, 5, 1), (5, 0, 10)] + XG3.add_weighted_edges_from(edges) + assert nx.astar_path(XG3, 0, 3) == [0, 1, 2, 3] + assert nx.astar_path_length(XG3, 0, 3) == 15 + + def test_astar_undirected3(self): + XG4 = nx.Graph() + edges = [ + (0, 1, 2), + (1, 2, 2), + (2, 3, 1), + (3, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 7, 1), + (7, 0, 1), + ] + XG4.add_weighted_edges_from(edges) + assert nx.astar_path(XG4, 0, 2) == [0, 1, 2] + assert nx.astar_path_length(XG4, 0, 2) == 4 + + """ Tests that A* finds correct path when multiple paths exist + and the best one is not expanded first (GH issue #3464) + """ + + def test_astar_directed3(self): + heuristic_values = {"n5": 36, "n2": 4, "n1": 0, "n0": 0} + + def h(u, v): + return heuristic_values[u] + + edges = [("n5", "n1", 11), ("n5", "n2", 9), ("n2", "n1", 1), ("n1", "n0", 32)] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + answer = ["n5", "n2", "n1", "n0"] + assert nx.astar_path(graph, "n5", "n0", h) == answer + + """ Tests that parent is not wrongly overridden when a node + is re-explored multiple times. + """ + + def test_astar_directed4(self): + edges = [ + ("a", "b", 1), + ("a", "c", 1), + ("b", "d", 2), + ("c", "d", 1), + ("d", "e", 1), + ] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + assert nx.astar_path(graph, "a", "e") == ["a", "c", "d", "e"] + + # >>> MXG4=NX.MultiGraph(XG4) + # >>> MXG4.add_edge(0,1,3) + # >>> NX.dijkstra_path(MXG4,0,2) + # [0, 1, 2] + + def test_astar_w1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "w"), + ("w", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + assert nx.astar_path(G, "s", "v") == ["s", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 2 + + def test_astar_nopath(self): + with pytest.raises(nx.NodeNotFound): + nx.astar_path(self.XG, "s", "moon") + + def test_cycle(self): + C = nx.cycle_graph(7) + assert nx.astar_path(C, 0, 3) == [0, 1, 2, 3] + assert nx.dijkstra_path(C, 0, 4) == [0, 6, 5, 4] + + def test_unorderable_nodes(self): + """Tests that A* accommodates nodes that are not orderable. + + For more information, see issue #554. + + """ + # Create the cycle graph on four nodes, with nodes represented + # as (unorderable) Python objects. 
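A tiny sketch (editor's example, not part of this diff) of the mechanism this unorderable-nodes test exercises: `astar_path` pushes `(priority, counter, node, ...)` tuples, and the unique counter breaks priority ties before `heapq` would ever compare the node objects themselves.

```python
# Editor's sketch: the counter prevents heapq from comparing payloads.
import heapq
from itertools import count

c = count()
queue = []
for node in (object(), object()):               # unorderable payloads
    heapq.heappush(queue, (0, next(c), node))   # identical priority 0
priority, _, node = heapq.heappop(queue)        # no TypeError is raised
```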
+ nodes = [object() for n in range(4)] + G = nx.Graph() + G.add_edges_from(pairwise(nodes, cyclic=True)) + path = nx.astar_path(G, nodes[0], nodes[2]) + assert len(path) == 3 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py new file mode 100644 index 0000000..6923bfe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py @@ -0,0 +1,212 @@ +import pytest + +import networkx as nx + + +class TestFloyd: + @classmethod + def setup_class(cls): + pass + + def test_floyd_warshall_predecessor_and_distance(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG) + assert dist["s"]["v"] == 9 + assert path["s"]["v"] == "u" + assert dist == { + "y": {"y": 0, "x": 12, "s": 7, "u": 15, "v": 6}, + "x": {"y": 2, "x": 0, "s": 9, "u": 3, "v": 4}, + "s": {"y": 7, "x": 5, "s": 0, "u": 8, "v": 9}, + "u": {"y": 2, "x": 2, "s": 9, "u": 0, "v": 1}, + "v": {"y": 1, "x": 13, "s": 8, "u": 16, "v": 0}, + } + + GG = XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + path, dist = nx.floyd_warshall_predecessor_and_distance(GG) + assert dist["s"]["v"] == 8 + # skip this test, could be alternate path s-u-v + # assert_equal(path['s']['v'],'y') + + G = nx.DiGraph() # no weights + G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(G) + assert dist["s"]["v"] == 2 + # skip this test, could be alternate path s-u-v + # assert_equal(path['s']['v'],'x') + + # alternate interface + dist = nx.floyd_warshall(G) + assert dist["s"]["v"] == 2 + + # floyd_warshall_predecessor_and_distance returns + # dicts-of-defautdicts + # make sure we don't get empty dictionary + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [("v", "x", 5.0), ("y", "x", 5.0), ("v", "y", 6.0), ("x", "u", 2.0)] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG) + inf = float("inf") + assert dist == { + "v": {"v": 0, "x": 5.0, "y": 6.0, "u": 7.0}, + "x": {"x": 0, "u": 2.0, "v": inf, "y": inf}, + "y": {"y": 0, "x": 5.0, "v": inf, "u": 7.0}, + "u": {"u": 0, "v": inf, "x": inf, "y": inf}, + } + assert path == { + "v": {"x": "v", "y": "v", "u": "x"}, + "x": {"u": "x"}, + "y": {"x": "y", "u": "x"}, + } + + def test_reconstruct_path(self): + with pytest.raises(KeyError): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG) + + path = nx.reconstruct_path("s", "v", predecessors) + assert path == ["s", "x", "u", "v"] + + path = nx.reconstruct_path("s", "s", predecessors) + assert path == [] + + # this part raises the keyError + nx.reconstruct_path("1", "2", predecessors) + + def test_cycle(self): + path, dist = nx.floyd_warshall_predecessor_and_distance(nx.cycle_graph(7)) + assert dist[0][3] == 3 + assert 
path[0][3] == 2 + assert dist[0][4] == 3 + + def test_weighted(self): + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG3) + assert dist[0][3] == 15 + assert path[0][3] == 2 + + def test_weighted2(self): + XG4 = nx.Graph() + XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG4) + assert dist[0][2] == 4 + assert path[0][2] == 1 + + def test_weight_parameter(self): + XG4 = nx.Graph() + XG4.add_edges_from( + [ + (0, 1, {"heavy": 2}), + (1, 2, {"heavy": 2}), + (2, 3, {"heavy": 1}), + (3, 4, {"heavy": 1}), + (4, 5, {"heavy": 1}), + (5, 6, {"heavy": 1}), + (6, 7, {"heavy": 1}), + (7, 0, {"heavy": 1}), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG4, weight="heavy") + assert dist[0][2] == 4 + assert path[0][2] == 1 + + def test_zero_distance(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG) + + for u in XG: + assert dist[u][u] == 0 + + GG = XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + path, dist = nx.floyd_warshall_predecessor_and_distance(GG) + + for u in GG: + dist[u][u] = 0 + + def test_zero_weight(self): + G = nx.DiGraph() + edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)] + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall(G) + assert dist[1][3] == -14 + + G = nx.MultiDiGraph() + edges.append((2, 5, -7)) + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall(G) + assert dist[1][3] == -14 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py new file mode 100644 index 0000000..1316e23 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py @@ -0,0 +1,89 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + + +def test_cycle_numpy(): + dist = nx.floyd_warshall_numpy(nx.cycle_graph(7)) + assert dist[0, 3] == 3 + assert dist[0, 4] == 3 + + +def test_weighted_numpy_three_edges(): + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + dist = nx.floyd_warshall_numpy(XG3) + assert dist[0, 3] == 15 + + +def test_weighted_numpy_two_edges(): + XG4 = nx.Graph() + XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + dist = nx.floyd_warshall_numpy(XG4) + assert dist[0, 2] == 4 + + +def test_weight_parameter_numpy(): + XG4 = nx.Graph() + XG4.add_edges_from( + [ + (0, 1, {"heavy": 2}), + (1, 2, {"heavy": 2}), + (2, 3, {"heavy": 1}), + (3, 4, {"heavy": 1}), + (4, 5, {"heavy": 1}), + (5, 6, {"heavy": 1}), + (6, 7, {"heavy": 1}), + (7, 0, {"heavy": 1}), + ] + ) + dist = nx.floyd_warshall_numpy(XG4, weight="heavy") + assert dist[0, 2] == 4 + + +def test_directed_cycle_numpy(): + G = nx.DiGraph() + 
nx.add_cycle(G, [0, 1, 2, 3]) + pred, dist = nx.floyd_warshall_predecessor_and_distance(G) + D = nx.utils.dict_to_numpy_array(dist) + np.testing.assert_equal(nx.floyd_warshall_numpy(G), D) + + +def test_zero_weight(): + G = nx.DiGraph() + edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)] + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + G = nx.MultiDiGraph() + edges.append((2, 5, -7)) + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + +def test_nodelist(): + G = nx.path_graph(7) + dist = nx.floyd_warshall_numpy(G, nodelist=[3, 5, 4, 6, 2, 1, 0]) + assert dist[0, 3] == 3 + assert dist[0, 1] == 2 + assert dist[6, 2] == 4 + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, [1, 3]) + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, list(range(9))) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py new file mode 100644 index 0000000..093fd9c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py @@ -0,0 +1,377 @@ +import pytest + +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestGenericPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.neg_weights = nx.DiGraph() + cls.neg_weights.add_edge(0, 1, weight=1) + cls.neg_weights.add_edge(0, 2, weight=3) + cls.neg_weights.add_edge(1, 3, weight=1) + cls.neg_weights.add_edge(2, 3, weight=-2) + + def test_shortest_path(self): + assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12)) + assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + # now with weights + assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12, weight="weight") + ) + assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == [ + 0, + 1, + 2, + 3, + ] + # weights and method specified + assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="dijkstra" + ) == [0, 1, 2, 3] + assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 1, 2, 3] + # when Dijkstra's will probably (depending on precise implementation) + # incorrectly return [0, 1, 3] instead + assert nx.shortest_path( + self.neg_weights, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 2, 3] + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM") + # confirm 
absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8) + + def test_shortest_path_target(self): + answer = {0: [0, 1], 1: [1], 2: [2, 1]} + sp = nx.shortest_path(nx.path_graph(3), target=1) + assert sp == answer + # with weights + sp = nx.shortest_path(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4 + # weights and method specified + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="dijkstra" + ) + == 3 + ) + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="bellman-ford" + ) + == 3 + ) + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM") + # confirm absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8) + + def test_shortest_path_length_target(self): + answer = {0: 1, 1: 0, 2: 1} + sp = dict(nx.shortest_path_length(nx.path_graph(3), target=1)) + assert sp == answer + # with weights + sp = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + + def test_single_source_shortest_path(self): + p = nx.shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1) + validate_grid_path(4, 4, 1, 12, p[12]) + # now with weights + p = nx.shortest_path(self.cycle, 0, weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_dijkstra_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1, weight="weight") + validate_grid_path(4, 4, 1, 12, p[12]) + # weights and method specified + p = nx.shortest_path(self.cycle, 0, method="dijkstra", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.cycle, 0, method="bellman-ford", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + + def test_single_source_shortest_path_length(self): + ans = dict(nx.shortest_path_length(self.cycle, 0)) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_shortest_path_length(self.cycle, 0)) + ans = dict(nx.shortest_path_length(self.grid, 1)) + assert ans[16] == 6 + # now with weights + ans = dict(nx.shortest_path_length(self.cycle, 0, weight="weight")) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0)) + 
ans = dict(nx.shortest_path_length(self.grid, 1, weight="weight")) + assert ans[16] == 6 + # weights and method specified + ans = dict( + nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra") + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0)) + ans = dict( + nx.shortest_path_length( + self.cycle, 0, weight="weight", method="bellman-ford" + ) + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_bellman_ford_path_length(self.cycle, 0)) + + def test_all_pairs_shortest_path(self): + p = nx.shortest_path(self.cycle) + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_shortest_path(self.cycle)) + p = nx.shortest_path(self.grid) + validate_grid_path(4, 4, 1, 12, p[1][12]) + # now with weights + p = nx.shortest_path(self.cycle, weight="weight") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_dijkstra_path(self.cycle)) + p = nx.shortest_path(self.grid, weight="weight") + validate_grid_path(4, 4, 1, 12, p[1][12]) + # weights and method specified + p = nx.shortest_path(self.cycle, weight="weight", method="dijkstra") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_dijkstra_path(self.cycle)) + p = nx.shortest_path(self.cycle, weight="weight", method="bellman-ford") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle)) + + def test_all_pairs_shortest_path_length(self): + ans = dict(nx.shortest_path_length(self.cycle)) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle)) + ans = dict(nx.shortest_path_length(self.grid)) + assert ans[1][16] == 6 + # now with weights + ans = dict(nx.shortest_path_length(self.cycle, weight="weight")) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle)) + ans = dict(nx.shortest_path_length(self.grid, weight="weight")) + assert ans[1][16] == 6 + # weights and method specified + ans = dict( + nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra") + ) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle)) + ans = dict( + nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford") + ) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle)) + + def test_has_path(self): + G = nx.Graph() + nx.add_path(G, range(3)) + nx.add_path(G, range(3, 5)) + assert nx.has_path(G, 0, 2) + assert not nx.has_path(G, 0, 4) + + def test_all_shortest_paths(self): + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(nx.all_shortest_paths(G, 0, 3)) + # with weights + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight") + ) + # weights and method specified + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight", method="dijkstra") + ) + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, 
weight="weight", method="bellman-ford") + ) + + def test_all_shortest_paths_raise(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.path_graph(4) + G.add_node(4) + list(nx.all_shortest_paths(G, 0, 4)) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + list(nx.all_shortest_paths(G, 0, 1, weight="weight", method="SPAM")) + + def test_all_shortest_paths_zero_weight_edge(self): + g = nx.Graph() + nx.add_path(g, [0, 1, 3]) + nx.add_path(g, [0, 1, 2, 3]) + g.edges[1, 2]["weight"] = 0 + paths30d = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra") + ) + paths03d = list( + nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra") + ) + paths30b = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford") + ) + paths03b = list( + nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford") + ) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30d) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30b) + assert sorted(paths03b) == sorted(p[::-1] for p in paths30b) + + +class TestAverageShortestPathLength: + def test_cycle_graph(self): + ans = nx.average_shortest_path_length(nx.cycle_graph(7)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_path_graph(self): + ans = nx.average_shortest_path_length(nx.path_graph(5)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + + def test_specified_methods(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + def test_disconnected(self): + g = nx.Graph() + g.add_nodes_from(range(3)) + g.add_edge(0, 1) + pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) + g = g.to_directed() + pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) + + def test_trivial_graph(self): + """Tests that the trivial graph has average path length zero, + since there is exactly one path of length zero in the trivial + graph. + + For more information, see issue #1960. 
+ + """ + G = nx.trivial_graph() + assert nx.average_shortest_path_length(G) == 0 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.average_shortest_path_length(nx.null_graph()) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + nx.average_shortest_path_length(G, weight="weight", method="SPAM") + + +class TestAverageShortestPathLengthNumpy: + @classmethod + def setup_class(cls): + global np + import pytest + + np = pytest.importorskip("numpy") + + def test_specified_methods_numpy(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py new file mode 100644 index 0000000..96708f0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py @@ -0,0 +1,116 @@ +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestUnweightedPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + + def test_bidirectional_shortest_path(self): + assert nx.bidirectional_shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.bidirectional_shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.bidirectional_shortest_path(self.grid, 1, 12) + ) + assert nx.bidirectional_shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight=True) == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight=True) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight=True) == 4 + + def test_single_source_shortest_path(self): + p = nx.single_source_shortest_path(self.directed_cycle, 3) + assert p[0] == [3, 4, 5, 6, 0] + p = nx.single_source_shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + p = nx.single_source_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + + def test_single_source_shortest_path_length(self): + pl = nx.single_source_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.cycle, 0)) == lengths + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + assert dict(pl(self.directed_cycle, 0)) == 
lengths + + def test_single_target_shortest_path(self): + p = nx.single_target_shortest_path(self.directed_cycle, 0) + assert p[3] == [3, 4, 5, 6, 0] + p = nx.single_target_shortest_path(self.cycle, 0) + assert p[3] == [3, 2, 1, 0] + p = nx.single_target_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + + def test_single_target_shortest_path_length(self): + pl = nx.single_target_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.cycle, 0)) == lengths + lengths = {0: 0, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.directed_cycle, 0)) == lengths + + def test_all_pairs_shortest_path(self): + p = dict(nx.all_pairs_shortest_path(self.cycle)) + assert p[0][3] == [0, 1, 2, 3] + p = dict(nx.all_pairs_shortest_path(self.grid)) + validate_grid_path(4, 4, 1, 12, p[1][12]) + + def test_all_pairs_shortest_path_length(self): + l = dict(nx.all_pairs_shortest_path_length(self.cycle)) + assert l[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + l = dict(nx.all_pairs_shortest_path_length(self.grid)) + assert l[1][16] == 6 + + def test_predecessor_path(self): + G = nx.path_graph(4) + assert nx.predecessor(G, 0) == {0: [], 1: [0], 2: [1], 3: [2]} + assert nx.predecessor(G, 0, 3) == [2] + + def test_predecessor_cycle(self): + G = nx.cycle_graph(4) + pred = nx.predecessor(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + + def test_predecessor_cutoff(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert 4 not in p + + def test_predecessor_target(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert p == [2] + p = nx.predecessor(G, 0, 3, cutoff=2) + assert p == [] + p, s = nx.predecessor(G, 0, 3, return_seen=True) + assert p == [2] + assert s == 3 + p, s = nx.predecessor(G, 0, 3, cutoff=2, return_seen=True) + assert p == [] + assert s == -1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py new file mode 100644 index 0000000..2b18696 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py @@ -0,0 +1,924 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +def validate_path(G, s, t, soln_len, path, weight="weight"): + assert path[0] == s + assert path[-1] == t + + if callable(weight): + weight_f = weight + else: + if G.is_multigraph(): + + def weight_f(u, v, d): + return min(e.get(weight, 1) for e in d.values()) + + else: + + def weight_f(u, v, d): + return d.get(weight, 1) + + computed = sum(weight_f(u, v, G[u][v]) for u, v in pairwise(path)) + assert soln_len == computed + + +def validate_length_path(G, s, t, soln_len, length, path, weight="weight"): + assert soln_len == length + validate_path(G, s, t, length, path, weight=weight) + + +class WeightedTestBase: + """Base class for test classes that test functions for computing + shortest paths in weighted graphs. 
+ + """ + + def setup(self): + """Creates some graphs for use in the unit tests.""" + cnlti = nx.convert_node_labels_to_integers + self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + self.cycle = nx.cycle_graph(7) + self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + self.XG = nx.DiGraph() + self.XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + self.MXG = nx.MultiDiGraph(self.XG) + self.MXG.add_edge("s", "u", weight=15) + self.XG2 = nx.DiGraph() + self.XG2.add_weighted_edges_from( + [ + [1, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 3, 1], + [1, 3, 50], + [1, 2, 100], + [2, 3, 100], + ] + ) + + self.XG3 = nx.Graph() + self.XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + + self.XG4 = nx.Graph() + self.XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + self.MXG4 = nx.MultiGraph(self.XG4) + self.MXG4.add_edge(0, 1, weight=3) + self.G = nx.DiGraph() # no weights + self.G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + + +class TestWeightedPath(WeightedTestBase): + def test_dijkstra(self): + (D, P) = nx.single_source_dijkstra(self.XG, "s") + validate_path(self.XG, "s", "v", 9, P["v"]) + assert D["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra_path(self.XG, "s")["v"] + ) + assert dict(nx.single_source_dijkstra_path_length(self.XG, "s"))["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra(self.XG, "s")[1]["v"] + ) + validate_path( + self.MXG, "s", "v", 9, nx.single_source_dijkstra_path(self.MXG, "s")["v"] + ) + + GG = self.XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + (D, P) = nx.single_source_dijkstra(GG, "s") + validate_path(GG, "s", "v", 8, P["v"]) + assert D["v"] == 8 # uses lower weight of 2 on u<->x edge + validate_path(GG, "s", "v", 8, nx.dijkstra_path(GG, "s", "v")) + assert nx.dijkstra_path_length(GG, "s", "v") == 8 + + validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3)) + validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3)) + assert nx.dijkstra_path_length(self.XG3, 0, 3) == 15 + validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2)) + assert nx.dijkstra_path_length(self.XG4, 0, 2) == 4 + validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2)) + validate_path( + self.G, "s", "v", 2, nx.single_source_dijkstra(self.G, "s", "v")[1] + ) + validate_path( + self.G, "s", "v", 2, nx.single_source_dijkstra(self.G, "s")[1]["v"] + ) + + validate_path(self.G, "s", "v", 2, nx.dijkstra_path(self.G, "s", "v")) + assert nx.dijkstra_path_length(self.G, "s", "v") == 2 + + # NetworkXError: node s not reachable from moon + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, "s", "moon") + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, "s", "moon") + + validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3)) + validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4)) + + assert nx.single_source_dijkstra(self.cycle, 0, 0) == (0, [0]) + + def 
test_bidirectional_dijkstra(self): + validate_length_path( + self.XG, "s", "v", 9, *nx.bidirectional_dijkstra(self.XG, "s", "v") + ) + validate_length_path( + self.G, "s", "v", 2, *nx.bidirectional_dijkstra(self.G, "s", "v") + ) + validate_length_path( + self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3) + ) + validate_length_path( + self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4) + ) + validate_length_path( + self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3) + ) + validate_length_path( + self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2) + ) + + # need more tests here + P = nx.single_source_dijkstra_path(self.XG, "s")["v"] + validate_path( + self.XG, + "s", + "v", + sum(self.XG[u][v]["weight"] for u, v in zip(P[:-1], P[1:])), + nx.dijkstra_path(self.XG, "s", "v"), + ) + + # check absent source + G = nx.path_graph(2) + pytest.raises(nx.NodeNotFound, nx.bidirectional_dijkstra, G, 3, 0) + + def test_weight_functions(self): + def heuristic(*z): + return sum(val**2 for val in z) + + def getpath(pred, v, s): + return [v] if v == s else getpath(pred, pred[v], s) + [v] + + def goldberg_radzik(g, s, t, weight="weight"): + pred, dist = nx.goldberg_radzik(g, s, weight=weight) + dist = dist[t] + return dist, getpath(pred, t, s) + + def astar(g, s, t, weight="weight"): + path = nx.astar_path(g, s, t, heuristic, weight=weight) + dist = nx.astar_path_length(g, s, t, heuristic, weight=weight) + return dist, path + + def vlp(G, s, t, l, F, w): + res = F(G, s, t, weight=w) + validate_length_path(G, s, t, l, *res, weight=w) + + G = self.cycle + s = 6 + t = 4 + path = [6] + list(range(t + 1)) + + def weight(u, v, _): + return 1 + v**2 + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def weight(u, v, _): + return 2 ** (u * v) + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def test_bidirectional_dijkstra_no_path(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + path = nx.bidirectional_dijkstra(G, 1, 6) + + @pytest.mark.parametrize( + "fn", + ( + nx.dijkstra_path, + nx.dijkstra_path_length, + nx.single_source_dijkstra_path, + nx.single_source_dijkstra_path_length, + nx.single_source_dijkstra, + nx.dijkstra_predecessor_and_distance, + ), + ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with pytest.raises(nx.NodeNotFound): + fn(G, 3, 0) + # Test when source == target, which is handled specially by some functions + with pytest.raises(nx.NodeNotFound): + fn(G, 3, 3) + + def test_dijkstra_predecessor1(self): + G = nx.path_graph(4) + assert nx.dijkstra_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + + def test_dijkstra_predecessor2(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + pred, dist = nx.dijkstra_predecessor_and_distance(G, (0)) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == 
[0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_dijkstra_predecessor3(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s", cutoff=8) + assert "v" not in D + + def test_single_source_dijkstra_path_length(self): + pl = nx.single_source_dijkstra_path_length + assert dict(pl(self.MXG4, 0))[2] == 4 + spl = pl(self.MXG4, 0, cutoff=2) + assert 2 not in spl + + def test_bidirectional_dijkstra_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", weight=10) + G.add_edge("a", "b", weight=100) + dp = nx.bidirectional_dijkstra(G, "a", "b") + assert dp == (10, ["a", "b"]) + + def test_dijkstra_pred_distance_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", key="short", foo=5, weight=100) + G.add_edge("a", "b", key="long", bar=1, weight=110) + p, d = nx.dijkstra_predecessor_and_distance(G, "a") + assert p == {"a": [], "b": ["a"]} + assert d == {"a": 0, "b": 100} + + def test_negative_edge_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert not nx.negative_edge_cycle(G) + G.add_edge(8, 9, weight=-7) + G.add_edge(9, 8, weight=3) + graph_size = len(G) + assert nx.negative_edge_cycle(G) + assert graph_size == len(G) + pytest.raises(ValueError, nx.single_source_dijkstra_path_length, G, 8) + pytest.raises(ValueError, nx.single_source_dijkstra, G, 8) + pytest.raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8) + G.add_edge(9, 10) + pytest.raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10) + + def test_negative_edge_cycle_custom_weight_key(self): + d = nx.DiGraph() + d.add_edge("a", "b", w=-2) + d.add_edge("b", "a", w=-1) + assert nx.negative_edge_cycle(d, weight="w") + + def test_weight_function(self): + """Tests that a callable weight is interpreted as a weight + function instead of an edge attribute. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. + + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. + distance, path = nx.single_source_dijkstra(G, 0, 2) + assert distance == 2 + assert path == [0, 1, 2] + # However, with the above weight function, the shortest path + # should be [0, 2], since that has a very small weight. 
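+
+        # As an aside, a minimal sketch (not part of the upstream test) of the
+        # related behaviour that a callable weight can hide edges by returning
+        # None: the heavy 0-2 edge is skipped, forcing the detour through 1.
+        def hide_heavy(u, v, d):
+            # illustrative helper: drop any edge with weight 5 or more
+            return d["weight"] if d["weight"] < 5 else None
+
+        assert nx.dijkstra_path(G, 0, 2, weight=hide_heavy) == [0, 1, 2]
+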
+ distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight) + assert distance == 1 / 10 + assert path == [0, 2] + + def test_all_pairs_dijkstra_path(self): + cycle = nx.cycle_graph(7) + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 6, 5, 4, 3] + + def test_all_pairs_dijkstra_path_length(self): + cycle = nx.cycle_graph(7) + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + + cycle[1][2]["weight"] = 10 + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + + def test_all_pairs_dijkstra(self): + cycle = nx.cycle_graph(7) + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 6, 5, 4, 3] + + +class TestDijkstraPathLength: + """Unit tests for the :func:`networkx.dijkstra_path_length` + function. + + """ + + def test_weight_function(self): + """Tests for computing the length of the shortest path using + Dijkstra's algorithm with a user-defined weight function. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. + + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. However, with the above weight + # function, the shortest path should be [0, 2], since that has a + # very small weight. + length = nx.dijkstra_path_length(G, 0, 2, weight=weight) + assert length == 1 / 10 + + +class TestMultiSourceDijkstra: + """Unit tests for the multi-source dialect of Dijkstra's shortest + path algorithms. 
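+
+    A small usage sketch (illustrative, not an upstream doctest): with several
+    sources, every node is assigned the distance to its *nearest* source, e.g.::
+
+        G = nx.path_graph(5)
+        nx.multi_source_dijkstra_path_length(G, {0, 4})
+        # -> {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}  (node 2 is two steps from either end)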
+ + """ + + def test_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra(nx.Graph(), {}) + + def test_path_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path(nx.Graph(), {}) + + def test_path_length_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path_length(nx.Graph(), {}) + + @pytest.mark.parametrize( + "fn", + ( + nx.multi_source_dijkstra_path, + nx.multi_source_dijkstra_path_length, + nx.multi_source_dijkstra, + ), + ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 0) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 3) + + def test_two_sources(self): + edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + sources = {0, 4} + distances, paths = nx.multi_source_dijkstra(G, sources) + expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0} + expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]} + assert distances == expected_distances + assert paths == expected_paths + + def test_simple_paths(self): + G = nx.path_graph(4) + lengths = nx.multi_source_dijkstra_path_length(G, [0]) + assert lengths == {n: n for n in G} + paths = nx.multi_source_dijkstra_path(G, [0]) + assert paths == {n: list(range(n + 1)) for n in G} + + +class TestBellmanFordAndGoldbergRadzik(WeightedTestBase): + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]} + assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0} + assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]}) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0}) + assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0}) + + def test_absent_source_bellman_ford(self): + # the check is in _bellman_ford; this provides regression testing + # against later changes to "client" Bellman-Ford functions + G = nx.path_graph(2) + for fn in ( + nx.bellman_ford_predecessor_and_distance, + nx.bellman_ford_path, + nx.bellman_ford_path_length, + nx.single_source_bellman_ford_path, + nx.single_source_bellman_ford_path_length, + nx.single_source_bellman_ford, + ): + pytest.raises(nx.NodeNotFound, fn, G, 3, 0) + pytest.raises(nx.NodeNotFound, fn, G, 3, 3) + + def test_absent_source_goldberg_radzik(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.goldberg_radzik(G, 3, 0) + + def test_negative_cycle_heuristic(self): + G = nx.DiGraph() + G.add_edge(0, 1, weight=-1) + G.add_edge(1, 2, weight=-1) + G.add_edge(2, 3, weight=-1) + G.add_edge(3, 0, weight=3) + assert not nx.negative_edge_cycle(G, heuristic=True) + G.add_edge(2, 0, weight=1.999) + assert nx.negative_edge_cycle(G, heuristic=True) + G.edges[2, 0]["weight"] = 2 + assert not nx.negative_edge_cycle(G, heuristic=True) + + def test_negative_cycle_consistency(self): + import random + + unif = random.uniform + for random_seed in range(2): # range(20): + random.seed(random_seed) + for density in [0.1, 0.9]: # .3, .7, .9]: + for N in [1, 10, 20]: # range(1, 60 - int(30 * density)): + for max_cost in [1, 90]: # [1, 10, 40, 90]: + G = nx.binomial_graph(N, density, seed=4, directed=True) + edges = ((u, v, unif(-1, max_cost)) for u, v in G.edges) + G.add_weighted_edges_from(edges) + + no_heuristic = nx.negative_edge_cycle(G, heuristic=False) + with_heuristic = nx.negative_edge_cycle(G, heuristic=True) + assert no_heuristic == with_heuristic + + def test_negative_cycle(self): 
+ G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-7) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.cycle_graph(5) # undirected Graph + G.add_edge(1, 2, weight=-3) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.DiGraph([(1, 1, {"weight": -1})]) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + + def test_find_negative_cycle_longer_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + nx.add_cycle(G, [3, 5, 6, 7, 8, 9]) + G.add_edge(1, 2, weight=-30) + assert nx.find_negative_cycle(G, 1) == [0, 1, 2, 3, 4, 0] + assert nx.find_negative_cycle(G, 7) == [2, 3, 4, 0, 1, 2] + + def test_find_negative_cycle_no_cycle(self): + G = nx.path_graph(5, create_using=nx.DiGraph()) + pytest.raises(nx.NetworkXError, nx.find_negative_cycle, G, 3) + + def test_find_negative_cycle_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1, weight=-1) + assert nx.find_negative_cycle(G, 1) == [1, 0, 1] + + def test_negative_weight(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-3) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + 4: [0, 1, 2, 3, 4], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: -2, + 3: -1, + 4: 0, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 1, 3: 2, 4: 3}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + + def test_not_connected(self): + G = nx.complete_graph(6) + G.add_edge(10, 11) + G.add_edge(10, 12) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert 
nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + # not connected, with a component not containing the source that + # contains a negative cycle. + G = nx.complete_graph(6) + G.add_edges_from( + [ + ("A", "B", {"load": 3}), + ("B", "C", {"load": -10}), + ("C", "A", {"load": 2}), + ] + ) + assert nx.single_source_bellman_ford_path(G, 0, weight="load") == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0, weight="load") == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0, weight="load") == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0, weight="load") == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert nx.goldberg_radzik(G, 0, weight="load") == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + def test_multigraph(self): + assert nx.bellman_ford_path(self.MXG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.MXG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.MXG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert nx.single_source_bellman_ford_path_length(self.MXG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.MXG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + P, D = nx.goldberg_radzik(self.MXG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2] + assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4 + assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2] + assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4 + D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2) + assert D == 4 + assert P == [0, 1, 2] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0) + assert P[2] == [1] + assert D[2] == 4 + P, D = nx.goldberg_radzik(self.MXG4, 0) + assert P[2] == 1 + assert D[2] == 4 + + def test_others(self): + assert nx.bellman_ford_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.XG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.XG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert nx.single_source_bellman_ford_path_length(self.XG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.XG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + (P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.goldberg_radzik(self.XG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + + def test_path_graph(self): + G = nx.path_graph(4) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 2, 3: 3}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.goldberg_radzik(G, 
0) == ( + {0: None, 1: 0, 2: 1, 3: 2}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.single_source_bellman_ford_path(G, 3) == { + 0: [3, 2, 1, 0], + 1: [3, 2, 1], + 2: [3, 2], + 3: [3], + } + assert nx.single_source_bellman_ford_path_length(G, 3) == { + 0: 3, + 1: 2, + 2: 1, + 3: 0, + } + assert nx.single_source_bellman_ford(G, 3) == ( + {0: 3, 1: 2, 2: 1, 3: 0}, + {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 3) == ( + {0: [1], 1: [2], 2: [3], 3: []}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + assert nx.goldberg_radzik(G, 3) == ( + {0: 1, 1: 2, 2: 3, 3: None}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + + def test_4_cycle(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + dist, path = nx.single_source_bellman_ford(G, 0) + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + assert path[0] == [0] + assert path[1] == [0, 1] + assert path[2] in [[0, 1, 2], [0, 3, 2]] + assert path[3] == [0, 3] + + pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + pred, dist = nx.goldberg_radzik(G, 0) + assert pred[0] is None + assert pred[1] == 0 + assert pred[2] in [1, 3] + assert pred[3] == 0 + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_negative_weight_bf_path(self): + G = nx.DiGraph() + G.add_nodes_from("abcd") + G.add_edge("a", "d", weight=0) + G.add_edge("a", "b", weight=1) + G.add_edge("b", "c", weight=-3) + G.add_edge("c", "d", weight=1) + + assert nx.bellman_ford_path(G, "a", "d") == ["a", "b", "c", "d"] + assert nx.bellman_ford_path_length(G, "a", "d") == -1 + + def test_zero_cycle_smoke(self): + D = nx.DiGraph() + D.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 1, -2)]) + + nx.bellman_ford_path(D, 1, 3) + nx.dijkstra_path(D, 1, 3) + nx.bidirectional_dijkstra(D, 1, 3) + # FIXME nx.goldberg_radzik(D, 1) + + +class TestJohnsonAlgorithm(WeightedTestBase): + def test_single_node_graph(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_node(0) + nx.johnson(G) + + def test_negative_cycle(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + G = nx.Graph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + + def test_negative_weights(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ) + paths = nx.johnson(G) + assert paths == { + "1": {"1": ["1"], "3": ["1", "2", "3"], "2": ["1", "2"]}, + "0": { + "1": ["0", "1"], + "0": ["0"], + "3": ["0", "1", "2", "3"], + "2": ["0", "1", "2"], + }, + "3": {"3": ["3"]}, + "2": {"3": ["2", "3"], "2": ["2"]}, + } + + def test_unweighted_graph(self): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(5) + nx.johnson(G) + + def test_graphs(self): + validate_path(self.XG, "s", "v", 9, nx.johnson(self.XG)["s"]["v"]) + validate_path(self.MXG, "s", "v", 9, nx.johnson(self.MXG)["s"]["v"]) + validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3]) + validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3]) + validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2]) + validate_path(self.MXG4, 0, 2, 4, 
nx.johnson(self.MXG4)[0][2]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/unweighted.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/unweighted.py new file mode 100644 index 0000000..5363b24 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/unweighted.py @@ -0,0 +1,530 @@ +""" +Shortest path algorithms for unweighted graphs. +""" +import networkx as nx + +__all__ = [ + "bidirectional_shortest_path", + "single_source_shortest_path", + "single_source_shortest_path_length", + "single_target_shortest_path", + "single_target_shortest_path_length", + "all_pairs_shortest_path", + "all_pairs_shortest_path_length", + "predecessor", +] + + +def single_source_shortest_path_length(G, source, cutoff=None): + """Compute the shortest path lengths from source to all reachable nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : dict + Dict keyed by node to shortest path length to source. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.single_source_shortest_path_length(G, 0) + >>> length[4] + 4 + >>> for node in length: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + + See Also + -------- + shortest_path_length + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} is not in G") + if cutoff is None: + cutoff = float("inf") + nextlevel = {source: 1} + return dict(_single_shortest_path_length(G.adj, nextlevel, cutoff)) + + +def _single_shortest_path_length(adj, firstlevel, cutoff): + """Yields (node, level) in a breadth first search + + Shortest Path Length helper function + Parameters + ---------- + adj : dict + Adjacency dict or view + firstlevel : dict + starting nodes, e.g. {source: 1} or {target: 1} + cutoff : int or float + level at which we stop the process + """ + seen = {} # level (number of hops) when seen in BFS + level = 0 # the current level + nextlevel = set(firstlevel) # set of nodes to check at next level + n = len(adj) + while nextlevel and cutoff >= level: + thislevel = nextlevel # advance to next level + nextlevel = set() # and start a new set (fringe) + found = [] + for v in thislevel: + if v not in seen: + seen[v] = level # set the level of vertex v + found.append(v) + yield (v, level) + if len(seen) == n: + return + for v in found: + nextlevel.update(adj[v]) + level += 1 + del seen + + +def single_target_shortest_path_length(G, target, cutoff=None): + """Compute the shortest path lengths to target from all reachable nodes. + + Parameters + ---------- + G : NetworkX graph + + target : node + Target node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : iterator + (source, shortest path length) iterator + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> length = dict(nx.single_target_shortest_path_length(G, 4)) + >>> length[0] + 4 + >>> for node in range(5): + ... 
print(f"{node}: {length[node]}") + 0: 4 + 1: 3 + 2: 2 + 3: 1 + 4: 0 + + See Also + -------- + single_source_shortest_path_length, shortest_path_length + """ + if target not in G: + raise nx.NodeNotFound(f"Target {target} is not in G") + + if cutoff is None: + cutoff = float("inf") + # handle either directed or undirected + adj = G.pred if G.is_directed() else G.adj + nextlevel = {target: 1} + return _single_shortest_path_length(adj, nextlevel, cutoff) + + +def all_pairs_shortest_path_length(G, cutoff=None): + """Computes the shortest path lengths between all nodes in `G`. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer, optional + Depth at which to stop the search. Only paths of length at most + `cutoff` are returned. + + Returns + ------- + lengths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Notes + ----- + The iterator returned only has reachable node pairs. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_shortest_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + """ + length = single_source_shortest_path_length + # TODO This can be trivially parallelized. + for n in G: + yield (n, length(G, n, cutoff=cutoff)) + + +def bidirectional_shortest_path(G, source, target): + """Returns a list of nodes in a shortest path between source and target. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + Returns + ------- + path: list + List of nodes in a path from source to target. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + See Also + -------- + shortest_path + + Notes + ----- + This algorithm is used by shortest_path(G, source, target). + """ + + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target): + """Bidirectional shortest path helper. + + Returns (pred, succ, w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. 
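+
+    As a sketch of how a caller stitches these values into a path (this mirrors
+    ``bidirectional_shortest_path`` above and is not an upstream doctest)::
+
+        G = nx.path_graph(4)
+        pred, succ, w = _bidirectional_pred_succ(G, 0, 3)
+        path = []
+        while w is not None:      # walk back from the meeting point to the source
+            path.append(w)
+            w = pred[w]
+        path.reverse()
+        w = succ[path[-1]]
+        while w is not None:      # then walk forward towards the target
+            path.append(w)
+            w = succ[w]
+        # path is now [0, 1, 2, 3]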
+ """ + # does BFS from both source and target and meets in the middle + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.pred + Gsucc = G.succ + else: + Gpred = G.adj + Gsucc = G.adj + + # predecesssor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc[v]: + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: # path found + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred[v]: + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def single_source_shortest_path(G, source, cutoff=None): + """Compute shortest path between source + and all other nodes reachable from source. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : dictionary + Dictionary, keyed by target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.single_source_shortest_path(G, 0) + >>> path[4] + [0, 1, 2, 3, 4] + + Notes + ----- + The shortest path is not necessarily unique. So there can be multiple + paths between the source and each target node, all of which have the + same 'shortest' length. For each target node, this function returns + only one of those paths. + + See Also + -------- + shortest_path + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} not in G") + + def join(p1, p2): + return p1 + p2 + + if cutoff is None: + cutoff = float("inf") + nextlevel = {source: 1} # list of nodes to check at next level + paths = {source: [source]} # paths dictionary (paths to key from source) + return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join)) + + +def _single_shortest_path(adj, firstlevel, paths, cutoff, join): + """Returns shortest paths + + Shortest Path helper function + Parameters + ---------- + adj : dict + Adjacency dict or view + firstlevel : dict + starting nodes, e.g. {source: 1} or {target: 1} + paths : dict + paths for starting nodes, e.g. {source: [source]} + cutoff : int or float + level at which we stop the process + join : function + function to construct a path from two partial paths. Requires two + list inputs `p1` and `p2`, and returns a list. Usually returns + `p1 + p2` (forward from source) or `p2 + p1` (backward from target) + """ + level = 0 # the current level + nextlevel = firstlevel + while nextlevel and cutoff > level: + thislevel = nextlevel + nextlevel = {} + for v in thislevel: + for w in adj[v]: + if w not in paths: + paths[w] = join(paths[v], [w]) + nextlevel[w] = 1 + level += 1 + return paths + + +def single_target_shortest_path(G, target, cutoff=None): + """Compute shortest path to target from all nodes that reach target. + + Parameters + ---------- + G : NetworkX graph + + target : node label + Target node for path + + cutoff : integer, optional + Depth to stop the search. 
Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : dictionary + Dictionary, keyed by target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> path = nx.single_target_shortest_path(G, 4) + >>> path[0] + [0, 1, 2, 3, 4] + + Notes + ----- + The shortest path is not necessarily unique. So there can be multiple + paths between the source and each target node, all of which have the + same 'shortest' length. For each target node, this function returns + only one of those paths. + + See Also + -------- + shortest_path, single_source_shortest_path + """ + if target not in G: + raise nx.NodeNotFound(f"Target {target} not in G") + + def join(p1, p2): + return p2 + p1 + + # handle undirected graphs + adj = G.pred if G.is_directed() else G.adj + if cutoff is None: + cutoff = float("inf") + nextlevel = {target: 1} # list of nodes to check at next level + paths = {target: [target]} # paths dictionary (paths to key from source) + return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join)) + + +def all_pairs_shortest_path(G, cutoff=None): + """Compute shortest paths between all nodes. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer, optional + Depth at which to stop the search. Only paths of length at most + `cutoff` are returned. + + Returns + ------- + lengths : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_shortest_path(G)) + >>> print(path[0][4]) + [0, 1, 2, 3, 4] + + See Also + -------- + floyd_warshall + + """ + # TODO This can be trivially parallelized. + for n in G: + yield (n, single_source_shortest_path(G, n, cutoff=cutoff)) + + +def predecessor(G, source, target=None, cutoff=None, return_seen=None): + """Returns dict of predecessors for the path from source to all nodes in G + + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path. If provided only predecessors between + source and target are returned + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + + Returns + ------- + pred : dictionary + Dictionary, keyed by node, of predecessors in the shortest path. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> list(G) + [0, 1, 2, 3] + >>> nx.predecessor(G, 0) + {0: [], 1: [0], 2: [1], 3: [2]} + + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} not in G") + + level = 0 # the current level + nextlevel = [source] # list of nodes to check at next level + seen = {source: level} # level (number of hops) when seen in BFS + pred = {source: []} # predecessor dictionary + while nextlevel: + level = level + 1 + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in G[v]: + if w not in seen: + pred[w] = [v] + seen[w] = level + nextlevel.append(w) + elif seen[w] == level: # add v to predecessor list if it + pred[w].append(v) # is at the correct level + if cutoff and cutoff <= level: + break + + if target is not None: + if return_seen: + if target not in pred: + return ([], -1) # No predecessor + return (pred[target], seen[target]) + else: + if target not in pred: + return [] # No predecessor + return pred[target] + else: + if return_seen: + return (pred, seen) + else: + return pred diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/weighted.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/weighted.py new file mode 100644 index 0000000..ef0ee63 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/shortest_paths/weighted.py @@ -0,0 +1,2464 @@ +""" +Shortest path algorithms for weighted graphs. +""" + +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors + +__all__ = [ + "dijkstra_path", + "dijkstra_path_length", + "bidirectional_dijkstra", + "single_source_dijkstra", + "single_source_dijkstra_path", + "single_source_dijkstra_path_length", + "multi_source_dijkstra", + "multi_source_dijkstra_path", + "multi_source_dijkstra_path_length", + "all_pairs_dijkstra", + "all_pairs_dijkstra_path", + "all_pairs_dijkstra_path_length", + "dijkstra_predecessor_and_distance", + "bellman_ford_path", + "bellman_ford_path_length", + "single_source_bellman_ford", + "single_source_bellman_ford_path", + "single_source_bellman_ford_path_length", + "all_pairs_bellman_ford_path", + "all_pairs_bellman_ford_path_length", + "bellman_ford_predecessor_and_distance", + "negative_edge_cycle", + "find_negative_cycle", + "goldberg_radzik", + "johnson", +] + + +def _weight_function(G, weight): + """Returns a function that returns the weight of an edge. + + The returned function is specifically suitable for input to + functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`. + + Parameters + ---------- + G : NetworkX graph. + + weight : string or function + If it is callable, `weight` itself is returned. If it is a string, + it is assumed to be the name of the edge attribute that represents + the weight of an edge. In that case, a function is returned that + gets the edge weight according to the specified edge attribute. + + Returns + ------- + function + This function returns a callable that accepts exactly three inputs: + a node, an node adjacent to the first one, and the edge attribute + dictionary for the eedge joining those nodes. That function returns + a number representing the weight of an edge. + + If `G` is a multigraph, and `weight` is not callable, the + minimum edge weight over all parallel edges is returned. If any edge + does not have an attribute with key `weight`, it is assumed to + have weight one. 
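+
+    A rough usage sketch (illustrative only, using a tiny throwaway graph)::
+
+        G = nx.Graph()
+        G.add_edge("a", "b", weight=3)
+        wt = _weight_function(G, "weight")          # attribute-name form
+        wt("a", "b", G["a"]["b"])                   # -> 3
+        wt = _weight_function(G, lambda u, v, d: 2 * d["weight"])
+        wt("a", "b", G["a"]["b"])                   # -> 6, callables pass through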
+ + """ + if callable(weight): + return weight + # If the weight keyword argument is not callable, we assume it is a + # string representing the edge attribute containing the weight of + # the edge. + if G.is_multigraph(): + return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values()) + return lambda u, v, data: data.get(weight, 1) + + +def dijkstra_path(G, source, target, weight="weight"): + """Returns the shortest weighted path from source to target in G. + + Uses Dijkstra's Method to compute the shortest weighted path + between two nodes in a graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + path : list + List of nodes in a shortest path. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.dijkstra_path(G, 0, 4)) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + The weight function can be used to include node weights. + + >>> def func(u, v, d): + ... node_u_wt = G.nodes[u].get("node_weight", 1) + ... node_v_wt = G.nodes[v].get("node_weight", 1) + ... edge_wt = d.get("weight", 1) + ... return node_u_wt / 2 + node_v_wt / 2 + edge_wt + + In this example we take the average of start and end node + weights of an edge and add it to the weight of the edge. + + The function :func:`single_source_dijkstra` computes both + path and length-of-path if you need both, use that. + + See Also + -------- + bidirectional_dijkstra + bellman_ford_path + single_source_dijkstra + """ + (length, path) = single_source_dijkstra(G, source, target=target, weight=weight) + return path + + +def dijkstra_path_length(G, source, target, weight="weight"): + """Returns the shortest weighted path length in G from source to target. + + Uses Dijkstra's Method to compute the shortest weighted path length + between two nodes in a graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. 
+ + Returns + ------- + length : number + Shortest path length. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dijkstra_path_length(G, 0, 4) + 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + The function :func:`single_source_dijkstra` computes both + path and length-of-path if you need both, use that. + + See Also + -------- + bidirectional_dijkstra + bellman_ford_path_length + single_source_dijkstra + + """ + if source not in G: + raise nx.NodeNotFound(f"Node {source} not found in graph") + if source == target: + return 0 + weight = _weight_function(G, weight) + length = _dijkstra(G, source, weight, target=target) + try: + return length[target] + except KeyError as err: + raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from err + + +def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"): + """Find shortest weighted paths in G from a source node. + + Compute shortest path between source and all other reachable + nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + paths : dictionary + Dictionary of shortest path lengths keyed by target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.single_source_dijkstra_path(G, 0) + >>> path[4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + See Also + -------- + single_source_dijkstra, single_source_bellman_ford + + """ + return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight) + + +def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"): + """Find shortest weighted path lengths in G from a source node. + + Compute the shortest path length between source and all other + reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. 
+ + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length : dict + Dict keyed by node to shortest path length from source. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.single_source_dijkstra_path_length(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + See Also + -------- + single_source_dijkstra, single_source_bellman_ford_path_length + + """ + return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight) + + +def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths from a source node. + + Compute the shortest path length between source and all other + reachable nodes for a weighted graph. + + Uses Dijkstra's algorithm to compute shortest paths and lengths + between a source and all other reachable nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list. + If target is None, paths and lengths to all nodes are computed. + The return value is a tuple of two dictionaries keyed by target nodes. + The first dictionary stores distance to each target node. + The second stores the path to each target node. + If target is not None, returns a tuple (distance, path), where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_dijkstra(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... 
print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + >>> path[4] + [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_dijkstra(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + See Also + -------- + single_source_dijkstra_path + single_source_dijkstra_path_length + single_source_bellman_ford + """ + return multi_source_dijkstra( + G, {source}, cutoff=cutoff, target=target, weight=weight + ) + + +def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted paths in G from a given set of source + nodes. + + Compute shortest path between any of the source nodes and all other + reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + paths : dictionary + Dictionary of shortest paths keyed by target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.multi_source_dijkstra_path(G, {0, 4}) + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra, multi_source_bellman_ford + + """ + length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight) + return path + + +def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted path lengths in G from a given set of + source nodes. + + Compute the shortest path length between any of the source nodes and + all other reachable nodes for a weighted graph. 
+ + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length : dict + Dict keyed by node to shortest path length to nearest source. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.multi_source_dijkstra_path_length(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + weight = _weight_function(G, weight) + return _dijkstra_multisource(G, sources, weight, cutoff=cutoff) + + +def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths from a given set of + source nodes. + + Uses Dijkstra's algorithm to compute the shortest paths and lengths + between one of the source nodes and the given `target`, or all other + reachable nodes if not specified, for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + target : node label, optional + Ending node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra_path + multi_source_dijkstra_path_length + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + if target in sources: + return (0, [target]) + weight = _weight_function(G, weight) + paths = {source: [source] for source in sources} # dictionary of paths + dist = _dijkstra_multisource( + G, sources, weight, paths=paths, cutoff=cutoff, target=target + ) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + raise nx.NetworkXNoPath(f"No path to {target}.") from err + + +def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None): + """Uses Dijkstra's algorithm to find shortest weighted paths from a + single source. + + This is a convenience function for :func:`_dijkstra_multisource` + with all the arguments the same, except the keyword argument + `sources` set to ``[source]``. + + """ + return _dijkstra_multisource( + G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target + ) + + +def _dijkstra_multisource( + G, sources, weight, pred=None, paths=None, cutoff=None, target=None +): + """Uses Dijkstra's algorithm to find shortest weighted paths + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty iterable of nodes + Starting nodes for paths. If this is just an iterable containing + a single node, then all paths computed by this function will + start from that node. If there are two or more nodes in this + iterable, the computed paths may begin from any one of the start + nodes. 
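A small illustrative sketch (toy graph assumed) of the edge cases handled by multi_source_dijkstra above: a target that is itself a source short-circuits to distance 0, and an unreachable target surfaces as NetworkXNoPath rather than a missing key:

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=2)
G.add_node("z")  # present in G but not reachable from the sources

print(nx.multi_source_dijkstra(G, {"a"}, target="a"))  # (0, ['a'])

try:
    nx.multi_source_dijkstra(G, {"a", "b"}, target="z")
except nx.NetworkXNoPath as err:
    print(err)  # No path to z.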
+ + weight: function + Function with (u, v, data) input that returns that edges weight + + pred: dict of lists, optional(default=None) + dict to store a list of predecessors keyed by that node + If None, predecessors are not stored. + + paths: dict, optional (default=None) + dict to store the path list from source to each node, keyed by node. + If None, paths are not stored. + + target : node label, optional + Ending node for path. Search is halted when target is found. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + Returns + ------- + distance : dictionary + A mapping from node to shortest distance to that node from one + of the source nodes. + + Raises + ------ + NodeNotFound + If any of `sources` is not in `G`. + + Notes + ----- + The optional predecessor and path dictionaries can be accessed by + the caller through the original pred and paths objects passed + as arguments. No need to explicitly return pred or paths. + + """ + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + + push = heappush + pop = heappop + dist = {} # dictionary of final distances + seen = {} + # fringe is heapq with 3-tuples (distance,c,node) + # use the count c to avoid comparing nodes (may not be able to) + c = count() + fringe = [] + for source in sources: + seen[source] = 0 + push(fringe, (0, next(c), source)) + while fringe: + (d, _, v) = pop(fringe) + if v in dist: + continue # already searched this node. + dist[v] = d + if v == target: + break + for u, e in G_succ[v].items(): + cost = weight(v, u, e) + if cost is None: + continue + vu_dist = dist[v] + cost + if cutoff is not None: + if vu_dist > cutoff: + continue + if u in dist: + u_dist = dist[u] + if vu_dist < u_dist: + raise ValueError("Contradictory paths found:", "negative weights?") + elif pred is not None and vu_dist == u_dist: + pred[u].append(v) + elif u not in seen or vu_dist < seen[u]: + seen[u] = vu_dist + push(fringe, (vu_dist, next(c), u)) + if paths is not None: + paths[u] = paths[v] + [u] + if pred is not None: + pred[u] = [v] + elif vu_dist == seen[u]: + if pred is not None: + pred[u].append(v) + + # The optional predecessor and path dictionaries can be accessed + # by the caller via the pred and paths objects passed as arguments. + return dist + + +def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"): + """Compute weighted shortest path length and predecessors. + + Uses Dijkstra's Method to obtain the shortest weighted paths + and return dictionaries of predecessors for each node and + distance for each node from the `source`. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. 
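For readers skimming the diff, here is a self-contained sketch of the lazy-deletion heap technique that _dijkstra_multisource above is built on, written against a plain dict-of-dicts adjacency; the helper name and the omission of path/predecessor bookkeeping and the negative-weight check are simplifications for illustration only:

from heapq import heappush, heappop
from itertools import count

def multisource_dijkstra_sketch(adj, sources):
    """adj maps u -> {v: weight}; returns node -> distance to the nearest source."""
    dist, seen, c, fringe = {}, {}, count(), []
    for s in sources:
        seen[s] = 0
        heappush(fringe, (0, next(c), s))  # the counter breaks ties without comparing nodes
    while fringe:
        d, _, v = heappop(fringe)
        if v in dist:
            continue  # stale heap entry (lazy deletion)
        dist[v] = d
        for u, w in adj[v].items():
            vu = d + w
            if u not in dist and (u not in seen or vu < seen[u]):
                seen[u] = vu
                heappush(fringe, (vu, next(c), u))
    return dist

adj = {0: {1: 1}, 1: {0: 1, 2: 5}, 2: {1: 5, 3: 1}, 3: {2: 1}}
print(multisource_dijkstra_sketch(adj, {0, 3}))  # nodes 0 and 3 at distance 0, nodes 1 and 2 at distance 1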
The function must + return a number. + + Returns + ------- + pred, distance : dictionaries + Returns two dictionaries representing a list of predecessors + of a node and the distance to each node. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The list of predecessors contains more than one element only when + there are more than one shortest paths to the key node. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1) + >>> sorted(pred.items()) + [(0, []), (1, [0])] + >>> sorted(dist.items()) + [(0, 0), (1, 1)] + """ + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + weight = _weight_function(G, weight) + pred = {source: []} # dictionary of predecessors + return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff)) + + +def all_pairs_dijkstra(G, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths between all nodes. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edge[u][v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Yields + ------ + (node, (distance, path)) : (node obj, (dict, dict)) + Each source node has two associated dicts. The first holds distance + keyed by target and the second holds paths keyed by target. + (See single_source_dijkstra for the source/target node terminology.) + If desired you can apply `dict()` to this function to create a dict + keyed by source node to the two dicts. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> len_path = dict(nx.all_pairs_dijkstra(G)) + >>> len_path[3][0][1] + 2 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"3 - {node}: {len_path[3][0][node]}") + 3 - 0: 3 + 3 - 1: 2 + 3 - 2: 1 + 3 - 3: 0 + 3 - 4: 1 + >>> len_path[3][1][1] + [3, 2, 1] + >>> for n, (dist, path) in nx.all_pairs_dijkstra(G): + ... print(path[1]) + [0, 1] + [1] + [2, 1] + [3, 2, 1] + [4, 3, 2, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The yielded dicts only have keys for reachable nodes. + """ + for n in G: + dist, path = single_source_dijkstra(G, n, cutoff=cutoff, weight=weight) + yield (n, (dist, path)) + + +def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"): + """Compute shortest path lengths between all nodes in a weighted graph. 
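A hedged sketch (toy graph assumed) of how the pred dictionary returned by dijkstra_predecessor_and_distance above can be walked backwards to recover a shortest path; the values are lists precisely because several shortest paths may reach the same node:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([("s", "a", 1), ("s", "b", 1), ("a", "t", 1), ("b", "t", 1)])
pred, dist = nx.dijkstra_predecessor_and_distance(G, "s")

print(sorted(pred["t"]), dist["t"])  # ['a', 'b'] 2 -- two equally short ways into t

# Follow one predecessor chain back from t to the source.
node, path = "t", ["t"]
while pred[node]:
    node = pred[node][0]
    path.append(node)
print(path[::-1])  # e.g. ['s', 'a', 't']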
+ + Parameters + ---------- + G : NetworkX graph + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_dijkstra_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionary returned only has keys for reachable node pairs. + """ + length = single_source_dijkstra_path_length + for n in G: + yield (n, length(G, n, cutoff=cutoff, weight=weight)) + + +def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"): + """Compute shortest paths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_dijkstra_path(G)) + >>> path[0][4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + floyd_warshall, all_pairs_bellman_ford_path + + """ + path = single_source_dijkstra_path + # TODO This can be trivially parallelized. + for n in G: + yield (n, path(G, n, cutoff=cutoff, weight=weight)) + + +def bellman_ford_predecessor_and_distance( + G, source, target=None, weight="weight", heuristic=False +): + """Compute shortest path lengths and predecessors on shortest paths + in weighted graphs. + + The algorithm has a running time of $O(mn)$ where $n$ is the number of + nodes and $m$ is the number of edges. It is slower than Dijkstra but + can handle negative edge weights. 
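One practical point about the all_pairs_dijkstra* helpers defined above: they are generators, so results can be streamed one source at a time instead of materialising the full table. A minimal sketch (toy graph assumed):

import networkx as nx

G = nx.path_graph(4)

for source, lengths in nx.all_pairs_dijkstra_path_length(G):
    print(source, lengths[3])  # distance from this source to node 3
    break                      # stop early; nothing else is computed

all_lengths = dict(nx.all_pairs_dijkstra_path_length(G))  # materialise only when needed
print(all_lengths[0][3])  # 3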
+ + If a negative cycle is detected, you can use :func:`find_negative_cycle` + to return the cycle and examine it. Shortest paths are not defined when + a negative cycle exists because once reached, the path can cycle forever + to build up arbitrarily low weights. + + Parameters + ---------- + G : NetworkX graph + The algorithm works for all types of graphs, including directed + graphs and multigraphs. + + source: node label + Starting node for path + + target : node label, optional + Ending node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + heuristic : bool + Determines whether to use a heuristic to early detect negative + cycles at a hopefully negligible cost. + + Returns + ------- + pred, dist : dictionaries + Returns two dictionaries keyed by node to predecessor in the + path and to the distance from the source respectively. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> G = nx.cycle_graph(5, create_using=nx.DiGraph()) + >>> G[1][2]["weight"] = -7 + >>> nx.bellman_ford_predecessor_and_distance(G, 0) + Traceback (most recent call last): + ... + networkx.exception.NetworkXUnbounded: Negative cycle detected. + + See Also + -------- + find_negative_cycle + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionaries returned only have keys for nodes reachable from + the source. + + In the case where the (di)graph is not connected, if a component + not containing the source contains a negative (di)cycle, it + will not be detected. + + In NetworkX v2.1 and prior, the source node had predecessor `[None]`. 
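A brief illustration (toy digraph assumed) of the behaviour documented above: a negative edge without a negative cycle is handled correctly, while a reachable negative cycle raises NetworkXUnbounded:

import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([("s", "a", 4), ("s", "b", 1), ("b", "a", -2), ("a", "t", 1)])

pred, dist = nx.bellman_ford_predecessor_and_distance(G, "s")
print(dist["a"], dist["t"])  # -1 0 -- the negative edge b->a is used
print(pred["a"])             # ['b']

G.add_edge("a", "b", weight=-5)  # now b -> a -> b has total weight -7
try:
    nx.bellman_ford_predecessor_and_distance(G, "s")
except nx.NetworkXUnbounded as err:
    print(err)  # Negative cycle detected.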
+ In NetworkX v2.2 this changed to the source node having predecessor `[]` + """ + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + weight = _weight_function(G, weight) + if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)): + raise nx.NetworkXUnbounded("Negative cycle detected.") + + dist = {source: 0} + pred = {source: []} + + if len(G) == 1: + return pred, dist + + weight = _weight_function(G, weight) + + dist = _bellman_ford( + G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic + ) + return (pred, dist) + + +def _bellman_ford( + G, + source, + weight, + pred=None, + paths=None, + dist=None, + target=None, + heuristic=True, +): + """Calls relaxation loop for Bellman–Ford algorithm and builds paths + + This is an implementation of the SPFA variant. + See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm + + Parameters + ---------- + G : NetworkX graph + + source: list + List of source nodes. The shortest path from any of the source + nodes will be found if multiple sources are provided. + + weight : function + The weight of an edge is the value returned by the function. The + function must accept exactly three positional arguments: the two + endpoints of an edge and the dictionary of edge attributes for + that edge. The function must return a number. + + pred: dict of lists, optional (default=None) + dict to store a list of predecessors keyed by that node + If None, predecessors are not stored + + paths: dict, optional (default=None) + dict to store the path list from source to each node, keyed by node + If None, paths are not stored + + dist: dict, optional (default=None) + dict to store distance from source to the keyed node + If None, returned dist dict contents default to 0 for every node in the + source list + + target: node label, optional + Ending node for path. Path lengths to other destinations may (and + probably will) be incorrect. + + heuristic : bool + Determines whether to use a heuristic to early detect negative + cycles at a hopefully negligible cost. + + Returns + ------- + dist : dict + Returns a dict keyed by node to the distance from the source. + Dicts for paths and pred are in the mutated input dicts by those names. + + Raises + ------ + NodeNotFound + If any of `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle + """ + if pred is None: + pred = {v: [] for v in source} + + if dist is None: + dist = {v: 0 for v in source} + + negative_cycle_found = _inner_bellman_ford( + G, + source, + weight, + pred, + dist, + heuristic, + ) + if negative_cycle_found is not None: + raise nx.NetworkXUnbounded("Negative cycle detected.") + + if paths is not None: + sources = set(source) + dsts = [target] if target is not None else pred + for dst in dsts: + gen = _build_paths_from_predecessors(sources, dst, pred) + paths[dst] = next(gen) + + return dist + + +def _inner_bellman_ford( + G, + sources, + weight, + pred, + dist=None, + heuristic=True, +): + """Inner Relaxation loop for Bellman–Ford algorithm. + + This is an implementation of the SPFA variant. + See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm + + Parameters + ---------- + G : NetworkX graph + + source: list + List of source nodes. 
The shortest path from any of the source + nodes will be found if multiple sources are provided. + + weight : function + The weight of an edge is the value returned by the function. The + function must accept exactly three positional arguments: the two + endpoints of an edge and the dictionary of edge attributes for + that edge. The function must return a number. + + pred: dict of lists + dict to store a list of predecessors keyed by that node + + dist: dict, optional (default=None) + dict to store distance from source to the keyed node + If None, returned dist dict contents default to 0 for every node in the + source list + + heuristic : bool + Determines whether to use a heuristic to early detect negative + cycles at a hopefully negligible cost. + + Returns + ------- + node or None + Return a node `v` where processing discovered a negative cycle. + If no negative cycle found, return None. + + Raises + ------ + NodeNotFound + If any of `source` is not in `G`. + """ + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Source {s} not in G") + + if pred is None: + pred = {v: [] for v in sources} + + if dist is None: + dist = {v: 0 for v in sources} + + # Heuristic Storage setup. Note: use None because nodes cannot be None + nonexistent_edge = (None, None) + pred_edge = {v: None for v in sources} + recent_update = {v: nonexistent_edge for v in sources} + + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + inf = float("inf") + n = len(G) + + count = {} + q = deque(sources) + in_q = set(sources) + while q: + u = q.popleft() + in_q.remove(u) + + # Skip relaxations if any of the predecessors of u is in the queue. + if all(pred_u not in in_q for pred_u in pred[u]): + dist_u = dist[u] + for v, e in G_succ[u].items(): + dist_v = dist_u + weight(u, v, e) + + if dist_v < dist.get(v, inf): + # In this conditional branch we are updating the path with v. + # If it happens that some earlier update also added node v + # that implies the existence of a negative cycle since + # after the update node v would lie on the update path twice. + # The update path is stored up to one of the source nodes, + # therefore u is always in the dict recent_update + if heuristic: + if v in recent_update[u]: + # Negative cycle found! + pred[v].append(u) + return v + + # Transfer the recent update info from u to v if the + # same source node is the head of the update path. + # If the source node is responsible for the cost update, + # then clear the history and use it instead. + if v in pred_edge and pred_edge[v] == u: + recent_update[v] = recent_update[u] + else: + recent_update[v] = (u, v) + + if v not in in_q: + q.append(v) + in_q.add(v) + count_v = count.get(v, 0) + 1 + if count_v == n: + # Negative cycle found! + return v + + count[v] = count_v + dist[v] = dist_v + pred[v] = [u] + pred_edge[v] = u + + elif dist.get(v) is not None and dist_v == dist.get(v): + pred[v].append(u) + + # successfully found shortest_path. No negative cycles found. + return None + + +def bellman_ford_path(G, source, target, weight="weight"): + """Returns the shortest path from source to target in a weighted graph G. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). 
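A compact, illustration-only sketch of the SPFA idea implemented by _inner_bellman_ford above: relax edges out of a work queue and treat a node that gets enqueued as many times as there are nodes as evidence of a negative cycle (the path-based heuristic and predecessor bookkeeping of the real code are deliberately omitted; the helper name is invented for the example):

from collections import deque

def spfa_sketch(adj, source):
    """adj maps u -> {v: weight}; returns distances, or None if a negative cycle is reachable."""
    inf = float("inf")
    dist = {source: 0}
    q, in_q, enqueued = deque([source]), {source}, {}
    while q:
        u = q.popleft()
        in_q.discard(u)
        for v, w in adj[u].items():
            if dist[u] + w < dist.get(v, inf):
                dist[v] = dist[u] + w
                if v not in in_q:
                    enqueued[v] = enqueued.get(v, 0) + 1
                    if enqueued[v] == len(adj):  # re-queued too often: negative cycle
                        return None
                    q.append(v)
                    in_q.add(v)
    return dist

adj = {"s": {"a": 4, "b": 1}, "b": {"a": -2}, "a": {"t": 1}, "t": {}}
print(spfa_sketch(adj, "s"))  # {'s': 0, 'a': -1, 'b': 1, 't': 0}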
If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + path : list + List of nodes in a shortest path. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.bellman_ford_path(G, 0, 4) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + dijkstra_path, bellman_ford_path_length + """ + length, path = single_source_bellman_ford(G, source, target=target, weight=weight) + return path + + +def bellman_ford_path_length(G, source, target, weight="weight"): + """Returns the shortest path length from source to target + in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length : number + Shortest path length. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.bellman_ford_path_length(G, 0, 4) + 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + dijkstra_path_length, bellman_ford_path + """ + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} not found in graph") + return 0 + + weight = _weight_function(G, weight) + + length = _bellman_ford(G, [source], weight, target=target) + + try: + return length[target] + except KeyError as err: + raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from err + + +def single_source_bellman_ford_path(G, source, weight="weight"): + """Compute shortest path between source and all other reachable + nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. 
The function must + return a number. + + Returns + ------- + paths : dictionary + Dictionary of shortest path lengths keyed by target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.single_source_bellman_ford_path(G, 0) + >>> path[4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + single_source_dijkstra, single_source_bellman_ford + + """ + (length, path) = single_source_bellman_ford(G, source, weight=weight) + return path + + +def single_source_bellman_ford_path_length(G, source, weight="weight"): + """Compute the shortest path length between source and all other + reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length : iterator + (target, shortest path length) iterator + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.single_source_bellman_ford_path_length(G, 0)) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + single_source_dijkstra, single_source_bellman_ford + + """ + weight = _weight_function(G, weight) + return _bellman_ford(G, [source], weight) + + +def single_source_bellman_ford(G, source, target=None, weight="weight"): + """Compute shortest paths and lengths in a weighted graph G. + + Uses Bellman-Ford algorithm for shortest paths. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. 
+ If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_bellman_ford(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + >>> path[4] + [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_bellman_ford(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + single_source_dijkstra + single_source_bellman_ford_path + single_source_bellman_ford_path_length + """ + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + return (0, [source]) + + weight = _weight_function(G, weight) + + paths = {source: [source]} # dictionary of paths + dist = _bellman_ford(G, [source], weight, paths=paths, target=target) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + msg = f"Node {target} not reachable from {source}" + raise nx.NetworkXNoPath(msg) from err + + +def all_pairs_bellman_ford_path_length(G, weight="weight"): + """Compute shortest path lengths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_bellman_ford_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionary returned only has keys for reachable node pairs. + """ + length = single_source_bellman_ford_path_length + for n in G: + yield (n, dict(length(G, n, weight=weight))) + + +def all_pairs_bellman_ford_path(G, weight="weight"): + """Compute shortest paths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_bellman_ford_path(G)) + >>> path[0][4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + floyd_warshall, all_pairs_dijkstra_path + + """ + path = single_source_bellman_ford_path + # TODO This can be trivially parallelized. + for n in G: + yield (n, path(G, n, weight=weight)) + + +def goldberg_radzik(G, source, weight="weight"): + """Compute shortest path lengths and predecessors on shortest paths + in weighted graphs. + + The algorithm has a running time of $O(mn)$ where $n$ is the number of + nodes and $m$ is the number of edges. It is slower than Dijkstra but + can handle negative edge weights. + + Parameters + ---------- + G : NetworkX graph + The algorithm works for all types of graphs, including directed + graphs and multigraphs. + + source: node label + Starting node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + pred, dist : dictionaries + Returns two dictionaries keyed by node to predecessor in the + path and to the distance from the source respectively. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> pred, dist = nx.goldberg_radzik(G, 0) + >>> sorted(pred.items()) + [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> G = nx.cycle_graph(5, create_using=nx.DiGraph()) + >>> G[1][2]["weight"] = -7 + >>> nx.goldberg_radzik(G, 0) + Traceback (most recent call last): + ... + networkx.exception.NetworkXUnbounded: Negative cycle detected. + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionaries returned only have keys for nodes reachable from + the source. + + In the case where the (di)graph is not connected, if a component + not containing the source contains a negative (di)cycle, it + will not be detected. 
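As a quick, hedged comparison (toy digraph assumed): goldberg_radzik reports a single predecessor per node (None for the source), whereas bellman_ford_predecessor_and_distance reports a list of predecessors, while the distances agree:

import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([(0, 1, 2), (0, 2, 5), (1, 2, -1)])

pred_gr, dist_gr = nx.goldberg_radzik(G, 0)
pred_bf, dist_bf = nx.bellman_ford_predecessor_and_distance(G, 0)

print(dist_gr == dist_bf)      # True -- both give {0: 0, 1: 2, 2: 1}
print(pred_gr[2], pred_bf[2])  # 1 [1]
print(pred_gr[0], pred_bf[0])  # None []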
+ + """ + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + weight = _weight_function(G, weight) + if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)): + raise nx.NetworkXUnbounded("Negative cycle detected.") + + if len(G) == 1: + return {source: None}, {source: 0} + + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + + inf = float("inf") + d = {u: inf for u in G} + d[source] = 0 + pred = {source: None} + + def topo_sort(relabeled): + """Topologically sort nodes relabeled in the previous round and detect + negative cycles. + """ + # List of nodes to scan in this round. Denoted by A in Goldberg and + # Radzik's paper. + to_scan = [] + # In the DFS in the loop below, neg_count records for each node the + # number of edges of negative reduced costs on the path from a DFS root + # to the node in the DFS forest. The reduced cost of an edge (u, v) is + # defined as d[u] + weight[u][v] - d[v]. + # + # neg_count also doubles as the DFS visit marker array. + neg_count = {} + for u in relabeled: + # Skip visited nodes. + if u in neg_count: + continue + d_u = d[u] + # Skip nodes without out-edges of negative reduced costs. + if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()): + continue + # Nonrecursive DFS that inserts nodes reachable from u via edges of + # nonpositive reduced costs into to_scan in (reverse) topological + # order. + stack = [(u, iter(G_succ[u].items()))] + in_stack = {u} + neg_count[u] = 0 + while stack: + u, it = stack[-1] + try: + v, e = next(it) + except StopIteration: + to_scan.append(u) + stack.pop() + in_stack.remove(u) + continue + t = d[u] + weight(u, v, e) + d_v = d[v] + if t <= d_v: + is_neg = t < d_v + d[v] = t + pred[v] = u + if v not in neg_count: + neg_count[v] = neg_count[u] + int(is_neg) + stack.append((v, iter(G_succ[v].items()))) + in_stack.add(v) + elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]: + # (u, v) is a back edge, and the cycle formed by the + # path v to u and (u, v) contains at least one edge of + # negative reduced cost. The cycle must be of negative + # cost. + raise nx.NetworkXUnbounded("Negative cycle detected.") + to_scan.reverse() + return to_scan + + def relax(to_scan): + """Relax out-edges of relabeled nodes.""" + relabeled = set() + # Scan nodes in to_scan in topological order and relax incident + # out-edges. Add the relabled nodes to labeled. + for u in to_scan: + d_u = d[u] + for v, e in G_succ[u].items(): + w_e = weight(u, v, e) + if d_u + w_e < d[v]: + d[v] = d_u + w_e + pred[v] = u + relabeled.add(v) + return relabeled + + # Set of nodes relabled in the last round of scan operations. Denoted by B + # in Goldberg and Radzik's paper. + relabeled = {source} + + while relabeled: + to_scan = topo_sort(relabeled) + relabeled = relax(to_scan) + + d = {u: d[u] for u in pred} + return pred, d + + +def negative_edge_cycle(G, weight="weight", heuristic=True): + """Returns True if there exists a negative edge cycle anywhere in G. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + heuristic : bool + Determines whether to use a heuristic to early detect negative + cycles at a negligible cost. In case of graphs with a negative cycle, + the performance of detection increases by at least an order of magnitude. + + Returns + ------- + negative_cycle : bool + True if a negative edge cycle exists, otherwise False. + + Examples + -------- + >>> G = nx.cycle_graph(5, create_using=nx.DiGraph()) + >>> print(nx.negative_edge_cycle(G)) + False + >>> G[1][2]["weight"] = -7 + >>> print(nx.negative_edge_cycle(G)) + True + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + This algorithm uses bellman_ford_predecessor_and_distance() but finds + negative cycles on any component by first adding a new node connected to + every node, and starting bellman_ford_predecessor_and_distance on that + node. It then removes that extra node. + """ + # find unused node to use temporarily + newnode = -1 + while newnode in G: + newnode -= 1 + # connect it to all nodes + G.add_edges_from([(newnode, n) for n in G]) + + try: + bellman_ford_predecessor_and_distance( + G, newnode, weight=weight, heuristic=heuristic + ) + except nx.NetworkXUnbounded: + return True + finally: + G.remove_node(newnode) + return False + + +def find_negative_cycle(G, source, weight="weight"): + """Returns a cycle with negative total weight if it exists. + + Bellman-Ford is used to find shortest_paths. That algorithm + stops if there exists a negative cycle. This algorithm + picks up from there and returns the found negative cycle. + + The cycle consists of a list of nodes in the cycle order. The last + node equals the first to make it a cycle. + You can look up the edge weights in the original graph. In the case + of multigraphs the relevant edge is the minimal weight edge between + the nodes in the 2-tuple. + + If the graph has no negative cycle, a NetworkXError is raised. + + Parameters + ---------- + G : NetworkX graph + + source: node label + The search for the negative cycle will start from this node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)]) + >>> nx.find_negative_cycle(G, 0) + [4, 0, 1, 4] + + Returns + ------- + cycle : list + A list of nodes in the order of the cycle found. The last node + equals the first to indicate a cycle. + + Raises + ------ + NetworkXError + If no negative cycle is found. + """ + weight = _weight_function(G, weight) + pred = {source: []} + + v = _inner_bellman_ford(G, [source], weight, pred=pred) + if v is None: + raise nx.NetworkXError("No negative cycles detected.") + + # negative cycle detected... 
find it + neg_cycle = [] + stack = [(v, list(pred[v]))] + seen = {v} + while stack: + node, preds = stack[-1] + if v in preds: + # found the cycle + neg_cycle.extend([node, v]) + neg_cycle = list(reversed(neg_cycle)) + return neg_cycle + + if preds: + nbr = preds.pop() + if nbr not in seen: + stack.append((nbr, list(pred[nbr]))) + neg_cycle.append(node) + seen.add(nbr) + else: + stack.pop() + if neg_cycle: + neg_cycle.pop() + else: + if v in G[v] and weight(G, v, v) < 0: + return [v, v] + # should not reach here + raise nx.NetworkXError("Negative cycle is detected but not found") + # should not get here... + msg = "negative cycle detected but not identified" + raise nx.NetworkXUnbounded(msg) + + +def bidirectional_dijkstra(G, source, target, weight="weight"): + r"""Dijkstra's algorithm for shortest paths using bidirectional search. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length, path : number and list + length is the distance from source to target. + path is a list of nodes on a path from source to target. + + Raises + ------ + NodeNotFound + If either `source` or `target` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.bidirectional_dijkstra(G, 0, 4) + >>> print(length) + 4 + >>> print(path) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + In practice bidirectional Dijkstra is much more than twice as fast as + ordinary Dijkstra. + + Ordinary Dijkstra expands nodes in a sphere-like manner from the + source. The radius of this sphere will eventually be the length + of the shortest path. Bidirectional Dijkstra will expand nodes + from both the source and the target, making two spheres of half + this radius. Volume of the first sphere is `\pi*r*r` while the + others are `2*\pi*r/2*r/2`, making up half the volume. + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). 
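A minimal usage sketch (grid graph assumed) showing that bidirectional_dijkstra returns the same length as a one-directional query and raises NetworkXNoPath when the endpoints are disconnected:

import networkx as nx

G = nx.grid_2d_graph(4, 4)  # unweighted 4x4 grid, so every edge counts as 1

length, path = nx.bidirectional_dijkstra(G, (0, 0), (3, 3))
print(length)                                                # 6
print(length == nx.dijkstra_path_length(G, (0, 0), (3, 3)))  # True

G.add_node("island")  # in the graph, but not connected to anything
try:
    nx.bidirectional_dijkstra(G, (0, 0), "island")
except nx.NetworkXNoPath as err:
    print(err)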
+ + See Also + -------- + shortest_path + shortest_path_length + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + if source == target: + return (0, [source]) + + weight = _weight_function(G, weight) + push = heappush + pop = heappop + # Init: [Forward, Backward] + dists = [{}, {}] # dictionary of final distances + paths = [{source: [source]}, {target: [target]}] # dictionary of paths + fringe = [[], []] # heap of (distance, node) for choosing node to expand + seen = [{source: 0}, {target: 0}] # dict of distances to seen nodes + c = count() + # initialize fringe heap + push(fringe[0], (0, next(c), source)) + push(fringe[1], (0, next(c), target)) + # neighs for extracting correct neighbor information + if G.is_directed(): + neighs = [G._succ, G._pred] + else: + neighs = [G._adj, G._adj] + # variables to hold shortest discovered path + # finaldist = 1e30000 + finalpath = [] + dir = 1 + while fringe[0] and fringe[1]: + # choose direction + # dir == 0 is forward direction and dir == 1 is back + dir = 1 - dir + # extract closest to expand + (dist, _, v) = pop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + return (finaldist, finalpath) + + for w, d in neighs[dir][v].items(): + # weight(v, w, d) for forward and weight(w, v, d) for back direction + cost = weight(v, w, d) if dir == 0 else weight(w, v, d) + if cost is None: + continue + vwLength = dists[dir][v] + cost + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + push(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def johnson(G, weight="weight"): + r"""Uses Johnson's Algorithm to compute shortest paths. + + Johnson's Algorithm finds a shortest path between each pair of + nodes in a weighted graph even if negative weights are present. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Raises + ------ + NetworkXError + If given graph is not weighted. + + Examples + -------- + >>> graph = nx.DiGraph() + >>> graph.add_weighted_edges_from( + ... 
[("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ... ) + >>> paths = nx.johnson(graph, weight="weight") + >>> paths["0"]["2"] + ['0', '1', '2'] + + Notes + ----- + Johnson's algorithm is suitable even for graphs with negative weights. It + works by using the Bellman–Ford algorithm to compute a transformation of + the input graph that removes all negative weights, allowing Dijkstra's + algorithm to be used on the transformed graph. + + The time complexity of this algorithm is $O(n^2 \log n + n m)$, + where $n$ is the number of nodes and $m$ the number of edges in the + graph. For dense graphs, this may be faster than the Floyd–Warshall + algorithm. + + See Also + -------- + floyd_warshall_predecessor_and_distance + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + all_pairs_dijkstra_path + bellman_ford_predecessor_and_distance + all_pairs_bellman_ford_path + all_pairs_bellman_ford_path_length + + """ + if not nx.is_weighted(G, weight=weight): + raise nx.NetworkXError("Graph is not weighted.") + + dist = {v: 0 for v in G} + pred = {v: [] for v in G} + weight = _weight_function(G, weight) + + # Calculate distance of shortest paths + dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist) + + # Update the weight function to take into account the Bellman--Ford + # relaxation distances. + def new_weight(u, v, d): + return weight(u, v, d) + dist_bellman[u] - dist_bellman[v] + + def dist_path(v): + paths = {v: [v]} + _dijkstra(G, v, new_weight, paths=paths) + return paths + + return {v: dist_path(v) for v in G} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/similarity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/similarity.py new file mode 100644 index 0000000..fe6e0f2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/similarity.py @@ -0,0 +1,1731 @@ +""" Functions measuring similarity using graph edit distance. + +The graph edit distance is the number of edge/node changes needed +to make two graphs isomorphic. + +The default algorithm/implementation is sub-optimal for some graphs. +The problem of finding the exact Graph Edit Distance (GED) is NP-hard +so it is often slow. If the simple interface `graph_edit_distance` +takes too long for your graph, try `optimize_graph_edit_distance` +and/or `optimize_edit_paths`. + +At the same time, I encourage capable people to investigate +alternative GED algorithms, in order to improve the choices available. +""" + +import math +import time +import warnings +from functools import reduce +from itertools import product +from operator import mul + +import networkx as nx + +__all__ = [ + "graph_edit_distance", + "optimal_edit_paths", + "optimize_graph_edit_distance", + "optimize_edit_paths", + "simrank_similarity", + "simrank_similarity_numpy", + "panther_similarity", + "generate_random_paths", +] + + +def debug_print(*args, **kwargs): + print(*args, **kwargs) + + +def graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + roots=None, + upper_bound=None, + timeout=None, +): + """Returns GED (graph edit distance) between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. 
+ + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + upper_bound : numeric + Maximum edit distance to consider. Return None if no edit + distance under or equal to upper_bound exists. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. 
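A short, hedged sketch of how the node_match callable described above changes the result; the `color` attribute is just an assumption for the example:

import networkx as nx

G1 = nx.Graph()
G1.add_node(0, color="red")
G1.add_node(1, color="blue")
G1.add_edge(0, 1)

G2 = nx.Graph()
G2.add_node(0, color="red")
G2.add_node(1, color="red")
G2.add_edge(0, 1)

print(nx.graph_edit_distance(G1, G2))  # 0.0 -- attributes ignored by default

same_color = lambda a, b: a["color"] == b["color"]
print(nx.graph_edit_distance(G1, G2, node_match=same_color))  # 1.0 -- one node substitution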
+ + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> nx.graph_edit_distance(G1, G2) + 7.0 + + >>> G1 = nx.star_graph(5) + >>> G2 = nx.star_graph(5) + >>> nx.graph_edit_distance(G1, G2, roots=(0, 0)) + 0.0 + >>> nx.graph_edit_distance(G1, G2, roots=(1, 0)) + 8.0 + + See Also + -------- + optimal_edit_paths, optimize_graph_edit_distance, + + is_isomorphic: test for graph edit distance of 0 + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + bestcost = None + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + roots, + timeout, + ): + # assert bestcost is None or cost < bestcost + bestcost = cost + return bestcost + + +def optimal_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns all minimum-cost edit paths transforming G1 to G2. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. 
If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + edit_paths : list of tuples (node_edit_path, edge_edit_path) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + + cost : numeric + Optimal edit path cost (graph edit distance). + + Examples + -------- + >>> G1 = nx.cycle_graph(4) + >>> G2 = nx.wheel_graph(5) + >>> paths, cost = nx.optimal_edit_paths(G1, G2) + >>> len(paths) + 40 + >>> cost + 5.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + paths = list() + bestcost = None + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + False, + ): + # assert bestcost is None or cost <= bestcost + if bestcost is not None and cost < bestcost: + paths = list() + paths.append((vertex_path, edge_path)) + bestcost = cost + return paths, bestcost + + +def optimize_graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns consecutive approximations of GED (graph edit distance) + between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. 
+ + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + Generator of consecutive approximations of graph edit distance. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> for v in nx.optimize_graph_edit_distance(G1, G2): + ... minv = v + >>> minv + 7.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. 
+ https://hal.archives-ouvertes.fr/hal-01168816 + """ + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + ): + yield cost + + +def optimize_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, + strictly_decreasing=True, + roots=None, + timeout=None, +): + """GED (graph edit distance) calculation: advanced interface. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Graph edit distance is defined as minimum cost of edit path. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). 
+ + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + strictly_decreasing : bool + If True, return consecutive approximations of strictly + decreasing cost. Otherwise, return all edit paths of cost + less than or equal to the previous minimum cost. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. + + Returns + ------- + Generator of tuples (node_edit_path, edge_edit_path, cost) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + cost : numeric + + See Also + -------- + graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + # TODO: support DiGraph + + import numpy as np + import scipy as sp + import scipy.optimize # call as sp.optimize + + class CostMatrix: + def __init__(self, C, lsa_row_ind, lsa_col_ind, ls): + # assert C.shape[0] == len(lsa_row_ind) + # assert C.shape[1] == len(lsa_col_ind) + # assert len(lsa_row_ind) == len(lsa_col_ind) + # assert set(lsa_row_ind) == set(range(len(lsa_row_ind))) + # assert set(lsa_col_ind) == set(range(len(lsa_col_ind))) + # assert ls == C[lsa_row_ind, lsa_col_ind].sum() + self.C = C + self.lsa_row_ind = lsa_row_ind + self.lsa_col_ind = lsa_col_ind + self.ls = ls + + def make_CostMatrix(C, m, n): + # assert(C.shape == (m + n, m + n)) + lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C) + + # Fixup dummy assignments: + # each substitution i<->j should have dummy assignment m+j<->n+i + # NOTE: fast reduce of Cv relies on it + # assert len(lsa_row_ind) == len(lsa_col_ind) + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + subst_ind = list(k for k, i, j in indexes if i < m and j < n) + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + dummy_ind = list(k for k, i, j in indexes if i >= m and j >= n) + # assert len(subst_ind) == len(dummy_ind) + lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m + lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n + + return CostMatrix( + C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum() + ) + + def extract_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k in i or k - m in j for k in range(m + n)] + col_ind = [k in j or k - n in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k not in i and k - m not in j for k in range(m + n)] + col_ind = [k not in j and k - n not in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_ind(ind, i): + # assert set(ind) == set(range(len(ind))) + rind = ind[[k not in i for k in ind]] + for k in set(i): + rind[rind >= k] -= 1 + return rind + + def match_edges(u, v, pending_g, pending_h, Ce, 
matched_uv=[]): + """ + Parameters: + u, v: matched vertices, u=None or v=None for + deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_uv: partial vertex edit path + list of tuples (u, v) of previously matched vertex + mappings u<->v, u=None or v=None for + deletion/insertion + + Returns: + list of (i, j): indices of edge mappings g<->h + localCe: local CostMatrix of edge mappings + (basically submatrix of Ce at cross of rows i, cols j) + """ + M = len(pending_g) + N = len(pending_h) + # assert Ce.C.shape == (M + N, M + N) + + # only attempt to match edges after one node match has been made + # this will stop self-edges on the first node being automatically deleted + # even when a substitution is the better option + if matched_uv: + g_ind = [ + i + for i in range(M) + if pending_g[i][:2] == (u, u) + or any( + pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv + ) + ] + h_ind = [ + j + for j in range(N) + if pending_h[j][:2] == (v, v) + or any( + pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv + ) + ] + else: + g_ind = [] + h_ind = [] + + m = len(g_ind) + n = len(h_ind) + + if m or n: + C = extract_C(Ce.C, g_ind, h_ind, M, N) + # assert C.shape == (m + n, m + n) + + # Forbid structurally invalid matches + # NOTE: inf remembered from Ce construction + for k, i in zip(range(m), g_ind): + g = pending_g[i][:2] + for l, j in zip(range(n), h_ind): + h = pending_h[j][:2] + if nx.is_directed(G1) or nx.is_directed(G2): + if any( + g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q) + for p, q in matched_uv + ): + continue + else: + if any( + g in ((p, u), (u, p)) and h in ((q, v), (v, q)) + for p, q in matched_uv + ): + continue + if g == (u, u) or any(g == (p, p) for p, q in matched_uv): + continue + if h == (v, v) or any(h == (q, q) for p, q in matched_uv): + continue + C[k, l] = inf + + localCe = make_CostMatrix(C, m, n) + ij = list( + ( + g_ind[k] if k < m else M + h_ind[l], + h_ind[l] if l < n else N + g_ind[k], + ) + for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind) + if k < m or l < n + ) + + else: + ij = [] + localCe = CostMatrix(np.empty((0, 0)), [], [], 0) + + return ij, localCe + + def reduce_Ce(Ce, ij, m, n): + if len(ij): + i, j = zip(*ij) + m_i = m - sum(1 for t in i if t < m) + n_j = n - sum(1 for t in j if t < n) + return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j) + else: + return Ce + + def get_edit_ops( + matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of + (i, j): indices of vertex mapping u<->v + Cv_ij: reduced CostMatrix of pending vertex mappings + (basically Cv with row i, col j removed) + list of (x, y): indices of edge mappings g<->h + Ce_xy: reduced CostMatrix of pending edge mappings + (basically Ce with rows x, cols y removed) + cost: total cost of edit operation + NOTE: most promising ops first + """ + m = len(pending_u) + n = len(pending_v) + # assert Cv.C.shape == (m + n, m + n) + + # 1) a vertex mapping from optimal linear sum assignment + i, j = min( + (k, l) for k, l in zip(Cv.lsa_row_ind, 
Cv.lsa_col_ind) if k < m or l < n + ) + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls): + pass + else: + # get reduced Cv efficiently + Cv_ij = CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + reduce_ind(Cv.lsa_row_ind, (i, m + j)), + reduce_ind(Cv.lsa_col_ind, (j, n + i)), + Cv.ls - Cv.C[i, j], + ) + yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls + + # 2) other candidates, sorted by lower-bound cost estimate + other = list() + fixed_i, fixed_j = i, j + if m <= n: + candidates = ( + (t, fixed_j) + for t in range(m + n) + if t != fixed_i and (t < m or t == m + fixed_j) + ) + else: + candidates = ( + (fixed_i, t) + for t in range(m + n) + if t != fixed_j and (t < n or t == n + fixed_i) + ) + for i, j in candidates: + if prune(matched_cost + Cv.C[i, j] + Ce.ls): + continue + Cv_ij = make_CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + m - 1 if i < m else m, + n - 1 if j < n else n, + ) + # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls): + continue + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls): + continue + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls): + continue + other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls)) + + yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls) + + def get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv, + matched_gh, + pending_g, + pending_h, + Ce, + matched_cost, + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + matched_gh: partial edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of (vertex_path, edge_path, cost) + vertex_path: complete vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + edge_path: complete edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + cost: total cost of edit path + NOTE: path costs are non-increasing + """ + # debug_print('matched-uv:', matched_uv) + # debug_print('matched-gh:', matched_gh) + # debug_print('matched-cost:', matched_cost) + # debug_print('pending-u:', pending_u) + # debug_print('pending-v:', pending_v) + # debug_print(Cv.C) + # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u)) + # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v)) + # debug_print('pending-g:', pending_g) + # debug_print('pending-h:', pending_h) + # debug_print(Ce.C) + # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g)) + # 
assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h)) + # debug_print() + + if prune(matched_cost + Cv.ls + Ce.ls): + return + + if not max(len(pending_u), len(pending_v)): + # assert not len(pending_g) + # assert not len(pending_h) + # path completed! + # assert matched_cost <= maxcost.value + maxcost.value = min(maxcost.value, matched_cost) + yield matched_uv, matched_gh, matched_cost + + else: + edit_ops = get_edit_ops( + matched_uv, + pending_u, + pending_v, + Cv, + pending_g, + pending_h, + Ce, + matched_cost, + ) + for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops: + i, j = ij + # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost + if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls): + continue + + # dive deeper + u = pending_u.pop(i) if i < len(pending_u) else None + v = pending_v.pop(j) if j < len(pending_v) else None + matched_uv.append((u, v)) + for x, y in xy: + len_g = len(pending_g) + len_h = len(pending_h) + matched_gh.append( + ( + pending_g[x] if x < len_g else None, + pending_h[y] if y < len_h else None, + ) + ) + sortedx = list(sorted(x for x, y in xy)) + sortedy = list(sorted(y for x, y in xy)) + G = list( + (pending_g.pop(x) if x < len(pending_g) else None) + for x in reversed(sortedx) + ) + H = list( + (pending_h.pop(y) if y < len(pending_h) else None) + for y in reversed(sortedy) + ) + + yield from get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv_ij, + matched_gh, + pending_g, + pending_h, + Ce_xy, + matched_cost + edit_cost, + ) + + # backtrack + if u is not None: + pending_u.insert(i, u) + if v is not None: + pending_v.insert(j, v) + matched_uv.pop() + for x, g in zip(sortedx, reversed(G)): + if g is not None: + pending_g.insert(x, g) + for y, h in zip(sortedy, reversed(H)): + if h is not None: + pending_h.insert(y, h) + for t in xy: + matched_gh.pop() + + # Initialization + + pending_u = list(G1.nodes) + pending_v = list(G2.nodes) + + initial_cost = 0 + if roots: + root_u, root_v = roots + if root_u not in pending_u or root_v not in pending_v: + raise nx.NodeNotFound("Root node not in graph.") + + # remove roots from pending + pending_u.remove(root_u) + pending_v.remove(root_v) + + # cost matrix of vertex mappings + m = len(pending_u) + n = len(pending_v) + C = np.zeros((m + n, m + n)) + if node_subst_cost: + C[0:m, 0:n] = np.array( + [ + node_subst_cost(G1.nodes[u], G2.nodes[v]) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v]) + elif node_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(node_match(G1.nodes[u], G2.nodes[v])) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v]) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if node_del_cost: + del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u] + else: + del_costs = [1] * len(pending_u) + # assert not m or min(del_costs) >= 0 + if node_ins_cost: + ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v] + else: + ins_costs = [1] * len(pending_v) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Cv = make_CostMatrix(C, 
m, n) + # debug_print(f"Cv: {m} x {n}") + # debug_print(Cv.C) + + pending_g = list(G1.edges) + pending_h = list(G2.edges) + + # cost matrix of edge mappings + m = len(pending_g) + n = len(pending_h) + C = np.zeros((m + n, m + n)) + if edge_subst_cost: + C[0:m, 0:n] = np.array( + [ + edge_subst_cost(G1.edges[g], G2.edges[h]) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + elif edge_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(edge_match(G1.edges[g], G2.edges[h])) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if edge_del_cost: + del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g] + else: + del_costs = [1] * len(pending_g) + # assert not m or min(del_costs) >= 0 + if edge_ins_cost: + ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h] + else: + ins_costs = [1] * len(pending_h) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Ce = make_CostMatrix(C, m, n) + # debug_print(f'Ce: {m} x {n}') + # debug_print(Ce.C) + # debug_print() + + class MaxCost: + def __init__(self): + # initial upper-bound estimate + # NOTE: should work for empty graph + self.value = Cv.C.sum() + Ce.C.sum() + 1 + + maxcost = MaxCost() + + if timeout is not None: + if timeout <= 0: + raise nx.NetworkXError("Timeout value must be greater than 0") + start = time.perf_counter() + + def prune(cost): + if timeout is not None: + if time.perf_counter() - start > timeout: + return True + if upper_bound is not None: + if cost > upper_bound: + return True + if cost > maxcost.value: + return True + elif strictly_decreasing and cost >= maxcost.value: + return True + + # Now go! + + done_uv = [] if roots is None else [roots] + + for vertex_path, edge_path, cost in get_edit_paths( + done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost + ): + # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None) + # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None) + # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None) + # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None) + # print(vertex_path, edge_path, cost, file = sys.stderr) + # assert cost == maxcost.value + yield list(vertex_path), list(edge_path), cost + + +def simrank_similarity( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + SimRank is a similarity metric that says "two objects are considered + to be similar if they are referenced by similar objects." [1]_. + + The pseudo-code definition from the paper is:: + + def simrank(G, u, v): + in_neighbors_u = G.predecessors(u) + in_neighbors_v = G.predecessors(v) + scale = C / (len(in_neighbors_u) * len(in_neighbors_v)) + return scale * sum(simrank(G, w, x) + for w, x in product(in_neighbors_u, + in_neighbors_v)) + + where ``G`` is the graph, ``u`` is the source, ``v`` is the target, + and ``C`` is a float decay or importance factor between 0 and 1. + + The SimRank algorithm for determining node similarity is defined in + [2]_. 
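(Added worked note, not in the upstream docstring.) For the two-node cycle used in the Examples further down, each node has exactly one neighbor, so the definition above reduces to sim(0, 1) = C * sim(1, 0) and sim(1, 0) = C * sim(0, 1); with C < 1 the only solution is sim(0, 1) = sim(1, 0) = 0, which is why the off-diagonal entries in that example come out as 0.0.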
+ + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. If + ``target`` is specified but ``source`` is not, this argument is + ignored. + + importance_factor : float + The relative importance of indirect neighbors with respect to + direct neighbors. + + max_iterations : integer + Maximum number of iterations. + + tolerance : float + Error tolerance used to check convergence. When an iteration of + the algorithm finds that no similarity value changes more than + this amount, the algorithm halts. + + Returns + ------- + similarity : dictionary or float + If ``source`` and ``target`` are both ``None``, this returns a + dictionary of dictionaries, where keys are node pairs and value + are similarity of the pair of nodes. + + If ``source`` is not ``None`` but ``target`` is, this returns a + dictionary mapping node to the similarity of ``source`` and that + node. + + If neither ``source`` nor ``target`` is ``None``, this returns + the similarity value for the given pair of nodes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.simrank_similarity(G) + {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}} + >>> nx.simrank_similarity(G, source=0) + {0: 1.0, 1: 0.0} + >>> nx.simrank_similarity(G, source=0, target=0) + 1.0 + + The result of this function can be converted to a numpy array + representing the SimRank matrix by using the node order of the + graph to determine which row and column represent each node. + Other ordering of nodes is also possible. + + >>> import numpy as np + >>> sim = nx.simrank_similarity(G) + >>> np.array([[sim[u][v] for v in G] for u in G]) + array([[1., 0.], + [0., 1.]]) + >>> sim_1d = nx.simrank_similarity(G, source=0) + >>> np.array([sim[0][v] for v in G]) + array([1., 0.]) + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/SimRank + .. [2] G. Jeh and J. Widom. + "SimRank: a measure of structural-context similarity", + In KDD'02: Proceedings of the Eighth ACM SIGKDD + International Conference on Knowledge Discovery and Data Mining, + pp. 538--543. ACM Press, 2002. + """ + import numpy as np + + nodelist = list(G) + s_indx = None if source is None else nodelist.index(source) + t_indx = None if target is None else nodelist.index(target) + + x = _simrank_similarity_numpy( + G, s_indx, t_indx, importance_factor, max_iterations, tolerance + ) + + if isinstance(x, np.ndarray): + if x.ndim == 1: + return {node: val for node, val in zip(G, x)} + else: # x.ndim == 2: + return {u: dict(zip(G, row)) for u, row in zip(G, x)} + return x + + +def _simrank_similarity_python( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + This pure Python version is provided for pedagogical purposes. 
+ + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.similarity._simrank_similarity_python(G) + {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}} + >>> nx.similarity._simrank_similarity_python(G, source=0) + {0: 1, 1: 0.0} + >>> nx.similarity._simrank_similarity_python(G, source=0, target=0) + 1 + """ + # build up our similarity adjacency dictionary output + newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G} + + # These functions compute the update to the similarity value of the nodes + # `u` and `v` with respect to the previous similarity values. + def avg_sim(s): + return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0 + + Gadj = G.pred if G.is_directed() else G.adj + + def sim(u, v): + return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v]))) + + for its in range(max_iterations): + oldsim = newsim + newsim = {u: {v: sim(u, v) if u is not v else 1 for v in G} for u in G} + is_close = all( + all( + abs(newsim[u][v] - old) <= tolerance * (1 + abs(old)) + for v, old in nbrs.items() + ) + for u, nbrs in oldsim.items() + ) + if is_close: + break + + if its + 1 == max_iterations: + raise nx.ExceededMaxIterations( + f"simrank did not converge after {max_iterations} iterations." + ) + + if source is not None and target is not None: + return newsim[source][target] + if source is not None: + return newsim[source] + return newsim + + +def _simrank_similarity_numpy( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``. + + The SimRank algorithm for determining node similarity is defined in + [1]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. If + ``target`` is specified but ``source`` is not, this argument is + ignored. + + importance_factor : float + The relative importance of indirect neighbors with respect to + direct neighbors. + + max_iterations : integer + Maximum number of iterations. + + tolerance : float + Error tolerance used to check convergence. When an iteration of + the algorithm finds that no similarity value changes more than + this amount, the algorithm halts. + + Returns + ------- + similarity : numpy array or float + If ``source`` and ``target`` are both ``None``, this returns a + 2D array containing SimRank scores of the nodes. + + If ``source`` is not ``None`` but ``target`` is, this returns an + 1D array containing SimRank scores of ``source`` and that + node. + + If neither ``source`` nor ``target`` is ``None``, this returns + the similarity value for the given pair of nodes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.similarity._simrank_similarity_numpy(G) + array([[1., 0.], + [0., 1.]]) + >>> nx.similarity._simrank_similarity_numpy(G, source=0) + array([1., 0.]) + >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0) + 1.0 + + References + ---------- + .. [1] G. Jeh and J. Widom. + "SimRank: a measure of structural-context similarity", + In KDD'02: Proceedings of the Eighth ACM SIGKDD + International Conference on Knowledge Discovery and Data Mining, + pp. 538--543. ACM Press, 2002. 
+ """ + # This algorithm follows roughly + # + # S = max{C * (A.T * S * A), I} + # + # where C is the importance factor, A is the column normalized + # adjacency matrix, and I is the identity matrix. + import numpy as np + + adjacency_matrix = nx.to_numpy_array(G) + + # column-normalize the ``adjacency_matrix`` + s = np.array(adjacency_matrix.sum(axis=0)) + s[s == 0] = 1 + adjacency_matrix /= s # adjacency_matrix.sum(axis=0) + + newsim = np.eye(len(G), dtype=np.float64) + for its in range(max_iterations): + prevsim = newsim.copy() + newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix) + np.fill_diagonal(newsim, 1.0) + + if np.allclose(prevsim, newsim, atol=tolerance): + break + + if its + 1 == max_iterations: + raise nx.ExceededMaxIterations( + f"simrank did not converge after {max_iterations} iterations." + ) + + if source is not None and target is not None: + return newsim[source, target] + if source is not None: + return newsim[source] + return newsim + + +def simrank_similarity_numpy( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=100, + tolerance=1e-4, +): + """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``. + + .. deprecated:: 2.6 + simrank_similarity_numpy is deprecated and will be removed in networkx 3.0. + Use simrank_similarity + + """ + warnings.warn( + ( + "networkx.simrank_similarity_numpy is deprecated and will be removed" + "in NetworkX 3.0, use networkx.simrank_similarity instead." + ), + DeprecationWarning, + stacklevel=2, + ) + return _simrank_similarity_numpy( + G, source, target, importance_factor, max_iterations, tolerance + ) + + +def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None): + r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``. + + Panther is a similarity metric that says "two objects are considered + to be similar if they frequently appear on the same paths." [1]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + source : node + Source node for which to find the top `k` similar other nodes + k : int (default = 5) + The number of most similar nodes to return + path_length : int (default = 5) + How long the randomly generated paths should be (``T`` in [1]_) + c : float (default = 0.5) + A universal positive constant used to scale the number + of sample random paths to generate. + delta : float (default = 0.1) + The probability that the similarity $S$ is not an epsilon-approximation to (R, phi), + where $R$ is the number of random paths and $\phi$ is the probability + that an element sampled from a set $A \subseteq D$, where $D$ is the domain. + eps : float or None (default = None) + The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore, + if no value is provided, the recommended computed value will be used. + + Returns + ------- + similarity : dictionary + Dictionary of nodes to similarity scores (as floats). Note: + the self-similarity (i.e., ``v``) will not be included in + the returned dictionary. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> sim = nx.panther_similarity(G, 0) + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. 
+ """ + import numpy as np + + num_nodes = G.number_of_nodes() + if num_nodes < k: + warnings.warn( + f"Number of nodes is {num_nodes}, but requested k is {k}. " + "Setting k to number of nodes." + ) + k = num_nodes + # According to [1], they empirically determined + # a good value for ``eps`` to be sqrt( 1 / |E| ) + if eps is None: + eps = np.sqrt(1.0 / G.number_of_edges()) + + inv_node_map = {name: index for index, name in enumerate(G.nodes)} + node_map = np.array(G) + + # Calculate the sample size ``R`` for how many paths + # to randomly generate + t_choose_2 = math.comb(path_length, 2) + sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta))) + index_map = {} + _ = list( + generate_random_paths( + G, sample_size, path_length=path_length, index_map=index_map + ) + ) + S = np.zeros(num_nodes) + + inv_sample_size = 1 / sample_size + + source_paths = set(index_map[source]) + + # Calculate the path similarities + # between ``source`` (v) and ``node`` (v_j) + # using our inverted index mapping of + # vertices to paths + for node, paths in index_map.items(): + # Only consider paths where both + # ``node`` and ``source`` are present + common_paths = source_paths.intersection(paths) + S[inv_node_map[node]] = len(common_paths) * inv_sample_size + + # Retrieve top ``k`` similar + # Note: the below performed anywhere from 4-10x faster + # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]`` + top_k_unsorted = np.argpartition(S, -k)[-k:] + top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1] + + # Add back the similarity scores + top_k_sorted_names = map(lambda n: node_map[n], top_k_sorted) + top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted])) + + # Remove the self-similarity + top_k_with_val.pop(source, None) + return top_k_with_val + + +def generate_random_paths(G, sample_size, path_length=5, index_map=None): + """Randomly generate `sample_size` paths of length `path_length`. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + sample_size : integer + The number of paths to generate. This is ``R`` in [1]_. + path_length : integer (default = 5) + The maximum size of the path to randomly generate. + This is ``T`` in [1]_. According to the paper, ``T >= 5`` is + recommended. + index_map : dictionary, optional + If provided, this will be populated with the inverted + index of nodes mapped to the set of generated random path + indices within ``paths``. + + Returns + ------- + paths : generator of lists + Generator of `sample_size` paths each with length `path_length`. + + Examples + -------- + Note that the return value is the list of paths: + + >>> G = nx.star_graph(3) + >>> random_path = nx.generate_random_paths(G, 2) + + By passing a dictionary into `index_map`, it will build an + inverted index mapping of nodes to the paths in which that node is present: + + >>> G = nx.star_graph(3) + >>> index_map = {} + >>> random_path = nx.generate_random_paths(G, 3, index_map=index_map) + >>> paths_containing_node_0 = [random_path[path_idx] for path_idx in index_map.get(0, [])] + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. 
+ """ + import numpy as np + + # Calculate transition probabilities between + # every pair of vertices according to Eq. (3) + adj_mat = nx.to_numpy_array(G) + inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1) + transition_probabilities = adj_mat * inv_row_sums + + node_map = np.array(G) + num_nodes = G.number_of_nodes() + + for path_index in range(sample_size): + # Sample current vertex v = v_i uniformly at random + node_index = np.random.randint(0, high=num_nodes) + node = node_map[node_index] + + # Add v into p_r and add p_r into the path set + # of v, i.e., P_v + path = [node] + + # Build the inverted index (P_v) of vertices to paths + if index_map is not None: + if node in index_map: + index_map[node].add(path_index) + else: + index_map[node] = {path_index} + + starting_index = node_index + for _ in range(path_length): + # Randomly sample a neighbor (v_j) according + # to transition probabilities from ``node`` (v) to its neighbors + neighbor_index = np.random.choice( + num_nodes, p=transition_probabilities[starting_index] + ) + + # Set current vertex (v = v_j) + starting_index = neighbor_index + + # Add v into p_r + neighbor_node = node_map[neighbor_index] + path.append(neighbor_node) + + # Add p_r into P_v + if index_map is not None: + if neighbor_node in index_map: + index_map[neighbor_node].add(path_index) + else: + index_map[neighbor_node] = {path_index} + + yield path diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/simple_paths.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/simple_paths.py new file mode 100644 index 0000000..0ce2721 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/simple_paths.py @@ -0,0 +1,955 @@ +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "all_simple_paths", + "is_simple_path", + "shortest_simple_paths", + "all_simple_edge_paths", +] + + +def is_simple_path(G, nodes): + """Returns True if and only if `nodes` form a simple path in `G`. + + A *simple path* in a graph is a nonempty sequence of nodes in which + no node appears more than once in the sequence, and each adjacent + pair of nodes in the sequence is adjacent in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph. + nodes : list + A list of one or more nodes in the graph `G`. + + Returns + ------- + bool + Whether the given list of nodes represents a simple path in `G`. + + Notes + ----- + An empty list of nodes is not a path but a list of one node is a + path. Here's an explanation why. + + This function operates on *node paths*. One could also consider + *edge paths*. There is a bijection between node paths and edge + paths. + + The *length of a path* is the number of edges in the path, so a list + of nodes of length *n* corresponds to a path of length *n* - 1. + Thus the smallest edge path would be a list of zero edges, the empty + path. This corresponds to a list of one node. 
+ + To convert between a node path and an edge path, you can use code + like the following:: + + >>> from networkx.utils import pairwise + >>> nodes = [0, 1, 2, 3] + >>> edges = list(pairwise(nodes)) + >>> edges + [(0, 1), (1, 2), (2, 3)] + >>> nodes = [edges[0][0]] + [v for u, v in edges] + >>> nodes + [0, 1, 2, 3] + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.is_simple_path(G, [2, 3, 0]) + True + >>> nx.is_simple_path(G, [0, 2]) + False + + """ + # The empty list is not a valid path. Could also return + # NetworkXPointlessConcept here. + if len(nodes) == 0: + return False + # If the list is a single node, just check that the node is actually + # in the graph. + if len(nodes) == 1: + return nodes[0] in G + # Test that no node appears more than once, and that each + # adjacent pair of nodes is adjacent. + return len(set(nodes)) == len(nodes) and all(v in G[u] for u, v in pairwise(nodes)) + + +def all_simple_paths(G, source, target, cutoff=None): + """Generate all simple paths in the graph G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. + + Examples + -------- + This iterator generates lists of nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=3): + ... print(path) + ... + [0, 1, 2, 3] + [0, 1, 3] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + + You can generate only those paths that are shorter than a certain + length by using the `cutoff` keyword argument:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2) + >>> print(list(paths)) + [[0, 1, 3], [0, 2, 3], [0, 3]] + + To get each path as the corresponding list of edges, you can use the + :func:`networkx.utils.pairwise` helper function:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3) + >>> for path in map(nx.utils.pairwise, paths): + ... print(list(path)) + [(0, 1), (1, 2), (2, 3)] + [(0, 1), (1, 3)] + [(0, 2), (2, 1), (1, 3)] + [(0, 2), (2, 3)] + [(0, 3)] + + Pass an iterable of nodes as target to generate all paths ending in any of several nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]): + ... print(path) + ... 
+ [0, 1, 2] + [0, 1, 2, 3] + [0, 1, 3] + [0, 1, 3, 2] + [0, 2] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + [0, 3, 1, 2] + [0, 3, 2] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph using a functional programming approach:: + + >>> from itertools import chain + >>> from itertools import product + >>> from itertools import starmap + >>> from functools import partial + >>> + >>> chaini = chain.from_iterable + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = partial(nx.all_simple_paths, G) + >>> list(chaini(starmap(all_paths, product(roots, leaves)))) + [[0, 1, 2], [0, 3, 2]] + + The same list computed using an iterative approach:: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = [] + >>> for root in roots: + ... for leaf in leaves: + ... paths = nx.all_simple_paths(G, root, leaf) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 2], [0, 3, 2]] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph passing all leaves together to avoid unnecessary + compute:: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = [v for v, d in G.out_degree() if d == 0] + >>> all_paths = [] + >>> for root in roots: + ... paths = nx.all_simple_paths(G, root, leaves) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]] + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + This function does not check that a path exists between `source` and + `target`. For large graphs, this may result in very long runtimes. + Consider using `has_path` to check that a path exists between `source` and + `target` before calling this function on large graphs. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. 
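(Added sketch, not part of the upstream docstring.) A minimal version of the ``has_path`` guard recommended in the Notes above might look like this before enumerating paths on a large graph:

>>> G = nx.complete_graph(4)
>>> paths = list(nx.all_simple_paths(G, 0, 3)) if nx.has_path(G, 0, 3) else []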
+ + See Also + -------- + all_shortest_paths, shortest_path, has_path + + """ + if source not in G: + raise nx.NodeNotFound(f"source node {source} not in graph") + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError as err: + raise nx.NodeNotFound(f"target node {target} not in graph") from err + if source in targets: + return _empty_generator() + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return _empty_generator() + if G.is_multigraph(): + return _all_simple_paths_multigraph(G, source, targets, cutoff) + else: + return _all_simple_paths_graph(G, source, targets, cutoff) + + +def _empty_generator(): + yield from () + + +def _all_simple_paths_graph(G, source, targets, cutoff): + visited = dict.fromkeys([source]) + stack = [iter(G[source])] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = None + if targets - set(visited.keys()): # expand stack until find all targets + stack.append(iter(G[child])) + else: + visited.popitem() # maybe other ways to child + else: # len(visited) == cutoff: + for target in (targets & (set(children) | {child})) - set(visited.keys()): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +def _all_simple_paths_multigraph(G, source, targets, cutoff): + visited = dict.fromkeys([source]) + stack = [(v for u, v in G.edges(source))] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = None + if targets - set(visited.keys()): + stack.append((v for u, v in G.edges(child))) + else: + visited.popitem() + else: # len(visited) == cutoff: + for target in targets - set(visited.keys()): + count = ([child] + list(children)).count(target) + for i in range(count): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +def all_simple_edge_paths(G, source, target, cutoff=None): + """Generate lists of edges for all simple paths in G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. + For multigraphs, the list of edges have elements of the form `(u,v,k)`. + Where `k` corresponds to the edge key. + + Examples + -------- + + Print the simple path edges of a Graph:: + + >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)]) + >>> for path in sorted(nx.all_simple_edge_paths(g, 1, 4)): + ... print(path) + [(1, 2), (2, 4)] + [(1, 3), (3, 4)] + + Print the simple path edges of a MultiGraph. Returned edges come with + their associated keys:: + + >>> mg = nx.MultiGraph() + >>> mg.add_edge(1, 2, key="k0") + 'k0' + >>> mg.add_edge(1, 2, key="k1") + 'k1' + >>> mg.add_edge(2, 3, key="k0") + 'k0' + >>> for path in sorted(nx.all_simple_edge_paths(mg, 1, 3)): + ... 
print(path) + [(1, 2, 'k0'), (2, 3, 'k0')] + [(1, 2, 'k1'), (2, 3, 'k0')] + + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. + + See Also + -------- + all_shortest_paths, shortest_path, all_simple_paths + + """ + if source not in G: + raise nx.NodeNotFound("source node %s not in graph" % source) + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError: + raise nx.NodeNotFound("target node %s not in graph" % target) + if source in targets: + return [] + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return [] + if G.is_multigraph(): + for simp_path in _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + yield simp_path + else: + for simp_path in _all_simple_paths_graph(G, source, targets, cutoff): + yield list(zip(simp_path[:-1], simp_path[1:])) + + +def _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + if not cutoff or cutoff < 1: + return [] + visited = [source] + stack = [iter(G.edges(source, keys=True))] + + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.pop() + elif len(visited) < cutoff: + if child[1] in targets: + yield visited[1:] + [child] + elif child[1] not in [v[0] for v in visited[1:]]: + visited.append(child) + stack.append(iter(G.edges(child[1], keys=True))) + else: # len(visited) == cutoff: + for (u, v, k) in [child] + list(children): + if v in targets: + yield visited[1:] + [(u, v, k)] + stack.pop() + visited.pop() + + +@not_implemented_for("multigraph") +def shortest_simple_paths(G, source, target, weight=None): + """Generate all simple paths in the graph G from source to target, + starting from shortest ones. + + A simple path is a path with no repeated nodes. + + If a weighted shortest path search is to be used, no negative weights + are allowed. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + weight : string or function + If it is a string, it is the name of the edge attribute to be + used as a weight. + + If it is a function, the weight of an edge is the value returned + by the function. The function must accept exactly three positional + arguments: the two endpoints of an edge and the dictionary of edge + attributes for that edge. The function must return a number. + + If None all edges are considered to have unit weight. Default + value None. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths, in order from + shortest to longest. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + NetworkXError + If source or target nodes are not in the input graph. + + NetworkXNotImplemented + If the input graph is a Multi[Di]Graph. + + Examples + -------- + + >>> G = nx.cycle_graph(7) + >>> paths = list(nx.shortest_simple_paths(G, 0, 3)) + >>> print(paths) + [[0, 1, 2, 3], [0, 6, 5, 4, 3]] + + You can use this function to efficiently compute the k shortest/best + paths between two nodes. + + >>> from itertools import islice + >>> def k_shortest_paths(G, source, target, k, weight=None): + ... return list( + ... 
islice(nx.shortest_simple_paths(G, source, target, weight=weight), k)
+    ...     )
+    >>> for path in k_shortest_paths(G, 0, 3, 2):
+    ...     print(path)
+    [0, 1, 2, 3]
+    [0, 6, 5, 4, 3]
+
+    Notes
+    -----
+    This procedure is based on the algorithm by Jin Y. Yen [1]_. Finding
+    the first $K$ paths requires $O(KN^3)$ operations.
+
+    See Also
+    --------
+    all_shortest_paths
+    shortest_path
+    all_simple_paths
+
+    References
+    ----------
+    .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a
+       Network", Management Science, Vol. 17, No. 11, Theory Series
+       (Jul., 1971), pp. 712-716.
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"target node {target} not in graph")
+
+    if weight is None:
+        length_func = len
+        shortest_path_func = _bidirectional_shortest_path
+    else:
+        wt = _weight_function(G, weight)
+
+        def length_func(path):
+            return sum(
+                wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
+            )
+
+        shortest_path_func = _bidirectional_dijkstra
+
+    listA = list()
+    listB = PathBuffer()
+    prev_path = None
+    while True:
+        if not prev_path:
+            length, path = shortest_path_func(G, source, target, weight=weight)
+            listB.push(length, path)
+        else:
+            ignore_nodes = set()
+            ignore_edges = set()
+            for i in range(1, len(prev_path)):
+                root = prev_path[:i]
+                root_length = length_func(root)
+                for path in listA:
+                    if path[:i] == root:
+                        ignore_edges.add((path[i - 1], path[i]))
+                try:
+                    length, spur = shortest_path_func(
+                        G,
+                        root[-1],
+                        target,
+                        ignore_nodes=ignore_nodes,
+                        ignore_edges=ignore_edges,
+                        weight=weight,
+                    )
+                    path = root[:-1] + spur
+                    listB.push(root_length + length, path)
+                except nx.NetworkXNoPath:
+                    pass
+                ignore_nodes.add(root[-1])
+
+        if listB:
+            path = listB.pop()
+            yield path
+            listA.append(path)
+            prev_path = path
+        else:
+            break
+
+
+class PathBuffer:
+    def __init__(self):
+        self.paths = set()
+        self.sortedpaths = list()
+        self.counter = count()
+
+    def __len__(self):
+        return len(self.sortedpaths)
+
+    def push(self, cost, path):
+        hashable_path = tuple(path)
+        if hashable_path not in self.paths:
+            heappush(self.sortedpaths, (cost, next(self.counter), path))
+            self.paths.add(hashable_path)
+
+    def pop(self):
+        (cost, num, path) = heappop(self.sortedpaths)
+        hashable_path = tuple(path)
+        self.paths.remove(hashable_path)
+        return path
+
+
+def _bidirectional_shortest_path(
+    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
+):
+    """Returns the shortest path between source and target ignoring
+    nodes and edges in the containers ignore_nodes and ignore_edges.
+
+    This is a custom modification of the standard bidirectional shortest
+    path implementation at networkx.algorithms.unweighted
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        starting node for path
+
+    target : node
+        ending node for path
+
+    ignore_nodes : container of nodes
+        nodes to ignore, optional
+
+    ignore_edges : container of edges
+        edges to ignore, optional
+
+    weight : None
+        This function accepts a weight argument for convenience of
+        shortest_simple_paths function. It will be ignored.
+
+    Returns
+    -------
+    length : number
+        Length of the path as counted by ``len(path)``, i.e. the number
+        of nodes it contains (note: not the number of edges).
+
+    path : list
+        List of nodes in a path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+ + See Also + -------- + shortest_path + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from w to target + while w is not None: + path.append(w) + w = succ[w] + # from source to w + w = pred[path[0]] + while w is not None: + path.insert(0, w) + w = pred[w] + + return len(path), path + + +def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None): + """Bidirectional shortest path helper. + Returns (pred,succ,w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. + """ + # does BFS from both source and target and meets in the middle + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # support optional nodes filter + if ignore_nodes: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if w not in ignore_nodes: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # support optional edges filter + if ignore_edges: + if G.is_directed(): + + def filter_pred_iter(pred_iter): + def iterate(v): + for w in pred_iter(v): + if (w, v) not in ignore_edges: + yield w + + return iterate + + def filter_succ_iter(succ_iter): + def iterate(v): + for w in succ_iter(v): + if (v, w) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_pred_iter(Gpred) + Gsucc = filter_succ_iter(Gsucc) + + else: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if (v, w) not in ignore_edges and (w, v) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # predecesssor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + # found path + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def _bidirectional_dijkstra( + G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None +): + """Dijkstra's algorithm for shortest paths using bidirectional search. + + This function returns the shortest path between source and target + ignoring nodes and edges in the containers ignore_nodes and + ignore_edges. + + This is a custom modification of the standard Dijkstra bidirectional + shortest path implementation at networkx.algorithms.weighted + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. 
+
+    weight : string or function, optional (default='weight')
+       Edge data key or weight function corresponding to the edge weight
+
+    ignore_nodes : container of nodes
+       nodes to ignore, optional
+
+    ignore_edges : container of edges
+       edges to ignore, optional
+
+    Returns
+    -------
+    length : number
+        Shortest path length.
+
+    path : list
+        List of nodes in the shortest path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    In practice bidirectional Dijkstra is much more than twice as fast as
+    ordinary Dijkstra.
+
+    Ordinary Dijkstra expands nodes in a sphere-like manner from the
+    source. The radius of this sphere will eventually be the length
+    of the shortest path. Bidirectional Dijkstra will expand nodes
+    from both the source and the target, making two spheres of half
+    this radius. The volume of the first sphere is proportional to
+    `pi*r*r`, while the two smaller spheres together have a volume of
+    `2*pi*(r/2)*(r/2)`, i.e. half as much.
+
+    This algorithm is not guaranteed to work if edge weights
+    are negative or are floating point numbers
+    (overflows and roundoff errors can cause problems).
+
+    See Also
+    --------
+    shortest_path
+    shortest_path_length
+    """
+    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+    if source == target:
+        if source not in G:
+            raise nx.NodeNotFound(f"Node {source} not in graph")
+        return (0, [source])
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # support optional nodes filter
+    if ignore_nodes:
+
+        def filter_iter(nodes):
+            def iterate(v):
+                for w in nodes(v):
+                    if w not in ignore_nodes:
+                        yield w
+
+            return iterate
+
+        Gpred = filter_iter(Gpred)
+        Gsucc = filter_iter(Gsucc)
+
+    # support optional edges filter
+    if ignore_edges:
+        if G.is_directed():
+
+            def filter_pred_iter(pred_iter):
+                def iterate(v):
+                    for w in pred_iter(v):
+                        if (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            def filter_succ_iter(succ_iter):
+                def iterate(v):
+                    for w in succ_iter(v):
+                        if (v, w) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_pred_iter(Gpred)
+            Gsucc = filter_succ_iter(Gsucc)
+
+        else:
+
+            def filter_iter(nodes):
+                def iterate(v):
+                    for w in nodes(v):
+                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_iter(Gpred)
+            Gsucc = filter_iter(Gsucc)
+
+    push = heappush
+    pop = heappop
+    # Init:  Forward             Backward
+    dists = [{}, {}]  # dictionary of final distances
+    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
+    fringe = [[], []]  # heap of (distance, node) tuples for
+    # extracting next node to expand
+    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
+    # nodes seen
+    c = count()
+    # initialize fringe heap
+    push(fringe[0], (0, next(c), source))
+    push(fringe[1], (0, next(c), target))
+    # neighs for extracting correct neighbor information
+    neighs = [Gsucc, Gpred]
+    # variables to hold shortest discovered path
+    # finaldist = 1e30000
+    finalpath = []
+    dir = 1
+    while fringe[0] and fringe[1]:
+        # choose direction
+        # dir == 0 is forward direction and dir == 1 is back
+        dir = 1 - dir
+        # extract closest to expand
+ (dist, _, v) = pop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + return (finaldist, finalpath) + + wt = _weight_function(G, weight) + for w in neighs[dir](v): + if dir == 0: # forward + minweight = wt(v, w, G.get_edge_data(v, w)) + vwLength = dists[dir][v] + minweight + else: # back, must remember to change v,w->w,v + minweight = wt(w, v, G.get_edge_data(w, v)) + vwLength = dists[dir][v] + minweight + + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + push(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/smallworld.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/smallworld.py new file mode 100644 index 0000000..fc64d13 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/smallworld.py @@ -0,0 +1,385 @@ +"""Functions for estimating the small-world-ness of graphs. + +A small world network is characterized by a small average shortest path length, +and a large clustering coefficient. + +Small-worldness is commonly measured with the coefficient sigma or omega. + +Both coefficients compare the average clustering coefficient and shortest path +length of a given graph against the same quantities for an equivalent random +or lattice graph. + +For more information, see the Wikipedia article on small-world network [1]_. + +.. [1] Small-world network:: https://en.wikipedia.org/wiki/Small-world_network + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["random_reference", "lattice_reference", "sigma", "omega"] + + +@py_random_state(3) +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def random_reference(G, niter=1, connectivity=True, seed=None): + """Compute a random graph by swapping edges of a given graph. + + Parameters + ---------- + G : graph + An undirected graph with 4 or more nodes. + + niter : integer (optional, default=1) + An edge is rewired approximately `niter` times. + + connectivity : boolean (optional, default=True) + When True, ensure connectivity for the randomized graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The randomized graph. + + Notes + ----- + The implementation is adapted from the algorithm by Maslov and Sneppen + (2002) [1]_. + + References + ---------- + .. [1] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. 
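+
+    Examples
+    --------
+    A minimal usage sketch (illustrative; the rewiring itself depends on
+    ``seed``, but double-edge swaps always preserve the degree sequence):
+
+    >>> G = nx.connected_watts_strogatz_graph(20, 4, 0.1, seed=7)
+    >>> Gr = nx.random_reference(G, niter=1, seed=7)
+    >>> sorted(d for _, d in G.degree()) == sorted(d for _, d in Gr.degree())
+    True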
+    """
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has less than four nodes.")
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    G = G.copy()
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = cumulative_distribution(degrees)  # cdf of degree
+    nnodes = len(G)
+    nedges = nx.number_of_edges(G)
+    niter = niter * nedges
+    ntries = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))
+    swapcount = 0
+
+    for i in range(niter):
+        n = 0
+        while n < ntries:
+            # pick two random edges without creating edge list
+            # choose source node indices from discrete distribution
+            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+            if ai == ci:
+                continue  # same source, skip
+            a = keys[ai]  # convert index to label
+            c = keys[ci]
+            # choose target uniformly from neighbors
+            b = seed.choice(list(G.neighbors(a)))
+            d = seed.choice(list(G.neighbors(c)))
+            if b in [a, c, d] or d in [a, b, c]:
+                continue  # all vertices should be different
+
+            # don't create parallel edges
+            if (d not in G[a]) and (b not in G[c]):
+                G.add_edge(a, d)
+                G.add_edge(c, b)
+                G.remove_edge(a, b)
+                G.remove_edge(c, d)
+
+                # Check if the graph is still connected
+                if connectivity and local_conn(G, a, b) == 0:
+                    # Not connected, revert the swap
+                    G.remove_edge(a, d)
+                    G.remove_edge(c, b)
+                    G.add_edge(a, b)
+                    G.add_edge(c, d)
+                else:
+                    swapcount += 1
+                    break
+            n += 1
+    return G
+
+
+@py_random_state(4)
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
+    """Latticize the given graph by swapping edges.
+
+    Parameters
+    ----------
+    G : graph
+        An undirected graph with 4 or more nodes.
+
+    niter : integer (optional, default=5)
+        An edge is rewired approximately `niter` times.
+
+    D : numpy.array (optional, default=None)
+        Matrix of distances to the diagonal; computed internally if not given.
+
+    connectivity : boolean (optional, default=True)
+        Ensure connectivity for the latticized graph when set to True.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : graph
+        The latticized graph.
+
+    Notes
+    -----
+    The implementation is adapted from the algorithm by Sporns et al. [1]_,
+    which is inspired by the original work by Maslov and Sneppen (2002) [2]_.
+
+    References
+    ----------
+    .. [1] Sporns, Olaf, and Jonathan D. Zwi.
+       "The small world of the cerebral cortex."
+       Neuroinformatics 2.2 (2004): 145-162.
+    .. [2] Maslov, Sergei, and Kim Sneppen.
+       "Specificity and stability in topology of protein networks."
+       Science 296.5569 (2002): 910-913.
+    """
+    import numpy as np
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has less than four nodes.")
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
+    G = G.copy()
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = cumulative_distribution(degrees)  # cdf of degree
+
+    nnodes = len(G)
+    nedges = nx.number_of_edges(G)
+    if D is None:
+        D = np.zeros((nnodes, nnodes))
+        un = np.arange(1, nnodes)
+        um = np.arange(nnodes - 1, 0, -1)
+        u = np.append((0,), np.where(un < um, un, um))
+
+        for v in range(int(np.ceil(nnodes / 2))):
+            D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])
+            D[v, :] = D[nnodes - v - 1, :][::-1]
+
+    niter = niter * nedges
+    # maximal number of rewiring attempts per 'niter'
+    max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))
+
+    for _ in range(niter):
+        n = 0
+        while n < max_attempts:
+            # pick two random edges without creating edge list
+            # choose source node indices from discrete distribution
+            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+            if ai == ci:
+                continue  # same source, skip
+            a = keys[ai]  # convert index to label
+            c = keys[ci]
+            # choose target uniformly from neighbors
+            b = seed.choice(list(G.neighbors(a)))
+            d = seed.choice(list(G.neighbors(c)))
+            bi = keys.index(b)
+            di = keys.index(d)
+
+            if b in [a, c, d] or d in [a, b, c]:
+                continue  # all vertices should be different
+
+            # don't create parallel edges
+            if (d not in G[a]) and (b not in G[c]):
+                if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:
+                    # only swap if we get closer to the diagonal
+                    G.add_edge(a, d)
+                    G.add_edge(c, b)
+                    G.remove_edge(a, b)
+                    G.remove_edge(c, d)
+
+                    # Check if the graph is still connected
+                    if connectivity and local_conn(G, a, b) == 0:
+                        # Not connected, revert the swap
+                        G.remove_edge(a, d)
+                        G.remove_edge(c, b)
+                        G.add_edge(a, b)
+                        G.add_edge(c, d)
+                    else:
+                        break
+            n += 1
+
+    return G
+
+
+@py_random_state(3)
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def sigma(G, niter=100, nrand=10, seed=None):
+    """Returns the small-world coefficient (sigma) of the given graph.
+
+    The small-world coefficient is defined as::
+
+        sigma = (C / Cr) / (L / Lr)
+
+    where C and L are respectively the average clustering coefficient and
+    average shortest path length of G. Cr and Lr are respectively the average
+    clustering coefficient and average shortest path length of an equivalent
+    random graph.
+
+    A graph is commonly classified as small-world if sigma > 1.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+    niter : integer (optional, default=100)
+        Approximate number of rewirings per edge to compute the equivalent
+        random graph.
+    nrand : integer (optional, default=10)
+        Number of random graphs generated to compute the average clustering
+        coefficient (Cr) and average shortest path length (Lr).
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    sigma : float
+        The small-world coefficient of G.
+
+    Notes
+    -----
+    The implementation is adapted from Humphries et al. [1]_ [2]_.
+
+    References
+    ----------
+    .. [1] M. D. Humphries, K. Gurney and T. J. Prescott,
+       "The brainstem reticular formation is a small-world, not scale-free,
+       network", Proc. Roy. Soc. B 2006 273, 503-511,
+       doi:10.1098/rspb.2005.3354.
+    .. [2] Humphries and Gurney (2008).
+       "Network 'Small-World-Ness': A Quantitative Method for Determining
+       Canonical Network Equivalence".
+       PLoS One. 3 (4). PMID 18446219. doi:10.1371/journal.pone.0002051.
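+
+    Examples
+    --------
+    An illustrative sketch (the result is randomized and the computation is
+    relatively expensive, hence the skipped doctest; small ``niter`` and
+    ``nrand`` values keep the run cheap):
+
+    >>> G = nx.connected_watts_strogatz_graph(30, 4, 0.1, seed=5)
+    >>> s = nx.sigma(G, niter=2, nrand=2, seed=5)  # doctest: +SKIP
+
+    A value of ``s`` greater than 1 suggests small-world structure.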
+    """
+    import numpy as np
+
+    # Compute the mean clustering coefficient and average shortest path length
+    # for an equivalent random graph
+    randMetrics = {"C": [], "L": []}
+    for i in range(nrand):
+        Gr = random_reference(G, niter=niter, seed=seed)
+        randMetrics["C"].append(nx.transitivity(Gr))
+        randMetrics["L"].append(nx.average_shortest_path_length(Gr))
+
+    C = nx.transitivity(G)
+    L = nx.average_shortest_path_length(G)
+    Cr = np.mean(randMetrics["C"])
+    Lr = np.mean(randMetrics["L"])
+
+    sigma = (C / Cr) / (L / Lr)
+
+    return sigma
+
+
+@py_random_state(3)
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def omega(G, niter=5, nrand=10, seed=None):
+    """Returns the small-world coefficient (omega) of a graph.
+
+    The small-world coefficient of a graph G is::
+
+        omega = Lr / L - C / Cl
+
+    where C and L are respectively the average clustering coefficient and
+    average shortest path length of G. Lr is the average shortest path length
+    of an equivalent random graph and Cl is the average clustering coefficient
+    of an equivalent lattice graph.
+
+    The small-world coefficient (omega) measures how much G is like a lattice
+    or a random graph. Negative values mean G is more lattice-like, whereas
+    positive values mean G is more random-like.
+    Values close to 0 mean that G has small-world characteristics.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    niter : integer (optional, default=5)
+        Approximate number of rewirings per edge to compute the equivalent
+        random graph.
+
+    nrand : integer (optional, default=10)
+        Number of random and lattice graphs generated to compute the average
+        shortest path length (Lr) and the maximal clustering coefficient (Cl).
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    omega : float
+        The small-world coefficient (omega).
+
+    Notes
+    -----
+    The implementation is adapted from the algorithm by Telesford et al. [1]_.
+
+    References
+    ----------
+    .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011).
+       "The Ubiquity of Small-World Networks".
+       Brain Connectivity. 1 (0038): 367-75. PMC 3604768. PMID 22432451.
+       doi:10.1089/brain.2011.0038.
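+
+    Examples
+    --------
+    An illustrative sketch (randomized and relatively expensive, hence the
+    skipped doctest); values of ``w`` close to 0 suggest small-world
+    structure:
+
+    >>> G = nx.connected_watts_strogatz_graph(30, 4, 0.1, seed=5)
+    >>> w = nx.omega(G, niter=2, nrand=2, seed=5)  # doctest: +SKIP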
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + + # Calculate initial average clustering coefficient which potentially will + # get replaced by higher clustering coefficients from generated lattice + # reference graphs + Cl = nx.average_clustering(G) + + niter_lattice_reference = niter + niter_random_reference = niter * 2 + + for _ in range(nrand): + # Generate random graph + Gr = random_reference(G, niter=niter_random_reference, seed=seed) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + # Generate lattice graph + Gl = lattice_reference(G, niter=niter_lattice_reference, seed=seed) + + # Replace old clustering coefficient, if clustering is higher in + # generated lattice reference + Cl_temp = nx.average_clustering(Gl) + if Cl_temp > Cl: + Cl = Cl_temp + + C = nx.average_clustering(G) + L = nx.average_shortest_path_length(G) + Lr = np.mean(randMetrics["L"]) + + omega = (Lr / L) - (C / Cl) + + return omega diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/smetric.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/smetric.py new file mode 100644 index 0000000..b851e1e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/smetric.py @@ -0,0 +1,38 @@ +import networkx as nx + +__all__ = ["s_metric"] + + +def s_metric(G, normalized=True): + """Returns the s-metric of graph. + + The s-metric is defined as the sum of the products deg(u)*deg(v) + for every edge (u,v) in G. If norm is provided construct the + s-max graph and compute it's s_metric, and return the normalized + s value + + Parameters + ---------- + G : graph + The graph used to compute the s-metric. + normalized : bool (optional) + Normalize the value. + + Returns + ------- + s : float + The s-metric of the graph. + + References + ---------- + .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger, + Towards a Theory of Scale-Free Graphs: + Definition, Properties, and Implications (Extended Version), 2005. + https://arxiv.org/abs/cond-mat/0501169 + """ + if normalized: + raise nx.NetworkXError("Normalization not implemented") + # Gmax = li_smax_graph(list(G.degree().values())) + # return s_metric(G,normalized=False)/s_metric(Gmax,normalized=False) + # else: + return float(sum(G.degree(u) * G.degree(v) for (u, v) in G.edges())) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/sparsifiers.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/sparsifiers.py new file mode 100644 index 0000000..2c425d1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/sparsifiers.py @@ -0,0 +1,294 @@ +"""Functions for computing sparsifiers of graphs.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["spanner"] + + +@py_random_state(3) +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def spanner(G, stretch, weight=None, seed=None): + """Returns a spanner of the given graph with the given stretch. + + A spanner of a graph G = (V, E) with stretch t is a subgraph + H = (V, E_S) such that E_S is a subset of E and the distance between + any pair of nodes in H is at most t times the distance between the + nodes in G. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + stretch : float + The stretch of the spanner. + + weight : object + The edge attribute to use as distance. 
+ + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + NetworkX graph + A spanner of the given graph with the given stretch. + + Raises + ------ + ValueError + If a stretch less than 1 is given. + + Notes + ----- + This function implements the spanner algorithm by Baswana and Sen, + see [1]. + + This algorithm is a randomized las vegas algorithm: The expected + running time is O(km) where k = (stretch + 1) // 2 and m is the + number of edges in G. The returned graph is always a spanner of the + given graph with the specified stretch. For weighted graphs the + number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is + defined as above and n is the number of nodes in G. For unweighted + graphs the number of edges is O(n^(1 + 1 / k) + kn). + + References + ---------- + [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized + Algorithm for Computing Sparse Spanners in Weighted Graphs. + Random Struct. Algorithms 30(4): 532-563 (2007). + """ + if stretch < 1: + raise ValueError("stretch must be at least 1") + + k = (stretch + 1) // 2 + + # initialize spanner H with empty edge set + H = nx.empty_graph() + H.add_nodes_from(G.nodes) + + # phase 1: forming the clusters + # the residual graph has V' from the paper as its node set + # and E' from the paper as its edge set + residual_graph = _setup_residual_graph(G, weight) + # clustering is a dictionary that maps nodes in a cluster to the + # cluster center + clustering = {v: v for v in G.nodes} + sample_prob = math.pow(G.number_of_nodes(), -1 / k) + size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k) + + i = 0 + while i < k - 1: + # step 1: sample centers + sampled_centers = set() + for center in set(clustering.values()): + if seed.random() < sample_prob: + sampled_centers.add(center) + + # combined loop for steps 2 and 3 + edges_to_add = set() + edges_to_remove = set() + new_clustering = {} + for v in residual_graph.nodes: + if clustering[v] in sampled_centers: + continue + + # step 2: find neighboring (sampled) clusters and + # lightest edges to them + lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts( + residual_graph, clustering, v + ) + neighboring_sampled_centers = ( + set(lightest_edge_weight.keys()) & sampled_centers + ) + + # step 3: add edges to spanner + if not neighboring_sampled_centers: + # connect to each neighboring center via lightest edge + for neighbor in lightest_edge_neighbor.values(): + edges_to_add.add((v, neighbor)) + # remove all incident edges + for neighbor in residual_graph.adj[v]: + edges_to_remove.add((v, neighbor)) + + else: # there is a neighboring sampled center + closest_center = min( + neighboring_sampled_centers, key=lightest_edge_weight.get + ) + closest_center_weight = lightest_edge_weight[closest_center] + closest_center_neighbor = lightest_edge_neighbor[closest_center] + + edges_to_add.add((v, closest_center_neighbor)) + new_clustering[v] = closest_center + + # connect to centers with edge weight less than + # closest_center_weight + for center, edge_weight in lightest_edge_weight.items(): + if edge_weight < closest_center_weight: + neighbor = lightest_edge_neighbor[center] + edges_to_add.add((v, neighbor)) + + # remove edges to centers with edge weight less than + # closest_center_weight + for neighbor in residual_graph.adj[v]: + neighbor_cluster = clustering[neighbor] + neighbor_weight = lightest_edge_weight[neighbor_cluster] + if ( + neighbor_cluster == closest_center + or 
neighbor_weight < closest_center_weight + ): + edges_to_remove.add((v, neighbor)) + + # check whether iteration added too many edges to spanner, + # if so repeat + if len(edges_to_add) > size_limit: + # an iteration is repeated O(1) times on expectation + continue + + # iteration succeeded + i = i + 1 + + # actually add edges to spanner + for u, v in edges_to_add: + _add_edge_to_spanner(H, residual_graph, u, v, weight) + + # actually delete edges from residual graph + residual_graph.remove_edges_from(edges_to_remove) + + # copy old clustering data to new_clustering + for node, center in clustering.items(): + if center in sampled_centers: + new_clustering[node] = center + clustering = new_clustering + + # step 4: remove intra-cluster edges + for u in residual_graph.nodes: + for v in list(residual_graph.adj[u]): + if clustering[u] == clustering[v]: + residual_graph.remove_edge(u, v) + + # update residual graph node set + for v in list(residual_graph.nodes): + if v not in clustering: + residual_graph.remove_node(v) + + # phase 2: vertex-cluster joining + for v in residual_graph.nodes: + lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v) + for neighbor in lightest_edge_neighbor.values(): + _add_edge_to_spanner(H, residual_graph, v, neighbor, weight) + + return H + + +def _setup_residual_graph(G, weight): + """Setup residual graph as a copy of G with unique edges weights. + + The node set of the residual graph corresponds to the set V' from + the Baswana-Sen paper and the edge set corresponds to the set E' + from the paper. + + This function associates distinct weights to the edges of the + residual graph (even for unweighted input graphs), as required by + the algorithm. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + weight : object + The edge attribute to use as distance. + + Returns + ------- + NetworkX graph + The residual graph used for the Baswana-Sen algorithm. + """ + residual_graph = G.copy() + + # establish unique edge weights, even for unweighted graphs + for u, v in G.edges(): + if not weight: + residual_graph[u][v]["weight"] = (id(u), id(v)) + else: + residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v)) + + return residual_graph + + +def _lightest_edge_dicts(residual_graph, clustering, node): + """Find the lightest edge to each cluster. + + Searches for the minimum-weight edge to each cluster adjacent to + the given node. + + Parameters + ---------- + residual_graph : NetworkX graph + The residual graph used by the Baswana-Sen algorithm. + + clustering : dictionary + The current clustering of the nodes. + + node : node + The node from which the search originates. + + Returns + ------- + lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary + lightest_edge_neighbor is a dictionary that maps a center C to + a node v in the corresponding cluster such that the edge from + the given node to v is the lightest edge from the given node to + any node in cluster. lightest_edge_weight maps a center C to the + weight of the aforementioned edge. + + Notes + ----- + If a cluster has no node that is adjacent to the given node in the + residual graph then the center of the cluster is not a key in the + returned dictionaries. 
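+
+    Examples
+    --------
+    A hypothetical sketch (``R`` stands in for a residual graph; plain
+    numeric weights are used here for readability, while real residual
+    graphs carry tuple weights that compare the same way):
+
+    >>> R = nx.Graph()
+    >>> R.add_weighted_edges_from([(0, 1, 3), (0, 2, 1), (0, 3, 2)])
+    >>> clustering = {0: 0, 1: 1, 2: 1, 3: 3}  # nodes 1 and 2 share a cluster
+    >>> _lightest_edge_dicts(R, clustering, 0)
+    ({1: 2, 3: 3}, {1: 1, 3: 2})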
+    """
+    lightest_edge_neighbor = {}
+    lightest_edge_weight = {}
+    for neighbor in residual_graph.adj[node]:
+        neighbor_center = clustering[neighbor]
+        weight = residual_graph[node][neighbor]["weight"]
+        if (
+            neighbor_center not in lightest_edge_weight
+            or weight < lightest_edge_weight[neighbor_center]
+        ):
+            lightest_edge_neighbor[neighbor_center] = neighbor
+            lightest_edge_weight[neighbor_center] = weight
+    return lightest_edge_neighbor, lightest_edge_weight
+
+
+def _add_edge_to_spanner(H, residual_graph, u, v, weight):
+    """Add the edge {u, v} to the spanner H and take weight from
+    the residual graph.
+
+    Parameters
+    ----------
+    H : NetworkX graph
+        The spanner under construction.
+
+    residual_graph : NetworkX graph
+        The residual graph used by the Baswana-Sen algorithm. The weight
+        for the edge is taken from this graph.
+
+    u : node
+        One endpoint of the edge.
+
+    v : node
+        The other endpoint of the edge.
+
+    weight : object
+        The edge attribute to use as distance.
+    """
+    H.add_edge(u, v)
+    if weight:
+        H[u][v][weight] = residual_graph[u][v]["weight"][0]
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/structuralholes.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/structuralholes.py
new file mode 100644
index 0000000..55cdfe4
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/structuralholes.py
@@ -0,0 +1,278 @@
+"""Functions for computing measures of structural holes."""
+
+import networkx as nx
+
+__all__ = ["constraint", "local_constraint", "effective_size"]
+
+
+def mutual_weight(G, u, v, weight=None):
+    """Returns the sum of the weights of the edge from `u` to `v` and
+    the edge from `v` to `u` in `G`.
+
+    `weight` is the edge data key that represents the edge weight. If
+    the specified key is `None` or is not in the edge data for an edge,
+    that edge is assumed to have weight 1.
+
+    Pre-conditions: `u` and `v` must both be in `G`.
+
+    """
+    try:
+        a_uv = G[u][v].get(weight, 1)
+    except KeyError:
+        a_uv = 0
+    try:
+        a_vu = G[v][u].get(weight, 1)
+    except KeyError:
+        a_vu = 0
+    return a_uv + a_vu
+
+
+def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
+    """Returns normalized mutual weight of the edges from `u` to `v`
+    with respect to the mutual weights of the neighbors of `u` in `G`.
+
+    `norm` specifies how the normalization factor is computed. It must
+    be a function that takes a single argument and returns a number.
+    The argument will be an iterable of mutual weights
+    of pairs ``(u, w)``, where ``w`` ranges over each (in- and
+    out-)neighbor of ``u``. Common values for `norm` are
+    ``sum`` and ``max``.
+
+    `weight` can be ``None`` or a string: if ``None``, all edge weights
+    are considered equal; otherwise it holds the name of the edge
+    attribute used as weight.
+
+    """
+    scale = norm(mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u)))
+    return 0 if scale == 0 else mutual_weight(G, u, v, weight) / scale
+
+
+def effective_size(G, nodes=None, weight=None):
+    r"""Returns the effective size of all nodes in the graph ``G``.
+
+    The *effective size* of a node's ego network is based on the concept
+    of redundancy. A person's ego network has redundancy to the extent
+    that her contacts are connected to each other as well. The
+    nonredundant part of a person's relationships is the effective
+    size of her ego network [1]_. Formally, the effective size of a
+    node $u$, denoted $e(u)$, is defined by
+
+    .. 
math:: + + e(u) = \sum_{v \in N(u) \setminus \{u\}} + \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right) + + where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the + normalized mutual weight of the (directed or undirected) edges + joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. And $m_{vw}$ + is the mutual weight of $v$ and $w$ divided by $v$ highest mutual + weight with any of its neighbors. The *mutual weight* of $u$ and $v$ + is the sum of the weights of edges joining them (edge weights are + assumed to be one if the graph is unweighted). + + For the case of unweighted and undirected graphs, Borgatti proposed + a simplified formula to compute effective size [2]_ + + .. math:: + + e(u) = n - \frac{2t}{n} + + where `t` is the number of ties in the ego network (not including + ties to ego) and `n` is the number of nodes (excluding ego). + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. Directed graphs are treated like + undirected graphs when computing neighbors of ``v``. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the effective size. + If None, the effective size of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the effective size of the node as values. + + Notes + ----- + Burt also defined the related concept of *efficiency* of a node's ego + network, which is its effective size divided by the degree of that + node [1]_. So you can easily compute efficiency: + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + >>> esize = nx.effective_size(G) + >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()} + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + *Structural Holes: The Social Structure of Competition.* + Cambridge: Harvard University Press, 1995. + + .. [2] Borgatti, S. + "Structural Holes: Unpacking Burt's Redundancy Measures" + CONNECTIONS 20(1):35-38. + http://www.analytictech.com/connections/v20(1)/holes.htm + + """ + + def redundancy(G, u, v, weight=None): + nmw = normalized_mutual_weight + r = sum( + nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return 1 - r + + effective_size = {} + if nodes is None: + nodes = G + # Use Borgatti's simplified formula for unweighted and undirected graphs + if not G.is_directed() and weight is None: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + E = nx.ego_graph(G, v, center=False, undirected=True) + effective_size[v] = len(E) - (2 * E.size()) / len(E) + else: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + effective_size[v] = sum( + redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v)) + ) + return effective_size + + +def constraint(G, nodes=None, weight=None): + r"""Returns the constraint on all nodes in the graph ``G``. + + The *constraint* is a measure of the extent to which a node *v* is + invested in those nodes that are themselves invested in the + neighbors of *v*. Formally, the *constraint on v*, denoted `c(v)`, + is defined by + + .. 
math:: + + c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w) + + where $N(v)$ is the subset of the neighbors of `v` that are either + predecessors or successors of `v` and $\ell(v, w)$ is the local + constraint on `v` with respect to `w` [1]_. For the definition of local + constraint, see :func:`local_constraint`. + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. This can be either directed or undirected. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the constraint. If + None, the constraint of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the constraint on the node as values. + + See also + -------- + local_constraint + + References + ---------- + .. [1] Burt, Ronald S. + "Structural holes and good ideas". + American Journal of Sociology (110): 349–399. + + """ + if nodes is None: + nodes = G + constraint = {} + for v in nodes: + # Constraint is not defined for isolated nodes + if len(G[v]) == 0: + constraint[v] = float("nan") + continue + constraint[v] = sum( + local_constraint(G, v, n, weight) for n in set(nx.all_neighbors(G, v)) + ) + return constraint + + +def local_constraint(G, u, v, weight=None): + r"""Returns the local constraint on the node ``u`` with respect to + the node ``v`` in the graph ``G``. + + Formally, the *local constraint on u with respect to v*, denoted + $\ell(v)$, is defined by + + .. math:: + + \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2, + + where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the + normalized mutual weight of the (directed or undirected) edges + joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. The *mutual + weight* of $u$ and $v$ is the sum of the weights of edges joining + them (edge weights are assumed to be one if the graph is + unweighted). + + Parameters + ---------- + G : NetworkX graph + The graph containing ``u`` and ``v``. This can be either + directed or undirected. + + u : node + A node in the graph ``G``. + + v : node + A node in the graph ``G``. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + float + The constraint of the node ``v`` in the graph ``G``. + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + "Structural holes and good ideas". + American Journal of Sociology (110): 349–399. + + """ + nmw = normalized_mutual_weight + direct = nmw(G, u, v, weight=weight) + indirect = sum( + nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return (direct + indirect) ** 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/summarization.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/summarization.py new file mode 100644 index 0000000..16c7e62 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/summarization.py @@ -0,0 +1,556 @@ +""" +Graph summarization finds smaller representations of graphs resulting in faster +runtime of algorithms, reduced storage needs, and noise reduction. +Summarization has applications in areas such as visualization, pattern mining, +clustering and community detection, and more. 
Core graph summarization
+techniques are grouping/aggregation, bit-compression,
+simplification/sparsification, and influence based. Graph summarization
+algorithms often produce either summary graphs in the form of supergraphs or
+sparsified graphs, or a list of independent structures. Supergraphs are the
+most common product, which consist of supernodes and original nodes and are
+connected by edges and superedges, which represent aggregate edges between
+nodes and supernodes.
+
+Grouping/aggregation based techniques compress graphs by representing
+close/connected nodes and edges in a graph by a single node/edge in a
+supergraph. Nodes can be grouped together into supernodes based on their
+structural similarities or proximity within a graph to reduce the total number
+of nodes in a graph. Edge-grouping techniques group edges into lossy/lossless
+nodes called compressor or virtual nodes to reduce the total number of edges in
+a graph. Edge-grouping techniques can be lossless, meaning that they can be
+used to re-create the original graph, or lossy, requiring less space to store
+the summary graph, but at the expense of lower reconstruction accuracy of the
+original graph.
+
+Bit-compression techniques minimize the amount of information needed to
+describe the original graph, while revealing structural patterns in the
+original graph. The two-part minimum description length (MDL) is often used to
+represent the model and the original graph in terms of the model. A key
+difference between graph compression and graph summarization is that graph
+summarization focuses on finding structural patterns within the original graph,
+whereas graph compression focuses on compressing the original graph to be as
+small as possible. **NOTE**: Some bit-compression methods exist solely to
+compress a graph without creating a summary graph or finding comprehensible
+structural patterns.
+
+Simplification/sparsification techniques attempt to create a sparse
+representation of a graph by removing unimportant nodes and edges from the
+graph. Sparsified graphs differ from supergraphs created by
+grouping/aggregation by only containing a subset of the original nodes and
+edges of the original graph.
+
+Influence based techniques aim to find a high-level description of influence
+propagation in a large graph. These methods are scarce and have been mostly
+applied to social graphs.
+
+*dedensification* is a grouping/aggregation based technique to compress the
+neighborhoods around high-degree nodes in unweighted graphs by adding
+compressor nodes that summarize multiple edges of the same type to
+high-degree nodes (nodes with a degree greater than a given threshold).
+Dedensification was developed for the purpose of increasing performance of
+query processing around high-degree nodes in graph databases and enables direct
+operations on the compressed graph. The structural patterns surrounding
+high-degree nodes in the original graph are preserved while using fewer edges
+and adding a small number of compressor nodes. The degree of nodes present in
+the original graph is also preserved. The current implementation of
+dedensification supports graphs with one edge type.
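+
+As a quick illustrative sketch (see :func:`dedensify` below for a complete
+worked example), low-degree nodes that share the same set of high-degree
+neighbors have those shared edges routed through a single compressor node::
+
+    >>> G = nx.complete_bipartite_graph(3, 2)  # nodes 3 and 4 are high-degree
+    >>> H, compressors = nx.dedensify(G, threshold=2)
+    >>> H.number_of_edges() < G.number_of_edges()
+    True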
+
+For more information on graph summarization, see `Graph Summarization Methods
+and Applications: A Survey `_
+"""
+from collections import Counter, defaultdict
+
+import networkx as nx
+
+__all__ = ["dedensify", "snap_aggregation"]
+
+
+def dedensify(G, threshold, prefix=None, copy=True):
+    """Compresses neighborhoods around high-degree nodes
+
+    Reduces the number of edges to high-degree nodes by adding compressor
+    nodes that summarize multiple edges of the same type to high-degree
+    nodes (nodes with a degree greater than a given threshold). The
+    implementation currently supports graphs with a single edge type.
+
+    Parameters
+    ----------
+    G: graph
+       A networkx graph
+    threshold: int
+       Degree threshold above which a node is considered a high degree node;
+       must be greater than or equal to 2
+    prefix: str or None, optional (default: None)
+       An optional prefix for denoting compressor nodes
+    copy: bool, optional (default: True)
+       If True, the dedensification is performed on a copy of ``G``;
+       otherwise, ``G`` is modified in place
+
+    Returns
+    -------
+    dedensified networkx graph : (graph, set)
+        2-tuple of the dedensified graph and set of compressor nodes
+
+    Notes
+    -----
+    Following the algorithm in [1]_, this function removes edges in a graph
+    by compressing/decompressing the neighborhoods around high degree nodes
+    by adding compressor nodes that summarize multiple edges of the same
+    type to high-degree nodes. Dedensification will only add a compressor
+    node when doing so will reduce the total number of edges in the given
+    graph. This implementation currently supports graphs with a single edge
+    type.
+
+    Examples
+    --------
+    Dedensification will only add compressor nodes when doing so would result
+    in fewer edges::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> original_graph.number_of_edges()
+        15
+        >>> c_graph.number_of_edges()
+        14
+
+    A dedensified, directed graph can be "densified" to reconstruct the
+    original graph::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> # re-densifies the compressed graph into the original graph
+        >>> for c_node in c_nodes:
+        ...     all_neighbors = set(nx.all_neighbors(c_graph, c_node))
+        ...     out_neighbors = set(c_graph.neighbors(c_node))
+        ...     for out_neighbor in out_neighbors:
+        ...         c_graph.remove_edge(c_node, out_neighbor)
+        ...     in_neighbors = all_neighbors - out_neighbors
+        ...     for in_neighbor in in_neighbors:
+        ...         c_graph.remove_edge(in_neighbor, c_node)
+        ...         for out_neighbor in out_neighbors:
+        ...             c_graph.add_edge(in_neighbor, out_neighbor)
+        ...     c_graph.remove_node(c_node)
+        ... 
+ >>> nx.is_isomorphic(original_graph, c_graph) + True + + References + ---------- + .. [1] Maccioni, A., & Abadi, D. J. (2016, August). + Scalable pattern matching over compressed graphs via dedensification. + In Proceedings of the 22nd ACM SIGKDD International Conference on + Knowledge Discovery and Data Mining (pp. 1755-1764). + http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf + """ + if threshold < 2: + raise nx.NetworkXError("The degree threshold must be >= 2") + + degrees = G.in_degree if G.is_directed() else G.degree + # Group nodes based on degree threshold + high_degree_nodes = {n for n, d in degrees if d > threshold} + low_degree_nodes = G.nodes() - high_degree_nodes + + auxillary = {} + for node in G: + high_degree_neighbors = frozenset(high_degree_nodes & set(G[node])) + if high_degree_neighbors: + if high_degree_neighbors in auxillary: + auxillary[high_degree_neighbors].add(node) + else: + auxillary[high_degree_neighbors] = {node} + + if copy: + G = G.copy() + + compressor_nodes = set() + for index, (high_degree_nodes, low_degree_nodes) in enumerate(auxillary.items()): + low_degree_node_count = len(low_degree_nodes) + high_degree_node_count = len(high_degree_nodes) + old_edges = high_degree_node_count * low_degree_node_count + new_edges = high_degree_node_count + low_degree_node_count + if old_edges <= new_edges: + continue + compression_node = "".join(str(node) for node in high_degree_nodes) + if prefix: + compression_node = str(prefix) + compression_node + for node in low_degree_nodes: + for high_node in high_degree_nodes: + if G.has_edge(node, high_node): + G.remove_edge(node, high_node) + + G.add_edge(node, compression_node) + for node in high_degree_nodes: + G.add_edge(compression_node, node) + compressor_nodes.add(compression_node) + return G, compressor_nodes + + +def _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, +): + """ + Build the summary graph from the data structures produced in the SNAP aggregation algorithm + + Used in the SNAP aggregation algorithm to build the output summary graph and supernode + lookup dictionary. 
This process uses the original graph and the data structures to + create the supernodes with the correct node attributes, and the superedges with the correct + edge attributes + + Parameters + ---------- + G: networkx.Graph + the original graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + node_attributes: iterable + An iterable of the node attributes considered in the summarization process + edge_attributes: iterable + An iterable of the edge attributes considered in the summarization process + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + prefix: string + The prefix to be added to all supernodes + supernode_attribute: str + The node attribute for recording the supernode groupings of nodes + superedge_attribute: str + The edge attribute for recording the edge types represented by superedges + + Returns + ------- + summary graph: Networkx graph + """ + output = G.__class__() + node_label_lookup = dict() + for index, group_id in enumerate(groups): + group_set = groups[group_id] + supernode = f"{prefix}{index}" + node_label_lookup[group_id] = supernode + supernode_attributes = { + attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes + } + supernode_attributes[supernode_attribute] = group_set + output.add_node(supernode, **supernode_attributes) + + for group_id in groups: + group_set = groups[group_id] + source_supernode = node_label_lookup[group_id] + for other_group, group_edge_types in neighbor_info[ + next(iter(group_set)) + ].items(): + if group_edge_types: + target_supernode = node_label_lookup[other_group] + summary_graph_edge = (source_supernode, target_supernode) + + edge_types = [ + dict(zip(edge_attributes, edge_type)) + for edge_type in group_edge_types + ] + + has_edge = output.has_edge(*summary_graph_edge) + if output.is_multigraph(): + if not has_edge: + for edge_type in edge_types: + output.add_edge(*summary_graph_edge, **edge_type) + elif not output.is_directed(): + existing_edge_data = output.get_edge_data(*summary_graph_edge) + for edge_type in edge_types: + if edge_type not in existing_edge_data.values(): + output.add_edge(*summary_graph_edge, **edge_type) + else: + superedge_attributes = {superedge_attribute: edge_types} + output.add_edge(*summary_graph_edge, **superedge_attributes) + + return output + + +def _snap_eligible_group(G, groups, group_lookup, edge_types): + """ + Determines if a group is eligible to be split. + + A group is eligible to be split if all nodes in the group have edges of the same type(s) + with the same other groups. 
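+    For example, if some nodes in a group have edges into group ``B`` while
+    others do not, the group is eligible: splitting it yields groups that
+    are each homogeneous with respect to ``B``.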
+ + Parameters + ---------- + G: graph + graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + group_lookup: dict + dictionary of nodes and their current corresponding group ID + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + + Returns + ------- + tuple: group ID to split, and neighbor-groups participation_counts data structure + """ + neighbor_info = {node: {gid: Counter() for gid in groups} for node in group_lookup} + for group_id in groups: + current_group = groups[group_id] + + # build neighbor_info for nodes in group + for node in current_group: + neighbor_info[node] = {group_id: Counter() for group_id in groups} + edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node) + for edge in edges: + neighbor = edge[1] + edge_type = edge_types[edge] + neighbor_group_id = group_lookup[neighbor] + neighbor_info[node][neighbor_group_id][edge_type] += 1 + + # check if group_id is eligible to be split + group_size = len(current_group) + for other_group_id in groups: + edge_counts = Counter() + for node in current_group: + edge_counts.update(neighbor_info[node][other_group_id].keys()) + + if not all(count == group_size for count in edge_counts.values()): + # only the neighbor_info of the returned group_id is required for handling group splits + return group_id, neighbor_info + + # if no eligible groups, complete neighbor_info is calculated + return None, neighbor_info + + +def _snap_split(groups, neighbor_info, group_lookup, group_id): + """ + Splits a group based on edge types and updates the groups accordingly + + Splits the group with the given group_id based on the edge types + of the nodes so that each new grouping will all have the same + edges with other nodes. + + Parameters + ---------- + groups: dict + A dictionary of unique group IDs and their corresponding node groups + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + group_lookup: dict + dictionary of nodes and their current corresponding group ID + group_id: object + ID of group to be split + + Returns + ------- + dict + The updated groups based on the split + """ + new_group_mappings = defaultdict(set) + for node in groups[group_id]: + signature = tuple( + frozenset(edge_types) for edge_types in neighbor_info[node].values() + ) + new_group_mappings[signature].add(node) + + # leave the biggest new_group as the original group + new_groups = sorted(new_group_mappings.values(), key=len) + for new_group in new_groups[:-1]: + # Assign unused integer as the new_group_id + # ids are tuples, so will not interact with the original group_ids + new_group_id = len(groups) + groups[new_group_id] = new_group + groups[group_id] -= new_group + for node in new_group: + group_lookup[node] = new_group_id + + return groups + + +def snap_aggregation( + G, + node_attributes, + edge_attributes=(), + prefix="Supernode-", + supernode_attribute="group", + superedge_attribute="types", +): + """Creates a summary graph based on attributes and connectivity. + + This function uses the Summarization by Grouping Nodes on Attributes + and Pairwise edges (SNAP) algorithm for summarizing a given + graph by grouping nodes by node attributes and their edge attributes + into supernodes in a summary graph. 
+    The name SNAP should not be confused with the Stanford Network
+    Analysis Project (SNAP).
+
+    Here is a high-level view of how this algorithm works:
+
+    1) Group nodes by node attribute values.
+
+    2) Iteratively split groups until all nodes in each group have edges
+    to nodes in the same groups. That is, until all the groups are homogeneous
+    in their member nodes' edges to other groups. For example,
+    if all the nodes in group A only have edges to nodes in group B, then the
+    group is homogeneous and does not need to be split. If all nodes in group B
+    have edges with nodes in groups {A, C}, but some also have edges with other
+    nodes in B, then group B is not homogeneous and needs to be split into
+    a group of nodes that have edges with {A, C} and a group of nodes having
+    edges with {A, B, C}. This way, viewers of the summary graph can
+    assume that all nodes in the group have the exact same node attributes and
+    the exact same edges.
+
+    3) Build the output summary graph, where the groups are represented by
+    supernodes. Edges represent the edges shared between all the nodes in each
+    respective group.
+
+    A SNAP summary graph can be used to visualize graphs that are too large to
+    display or visually analyze, or to efficiently identify sets of similar
+    nodes with similar connectivity patterns to other such sets, based on
+    specified node and/or edge attributes in a graph.
+
+    Parameters
+    ----------
+    G: graph
+        Networkx Graph to be summarized
+    node_attributes: iterable
+        An iterable of the node attributes used to group nodes in the
+        summarization process. Nodes with the same values for these attributes
+        will be grouped together in the summary graph.
+    edge_attributes: iterable, optional
+        An iterable of the edge attributes considered in the summarization
+        process. If provided, unique combinations of the attribute values
+        found in the graph are used to determine the edge types in the graph.
+        If not provided, all edges are considered to be of the same type.
+    prefix: str
+        The prefix used to denote supernodes in the summary graph. Defaults to
+        'Supernode-'.
+    supernode_attribute: str
+        The node attribute for recording the supernode groupings of nodes.
+        Defaults to 'group'.
+    superedge_attribute: str
+        The edge attribute for recording the edge types of multiple edges.
+        Defaults to 'types'.
+
+    Returns
+    -------
+    networkx.Graph
+        the summary graph
+
+    Examples
+    --------
+    SNAP aggregation takes a graph and summarizes it in the context of
+    user-provided node and edge attributes such that a viewer can more easily
+    extract and analyze the information represented by the graph.
+
+    >>> nodes = {
+    ...     "A": dict(color="Red"),
+    ...     "B": dict(color="Red"),
+    ...     "C": dict(color="Red"),
+    ...     "D": dict(color="Red"),
+    ...     "E": dict(color="Blue"),
+    ...     "F": dict(color="Blue"),
+    ... }
+    >>> edges = [
+    ...     ("A", "E", "Strong"),
+    ...     ("B", "F", "Strong"),
+    ...     ("C", "E", "Weak"),
+    ...     ("D", "F", "Weak"),
+    ... ]
+    >>> G = nx.Graph()
+    >>> for node in nodes:
+    ...     attributes = nodes[node]
+    ...     G.add_node(node, **attributes)
+    ...
+    >>> for source, target, type in edges:
+    ...     G.add_edge(source, target, type=type)
+    ...
+    >>> node_attributes = ('color', )
+    >>> edge_attributes = ('type', )
+    >>> summary_graph = nx.snap_aggregation(G, node_attributes=node_attributes, edge_attributes=edge_attributes)
+
+    Notes
+    -----
+    The summary graph produced is called a maximum Attribute-edge
+    compatible (AR-compatible) grouping. According to [1]_, an
+    AR-compatible grouping means that all nodes in each group have the same
+    exact node attribute values and the same exact edges and
+    edge types to one or more nodes in the same groups. The maximal
+    AR-compatible grouping is the grouping with the minimal cardinality.
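+
+    Continuing the example above, the algorithm first groups by ``color``
+    (Red, Blue) and then splits the Red group, because its members disagree on
+    their edge types to the Blue group. A sketch of the expected outcome under
+    the default ``prefix`` (which nodes land in which supernode may depend on
+    traversal order, but three supernodes are always produced):
+
+    >>> sorted(summary_graph)
+    ['Supernode-0', 'Supernode-1', 'Supernode-2']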
+ + The AR-compatible grouping is the most detailed grouping provided by + any of the SNAP algorithms. + + References + ---------- + .. [1] Y. Tian, R. A. Hankins, and J. M. Patel. Efficient aggregation + for graph summarization. In Proc. 2008 ACM-SIGMOD Int. Conf. + Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada, + June 2008. + """ + edge_types = { + edge: tuple(attrs.get(attr) for attr in edge_attributes) + for edge, attrs in G.edges.items() + } + if not G.is_directed(): + if G.is_multigraph(): + # list is needed to avoid mutating while iterating + edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()] + else: + # list is needed to avoid mutating while iterating + edges = [((v, u), etype) for (u, v), etype in edge_types.items()] + edge_types.update(edges) + + group_lookup = { + node: tuple(attrs[attr] for attr in node_attributes) + for node, attrs in G.nodes.items() + } + groups = defaultdict(set) + for node, node_type in group_lookup.items(): + groups[node_type].add(node) + + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + while eligible_group_id: + groups = _snap_split(groups, neighbor_info, group_lookup, eligible_group_id) + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + return _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/swap.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/swap.py new file mode 100644 index 0000000..26a1f31 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/swap.py @@ -0,0 +1,269 @@ +"""Swap edges in a graph. +""" + +import math + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["double_edge_swap", "connected_double_edge_swap"] + + +@py_random_state(3) +def double_edge_swap(G, nswap=1, max_tries=100, seed=None): + """Swap two edges in the graph while keeping the node degrees fixed. + + A double-edge swap removes two randomly chosen edges u-v and x-y + and creates the new edges u-x and v-y:: + + u--v u v + becomes | | + x--y x y + + If either the edge u-x or v-y already exist no swap is performed + and another attempt is made to find a suitable edge pair. + + Parameters + ---------- + G : graph + An undirected graph + + nswap : integer (optional, default=1) + Number of double-edge swaps to perform + + max_tries : integer (optional) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The graph after double edge swaps. + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + """ + if G.is_directed(): + raise nx.NetworkXError("double_edge_swap() not defined for directed graphs.") + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("Graph has less than four nodes.") + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. 
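+    # A small worked example of that sampling step (illustrative only): for
+    # degrees [1, 2, 1], nx.utils.cumulative_distribution returns
+    # [0, 0.25, 0.75, 1.0], so discrete_sequence draws the degree-2 node
+    # about half the time, i.e. in proportion to its degree.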
+    n = 0
+    swapcount = 0
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
+    discrete_sequence = nx.utils.discrete_sequence
+    while swapcount < nswap:
+        # if random.random() < 0.5: continue # trick to avoid periodicities?
+        # pick two random edges without creating edge list
+        # choose source node indices from discrete distribution
+        (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+        if ui == xi:
+            continue  # same source, skip
+        u = keys[ui]  # convert index to label
+        x = keys[xi]
+        # choose target uniformly from neighbors
+        v = seed.choice(list(G[u]))
+        y = seed.choice(list(G[x]))
+        if v == y:
+            continue  # same target, skip
+        if (x not in G[u]) and (y not in G[v]):  # don't create parallel edges
+            G.add_edge(u, x)
+            G.add_edge(v, y)
+            G.remove_edge(u, v)
+            G.remove_edge(x, y)
+            swapcount += 1
+        if n >= max_tries:
+            e = (
+                f"Maximum number of swap attempts ({n}) exceeded "
+                f"before desired swaps achieved ({nswap})."
+            )
+            raise nx.NetworkXAlgorithmError(e)
+        n += 1
+    return G
+
+
+@py_random_state(3)
+def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
+    """Attempts the specified number of double-edge swaps in the graph `G`.
+
+    A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
+    y)` and creates the new edges `(u, x)` and `(v, y)`::
+
+             u--v            u  v
+                    becomes  |  |
+             x--y            x  y
+
+    If either `(u, x)` or `(v, y)` already exists, then no swap is performed,
+    so the actual number of swapped edges is always *at most* `nswap`.
+
+    Parameters
+    ----------
+    G : graph
+       An undirected graph
+
+    nswap : integer (optional, default=1)
+       Number of double-edge swaps to perform
+
+    _window_threshold : integer
+       The window size below which connectedness of the graph will be checked
+       after each swap.
+
+       The "window" in this function is a dynamically updated integer that
+       represents the number of swap attempts to make before checking if the
+       graph remains connected. It is an optimization used to decrease the
+       running time of the algorithm in exchange for increased complexity of
+       implementation.
+
+       If the window size is below this threshold, then the algorithm checks
+       after each swap if the graph remains connected by checking if there is
+       a path joining the two nodes whose edge was just removed. If the window
+       size is above this threshold, then the algorithm performs all the swaps
+       in the window and only then checks if the graph is still connected.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    int
+       The number of successful swaps
+
+    Raises
+    ------
+    NetworkXError
+       If the input graph is not connected, or if the graph has fewer than
+       four nodes.
+
+    Notes
+    -----
+    The initial graph `G` must be connected, and the resulting graph is
+    connected. The graph `G` is modified in place.
+
+    References
+    ----------
+    .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
+       The Markov chain simulation method for generating connected
+       power law random graphs, 2003.
+ http://citeseer.ist.psu.edu/gkantsidis03markov.html + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected") + if len(G) < 4: + raise nx.NetworkXError("Graph has less than four nodes.") + n = 0 + swapcount = 0 + deg = G.degree() + # Label key for nodes + dk = list(n for n, d in G.degree()) + cdf = nx.utils.cumulative_distribution(list(d for n, d in G.degree())) + discrete_sequence = nx.utils.discrete_sequence + window = 1 + while n < nswap: + wcount = 0 + swapped = [] + # If the window is small, we just check each time whether the graph is + # connected by checking if the nodes that were just separated are still + # connected. + if window < _window_threshold: + # This Boolean keeps track of whether there was a failure or not. + fail = False + while wcount < window and n < nswap: + # Pick two random edges without creating the edge list. Choose + # source nodes from the discrete degree distribution. + (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + # If the source nodes are the same, skip this pair. + if ui == xi: + continue + # Convert an index to a node label. + u = dk[ui] + x = dk[xi] + # Choose targets uniformly from neighbors. + v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + # If G remains connected... + if nx.has_path(G, u, v): + wcount += 1 + # Otherwise, undo the changes. + else: + G.add_edge(u, v) + G.add_edge(x, y) + G.remove_edge(u, x) + G.remove_edge(v, y) + swapcount -= 1 + fail = True + # If one of the swaps failed, reduce the window size. + if fail: + window = math.ceil(window / 2) + else: + window += 1 + # If the window is large, then there is a good chance that a bunch of + # swaps will work. It's quicker to do all those swaps first and then + # check if the graph remains connected. + else: + while wcount < window and n < nswap: + # Pick two random edges without creating the edge list. Choose + # source nodes from the discrete degree distribution. + (ui, xi) = nx.utils.discrete_sequence(2, cdistribution=cdf) + # If the source nodes are the same, skip this pair. + if ui == xi: + continue + # Convert an index to a node label. + u = dk[ui] + x = dk[xi] + # Choose targets uniformly from neighbors. + v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + wcount += 1 + # If the graph remains connected, increase the window size. + if nx.is_connected(G): + window += 1 + # Otherwise, undo the changes from the previous window and decrease + # the window size. 
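+            # (The `swapped` list acts as a journal of the window's swaps:
+            # popping it in reverse restores the exact pre-window graph, so a
+            # failed window costs only the wasted attempts and never leaves
+            # the graph disconnected.)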
+            else:
+                while swapped:
+                    (u, v, x, y) = swapped.pop()
+                    G.add_edge(u, v)
+                    G.add_edge(x, y)
+                    G.remove_edge(u, x)
+                    G.remove_edge(v, y)
+                    swapcount -= 1
+                window = math.ceil(window / 2)
+    return swapcount
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_asteroidal.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_asteroidal.py
new file mode 100644
index 0000000..b0487af
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_asteroidal.py
@@ -0,0 +1,24 @@
+import networkx as nx
+
+
+def test_is_at_free():
+
+    is_at_free = nx.asteroidal.is_at_free
+
+    cycle = nx.cycle_graph(6)
+    assert not is_at_free(cycle)
+
+    path = nx.path_graph(6)
+    assert is_at_free(path)
+
+    small_graph = nx.complete_graph(2)
+    assert is_at_free(small_graph)
+
+    petersen = nx.petersen_graph()
+    assert not is_at_free(petersen)
+
+    clique = nx.complete_graph(6)
+    assert is_at_free(clique)
+
+    line_clique = nx.line_graph(clique)
+    assert not is_at_free(line_clique)
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_boundary.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_boundary.py
new file mode 100644
index 0000000..23837f0
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_boundary.py
@@ -0,0 +1,154 @@
+"""Unit tests for the :mod:`networkx.algorithms.boundary` module."""
+
+from itertools import combinations
+
+import pytest
+
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+from networkx.utils import edges_equal
+
+
+class TestNodeBoundary:
+    """Unit tests for the :func:`~networkx.node_boundary` function."""
+
+    def test_null_graph(self):
+        """Tests that the null graph has empty node boundaries."""
+        null = nx.null_graph()
+        assert nx.node_boundary(null, []) == set()
+        assert nx.node_boundary(null, [], []) == set()
+        assert nx.node_boundary(null, [1, 2, 3]) == set()
+        assert nx.node_boundary(null, [1, 2, 3], [4, 5, 6]) == set()
+        assert nx.node_boundary(null, [1, 2, 3], [3, 4, 5]) == set()
+
+    def test_path_graph(self):
+        P10 = cnlti(nx.path_graph(10), first_label=1)
+        assert nx.node_boundary(P10, []) == set()
+        assert nx.node_boundary(P10, [], []) == set()
+        assert nx.node_boundary(P10, [1, 2, 3]) == {4}
+        assert nx.node_boundary(P10, [4, 5, 6]) == {3, 7}
+        assert nx.node_boundary(P10, [3, 4, 5, 6, 7]) == {2, 8}
+        assert nx.node_boundary(P10, [8, 9, 10]) == {7}
+        assert nx.node_boundary(P10, [4, 5, 6], [9, 10]) == set()
+
+    def test_complete_graph(self):
+        K10 = cnlti(nx.complete_graph(10), first_label=1)
+        assert nx.node_boundary(K10, []) == set()
+        assert nx.node_boundary(K10, [], []) == set()
+        assert nx.node_boundary(K10, [1, 2, 3]) == {4, 5, 6, 7, 8, 9, 10}
+        assert nx.node_boundary(K10, [4, 5, 6]) == {1, 2, 3, 7, 8, 9, 10}
+        assert nx.node_boundary(K10, [3, 4, 5, 6, 7]) == {1, 2, 8, 9, 10}
+        assert nx.node_boundary(K10, [4, 5, 6], []) == set()
+        assert nx.node_boundary(K10, K10) == set()
+        assert nx.node_boundary(K10, [1, 2, 3], [3, 4, 5]) == {4, 5}
+
+    def test_petersen(self):
+        """Check boundaries in the petersen graph
+
+        cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<|S|<=|V|/2)
+        """
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chain_decomposition.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chain_decomposition.py
new file mode 100644
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chain_decomposition.py
+"""Unit tests for the chain decomposition functions."""
+from itertools import cycle, islice
+
+import networkx as nx
+
+
+def cycles(seq):
+    """Yields cyclic permutations of the given sequence.
+
+    For example::
+
+        >>> list(cycles("abc"))
+        [('a', 'b', 'c'), ('b', 'c', 'a'), ('c', 'a', 'b')]
+
+    """
+    n = len(seq)
+    cycled_seq = cycle(seq)
+    for x in seq:
+        yield
tuple(islice(cycled_seq, n)) + next(cycled_seq) + + +def cyclic_equals(seq1, seq2): + """Decide whether two sequences are equal up to cyclic permutations. + + For example:: + + >>> cyclic_equals("xyz", "zxy") + True + >>> cyclic_equals("xyz", "zyx") + False + + """ + # Cast seq2 to a tuple since `cycles()` yields tuples. + seq2 = tuple(seq2) + return any(x == tuple(seq2) for x in cycles(seq1)) + + +class TestChainDecomposition: + """Unit tests for the chain decomposition function.""" + + def assertContainsChain(self, chain, expected): + # A cycle could be expressed in two different orientations, one + # forward and one backward, so we need to check for cyclic + # equality in both orientations. + reversed_chain = list(reversed([tuple(reversed(e)) for e in chain])) + for candidate in expected: + if cyclic_equals(chain, candidate): + break + if cyclic_equals(reversed_chain, candidate): + break + else: + self.fail("chain not found") + + def test_decomposition(self): + edges = [ + # DFS tree edges. + (1, 2), + (2, 3), + (3, 4), + (3, 5), + (5, 6), + (6, 7), + (7, 8), + (5, 9), + (9, 10), + # Nontree edges. + (1, 3), + (1, 4), + (2, 5), + (5, 10), + (6, 8), + ] + G = nx.Graph(edges) + expected = [ + [(1, 3), (3, 2), (2, 1)], + [(1, 4), (4, 3)], + [(2, 5), (5, 3)], + [(5, 10), (10, 9), (9, 5)], + [(6, 8), (8, 7), (7, 6)], + ] + chains = list(nx.chain_decomposition(G, root=1)) + assert len(chains) == len(expected) + + # This chain decomposition isn't unique + # for chain in chains: + # print(chain) + # self.assertContainsChain(chain, expected) + + def test_barbell_graph(self): + # The (3, 0) barbell graph has two triangles joined by a single edge. + G = nx.barbell_graph(3, 0) + chains = list(nx.chain_decomposition(G, root=0)) + expected = [[(0, 1), (1, 2), (2, 0)], [(3, 4), (4, 5), (5, 3)]] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph(self): + """Test for a graph with multiple connected components.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G)) + expected = [ + [(0, 1), (1, 2), (2, 0)], + [(3, 4), (4, 5), (5, 3)], + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph_root_node(self): + """Test for a single component of a disconnected graph.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G, root="a")) + expected = [ + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chordal.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chordal.py new file mode 100644 index 0000000..c72699c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_chordal.py @@ -0,0 +1,129 @@ +import pytest + +import networkx as nx + + +class TestMCS: + @classmethod + def setup_class(cls): + # simple graph + connected_chordal_G = nx.Graph() + connected_chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), 
+ (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + ] + ) + cls.connected_chordal_G = connected_chordal_G + + chordal_G = nx.Graph() + chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + (7, 8), + ] + ) + chordal_G.add_node(9) + cls.chordal_G = chordal_G + + non_chordal_G = nx.Graph() + non_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5), (3, 4), (3, 5)]) + cls.non_chordal_G = non_chordal_G + + self_loop_G = nx.Graph() + self_loop_G.add_edges_from([(1, 1)]) + cls.self_loop_G = self_loop_G + + @pytest.mark.parametrize("G", (nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph())) + def test_is_chordal_not_implemented(self, G): + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_chordal(G) + + def test_is_chordal(self): + assert not nx.is_chordal(self.non_chordal_G) + assert nx.is_chordal(self.chordal_G) + assert nx.is_chordal(self.connected_chordal_G) + assert nx.is_chordal(nx.complete_graph(3)) + assert nx.is_chordal(nx.cycle_graph(3)) + assert not nx.is_chordal(nx.cycle_graph(5)) + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.is_chordal(self.self_loop_G) + + def test_induced_nodes(self): + G = nx.generators.classic.path_graph(10) + Induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + assert Induced_nodes == {1, 2, 3, 4, 5, 6, 7, 8, 9} + pytest.raises( + nx.NetworkXTreewidthBoundExceeded, nx.find_induced_nodes, G, 1, 9, 1 + ) + Induced_nodes = nx.find_induced_nodes(self.chordal_G, 1, 6) + assert Induced_nodes == {1, 2, 4, 6} + pytest.raises(nx.NetworkXError, nx.find_induced_nodes, self.non_chordal_G, 1, 5) + + def test_graph_treewidth(self): + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.chordal_graph_treewidth(self.non_chordal_G) + + def test_chordal_find_cliques(self): + cliques = { + frozenset([9]), + frozenset([7, 8]), + frozenset([1, 2, 3]), + frozenset([2, 3, 4]), + frozenset([3, 4, 5, 6]), + } + assert nx.chordal_graph_cliques(self.chordal_G) == cliques + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.chordal_graph_cliques(self.non_chordal_G) + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.chordal_graph_cliques(self.self_loop_G) + + def test_chordal_find_cliques_path(self): + G = nx.path_graph(10) + cliqueset = nx.chordal_graph_cliques(G) + for (u, v) in G.edges(): + assert frozenset([u, v]) in cliqueset or frozenset([v, u]) in cliqueset + + def test_chordal_find_cliquesCC(self): + cliques = {frozenset([1, 2, 3]), frozenset([2, 3, 4]), frozenset([3, 4, 5, 6])} + cgc = nx.chordal_graph_cliques + assert cgc(self.connected_chordal_G) == cliques + + def test_complete_to_chordal_graph(self): + fgrg = nx.fast_gnp_random_graph + test_graphs = [ + nx.barbell_graph(6, 2), + nx.cycle_graph(15), + nx.wheel_graph(20), + nx.grid_graph([10, 4]), + nx.ladder_graph(15), + nx.star_graph(5), + nx.bull_graph(), + fgrg(20, 0.3, seed=1), + ] + for G in test_graphs: + H, a = nx.complete_to_chordal_graph(G) + assert nx.is_chordal(H) + assert len(a) == H.number_of_nodes() + if nx.is_chordal(G): + assert G.number_of_edges() == H.number_of_edges() + assert set(a.values()) == {0} + else: + assert len(set(a.values())) == H.number_of_nodes() diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_clique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_clique.py new file mode 100644 index 0000000..f6d5335 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_clique.py @@ -0,0 +1,316 @@ +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti + + +class TestCliques: + def setup_method(self): + z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1] + self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1) + self.cl = list(nx.find_cliques(self.G)) + H = nx.complete_graph(6) + H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)}) + H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)]) + self.H = H + + def test_find_cliques1(self): + cl = list(nx.find_cliques(self.G)) + rcl = nx.find_cliques_recursive(self.G) + expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + def test_selfloops(self): + self.G.add_edge(1, 1) + cl = list(nx.find_cliques(self.G)) + rcl = list(nx.find_cliques_recursive(self.G)) + assert set(map(frozenset, cl)) == set(map(frozenset, rcl)) + answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}] + assert len(answer) == len(cl) + assert all(set(c) in answer for c in cl) + + def test_find_cliques2(self): + hcl = list(nx.find_cliques(self.H)) + assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]] + + def test_find_cliques3(self): + # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + + cl = list(nx.find_cliques(self.G, [2])) + rcl = nx.find_cliques_recursive(self.G, [2]) + expected = [[2, 6, 1, 3], [2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 3])) + rcl = nx.find_cliques_recursive(self.G, [2, 3]) + expected = [[2, 6, 1, 3]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + with pytest.raises(ValueError): + list(nx.find_cliques(self.G, [2, 6, 4, 1])) + + with pytest.raises(ValueError): + list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1])) + + def test_clique_number(self): + G = self.G + assert nx.graph_clique_number(G) == 4 + assert nx.graph_clique_number(G, cliques=self.cl) == 4 + + def test_clique_number2(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + assert nx.graph_clique_number(G) == 1 + + def test_clique_number3(self): + G = nx.Graph() + assert nx.graph_clique_number(G) == 0 + + def test_number_of_cliques(self): + G = self.G + assert nx.graph_number_of_cliques(G) == 5 + assert nx.graph_number_of_cliques(G, cliques=self.cl) == 5 + assert nx.number_of_cliques(G, 1) == 1 + assert list(nx.number_of_cliques(G, [1]).values()) == [1] + assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2] + assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2} + assert nx.number_of_cliques(G, 2) == 2 + assert nx.number_of_cliques(G) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 
1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=list(G)) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2} + assert nx.number_of_cliques(G, cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, list(G), cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + + def test_node_clique_number(self): + G = self.G + assert nx.node_clique_number(G, 1) == 4 + assert list(nx.node_clique_number(G, [1]).values()) == [4] + assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4] + assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1) == 4 + assert nx.node_clique_number(G) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, cliques=self.cl) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1, cliques=self.cl) == 4 + + def test_cliques_containing_node(self): + G = self.G + assert nx.cliques_containing_node(G, 1) == [[2, 6, 1, 3]] + assert list(nx.cliques_containing_node(G, [1]).values()) == [[[2, 6, 1, 3]]] + assert [ + sorted(c) for c in list(nx.cliques_containing_node(G, [1, 2]).values()) + ] == [[[2, 6, 1, 3]], [[2, 6, 1, 3], [2, 6, 4]]] + result = nx.cliques_containing_node(G, [1, 2]) + for k, v in result.items(): + result[k] = sorted(v) + assert result == {1: [[2, 6, 1, 3]], 2: [[2, 6, 1, 3], [2, 6, 4]]} + assert nx.cliques_containing_node(G, 1) == [[2, 6, 1, 3]] + expected = [{2, 6, 1, 3}, {2, 6, 4}] + answer = [set(c) for c in nx.cliques_containing_node(G, 2)] + assert answer in (expected, list(reversed(expected))) + + answer = [set(c) for c in nx.cliques_containing_node(G, 2, cliques=self.cl)] + assert answer in (expected, list(reversed(expected))) + assert len(nx.cliques_containing_node(G)) == 11 + + def test_make_clique_bipartite(self): + G = self.G + B = nx.make_clique_bipartite(G) + assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + # Project onto the nodes of the original graph. + H = nx.projected_graph(B, range(1, 12)) + assert H.adj == G.adj + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as positive ones. + H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)}) + assert sorted(H1) == [1, 2, 3, 4, 5] + + def test_make_max_clique_graph(self): + """Tests that the maximal clique graph is the same as the bipartite + clique graph after being projected onto the nodes representing the + cliques. + + """ + G = self.G + B = nx.make_clique_bipartite(G) + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as nonnegative ones, starting at + # 0. + H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)}) + H2 = nx.make_max_clique_graph(G) + assert H1.adj == H2.adj + + def test_directed(self): + with pytest.raises(nx.NetworkXNotImplemented): + next(nx.find_cliques(nx.DiGraph())) + + +class TestEnumerateAllCliques: + def test_paper_figure_4(self): + # Same graph as given in Fig. 4 of paper enumerate_all_cliques is + # based on. 
+ # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + G = nx.Graph() + edges_fig_4 = [ + ("a", "b"), + ("a", "c"), + ("a", "d"), + ("a", "e"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("c", "d"), + ("c", "e"), + ("d", "e"), + ("f", "b"), + ("f", "c"), + ("f", "g"), + ("g", "f"), + ("g", "c"), + ("g", "d"), + ("g", "e"), + ] + G.add_edges_from(edges_fig_4) + + cliques = list(nx.enumerate_all_cliques(G)) + clique_sizes = list(map(len, cliques)) + assert sorted(clique_sizes) == clique_sizes + + expected_cliques = [ + ["a"], + ["b"], + ["c"], + ["d"], + ["e"], + ["f"], + ["g"], + ["a", "b"], + ["a", "b", "d"], + ["a", "b", "d", "e"], + ["a", "b", "e"], + ["a", "c"], + ["a", "c", "d"], + ["a", "c", "d", "e"], + ["a", "c", "e"], + ["a", "d"], + ["a", "d", "e"], + ["a", "e"], + ["b", "c"], + ["b", "c", "d"], + ["b", "c", "d", "e"], + ["b", "c", "e"], + ["b", "c", "f"], + ["b", "d"], + ["b", "d", "e"], + ["b", "e"], + ["b", "f"], + ["c", "d"], + ["c", "d", "e"], + ["c", "d", "e", "g"], + ["c", "d", "g"], + ["c", "e"], + ["c", "e", "g"], + ["c", "f"], + ["c", "f", "g"], + ["c", "g"], + ["d", "e"], + ["d", "e", "g"], + ["d", "g"], + ["e", "g"], + ["f", "g"], + ["a", "b", "c"], + ["a", "b", "c", "d"], + ["a", "b", "c", "d", "e"], + ["a", "b", "c", "e"], + ] + + assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cluster.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cluster.py new file mode 100644 index 0000000..d69f036 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cluster.py @@ -0,0 +1,543 @@ +import pytest + +import networkx as nx + + +class TestTriangles: + def test_empty(self): + G = nx.Graph() + assert list(nx.triangles(G).values()) == [] + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + assert nx.triangles(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + assert nx.triangles(G, 1) == 0 + assert list(nx.triangles(G, [1, 2]).values()) == [0, 0] + assert nx.triangles(G, 1) == 0 + assert nx.triangles(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.triangles(G).values()) == [6, 6, 6, 6, 6] + assert sum(nx.triangles(G).values()) / 3 == 10 + assert nx.triangles(G, 1) == 6 + G.remove_edge(1, 2) + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 1) == 3 + G.add_edge(3, 3) # ignore self-edges + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 3) == 5 + + +class TestDirectedClustering: + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + assert nx.clustering(G, 0) == 0 + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) 
== [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + assert nx.clustering(G, 4) == 5 / 6 + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4) + assert nx.clustering(G)[0] == 1 / 6 + + +class TestDirectedWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4], weight="weight") == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] == 1 / 6 + # Relaxed comparisons to allow graphblas-algorithms to pass tests + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 12) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 12) + + +class TestWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2], weight="weight").values()) == [0, 0] + assert nx.clustering(G, 1, weight="weight") == 0 + assert nx.clustering(G, [1, 2], weight="weight") == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] 
== 1 / 3 + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 6) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 6) + + def test_triangle_and_signed_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 1, weight=-1) + G.add_edge(3, 0, weight=0) + assert nx.clustering(G)[0] == 1 / 3 + assert nx.clustering(G, weight="weight")[0] == -1 / 3 + + +class TestClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2]).values()) == [0, 0] + assert nx.clustering(G, 1) == 0 + assert nx.clustering(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + + def test_k5_signed(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + G.add_edge(0, 1, weight=-1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 1 / 6, + -1 / 3, + 1, + 3 / 6, + 3 / 6, + ] + + +class TestTransitivity: + def test_transitivity(self): + G = nx.Graph() + assert nx.transitivity(G) == 0 + + def test_path(self): + G = nx.path_graph(10) + assert nx.transitivity(G) == 0 + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.transitivity(G) == 0 + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.transitivity(G) == 1 + G.remove_edge(1, 2) + assert nx.transitivity(G) == 0.875 + + +class TestSquareClustering: + def test_clustering(self): + G = nx.Graph() + assert list(nx.square_clustering(G).values()) == [] + assert nx.square_clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.square_clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.square_clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.square_clustering(G).values()) == [ + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + ] + assert list(nx.square_clustering(G, [1, 2]).values()) == [1 / 3, 1 / 3] + assert nx.square_clustering(G, [1])[1] == 1 / 3 + assert nx.square_clustering(G, 1) == 1 / 3 + assert nx.square_clustering(G, [1, 2]) == {1: 1 / 3, 2: 1 / 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1] + + def test_bipartite_k5(self): + G = nx.complete_bipartite_graph(5, 5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + + def test_lind_square_clustering(self): + """Test C4 for figure 1 Lind et al (2005)""" + G = nx.Graph( + [ + (1, 2), + (1, 3), + (1, 6), + (1, 7), + (2, 4), + (2, 5), + (3, 4), + (3, 
5), + (6, 7), + (7, 8), + (6, 8), + (7, 9), + (7, 10), + (6, 11), + (6, 12), + (2, 13), + (2, 14), + (3, 15), + (3, 16), + ] + ) + G1 = G.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16]) + G2 = G.subgraph([1, 6, 7, 8, 9, 10, 11, 12]) + assert nx.square_clustering(G, [1])[1] == 3 / 43 + assert nx.square_clustering(G1, [1])[1] == 2 / 6 + assert nx.square_clustering(G2, [1])[1] == 1 / 5 + + def test_peng_square_clustering(self): + """Test eq2 for figure 1 Peng et al (2008)""" + G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4), (3, 5), (3, 6)]) + assert nx.square_clustering(G, [1])[1] == 1 / 3 + + +class TestAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.Graph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 2 + + def test_average_clustering_signed(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + G.add_edge(0, 1, weight=-1) + assert nx.average_clustering(G, weight="weight") == (-1 - 1 - 1 / 3) / 4 + assert ( + nx.average_clustering(G, weight="weight", count_zeros=True) + == (-1 - 1 - 1 / 3) / 4 + ) + assert ( + nx.average_clustering(G, weight="weight", count_zeros=False) + == (-1 - 1 - 1 / 3) / 3 + ) + + +class TestDirectedAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.DiGraph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 8 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 8 + assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 4 + + +class TestGeneralizedDegree: + def test_generalized_degree(self): + G = nx.Graph() + assert nx.generalized_degree(G) == {} + + def test_path(self): + G = nx.path_graph(5) + assert nx.generalized_degree(G, 0) == {0: 1} + assert nx.generalized_degree(G, 1) == {0: 2} + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.generalized_degree(G, 0) == {0: 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.generalized_degree(G, 0) == {3: 4} + G.remove_edge(0, 1) + assert nx.generalized_degree(G, 0) == {2: 3} + assert nx.generalized_degree(G, [1, 2]) == {1: {2: 3}, 2: {2: 2, 3: 2}} + assert nx.generalized_degree(G) == { + 0: {2: 3}, + 1: {2: 3}, + 2: {2: 2, 3: 2}, + 3: {2: 2, 3: 2}, + 4: {2: 2, 3: 2}, + } diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_communicability.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_communicability.py new file mode 100644 index 0000000..bf21988 --- /dev/null +++ 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_communicability.py @@ -0,0 +1,81 @@ +from collections import defaultdict + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.communicability_alg import communicability, communicability_exp + + +class TestCommunicability: + def test_communicability(self): + answer = { + 0: {0: 1.5430806348152435, 1: 1.1752011936438012}, + 1: {0: 1.1752011936438012, 1: 1.5430806348152435}, + } + # answer={(0, 0): 1.5430806348152435, + # (0, 1): 1.1752011936438012, + # (1, 0): 1.1752011936438012, + # (1, 1): 1.5430806348152435} + + result = communicability(nx.path_graph(2)) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) + + def test_communicability2(self): + + answer_orig = { + ("1", "1"): 1.6445956054135658, + ("1", "Albert"): 0.7430186221096251, + ("1", "Aric"): 0.7430186221096251, + ("1", "Dan"): 1.6208126320442937, + ("1", "Franck"): 0.42639707170035257, + ("Albert", "1"): 0.7430186221096251, + ("Albert", "Albert"): 2.4368257358712189, + ("Albert", "Aric"): 1.4368257358712191, + ("Albert", "Dan"): 2.0472097037446453, + ("Albert", "Franck"): 1.8340111678944691, + ("Aric", "1"): 0.7430186221096251, + ("Aric", "Albert"): 1.4368257358712191, + ("Aric", "Aric"): 2.4368257358712193, + ("Aric", "Dan"): 2.0472097037446457, + ("Aric", "Franck"): 1.8340111678944691, + ("Dan", "1"): 1.6208126320442937, + ("Dan", "Albert"): 2.0472097037446453, + ("Dan", "Aric"): 2.0472097037446457, + ("Dan", "Dan"): 3.1306328496328168, + ("Dan", "Franck"): 1.4860372442192515, + ("Franck", "1"): 0.42639707170035257, + ("Franck", "Albert"): 1.8340111678944691, + ("Franck", "Aric"): 1.8340111678944691, + ("Franck", "Dan"): 1.4860372442192515, + ("Franck", "Franck"): 2.3876142275231915, + } + + answer = defaultdict(dict) + for (k1, k2), v in answer_orig.items(): + answer[k1][k2] = v + + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + + result = communicability(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) + + result = communicability_exp(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_core.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_core.py new file mode 100644 index 0000000..db2d277 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_core.py @@ -0,0 +1,171 @@ +import networkx as nx +from networkx.utils import nodes_equal + + +class TestCore: + @classmethod + def setup_class(cls): + # G is the example graph in Figure 1 from Batagelj and + # Zaversnik's paper titled An O(m) Algorithm for Cores + # Decomposition of Networks, 2003, + # http://arXiv.org/abs/cs/0310049. With nodes labeled as + # shown, the 3-core is given by nodes 1-8, the 2-core by nodes + # 9-16, the 1-core by nodes 17-20 and node 21 is in the + # 0-core. 
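+        # (Recall: the k-core is the maximal subgraph in which every node has
+        # degree at least k within that subgraph; the assertions below recover
+        # exactly this labeling of Figure 1.)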
+ t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1) + t2 = nx.convert_node_labels_to_integers(t1, 5) + G = nx.union(t1, t2) + G.add_edges_from( + [ + (3, 7), + (2, 11), + (11, 5), + (11, 12), + (5, 12), + (12, 19), + (12, 18), + (3, 9), + (7, 9), + (7, 10), + (9, 10), + (9, 20), + (17, 13), + (13, 14), + (14, 15), + (15, 16), + (16, 13), + ] + ) + G.add_node(21) + cls.G = G + + # Create the graph H resulting from the degree sequence + # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm. + + degseq = [0, 1, 2, 2, 2, 2, 3] + H = nx.havel_hakimi_graph(degseq) + mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5} + cls.H = nx.relabel_nodes(H, mapping) + + def test_trivial(self): + """Empty graph""" + G = nx.Graph() + assert nx.core_number(G) == {} + + def test_core_number(self): + core = nx.core_number(self.G) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(4)] + assert nodes_equal(nodes_by_core[0], [21]) + assert nodes_equal(nodes_by_core[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8]) + + def test_core_number2(self): + core = nx.core_number(self.H) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(3)] + assert nodes_equal(nodes_by_core[0], [0]) + assert nodes_equal(nodes_by_core[1], [1, 3]) + assert nodes_equal(nodes_by_core[2], [2, 4, 5, 6]) + + def test_directed_core_number(self): + """core number had a bug for directed graphs found in issue #1959""" + # small example where too timid edge removal can make cn[2] = 3 + G = nx.DiGraph() + edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)] + G.add_edges_from(edges) + assert nx.core_number(G) == {1: 2, 2: 2, 3: 2, 4: 2} + # small example where too aggressive edge removal can make cn[2] = 2 + more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)] + G.add_edges_from(more_edges) + assert nx.core_number(G) == {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3} + + def test_main_core(self): + main_core_subgraph = nx.k_core(self.H) + assert sorted(main_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_core(self): + # k=0 + k_core_subgraph = nx.k_core(self.H, k=0) + assert sorted(k_core_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_core_subgraph = nx.k_core(self.H, k=1) + assert sorted(k_core_subgraph.nodes()) == [1, 2, 3, 4, 5, 6] + # k = 2 + k_core_subgraph = nx.k_core(self.H, k=2) + assert sorted(k_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_main_crust(self): + main_crust_subgraph = nx.k_crust(self.H) + assert sorted(main_crust_subgraph.nodes()) == [0, 1, 3] + + def test_k_crust(self): + # k = 0 + k_crust_subgraph = nx.k_crust(self.H, k=2) + assert sorted(k_crust_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_crust_subgraph = nx.k_crust(self.H, k=1) + assert sorted(k_crust_subgraph.nodes()) == [0, 1, 3] + # k=2 + k_crust_subgraph = nx.k_crust(self.H, k=0) + assert sorted(k_crust_subgraph.nodes()) == [0] + + def test_main_shell(self): + main_shell_subgraph = nx.k_shell(self.H) + assert sorted(main_shell_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_shell(self): + # k=0 + k_shell_subgraph = nx.k_shell(self.H, k=2) + assert sorted(k_shell_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_shell_subgraph = nx.k_shell(self.H, k=1) + assert sorted(k_shell_subgraph.nodes()) == [1, 3] + # k=2 + k_shell_subgraph = nx.k_shell(self.H, k=0) + assert sorted(k_shell_subgraph.nodes()) == [0] + + def test_k_corona(self): + # k=0 + 
k_corona_subgraph = nx.k_corona(self.H, k=2) + assert sorted(k_corona_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_corona_subgraph = nx.k_corona(self.H, k=1) + assert sorted(k_corona_subgraph.nodes()) == [1] + # k=2 + k_corona_subgraph = nx.k_corona(self.H, k=0) + assert sorted(k_corona_subgraph.nodes()) == [0] + + def test_k_truss(self): + # k=-1 + k_truss_subgraph = nx.k_truss(self.G, -1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=0 + k_truss_subgraph = nx.k_truss(self.G, 0) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=1 + k_truss_subgraph = nx.k_truss(self.G, 1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=2 + k_truss_subgraph = nx.k_truss(self.G, 2) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=3 + k_truss_subgraph = nx.k_truss(self.G, 3) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 13)) + + k_truss_subgraph = nx.k_truss(self.G, 4) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 9)) + + k_truss_subgraph = nx.k_truss(self.G, 5) + assert sorted(k_truss_subgraph.nodes()) == [] + + def test_onion_layers(self): + layers = nx.onion_layers(self.G) + nodes_by_layer = [ + sorted(n for n in layers if layers[n] == val) for val in range(1, 7) + ] + assert nodes_equal(nodes_by_layer[0], [21]) + assert nodes_equal(nodes_by_layer[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_layer[2], [10, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_layer[3], [9, 11]) + assert nodes_equal(nodes_by_layer[4], [1, 2, 4, 5, 6, 8]) + assert nodes_equal(nodes_by_layer[5], [3, 7]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_covering.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_covering.py new file mode 100644 index 0000000..4097196 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_covering.py @@ -0,0 +1,74 @@ +import networkx as nx + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert nx.min_edge_cover(G) == set() + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(0, 0) + assert nx.min_edge_cover(G) == {(0, 0)} + + def test_graph_single_edge(self): + G = nx.Graph([(0, 1)]) + assert nx.min_edge_cover(G) in ({(0, 1)}, {(1, 0)}) + + def test_graph_two_edge_path(self): + G = nx.path_graph(3) + min_cover = nx.min_edge_cover(G) + assert len(min_cover) == 2 + for u, v in G.edges: + assert (u, v) in min_cover or (v, u) in min_cover + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + # Use bipartite method by prescribing the algorithm + min_cover = nx.min_edge_cover( + G, nx.algorithms.bipartite.matching.eppstein_matching + ) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + # Use the default method which is not specialized for bipartite + min_cover2 = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover2) + assert len(min_cover2) == 4 + + def test_complete_graph_even(self): + G = nx.complete_graph(10) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 5 + + def test_complete_graph_odd(self): + G = nx.complete_graph(11) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 6 + + +class 
TestIsEdgeCover: + """Tests for :func:`networkx.algorithms.is_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert nx.is_edge_cover(G, set()) + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.is_edge_cover(G, {(1, 1)}) + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert nx.is_edge_cover(G, {(0, 0), (1, 1)}) + assert nx.is_edge_cover(G, {(0, 1), (1, 0)}) + assert nx.is_edge_cover(G, {(0, 1)}) + assert not nx.is_edge_cover(G, {(0, 0)}) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cuts.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cuts.py new file mode 100644 index 0000000..6d8656e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cuts.py @@ -0,0 +1,172 @@ +"""Unit tests for the :mod:`networkx.algorithms.cuts` module.""" + + +import networkx as nx + + +class TestCutSize: + """Unit tests for the :func:`~networkx.cut_size` function.""" + + def test_symmetric(self): + """Tests that the cut size is symmetric.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 4} + T = {2, 3, 5} + assert nx.cut_size(G, S, T) == 4 + assert nx.cut_size(G, T, S) == 4 + + def test_single_edge(self): + """Tests for a cut of a single edge.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 1 + assert nx.cut_size(G, T, S) == 1 + + def test_directed(self): + """Tests that each directed edge is counted once in the cut.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 2 + assert nx.cut_size(G, T, S) == 2 + + def test_directed_symmetric(self): + """Tests that a cut in a directed graph is symmetric.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 4} + T = {2, 3, 5} + assert nx.cut_size(G, S, T) == 8 + assert nx.cut_size(G, T, S) == 8 + + def test_multigraph(self): + """Tests that parallel edges are each counted for a cut.""" + G = nx.MultiGraph(["ab", "ab"]) + assert nx.cut_size(G, {"a"}, {"b"}) == 2 + + +class TestVolume: + """Unit tests for the :func:`~networkx.volume` function.""" + + def test_graph(self): + G = nx.cycle_graph(4) + assert nx.volume(G, {0, 1}) == 4 + + def test_digraph(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)]) + assert nx.volume(G, {0, 1}) == 2 + + def test_multigraph(self): + edges = list(nx.cycle_graph(4).edges()) + G = nx.MultiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 8 + + def test_multidigraph(self): + edges = [(0, 1), (1, 2), (2, 3), (3, 0)] + G = nx.MultiDiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 4 + + def test_barbell(self): + G = nx.barbell_graph(3, 0) + assert nx.volume(G, {0, 1, 2}) == 7 + assert nx.volume(G, {3, 4, 5}) == 7 + + +class TestNormalizedCutSize: + """Unit tests for the :func:`~networkx.normalized_cut_size` function.""" + + def test_graph(self): + G = nx.path_graph(4) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: o-{-o--o-}-o + expected = 2 * ((1 / 4) + (1 / 2)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + def test_directed(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: o-{->o-->o-}->o + expected = 2 * ((1 / 2) + (1 / 1)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + +class TestConductance: + 
"""Unit tests for the :func:`~networkx.conductance` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + # Consider the singleton sets containing the "bridge" nodes. + # There is only one cut edge, and each set has volume five. + S = {4} + T = {5} + conductance = nx.conductance(G, S, T) + expected = 1 / 5 + assert expected == conductance + # Test with no input T + G2 = nx.barbell_graph(3, 0) + # There is only one cut edge, and each set has volume seven. + S2 = {0, 1, 2} + assert nx.conductance(G2, S2) == 1 / 7 + + +class TestEdgeExpansion: + """Unit tests for the :func:`~networkx.edge_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.edge_expansion(G, S, T) + expected = 1 / 5 + assert expected == expansion + # Test with no input T + assert expected == nx.edge_expansion(G, S) + + +class TestNodeExpansion: + """Unit tests for the :func:`~networkx.node_expansion` function.""" + + def test_graph(self): + G = nx.path_graph(8) + S = {3, 4, 5} + expansion = nx.node_expansion(G, S) + # The neighborhood of S has cardinality five, and S has + # cardinality three. + expected = 5 / 3 + assert expected == expansion + + +class TestBoundaryExpansion: + """Unit tests for the :func:`~networkx.boundary_expansion` function.""" + + def test_graph(self): + G = nx.complete_graph(10) + S = set(range(4)) + expansion = nx.boundary_expansion(G, S) + # The node boundary of S has cardinality six, and S has + # cardinality three. + expected = 6 / 4 + assert expected == expansion + + +class TestMixingExpansion: + """Unit tests for the :func:`~networkx.mixing_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.mixing_expansion(G, S, T) + # There is one cut edge, and the total number of edges in the + # graph is twice the total number of edges in a clique of size + # five, plus one more for the bridge. 
+ expected = 1 / (2 * (5 * 4 + 1)) + assert expected == expansion diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cycles.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cycles.py new file mode 100644 index 0000000..42a2f01 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_cycles.py @@ -0,0 +1,347 @@ +import pytest + +import networkx +import networkx as nx +from networkx.algorithms import find_cycle, minimum_cycle_basis +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +class TestCycles: + @classmethod + def setup_class(cls): + G = networkx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 3, 4, 5]) + nx.add_cycle(G, [0, 1, 6, 7, 8]) + G.add_edge(8, 9) + cls.G = G + + def is_cyclic_permutation(self, a, b): + n = len(a) + if len(b) != n: + return False + l = a + a + return any(l[i : i + n] == b for i in range(n)) + + def test_cycle_basis(self): + G = self.G + cy = networkx.cycle_basis(G, 0) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = networkx.cycle_basis(G, 1) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = networkx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + # test disconnected graphs + nx.add_cycle(G, "ABC") + cy = networkx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5], ["A", "B", "C"]] + + def test_cycle_basis2(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + cy = networkx.cycle_basis(G, 0) + + def test_cycle_basis3(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + cy = networkx.cycle_basis(G, 0) + + def test_simple_cycles(self): + edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + G = nx.DiGraph(edges) + cc = sorted(nx.simple_cycles(G)) + ca = [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + assert len(cc) == len(ca) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_graph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.Graph() + c = sorted(nx.simple_cycles(G)) + + def test_unsortable(self): + # TODO What does this test do? 
das 6/2013 + G = nx.DiGraph() + nx.add_cycle(G, ["a", 1]) + c = list(nx.simple_cycles(G)) + + def test_simple_cycles_small(self): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3]) + c = sorted(nx.simple_cycles(G)) + assert len(c) == 1 + assert self.is_cyclic_permutation(c[0], [1, 2, 3]) + nx.add_cycle(G, [10, 20, 30]) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 2 + ca = [[1, 2, 3], [10, 20, 30]] + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_empty(self): + G = nx.DiGraph() + assert list(nx.simple_cycles(G)) == [] + + def test_complete_directed_graph(self): + # see table 2 in Johnson's paper + ncircuits = [1, 5, 20, 84, 409, 2365, 16064] + for n, c in zip(range(2, 9), ncircuits): + G = nx.DiGraph(nx.complete_graph(n)) + assert len(list(nx.simple_cycles(G))) == c + + def worst_case_graph(self, k): + # see figure 1 in Johnson's paper + # this graph has exactly 3k simple cycles + G = nx.DiGraph() + for n in range(2, k + 2): + G.add_edge(1, n) + G.add_edge(n, k + 2) + G.add_edge(2 * k + 1, 1) + for n in range(k + 2, 2 * k + 2): + G.add_edge(n, 2 * k + 2) + G.add_edge(n, n + 1) + G.add_edge(2 * k + 3, k + 2) + for n in range(2 * k + 3, 3 * k + 3): + G.add_edge(2 * k + 2, n) + G.add_edge(n, 3 * k + 3) + G.add_edge(3 * k + 3, 2 * k + 2) + return G + + def test_worst_case_graph(self): + # see figure 1 in Johnson's paper + for k in range(3, 10): + G = self.worst_case_graph(k) + l = len(list(nx.simple_cycles(G))) + assert l == 3 * k + + def test_recursive_simple_and_not(self): + for k in range(2, 10): + G = self.worst_case_graph(k) + cc = sorted(nx.simple_cycles(G)) + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, r) for r in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + def test_simple_graph_with_reported_bug(self): + G = nx.DiGraph() + edges = [ + (0, 2), + (0, 3), + (1, 0), + (1, 3), + (2, 1), + (2, 4), + (3, 2), + (3, 4), + (4, 0), + (4, 1), + (4, 5), + (5, 0), + (5, 1), + (5, 2), + (5, 3), + ] + G.add_edges_from(edges) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 26 + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + +# These tests might fail with hash randomization since they depend on +# edge_dfs. 
For more information, see the comments in: +# networkx/algorithms/traversal/tests/test_edgedfs.py + + +class TestFindCycle: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(-1, 0), (0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_graph_nocycle(self): + G = nx.Graph(self.edges) + pytest.raises(nx.exception.NetworkXNoCycle, find_cycle, G, self.nodes) + + def test_graph_cycle(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_none(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_original(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 0, FORWARD)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) + # Hash randomization...could be any edge. + assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0)] # (1, 0, 1) + assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(find_cycle(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_digraph_reverse(self): + G = nx.DiGraph(self.edges) + x = list(find_cycle(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(find_cycle(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)] # or (1, 0, 1, 1) + assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + assert x[1][3] == x_[1][3] + + def test_multidigraph_ignore2(self): + # Loop traversed an edge while ignoring its orientation. + G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)]) + x = list(find_cycle(G, [0, 1, 2], orientation="ignore")) + x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)] + assert x == x_ + + def test_multidigraph_original(self): + # Node 2 doesn't need to be searched again from visited from 4. + # The goal here is to cover the case when 2 to be researched from 4, + # when 4 is visited from the first time (so we must make sure that 4 + # is not visited from 2, and hence, we respect the edge orientation). 
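+ # Note: the edges 0->1, 1->2, 2->3 and 4->2 form a DAG, so following the
+ # original orientations can never close a cycle, hence NetworkXNoCycle.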
+ G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)]) + pytest.raises( + nx.exception.NetworkXNoCycle, + find_cycle, + G, + [0, 1, 2, 3, 4], + orientation="original", + ) + + def test_dag(self): + G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + pytest.raises( + nx.exception.NetworkXNoCycle, find_cycle, G, orientation="original" + ) + x = list(find_cycle(G, orientation="ignore")) + assert x == [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)] + + def test_prev_explored(self): + # https://github.com/networkx/networkx/issues/2323 + + G = nx.DiGraph() + G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)]) + pytest.raises(nx.NetworkXNoCycle, find_cycle, G, source=0) + x = list(nx.find_cycle(G, 1)) + x_ = [(1, 2), (2, 1)] + assert x == x_ + + x = list(nx.find_cycle(G, 2)) + x_ = [(2, 1), (1, 2)] + assert x == x_ + + x = list(nx.find_cycle(G)) + x_ = [(1, 2), (2, 1)] + assert x == x_ + + def test_no_cycle(self): + # https://github.com/networkx/networkx/issues/2439 + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)]) + pytest.raises(nx.NetworkXNoCycle, find_cycle, G, source=0) + pytest.raises(nx.NetworkXNoCycle, find_cycle, G) + + +def assert_basis_equal(a, b): + assert sorted(a) == sorted(b) + + +class TestMinimumCycles: + @classmethod + def setup_class(cls): + T = nx.Graph() + nx.add_cycle(T, [1, 2, 3, 4], weight=1) + T.add_edge(2, 4, weight=5) + cls.diamond_graph = T + + def test_unweighted_diamond(self): + mcb = minimum_cycle_basis(self.diamond_graph) + assert_basis_equal([sorted(c) for c in mcb], [[1, 2, 4], [2, 3, 4]]) + + def test_weighted_diamond(self): + mcb = minimum_cycle_basis(self.diamond_graph, weight="weight") + assert_basis_equal([sorted(c) for c in mcb], [[1, 2, 4], [1, 2, 3, 4]]) + + def test_dimensionality(self): + # checks |MCB|=|E|-|V|+|NC| + ntrial = 10 + for _ in range(ntrial): + rg = nx.erdos_renyi_graph(10, 0.3) + nnodes = rg.number_of_nodes() + nedges = rg.number_of_edges() + ncomp = nx.number_connected_components(rg) + + dim_mcb = len(minimum_cycle_basis(rg)) + assert dim_mcb == nedges - nnodes + ncomp + + def test_complete_graph(self): + cg = nx.complete_graph(5) + mcb = minimum_cycle_basis(cg) + assert all([len(cycle) == 3 for cycle in mcb]) + + def test_tree_graph(self): + tg = nx.balanced_tree(3, 3) + assert not minimum_cycle_basis(tg) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_d_separation.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_d_separation.py new file mode 100644 index 0000000..23367a0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_d_separation.py @@ -0,0 +1,158 @@ +from itertools import combinations + +import pytest + +import networkx as nx + + +def path_graph(): + """Return a path graph of length three.""" + G = nx.path_graph(3, create_using=nx.DiGraph) + G.graph["name"] = "path" + nx.freeze(G) + return G + + +def fork_graph(): + """Return a three node fork graph.""" + G = nx.DiGraph(name="fork") + G.add_edges_from([(0, 1), (0, 2)]) + nx.freeze(G) + return G + + +def collider_graph(): + """Return a collider/v-structure graph with three nodes.""" + G = nx.DiGraph(name="collider") + G.add_edges_from([(0, 2), (1, 2)]) + nx.freeze(G) + return G + + +def naive_bayes_graph(): + """Return a simply Naive Bayes PGM graph.""" + G = nx.DiGraph(name="naive_bayes") + G.add_edges_from([(0, 1), (0, 2), (0, 3), (0, 4)]) + nx.freeze(G) + return G + + +def asia_graph(): + """Return the 'Asia' PGM graph.""" + G = nx.DiGraph(name="asia") + G.add_edges_from( + [ + 
("asia", "tuberculosis"), + ("smoking", "cancer"), + ("smoking", "bronchitis"), + ("tuberculosis", "either"), + ("cancer", "either"), + ("either", "xray"), + ("either", "dyspnea"), + ("bronchitis", "dyspnea"), + ] + ) + nx.freeze(G) + return G + + +@pytest.fixture(name="path_graph") +def path_graph_fixture(): + return path_graph() + + +@pytest.fixture(name="fork_graph") +def fork_graph_fixture(): + return fork_graph() + + +@pytest.fixture(name="collider_graph") +def collider_graph_fixture(): + return collider_graph() + + +@pytest.fixture(name="naive_bayes_graph") +def naive_bayes_graph_fixture(): + return naive_bayes_graph() + + +@pytest.fixture(name="asia_graph") +def asia_graph_fixture(): + return asia_graph() + + +@pytest.mark.parametrize( + "graph", + [path_graph(), fork_graph(), collider_graph(), naive_bayes_graph(), asia_graph()], +) +def test_markov_condition(graph): + """Test that the Markov condition holds for each PGM graph.""" + for node in graph.nodes: + parents = set(graph.predecessors(node)) + non_descendants = graph.nodes - nx.descendants(graph, node) - {node} - parents + assert nx.d_separated(graph, {node}, non_descendants, parents) + + +def test_path_graph_dsep(path_graph): + """Example-based test of d-separation for path_graph.""" + assert nx.d_separated(path_graph, {0}, {2}, {1}) + assert not nx.d_separated(path_graph, {0}, {2}, {}) + + +def test_fork_graph_dsep(fork_graph): + """Example-based test of d-separation for fork_graph.""" + assert nx.d_separated(fork_graph, {1}, {2}, {0}) + assert not nx.d_separated(fork_graph, {1}, {2}, {}) + + +def test_collider_graph_dsep(collider_graph): + """Example-based test of d-separation for collider_graph.""" + assert nx.d_separated(collider_graph, {0}, {1}, {}) + assert not nx.d_separated(collider_graph, {0}, {1}, {2}) + + +def test_naive_bayes_dsep(naive_bayes_graph): + """Example-based test of d-separation for naive_bayes_graph.""" + for u, v in combinations(range(1, 5), 2): + assert nx.d_separated(naive_bayes_graph, {u}, {v}, {0}) + assert not nx.d_separated(naive_bayes_graph, {u}, {v}, {}) + + +def test_asia_graph_dsep(asia_graph): + """Example-based test of d-separation for asia_graph.""" + assert nx.d_separated( + asia_graph, {"asia", "smoking"}, {"dyspnea", "xray"}, {"bronchitis", "either"} + ) + assert nx.d_separated( + asia_graph, {"tuberculosis", "cancer"}, {"bronchitis"}, {"smoking", "xray"} + ) + + +def test_undirected_graphs_are_not_supported(): + """ + Test that undirected graphs are not supported. + + d-separation does not apply in the case of undirected graphs. + """ + with pytest.raises(nx.NetworkXNotImplemented): + g = nx.path_graph(3, nx.Graph) + nx.d_separated(g, {0}, {1}, {2}) + + +def test_cyclic_graphs_raise_error(): + """ + Test that cycle graphs should cause erroring. + + This is because PGMs assume a directed acyclic graph. + """ + with pytest.raises(nx.NetworkXError): + g = nx.cycle_graph(3, nx.DiGraph) + nx.d_separated(g, {0}, {1}, {2}) + + +def test_invalid_nodes_raise_error(asia_graph): + """ + Test that graphs that have invalid nodes passed in raise errors. 
+ """ + with pytest.raises(nx.NodeNotFound): + nx.d_separated(asia_graph, {0}, {1}, {2}) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dag.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dag.py new file mode 100644 index 0000000..b39b033 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dag.py @@ -0,0 +1,710 @@ +from collections import deque +from itertools import combinations, permutations + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, pairwise + + +# Recipe from the itertools documentation. +def _consume(iterator): + "Consume the iterator entirely." + # Feed the entire iterator into a zero-length deque. + deque(iterator, maxlen=0) + + +class TestDagLongestPath: + """Unit tests computing the longest path in a directed acyclic graph.""" + + def test_empty(self): + G = nx.DiGraph() + assert nx.dag_longest_path(G) == [] + + def test_unweighted1(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (3, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 5, 6] + + def test_unweighted2(self): + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 4, 5] + + def test_weighted(self): + G = nx.DiGraph() + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path(G) == [2, 3, 5] + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path, G) + + def test_unorderable_nodes(self): + """Tests that computing the longest path does not depend on + nodes being orderable. + + For more information, see issue #1989. + + """ + # Create the directed path graph on four nodes in a diamond shape, + # with nodes represented as (unorderable) Python objects. + nodes = [object() for n in range(4)] + G = nx.DiGraph() + G.add_edge(nodes[0], nodes[1]) + G.add_edge(nodes[0], nodes[2]) + G.add_edge(nodes[2], nodes[3]) + G.add_edge(nodes[1], nodes[3]) + + # this will raise NotImplementedError when nodes need to be ordered + nx.dag_longest_path(G) + + +class TestDagLongestPathLength: + """Unit tests for computing the length of a longest path in a + directed acyclic graph. 
+ + """ + + def test_unweighted(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + # test degenerate graphs + G = nx.DiGraph() + G.add_node(1) + assert nx.dag_longest_path_length(G) == 0 + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path_length, G) + + def test_weighted(self): + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path_length(G) == 5 + + +class TestDAG: + @classmethod + def setup_class(cls): + pass + + def test_topological_sort1(self): + DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 2, 3) + + DG.add_edge(3, 2) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + pytest.raises(nx.NetworkXUnfeasible, _consume, algorithm(DG)) + + DG.remove_edge(2, 3) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 3, 2) + + DG.remove_edge(3, 2) + + assert tuple(nx.topological_sort(DG)) in {(1, 2, 3), (1, 3, 2)} + assert tuple(nx.lexicographical_topological_sort(DG)) == (1, 2, 3) + + def test_is_directed_acyclic_graph(self): + G = nx.generators.complete_graph(2) + assert not nx.is_directed_acyclic_graph(G) + assert not nx.is_directed_acyclic_graph(G.to_directed()) + assert not nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])) + assert nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])) + + def test_topological_sort2(self): + DG = nx.DiGraph( + { + 1: [2], + 2: [3], + 3: [4], + 4: [5], + 5: [1], + 11: [12], + 12: [13], + 13: [14], + 14: [15], + } + ) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + assert not nx.is_directed_acyclic_graph(DG) + + DG.remove_edge(1, 2) + _consume(nx.topological_sort(DG)) + assert nx.is_directed_acyclic_graph(DG) + + def test_topological_sort3(self): + DG = nx.DiGraph() + DG.add_edges_from([(1, i) for i in range(2, 5)]) + DG.add_edges_from([(2, i) for i in range(5, 9)]) + DG.add_edges_from([(6, i) for i in range(9, 12)]) + DG.add_edges_from([(4, i) for i in range(12, 15)]) + + def validate(order): + assert isinstance(order, list) + assert set(order) == set(DG) + for u, v in combinations(order, 2): + assert not nx.has_path(DG, v, u) + + validate(list(nx.topological_sort(DG))) + + DG.add_edge(14, 1) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + def test_topological_sort4(self): + G = nx.Graph() + G.add_edge(1, 2) + # Only directed graphs can be topologically sorted. 
+ pytest.raises(nx.NetworkXError, _consume, nx.topological_sort(G)) + + def test_topological_sort5(self): + G = nx.DiGraph() + G.add_edge(0, 1) + assert list(nx.topological_sort(G)) == [0, 1] + + def test_topological_sort6(self): + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + + def runtime_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.add_edge(5 - x, 5) + + def unfeasible_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(4) + + def runtime_error2(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(2) + + pytest.raises(RuntimeError, runtime_error) + pytest.raises(RuntimeError, runtime_error2) + pytest.raises(nx.NetworkXUnfeasible, unfeasible_error) + + def test_all_topological_sorts_1(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5)]) + assert list(nx.all_topological_sorts(DG)) == [[1, 2, 3, 4, 5]] + + def test_all_topological_sorts_2(self): + DG = nx.DiGraph([(1, 3), (2, 1), (2, 4), (4, 3), (4, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == [ + [2, 1, 4, 3, 5], + [2, 1, 4, 5, 3], + [2, 4, 1, 3, 5], + [2, 4, 1, 5, 3], + [2, 4, 5, 1, 3], + ] + + def test_all_topological_sorts_3(self): + def unfeasible(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 2), (4, 5)]) + # convert to list to execute generator + list(nx.all_topological_sorts(DG)) + + def not_implemented(): + G = nx.Graph([(1, 2), (2, 3)]) + # convert to list to execute generator + list(nx.all_topological_sorts(G)) + + def not_implemted_2(): + G = nx.MultiGraph([(1, 2), (1, 2), (2, 3)]) + list(nx.all_topological_sorts(G)) + + pytest.raises(nx.NetworkXUnfeasible, unfeasible) + pytest.raises(nx.NetworkXNotImplemented, not_implemented) + pytest.raises(nx.NetworkXNotImplemented, not_implemted_2) + + def test_all_topological_sorts_4(self): + DG = nx.DiGraph() + for i in range(7): + DG.add_node(i) + assert sorted(map(list, permutations(DG.nodes))) == sorted( + nx.all_topological_sorts(DG) + ) + + def test_all_topological_sorts_multigraph_1(self): + DG = nx.MultiDiGraph([(1, 2), (1, 2), (2, 3), (3, 4), (3, 5), (3, 5), (3, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == sorted( + [[1, 2, 3, 4, 5], [1, 2, 3, 5, 4]] + ) + + def test_all_topological_sorts_multigraph_2(self): + N = 9 + edges = [] + for i in range(1, N): + edges.extend([(i, i + 1)] * i) + DG = nx.MultiDiGraph(edges) + assert list(nx.all_topological_sorts(DG)) == [list(range(1, N + 1))] + + def test_ancestors(self): + G = nx.DiGraph() + ancestors = nx.algorithms.dag.ancestors + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert ancestors(G, 6) == {1, 2, 4, 5} + assert ancestors(G, 3) == {1, 4} + assert ancestors(G, 1) == set() + pytest.raises(nx.NetworkXError, ancestors, G, 8) + + def test_descendants(self): + G = nx.DiGraph() + descendants = nx.algorithms.dag.descendants + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert descendants(G, 1) == {2, 3, 6} + assert descendants(G, 4) == {2, 3, 5, 6} + assert descendants(G, 3) == set() + pytest.raises(nx.NetworkXError, descendants, G, 8) + + def test_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), 
(2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = [(1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"f": "b", "weight": i}) for i in range(k)) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.transitive_closure(G, reflexive="wrong input") + + def test_reflexive_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = sorted([(1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)]) + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, False).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, None).edges()), solution) + assert edges_equal(sorted(nx.transitive_closure(G, True).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, 
True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + def test_transitive_closure_dag(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + transitive_closure = nx.algorithms.dag.transitive_closure_dag + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_closure, G) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"foo": "bar", "weight": i}) for i in range(k)) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + def test_transitive_reduction(self): + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (3, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (2, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_reduction, G) + + def _check_antichains(self, solution, result): + sol = [frozenset(a) for a in solution] + res = [frozenset(a) for a in result] + assert set(sol) == set(res) + + def test_antichains(self): + antichains = nx.algorithms.dag.antichains + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [[], [4], [3], [2], [1]] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)]) + solution = [ + [], + [4], + [7], + [7, 4], + [6], + [6, 4], + [6, 7], + [6, 7, 4], + [5], + [5, 4], + [3], + [3, 4], + [2], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (1, 3), (3, 4), (3, 5), (5, 6)]) + solution = [ + [], + [6], + [5], + [4], + [4, 6], + [4, 5], + [3], + [2], + [2, 6], + [2, 5], + [2, 4], + [2, 4, 6], + [2, 4, 5], + [2, 3], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph({0: [1, 2], 1: [4], 2: [3], 3: [4]}) + solution = [[], [4], [3], [2], [1], [1, 3], [1, 2], [0]] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph() + self._check_antichains(list(antichains(G)), [[]]) + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + solution = [[], [0], [1], [1, 0], [2], [2, 0], [2, 1], [2, 1, 0]] + self._check_antichains(list(antichains(G)), solution) + + def f(x): + return list(antichains(x)) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, f, G) + G = nx.DiGraph([(1, 2), 
(2, 3), (3, 1)]) + pytest.raises(nx.NetworkXUnfeasible, f, G) + + def test_lexicographical_topological_sort(self): + G = nx.DiGraph([(1, 2), (2, 3), (1, 4), (1, 5), (2, 6)]) + assert list(nx.lexicographical_topological_sort(G)) == [1, 2, 3, 4, 5, 6] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: x)) == [ + 1, + 2, + 3, + 4, + 5, + 6, + ] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: -x)) == [ + 1, + 5, + 4, + 2, + 6, + 3, + ] + + def test_lexicographical_topological_sort2(self): + """ + Check the case of two or more nodes with same key value. + Want to avoid exception raised due to comparing nodes directly. + See Issue #3493 + """ + + class Test_Node: + def __init__(self, n): + self.label = n + self.priority = 1 + + def __repr__(self): + return f"Node({self.label})" + + def sorting_key(node): + return node.priority + + test_nodes = [Test_Node(n) for n in range(4)] + G = nx.DiGraph() + edges = [(0, 1), (0, 2), (0, 3), (2, 3)] + G.add_edges_from((test_nodes[a], test_nodes[b]) for a, b in edges) + + sorting = list(nx.lexicographical_topological_sort(G, key=sorting_key)) + assert sorting == test_nodes + + +def test_topological_generations(): + G = nx.DiGraph( + {1: [2, 3], 2: [4, 5], 3: [7], 4: [], 5: [6, 7], 6: [], 7: []} + ).reverse() + # order within each generation is inconsequential + generations = [sorted(gen) for gen in nx.topological_generations(G)] + expected = [[4, 6, 7], [3, 5], [2], [1]] + assert generations == expected + + MG = nx.MultiDiGraph(G.edges) + MG.add_edge(2, 1) + generations = [sorted(gen) for gen in nx.topological_generations(MG)] + assert generations == expected + + +def test_topological_generations_empty(): + G = nx.DiGraph() + assert list(nx.topological_generations(G)) == [] + + +def test_topological_generations_cycle(): + G = nx.DiGraph([[2, 1], [3, 1], [1, 2]]) + with pytest.raises(nx.NetworkXUnfeasible): + list(nx.topological_generations(G)) + + +def test_is_aperiodic_cycle(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle2(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6, 7]) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle3(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle4(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 3) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_selfloop(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 1) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_raise(): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.is_aperiodic, G) + + +def test_is_aperiodic_bipartite(): + # Bipartite graph + G = nx.DiGraph(nx.davis_southern_women_graph()) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_rary_tree(): + G = nx.full_rary_tree(3, 27, create_using=nx.DiGraph()) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_disconnected(): + # disconnected graph + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [5, 6, 7, 8]) + assert not nx.is_aperiodic(G) + G.add_edge(1, 3) + G.add_edge(5, 7) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_disconnected2(): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2]) + G.add_edge(3, 3) + assert not nx.is_aperiodic(G) + + +class TestDagToBranching: + """Unit tests for the :func:`networkx.dag_to_branching` function.""" + + def test_single_root(self): + """Tests that 
a directed acyclic graph with a single degree + zero node produces an arborescence. + + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4)]) + assert nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + def test_multiple_roots(self): + """Tests that a directed acyclic graph with multiple degree zero + nodes creates an arborescence with multiple (weakly) connected + components. + + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3), (5, 2)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4), (5, 6), (6, 7)]) + assert nx.is_branching(B) + assert not nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + # # Attributes are not copied by this function. If they were, this would + # # be a good test to uncomment. + # def test_copy_attributes(self): + # """Tests that node attributes are copied in the branching.""" + # G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + # for v in G: + # G.node[v]['label'] = str(v) + # B = nx.dag_to_branching(G) + # # Determine the root node of the branching. + # root = next(v for v, d in B.in_degree() if d == 0) + # assert_equal(B.node[root]['label'], '0') + # children = B[root] + # # Get the left and right children, nodes 1 and 2, respectively. + # left, right = sorted(children, key=lambda v: B.node[v]['label']) + # assert_equal(B.node[left]['label'], '1') + # assert_equal(B.node[right]['label'], '2') + # # Get the left grandchild. + # children = B[left] + # assert_equal(len(children), 1) + # left_grandchild = arbitrary_element(children) + # assert_equal(B.node[left_grandchild]['label'], '3') + # # Get the right grandchild. + # children = B[right] + # assert_equal(len(children), 1) + # right_grandchild = arbitrary_element(children) + # assert_equal(B.node[right_grandchild]['label'], '3') + + def test_already_arborescence(self): + """Tests that a directed acyclic graph that is already an + arborescence produces an isomorphic arborescence as output. + + """ + A = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + B = nx.dag_to_branching(A) + assert nx.is_isomorphic(A, B) + + def test_already_branching(self): + """Tests that a directed acyclic graph that is already a + branching produces an isomorphic branching as output. 
+ + """ + T1 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + T2 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + G = nx.disjoint_union(T1, T2) + B = nx.dag_to_branching(G) + assert nx.is_isomorphic(G, B) + + def test_not_acyclic(self): + """Tests that a non-acyclic graph causes an exception.""" + with pytest.raises(nx.HasACycle): + G = nx.DiGraph(pairwise("abc", cyclic=True)) + nx.dag_to_branching(G) + + def test_undirected(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.Graph()) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiGraph()) + + def test_multidigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiDiGraph()) + + +def test_ancestors_descendants_undirected(): + """Regression test to ensure anscestors and descendants work as expected on + undirected graphs.""" + G = nx.path_graph(5) + nx.ancestors(G, 2) == nx.descendants(G, 2) == {0, 1, 3, 4} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_measures.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_measures.py new file mode 100644 index 0000000..d7cec15 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_measures.py @@ -0,0 +1,261 @@ +from random import Random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.distance_measures import _extrema_bounding + + +@pytest.mark.parametrize( + "compute", ("diameter", "radius", "periphery", "center", "eccentricities") +) +def test_extrema_bounding_deprecated(compute): + G = nx.complete_graph(3) + with pytest.deprecated_call(): + nx.extrema_bounding(G, compute=compute) + + +def test__extrema_bounding_invalid_compute_kwarg(): + G = nx.path_graph(3) + with pytest.raises(ValueError, match="compute must be one of"): + _extrema_bounding(G, compute="spam") + + +class TestDistance: + def setup_method(self): + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + self.G = G + + def test_eccentricity(self): + assert nx.eccentricity(self.G, 1) == 6 + e = nx.eccentricity(self.G) + assert e[1] == 6 + + sp = dict(nx.shortest_path_length(self.G)) + e = nx.eccentricity(self.G, sp=sp) + assert e[1] == 6 + + e = nx.eccentricity(self.G, v=1) + assert e == 6 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1]) + assert e[1] == 6 + e = nx.eccentricity(self.G, v=[1, 2]) + assert e[1] == 6 + + # test against graph with one node + G = nx.path_graph(1) + e = nx.eccentricity(G) + assert e[0] == 0 + e = nx.eccentricity(G, v=0) + assert e == 0 + pytest.raises(nx.NetworkXError, nx.eccentricity, G, 1) + + # test against empty graph + G = nx.empty_graph() + e = nx.eccentricity(G) + assert e == {} + + def test_diameter(self): + assert nx.diameter(self.G) == 6 + + def test_radius(self): + assert nx.radius(self.G) == 4 + + def test_periphery(self): + assert set(nx.periphery(self.G)) == {1, 4, 13, 16} + + def test_center(self): + assert set(nx.center(self.G)) == {6, 7, 10, 11} + + def test_bound_diameter(self): + assert nx.diameter(self.G, usebounds=True) == 6 + + def test_bound_radius(self): + assert nx.radius(self.G, usebounds=True) == 4 + + def test_bound_periphery(self): + result = {1, 4, 13, 16} + assert set(nx.periphery(self.G, usebounds=True)) == result + + def test_bound_center(self): + result = {6, 7, 10, 11} + assert 
set(nx.center(self.G, usebounds=True)) == result + + def test_radius_exception(self): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(3, 4) + pytest.raises(nx.NetworkXError, nx.diameter, G) + + def test_eccentricity_infinite(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G) + + def test_eccentricity_undirected_not_connected(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G, sp=1) + + def test_eccentricity_directed_weakly_connected(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2), (1, 3)]) + nx.eccentricity(DG) + + +class TestResistanceDistance: + @classmethod + def setup_class(cls): + global np + global sp + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def setup_method(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + self.G = G + + def test_resistance_distance(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + 4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_noinv(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", False) + test_data = 1 / (1 / (1 / 2 + 1 / 4) + 1 / (1 / 1 + 1 / 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_no_weight(self): + rd = nx.resistance_distance(self.G, 1, 3) + assert round(rd, 5) == 1 + + def test_resistance_distance_neg_weight(self): + self.G[2][3]["weight"] = -4 + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + -4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + rd = nx.resistance_distance(G, 1, 3, "weight", True) + assert np.isclose(rd, 1 / (1 / (2 + 4) + 1 / (1 + 3))) + + def test_resistance_distance_div0(self): + with pytest.raises(ZeroDivisionError): + self.G[1][2]["weight"] = 0 + nx.resistance_distance(self.G, 1, 3, "weight") + + def test_resistance_distance_not_connected(self): + with pytest.raises(nx.NetworkXError): + self.G.add_node(5) + nx.resistance_distance(self.G, 1, 5) + + def test_resistance_distance_same_node(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 1, 1) + + def test_resistance_distance_nodeA_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 9, 1) + + def test_resistance_distance_nodeB_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 1, 9) + + +class TestBarycenter: + """Test :func:`networkx.algorithms.distance_measures.barycenter`.""" + + def barycenter_as_subgraph(self, g, **kwargs): + """Return the subgraph induced on the barycenter of g""" + b = nx.barycenter(g, **kwargs) + assert isinstance(b, list) + assert set(b) <= set(g) + return g.subgraph(b) + + def test_must_be_connected(self): + pytest.raises(nx.NetworkXNoPath, nx.barycenter, nx.empty_graph(5)) + + def test_sp_kwarg(self): + # Complete graph K_5. Normally it works... 
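+ # When sp= is given, barycenter uses the supplied dict instead of
+ # recomputing shortest paths, which is what the corrupted-sp case exploits.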
+ K_5 = nx.complete_graph(5) + sp = dict(nx.shortest_path_length(K_5)) + assert nx.barycenter(K_5, sp=sp) == list(K_5) + + # ...but not with the weight argument + for u, v, data in K_5.edges.data(): + data["weight"] = 1 + pytest.raises(ValueError, nx.barycenter, K_5, sp=sp, weight="weight") + + # ...and a corrupted sp can make it seem like K_5 is disconnected + del sp[0][1] + pytest.raises(nx.NetworkXNoPath, nx.barycenter, K_5, sp=sp) + + def test_trees(self): + """The barycenter of a tree is a single vertex or an edge. + + See [West01]_, p. 78. + """ + prng = Random(0xDEADBEEF) + for i in range(50): + RT = nx.random_tree(prng.randint(1, 75), prng) + b = self.barycenter_as_subgraph(RT) + if len(b) == 2: + assert b.size() == 1 + else: + assert len(b) == 1 + assert b.size() == 0 + + def test_this_one_specific_tree(self): + """Test the tree pictured at the bottom of [West01]_, p. 78.""" + g = nx.Graph( + { + "a": ["b"], + "b": ["a", "x"], + "x": ["b", "y"], + "y": ["x", "z"], + "z": ["y", 0, 1, 2, 3, 4], + 0: ["z"], + 1: ["z"], + 2: ["z"], + 3: ["z"], + 4: ["z"], + } + ) + b = self.barycenter_as_subgraph(g, attr="barycentricity") + assert list(b) == ["z"] + assert not b.edges + expected_barycentricity = { + 0: 23, + 1: 23, + 2: 23, + 3: 23, + 4: 23, + "a": 35, + "b": 27, + "x": 21, + "y": 17, + "z": 15, + } + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity"] == barycentricity + + # Doubling weights should do nothing but double the barycentricities + for edge in g.edges: + g.edges[edge]["weight"] = 2 + b = self.barycenter_as_subgraph(g, weight="weight", attr="barycentricity2") + assert list(b) == ["z"] + assert not b.edges + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity2"] == barycentricity * 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_regular.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_regular.py new file mode 100644 index 0000000..d336b18 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_distance_regular.py @@ -0,0 +1,66 @@ +import networkx as nx +from networkx import is_strongly_regular + + +class TestDistanceRegular: + def test_is_distance_regular(self): + assert nx.is_distance_regular(nx.icosahedral_graph()) + assert nx.is_distance_regular(nx.petersen_graph()) + assert nx.is_distance_regular(nx.cubical_graph()) + assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3)) + assert nx.is_distance_regular(nx.tetrahedral_graph()) + assert nx.is_distance_regular(nx.dodecahedral_graph()) + assert nx.is_distance_regular(nx.pappus_graph()) + assert nx.is_distance_regular(nx.heawood_graph()) + assert nx.is_distance_regular(nx.cycle_graph(3)) + # no distance regular + assert not nx.is_distance_regular(nx.path_graph(4)) + + def test_not_connected(self): + G = nx.cycle_graph(4) + nx.add_cycle(G, [5, 6, 7]) + assert not nx.is_distance_regular(G) + + def test_global_parameters(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)] + b, c = nx.intersection_array(nx.cycle_graph(3)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 1, 0)] + + def test_intersection_array(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + assert b == [2, 1] + assert c == [1, 1] + b, c = nx.intersection_array(nx.dodecahedral_graph()) + assert b == [3, 2, 1, 1, 1] + assert c == [1, 1, 1, 
2, 3] + b, c = nx.intersection_array(nx.icosahedral_graph()) + assert b == [5, 2, 1] + assert c == [1, 2, 5] + + +class TestStronglyRegular: + """Unit tests for the :func:`~networkx.is_strongly_regular` + function. + + """ + + def test_cycle_graph(self): + """Tests that the cycle graph on five vertices is strongly + regular. + + """ + G = nx.cycle_graph(5) + assert is_strongly_regular(G) + + def test_petersen_graph(self): + """Tests that the Petersen graph is strongly regular.""" + G = nx.petersen_graph() + assert is_strongly_regular(G) + + def test_path_graph(self): + """Tests that the path graph is not strongly regular.""" + G = nx.path_graph(4) + assert not is_strongly_regular(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominance.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominance.py new file mode 100644 index 0000000..a8d0882 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominance.py @@ -0,0 +1,285 @@ +import pytest + +import networkx as nx + + +class TestImmediateDominators: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.immediate_dominators, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.immediate_dominators(G, 0) == {0: 0} + G.add_edge(0, 0) + assert nx.immediate_dominators(G, 0) == {0: 0} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, n // 2) == { + i: max(i - 1, n // 2) for i in range(n // 2, n) + } + + def test_irreducible1(self): + # Graph taken from Figure 2 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert nx.immediate_dominators(G, 5) == {i: 5 for i in range(1, 6)} + + def test_irreducible2(self): + # Graph taken from Figure 4 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 6) + assert result == {i: 6 for i in range(1, 7)} + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 1) + assert result == {1: 1, 2: 1, 3: 2, 4: 2, 5: 2, 6: 2} + # Test postdominance. 
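+ # Postdominators of G are the dominators of the reversed graph,
+ # computed here from the exit node 6.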
+ result = nx.immediate_dominators(G.reverse(copy=False), 6) + assert result == {1: 2, 2: 6, 3: 5, 4: 5, 5: 2, 6: 6} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 0) + assert result == {0: 0, 1: 0, 2: 1, 3: 1, 4: 3, 5: 4, 6: 4, 7: 1} + # Test postdominance. + result = nx.immediate_dominators(G.reverse(copy=False), 7) + assert result == {0: 1, 1: 7, 2: 7, 3: 4, 4: 5, 5: 7, 6: 4, 7: 7} + + +class TestDominanceFrontiers: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.dominance_frontiers, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + G.add_edge(0, 0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, n // 2) == {i: set() for i in range(n // 2, n)} + + def test_irreducible1(self): + # Graph taken from Figure 2 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert {u: df for u, df in nx.dominance_frontiers(G, 5).items()} == { + 1: {2}, + 2: {1}, + 3: {2}, + 4: {1}, + 5: set(), + } + + def test_irreducible2(self): + # Graph taken from Figure 4 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 6) == { + 1: {2}, + 2: {1, 3}, + 3: {2}, + 4: {2, 3}, + 5: {1}, + 6: set(), + } + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 1) == { + 1: set(), + 2: {2}, + 3: {5}, + 4: {5}, + 5: {2}, + 6: set(), + } + # Test postdominance. + result = nx.dominance_frontiers(G.reverse(copy=False), 6) + assert result == {1: set(), 2: {2}, 3: {2}, 4: {2}, 5: {2}, 6: set()} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 0) == { + 0: set(), + 1: set(), + 2: {7}, + 3: {7}, + 4: {4, 7}, + 5: {7}, + 6: {4}, + 7: set(), + } + # Test postdominance. 
+ result = nx.dominance_frontiers(G.reverse(copy=False), 7) + expected = { + 0: set(), + 1: set(), + 2: {1}, + 3: {1}, + 4: {1, 4}, + 5: {1}, + 6: {4}, + 7: set(), + } + assert result == expected + + def test_discard_issue(self): + # https://github.com/networkx/networkx/issues/2071 + g = nx.DiGraph() + g.add_edges_from( + [ + ("b0", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "b1"), + ("b1", "b5"), + ("b5", "b6"), + ("b5", "b8"), + ("b6", "b7"), + ("b8", "b7"), + ("b7", "b3"), + ("b3", "b4"), + ] + ) + df = nx.dominance_frontiers(g, "b0") + assert df == { + "b4": set(), + "b5": {"b3"}, + "b6": {"b7"}, + "b7": {"b3"}, + "b0": set(), + "b1": {"b1"}, + "b2": {"b3"}, + "b3": {"b1"}, + "b8": {"b7"}, + } + + def test_loop(self): + g = nx.DiGraph() + g.add_edges_from([("a", "b"), ("b", "c"), ("b", "a")]) + df = nx.dominance_frontiers(g, "a") + assert df == {"a": set(), "b": set(), "c": set()} + + def test_missing_immediate_doms(self): + # see https://github.com/networkx/networkx/issues/2070 + g = nx.DiGraph() + edges = [ + ("entry_1", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "exit"), + ("entry_2", "b3"), + ] + + # entry_1 + # | + # b1 + # | + # b2 entry_2 + # | / + # b3 + # | + # exit + + g.add_edges_from(edges) + # formerly raised KeyError on entry_2 when parsing b3 + # because entry_2 does not have immediate doms (no path) + nx.dominance_frontiers(g, "entry_1") + + def test_loops_larger(self): + # from + # http://ecee.colorado.edu/~waite/Darmstadt/motion.html + g = nx.DiGraph() + edges = [ + ("entry", "exit"), + ("entry", "1"), + ("1", "2"), + ("2", "3"), + ("3", "4"), + ("4", "5"), + ("5", "6"), + ("6", "exit"), + ("6", "2"), + ("5", "3"), + ("4", "4"), + ] + + g.add_edges_from(edges) + df = nx.dominance_frontiers(g, "entry") + answer = { + "entry": set(), + "1": {"exit"}, + "2": {"exit", "2"}, + "3": {"exit", "3", "2"}, + "4": {"exit", "4", "3", "2"}, + "5": {"exit", "3", "2"}, + "6": {"exit", "2"}, + "exit": set(), + } + for n in df: + assert set(df[n]) == set(answer[n]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominating.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominating.py new file mode 100644 index 0000000..b945c73 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_dominating.py @@ -0,0 +1,46 @@ +import pytest + +import networkx as nx + + +def test_dominating_set(): + G = nx.gnp_random_graph(100, 0.1) + D = nx.dominating_set(G) + assert nx.is_dominating_set(G, D) + D = nx.dominating_set(G, start_with=0) + assert nx.is_dominating_set(G, D) + + +def test_complete(): + """In complete graphs each node is a dominating set. + Thus the dominating set has to be of cardinality 1. 
+ """ + K4 = nx.complete_graph(4) + assert len(nx.dominating_set(K4)) == 1 + K5 = nx.complete_graph(5) + assert len(nx.dominating_set(K5)) == 1 + + +def test_raise_dominating_set(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + D = nx.dominating_set(G, start_with=10) + + +def test_is_dominating_set(): + G = nx.path_graph(4) + d = {1, 3} + assert nx.is_dominating_set(G, d) + d = {0, 2} + assert nx.is_dominating_set(G, d) + d = {1} + assert not nx.is_dominating_set(G, d) + + +def test_wikipedia_is_dominating_set(): + """Example from https://en.wikipedia.org/wiki/Dominating_set""" + G = nx.cycle_graph(4) + G.add_edges_from([(0, 4), (1, 4), (2, 5)]) + assert nx.is_dominating_set(G, {4, 3, 5}) + assert nx.is_dominating_set(G, {0, 2}) + assert nx.is_dominating_set(G, {1, 2}) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_efficiency.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_efficiency.py new file mode 100644 index 0000000..9a2e7d0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_efficiency.py @@ -0,0 +1,58 @@ +"""Unit tests for the :mod:`networkx.algorithms.efficiency` module.""" + +import networkx as nx + + +class TestEfficiency: + def setup_method(self): + # G1 is a disconnected graph + self.G1 = nx.Graph() + self.G1.add_nodes_from([1, 2, 3]) + # G2 is a cycle graph + self.G2 = nx.cycle_graph(4) + # G3 is the triangle graph with one additional edge + self.G3 = nx.lollipop_graph(3, 1) + + def test_efficiency_disconnected_nodes(self): + """ + When nodes are disconnected, efficiency is 0 + """ + assert nx.efficiency(self.G1, 1, 2) == 0 + + def test_local_efficiency_disconnected_graph(self): + """ + In a disconnected graph the efficiency is 0 + """ + assert nx.local_efficiency(self.G1) == 0 + + def test_efficiency(self): + assert nx.efficiency(self.G2, 0, 1) == 1 + assert nx.efficiency(self.G2, 0, 2) == 1 / 2 + + def test_global_efficiency(self): + assert nx.global_efficiency(self.G2) == 5 / 6 + + def test_global_efficiency_complete_graph(self): + """ + Tests that the average global efficiency of the complete graph is one. + """ + for n in range(2, 10): + G = nx.complete_graph(n) + assert nx.global_efficiency(G) == 1 + + def test_local_efficiency_complete_graph(self): + """ + Test that the local efficiency for a complete graph with at least 3 + nodes should be one. For a graph with only 2 nodes, the induced + subgraph has no edges. + """ + for n in range(3, 10): + G = nx.complete_graph(n) + assert nx.local_efficiency(G) == 1 + + def test_using_ego_graph(self): + """ + Test that the ego graph is used when computing local efficiency. + For more information, see GitHub issue #2710. 
+ """ + assert nx.local_efficiency(self.G3) == 7 / 12 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_euler.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_euler.py new file mode 100644 index 0000000..7dfe2d1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_euler.py @@ -0,0 +1,275 @@ +import collections + +import pytest + +import networkx as nx + + +class TestIsEulerian: + def test_is_eulerian(self): + assert nx.is_eulerian(nx.complete_graph(5)) + assert nx.is_eulerian(nx.complete_graph(7)) + assert nx.is_eulerian(nx.hypercube_graph(4)) + assert nx.is_eulerian(nx.hypercube_graph(6)) + + assert not nx.is_eulerian(nx.complete_graph(4)) + assert not nx.is_eulerian(nx.complete_graph(6)) + assert not nx.is_eulerian(nx.hypercube_graph(3)) + assert not nx.is_eulerian(nx.hypercube_graph(5)) + + assert not nx.is_eulerian(nx.petersen_graph()) + assert not nx.is_eulerian(nx.path_graph(4)) + + def test_is_eulerian2(self): + # not connected + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + # not strongly connected + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + G = nx.MultiDiGraph() + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(2, 3) + G.add_edge(3, 1) + assert not nx.is_eulerian(G) + + +class TestEulerianCircuit: + def test_eulerian_circuit_cycle(self): + G = nx.cycle_graph(4) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 3, 2, 1] + assert edges == [(0, 3), (3, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 3, 0] + assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)] + + G = nx.complete_graph(3) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 2, 1] + assert edges == [(0, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 0] + assert edges == [(1, 2), (2, 0), (0, 1)] + + def test_eulerian_circuit_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 1, 2, 3] + assert edges == [(0, 1), (1, 2), (2, 3), (3, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 3, 0] + assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)] + + def test_multigraph(self): + G = nx.MultiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + G.add_edge(1, 2) + G.add_edge(1, 2) + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 3, 2, 1, 2, 1] + assert edges == [(0, 3), (3, 2), (2, 1), (1, 2), (2, 1), (1, 0)] + + def test_multigraph_with_keys(self): + G = nx.MultiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + G.add_edge(1, 2) + G.add_edge(1, 2) + edges = list(nx.eulerian_circuit(G, source=0, keys=True)) + nodes = [u for u, v, k in edges] + assert nodes == [0, 3, 2, 1, 2, 1] + assert edges[:2] == [(0, 3, 0), (3, 2, 0)] + assert collections.Counter(edges[2:5]) == collections.Counter( + [(2, 1, 0), (1, 2, 1), (2, 1, 2)] + ) + assert edges[5:] == [(1, 0, 0)] + + def test_not_eulerian(self): + with pytest.raises(nx.NetworkXError): + f = list(nx.eulerian_circuit(nx.complete_graph(4))) + + +class TestIsSemiEulerian: + def test_is_semieulerian(self): + # Test graphs with Eulerian paths but no cycles return True. 
+        assert nx.is_semieulerian(nx.path_graph(4))
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert nx.is_semieulerian(G)
+
+        # Test graphs with Eulerian cycles return False.
+        assert not nx.is_semieulerian(nx.complete_graph(5))
+        assert not nx.is_semieulerian(nx.complete_graph(7))
+        assert not nx.is_semieulerian(nx.hypercube_graph(4))
+        assert not nx.is_semieulerian(nx.hypercube_graph(6))
+
+
+class TestHasEulerianPath:
+    def test_has_eulerian_path_cyclic(self):
+        # Test graphs with Eulerian cycles return True.
+        assert nx.has_eulerian_path(nx.complete_graph(5))
+        assert nx.has_eulerian_path(nx.complete_graph(7))
+        assert nx.has_eulerian_path(nx.hypercube_graph(4))
+        assert nx.has_eulerian_path(nx.hypercube_graph(6))
+
+    def test_has_eulerian_path_non_cyclic(self):
+        # Test graphs with Eulerian paths but no cycles return True.
+        assert nx.has_eulerian_path(nx.path_graph(4))
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert nx.has_eulerian_path(G)
+
+    def test_has_eulerian_path_directed_graph(self):
+        # Test directed graphs and returns False
+        G = nx.DiGraph()
+        G.add_edges_from([(0, 1), (1, 2), (0, 2)])
+        assert not nx.has_eulerian_path(G)
+
+        # Test directed graphs without isolated node returns True
+        G = nx.DiGraph()
+        G.add_edges_from([(0, 1), (1, 2), (2, 0)])
+        assert nx.has_eulerian_path(G)
+
+        # Test directed graphs with isolated node returns False
+        G.add_node(3)
+        assert not nx.has_eulerian_path(G)
+
+    @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
+    def test_has_eulerian_path_not_weakly_connected(self, G):
+        G.add_edges_from([(0, 1), (2, 3), (3, 2)])
+        assert not nx.has_eulerian_path(G)
+
+    @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
+    def test_has_eulerian_path_unbalancedins_more_than_one(self, G):
+        G.add_edges_from([(0, 1), (2, 3)])
+        assert not nx.has_eulerian_path(G)
+
+
+class TestFindPathStart:
+    def testfind_path_start(self):
+        find_path_start = nx.algorithms.euler._find_path_start
+        # Test digraphs return correct starting node.
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert find_path_start(G) == 0
+        edges = [(0, 1), (1, 2), (2, 0), (4, 0)]
+        assert find_path_start(nx.DiGraph(edges)) == 4
+
+        # Test graph with no Eulerian path return None.
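+        # In the digraph below both 0 and 2 have out-degree exceeding in-degree
+        # by one; a directed Eulerian path allows at most one such node, so there
+        # is no valid starting node and the helper is expected to return None.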
+ edges = [(0, 1), (1, 2), (2, 3), (2, 4)] + assert find_path_start(nx.DiGraph(edges)) is None + + +class TestEulerianPath: + def test_eulerian_path(self): + x = [(4, 0), (0, 1), (1, 2), (2, 0)] + for e1, e2 in zip(x, nx.eulerian_path(nx.DiGraph(x))): + assert e1 == e2 + + def test_eulerian_path_straight_link(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=5)) + + def test_eulerian_path_multigraph(self): + G = nx.MultiDiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4), (4, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + + def test_eulerian_path_eulerian_circuit(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 1)] + result2 = [(2, 3), (3, 4), (4, 1), (1, 2)] + result3 = [(3, 4), (4, 1), (1, 2), (2, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=2)) + assert result3 == list(nx.eulerian_path(G, source=3)) + + def test_eulerian_path_undirected(self): + G = nx.Graph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + result2 = [(5, 4), (4, 3), (3, 2), (2, 1)] + G.add_edges_from(result) + assert list(nx.eulerian_path(G)) in (result, result2) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=5)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=2)) + + def test_eulerian_path_multigraph_undirected(self): + G = nx.MultiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=1)) + + +class TestEulerize: + def test_disconnected(self): + with pytest.raises(nx.NetworkXError): + G = nx.from_edgelist([(0, 1), (2, 3)]) + nx.eulerize(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.Graph()) + + def test_null_multigraph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.MultiGraph()) + + def test_on_empty_graph(self): + with pytest.raises(nx.NetworkXError): + nx.eulerize(nx.empty_graph(3)) + + def test_on_eulerian(self): + G = nx.cycle_graph(3) + H = nx.eulerize(G) + assert nx.is_isomorphic(G, H) + + def test_on_eulerian_multigraph(self): + G = nx.MultiGraph(nx.cycle_graph(3)) + G.add_edge(0, 1) + H = nx.eulerize(G) + assert nx.is_eulerian(H) + + def test_on_complete_graph(self): + G = nx.complete_graph(4) + assert nx.is_eulerian(nx.eulerize(G)) + assert nx.is_eulerian(nx.eulerize(nx.MultiGraph(G))) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graph_hashing.py 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graph_hashing.py
new file mode 100644
index 0000000..cffa8bb
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graph_hashing.py
@@ -0,0 +1,657 @@
+import pytest
+
+import networkx as nx
+from networkx.generators import directed
+
+# Unit tests for the :func:`~networkx.weisfeiler_lehman_graph_hash` function
+
+
+def test_empty_graph_hash():
+    """
+    empty graphs should give hashes regardless of other params
+    """
+    G1 = nx.empty_graph()
+    G2 = nx.empty_graph()
+
+    h1 = nx.weisfeiler_lehman_graph_hash(G1)
+    h2 = nx.weisfeiler_lehman_graph_hash(G2)
+    h3 = nx.weisfeiler_lehman_graph_hash(G2, edge_attr="edge_attr1")
+    h4 = nx.weisfeiler_lehman_graph_hash(G2, node_attr="node_attr1")
+    h5 = nx.weisfeiler_lehman_graph_hash(
+        G2, edge_attr="edge_attr1", node_attr="node_attr1"
+    )
+    h6 = nx.weisfeiler_lehman_graph_hash(G2, iterations=10)
+
+    assert h1 == h2
+    assert h1 == h3
+    assert h1 == h4
+    assert h1 == h5
+    assert h1 == h6
+
+
+def test_directed():
+    """
+    A directed graph with no bi-directional edges should yield a different graph
+    hash to the same graph taken as undirected if there are no hash collisions.
+    """
+    r = 10
+    for i in range(r):
+        G_directed = nx.gn_graph(10 + r, seed=100 + i)
+        G_undirected = nx.to_undirected(G_directed)
+
+        h_directed = nx.weisfeiler_lehman_graph_hash(G_directed)
+        h_undirected = nx.weisfeiler_lehman_graph_hash(G_undirected)
+
+        assert h_directed != h_undirected
+
+
+def test_reversed():
+    """
+    A directed graph with no bi-directional edges should yield a different graph
+    hash to the same graph taken with edge directions reversed if there are no
+    hash collisions. Here we test a cycle graph, which is the minimal counterexample.
+    """
+    G = nx.cycle_graph(5, create_using=nx.DiGraph)
+    nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label")
+
+    G_reversed = G.reverse()
+
+    h = nx.weisfeiler_lehman_graph_hash(G, node_attr="label")
+    h_reversed = nx.weisfeiler_lehman_graph_hash(G_reversed, node_attr="label")
+
+    assert h != h_reversed
+
+
+def test_isomorphic():
+    """
+    graph hashes should be invariant to node-relabeling (when the output is reindexed
+    by the same mapping)
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i)
+        G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()})
+
+        g1_hash = nx.weisfeiler_lehman_graph_hash(G1)
+        g2_hash = nx.weisfeiler_lehman_graph_hash(G2)
+
+        assert g1_hash == g2_hash
+
+
+def test_isomorphic_edge_attr():
+    """
+    Isomorphic graphs with differing edge attributes should yield different graph
+    hashes if the 'edge_attr' argument is supplied and populated in the graph,
+    and there are no hash collisions.
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_graph_hash(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == g2_hash_with_edge_attr1 + assert g1_hash_with_edge_attr2 == g2_hash_with_edge_attr2 + + +def test_missing_edge_attr(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, edge_attr="edge_attr1") + + +def test_isomorphic_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + there are no hash collisions. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_graph_hash(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == g2_hash_with_node_attr1 + assert g1_hash_with_node_attr2 == g2_hash_with_node_attr2 + + +def test_missing_node_attr(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, node_attr="node_attr1") + + +def test_isomorphic_edge_attr_and_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and there are no hash collisions. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_graph_hash(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == g2_hash_edge1_node1 + assert g1_hash_edge2_node2 == g2_hash_edge2_node2 + + +def test_digest_size(): + """ + The hash string lengths should be as expected for a variety of graphs and + digest sizes + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i) + + h16 = nx.weisfeiler_lehman_graph_hash(G) + h32 = nx.weisfeiler_lehman_graph_hash(G, digest_size=32) + + assert h16 != h32 + assert len(h16) == 16 * 2 + assert len(h32) == 32 * 2 + + +# Unit tests for the :func:`~networkx.weisfeiler_lehman_hash_subgraphs` function + + +def is_subiteration(a, b): + """ + returns True if that each hash sequence in 'a' is a prefix for + the corresponding sequence indexed by the same node in 'b'. + """ + return all(b[node][: len(hashes)] == hashes for node, hashes in a.items()) + + +def hexdigest_sizes_correct(a, digest_size): + """ + returns True if all hex digest sizes are the expected length in a node:subgraph-hashes + dictionary. 
Hex digest string length == 2 * bytes digest length since each pair of hex
+    digits encodes 1 byte (https://docs.python.org/3/library/hashlib.html)
+    """
+    hexdigest_size = digest_size * 2
+    list_digest_sizes_correct = lambda l: all(len(x) == hexdigest_size for x in l)
+    return all(list_digest_sizes_correct(hashes) for hashes in a.values())
+
+
+def test_empty_graph_subgraph_hash():
+    """
+    empty graphs should give empty dict subgraph hashes regardless of other params
+    """
+    G = nx.empty_graph()
+
+    subgraph_hashes1 = nx.weisfeiler_lehman_subgraph_hashes(G)
+    subgraph_hashes2 = nx.weisfeiler_lehman_subgraph_hashes(G, edge_attr="edge_attr")
+    subgraph_hashes3 = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="edge_attr")
+    subgraph_hashes4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=2)
+    subgraph_hashes5 = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=64)
+
+    assert subgraph_hashes1 == {}
+    assert subgraph_hashes2 == {}
+    assert subgraph_hashes3 == {}
+    assert subgraph_hashes4 == {}
+    assert subgraph_hashes5 == {}
+
+
+def test_directed_subgraph_hash():
+    """
+    A directed graph with no bi-directional edges should yield different subgraph hashes
+    to the same graph taken as undirected, if all hashes don't collide.
+    """
+    r = 10
+    for i in range(r):
+        G_directed = nx.gn_graph(10 + r, seed=100 + i)
+        G_undirected = nx.to_undirected(G_directed)
+
+        directed_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_directed)
+        undirected_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_undirected)
+
+        assert directed_subgraph_hashes != undirected_subgraph_hashes
+
+
+def test_reversed_subgraph_hash():
+    """
+    A directed graph with no bi-directional edges should yield different subgraph hashes
+    to the same graph taken with edge directions reversed if there are no hash collisions.
+    Here we test a cycle graph which is the minimal counterexample
+    """
+    G = nx.cycle_graph(5, create_using=nx.DiGraph)
+    nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label")
+
+    G_reversed = G.reverse()
+
+    h = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="label")
+    h_reversed = nx.weisfeiler_lehman_subgraph_hashes(G_reversed, node_attr="label")
+
+    assert h != h_reversed
+
+
+def test_isomorphic_subgraph_hash():
+    """
+    the subgraph hashes should be invariant to node-relabeling when the output is reindexed
+    by the same mapping and all hashes don't collide.
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i)
+        G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()})
+
+        g1_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1)
+        g2_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2)
+
+        assert g1_subgraph_hashes == {-1 * k: v for k, v in g2_subgraph_hashes.items()}
+
+
+def test_isomorphic_edge_attr_subgraph_hash():
+    """
+    Isomorphic graphs with differing edge attributes should yield different subgraph
+    hashes if the 'edge_attr' argument is supplied and populated in the graph, and
+    all hashes don't collide.
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == { + -1 * k: v for k, v in g2_hash_with_edge_attr1.items() + } + assert g1_hash_with_edge_attr2 == { + -1 * k: v for k, v in g2_hash_with_edge_attr2.items() + } + + +def test_missing_edge_attr_subgraph_hash(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, edge_attr="edge_attr1" + ) + + +def test_isomorphic_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + all hashes don't collide. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == { + -1 * k: v for k, v in g2_hash_with_node_attr1.items() + } + assert g1_hash_with_node_attr2 == { + -1 * k: v for k, v in g2_hash_with_node_attr2.items() + } + + +def test_missing_node_attr_subgraph_hash(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, node_attr="node_attr1" + ) + + +def test_isomorphic_edge_attr_and_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and all hashes don't collide + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_subgraph_hashes(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == { + -1 * k: v for k, v in g2_hash_edge1_node1.items() + } + assert g1_hash_edge2_node2 == { + -1 * k: v for k, v in g2_hash_edge2_node2.items() + } + + +def test_iteration_depth(): + """ + All nodes should have the correct number of subgraph hashes in the output when + using degree as 
initial node labels + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=600 + i) + + depth3 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=3) + depth4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=4) + depth5 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=5) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels empty and using an edge attribute when aggregating + neighborhoods. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=700 + i) + + for a, b in G.edges: + G[a][b]["edge_attr1"] = f"{a}-{b}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=800 + i) + + for u in G.nodes(): + G.nodes[u]["node_attr1"] = f"{u}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute and also using an edge attribute when + aggregating neighborhoods. 
+ Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=900 + i) + + for u in G.nodes(): + G.nodes[u]["node_attr1"] = f"{u}-1" + + for a, b in G.edges: + G[a][b]["edge_attr1"] = f"{a}-{b}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_digest_size_subgraph_hash(): + """ + The hash string lengths should be as expected for a variety of graphs and + digest sizes + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i) + + digest_size16_hashes = nx.weisfeiler_lehman_subgraph_hashes(G) + digest_size32_hashes = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=32) + + assert digest_size16_hashes != digest_size32_hashes + + assert hexdigest_sizes_correct(digest_size16_hashes, 16) + assert hexdigest_sizes_correct(digest_size32_hashes, 32) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graphical.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graphical.py new file mode 100644 index 0000000..d55ac8c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_graphical.py @@ -0,0 +1,163 @@ +import pytest + +import networkx as nx + + +def test_valid_degree_sequence1(): + n = 100 + p = 0.3 + for i in range(10): + G = nx.erdos_renyi_graph(n, p) + deg = (d for n, d in G.degree()) + assert nx.is_graphical(deg, method="eg") + assert nx.is_graphical(deg, method="hh") + + +def test_valid_degree_sequence2(): + n = 100 + for i in range(10): + G = nx.barabasi_albert_graph(n, 1) + deg = (d for n, d in G.degree()) + assert nx.is_graphical(deg, method="eg") + assert nx.is_graphical(deg, method="hh") + + +def test_string_input(): + pytest.raises(nx.NetworkXException, nx.is_graphical, [], "foo") + pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "hh") + pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "eg") + + +def test_non_integer_input(): + pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "eg") + pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "hh") + + +def test_negative_input(): + assert not nx.is_graphical([-1], "hh") + assert not nx.is_graphical([-1], "eg") + + +class TestAtlas: + @classmethod + def setup_class(cls): + global atlas + import networkx.generators.atlas as atlas + + cls.GAG = atlas.graph_atlas_g() + + def test_atlas(self): + for graph in self.GAG: + deg = (d for n, d in graph.degree()) + assert nx.is_graphical(deg, method="eg") + assert nx.is_graphical(deg, method="hh") + + +def test_small_graph_true(): + z = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert nx.is_graphical(z, method="hh") + assert nx.is_graphical(z, method="eg") + z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2] + assert nx.is_graphical(z, method="hh") + assert nx.is_graphical(z, method="eg") + z = [1, 
1, 1, 1, 1, 2, 2, 2, 3, 4] + assert nx.is_graphical(z, method="hh") + assert nx.is_graphical(z, method="eg") + + +def test_small_graph_false(): + z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + z = [6, 5, 4, 4, 2, 1, 1, 1] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + + +def test_directed_degree_sequence(): + # Test a range of valid directed degree sequences + n, r = 100, 10 + p = 1.0 / r + for i in range(r): + G = nx.erdos_renyi_graph(n, p * (i + 1), None, True) + din = (d for n, d in G.in_degree()) + dout = (d for n, d in G.out_degree()) + assert nx.is_digraphical(din, dout) + + +def test_small_directed_sequences(): + dout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + din = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1] + assert nx.is_digraphical(din, dout) + # Test nongraphical directed sequence + dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102] + assert not nx.is_digraphical(din, dout) + # Test digraphical small sequence + dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1] + assert nx.is_digraphical(din, dout) + # Test nonmatching sum + din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1] + assert not nx.is_digraphical(din, dout) + # Test for negative integer in sequence + din = [2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4] + assert not nx.is_digraphical(din, dout) + # Test for noninteger + din = dout = [1, 1, 1.1, 1] + assert not nx.is_digraphical(din, dout) + din = dout = [1, 1, "rer", 1] + assert not nx.is_digraphical(din, dout) + + +def test_multi_sequence(): + # Test nongraphical multi sequence + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1] + assert not nx.is_multigraphical(seq) + # Test small graphical multi sequence + seq = [6, 5, 4, 4, 2, 1, 1, 1] + assert nx.is_multigraphical(seq) + # Test for negative integer in sequence + seq = [6, 5, 4, -4, 2, 1, 1, 1] + assert not nx.is_multigraphical(seq) + # Test for sequence with odd sum + seq = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + assert not nx.is_multigraphical(seq) + # Test for noninteger + seq = [1, 1, 1.1, 1] + assert not nx.is_multigraphical(seq) + seq = [1, 1, "rer", 1] + assert not nx.is_multigraphical(seq) + + +def test_pseudo_sequence(): + # Test small valid pseudo sequence + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1] + assert nx.is_pseudographical(seq) + # Test for sequence with odd sum + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for negative integer in sequence + seq = [1000, 3, 3, 3, 3, 2, 2, -2, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for noninteger + seq = [1, 1, 1.1, 1] + assert not nx.is_pseudographical(seq) + seq = [1, 1, "rer", 1] + assert not nx.is_pseudographical(seq) + + +def test_numpy_degree_sequence(): + np = pytest.importorskip("numpy") + ds = np.array([1, 2, 2, 2, 1], dtype=np.int64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1, 2, 2, 2, 1], dtype=np.float64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1.1, 2, 2, 2, 1], dtype=np.float64) + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "eg") + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "hh") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hierarchy.py 
b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hierarchy.py new file mode 100644 index 0000000..227c89c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hierarchy.py @@ -0,0 +1,39 @@ +import pytest + +import networkx as nx + + +def test_hierarchy_exception(): + G = nx.cycle_graph(5) + pytest.raises(nx.NetworkXError, nx.flow_hierarchy, G) + + +def test_hierarchy_cycle(): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 0.0 + + +def test_hierarchy_tree(): + G = nx.full_rary_tree(2, 16, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 1.0 + + +def test_hierarchy_1(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 1), (3, 4), (0, 4)]) + assert nx.flow_hierarchy(G) == 0.5 + + +def test_hierarchy_weight(): + G = nx.DiGraph() + G.add_edges_from( + [ + (0, 1, {"weight": 0.3}), + (1, 2, {"weight": 0.1}), + (2, 3, {"weight": 0.1}), + (3, 1, {"weight": 0.1}), + (3, 4, {"weight": 0.3}), + (0, 4, {"weight": 0.3}), + ] + ) + assert nx.flow_hierarchy(G, weight="weight") == 0.75 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hybrid.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hybrid.py new file mode 100644 index 0000000..6af0016 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_hybrid.py @@ -0,0 +1,24 @@ +import networkx as nx + + +def test_2d_grid_graph(): + # FC article claims 2d grid graph of size n is (3,3)-connected + # and (5,9)-connected, but I don't think it is (5,9)-connected + G = nx.grid_2d_graph(8, 8, periodic=True) + assert nx.is_kl_connected(G, 3, 3) + assert not nx.is_kl_connected(G, 5, 9) + (H, graphOK) = nx.kl_connected_subgraph(G, 5, 9, same_as_graph=True) + assert not graphOK + + +def test_small_graph(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + assert nx.is_kl_connected(G, 2, 2) + H = nx.kl_connected_subgraph(G, 2, 2) + (H, graphOK) = nx.kl_connected_subgraph( + G, 2, 2, low_memory=True, same_as_graph=True + ) + assert graphOK diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_isolate.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_isolate.py new file mode 100644 index 0000000..d29b306 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_isolate.py @@ -0,0 +1,26 @@ +"""Unit tests for the :mod:`networkx.algorithms.isolates` module.""" + +import networkx as nx + + +def test_is_isolate(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_node(2) + assert not nx.is_isolate(G, 0) + assert not nx.is_isolate(G, 1) + assert nx.is_isolate(G, 2) + + +def test_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert sorted(nx.isolates(G)) == [2, 3] + + +def test_number_of_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert nx.number_of_isolates(G) == 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_link_prediction.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_link_prediction.py new file mode 100644 index 0000000..7fc04d2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_link_prediction.py @@ -0,0 +1,582 @@ +import math +from functools import partial + +import pytest + +import networkx as nx + + +def _test_func(G, ebunch, expected, predict_func, **kwargs): + result = predict_func(G, ebunch, **kwargs) + exp_dict = {tuple(sorted([u, 
v])): score for u, v, score in expected} + res_dict = {tuple(sorted([u, v])): score for u, v, score in result} + + assert len(exp_dict) == len(res_dict) + for p in exp_dict: + assert exp_dict[p] == pytest.approx(res_dict[p], abs=1e-7) + + +class TestResourceAllocationIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.resource_allocation_index) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.75)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestJaccardCoefficient: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.jaccard_coefficient) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.6)]) + + def test_P4(self): + G = nx.path_graph(4) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (2, 3)]) + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_isolated_nodes(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestAdamicAdarIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.adamic_adar_index) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3 / math.log(4))]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1 / math.log(2))]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1 / math.log(4))]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], 
[(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 3 / math.log(3))]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test( + G, None, [(0, 3, 1 / math.log(2)), (1, 2, 1 / math.log(2)), (1, 3, 0)] + ) + + +class TestCommonNeighborCentrality: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.common_neighbor_centrality) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3.0)], alpha=1) + self.test(G, [(0, 1)], [(0, 1, 5.0)], alpha=0) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1.25)], alpha=0.5) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1.75)], alpha=0.5) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + assert pytest.raises(nx.NetworkXAlgorithmError, self.test, G, [(0, 0)], []) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 1.5), (1, 2, 1.5), (1, 3, 2 / 3)], alpha=0.5) + + +class TestPreferentialAttachment: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.preferential_attachment) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 16)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 1)], [(0, 1, 2)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(0, 2)], [(0, 2, 4)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_zero_degrees(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 2), (1, 2, 2), (1, 3, 1)]) + + +class TestCNSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.cn_soundarajan_hopcroft) + cls.test = partial(_test_func, predict_func=cls.func, community="community") + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 1)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 2)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 
2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 4)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 4)]) + + def test_custom_community_attribute_name(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 2), (1, 2, 1), (1, 3, 0)]) + + +class TestRAIndexSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.ra_index_soundarajan_hopcroft) + cls.test = partial(_test_func, predict_func=cls.func, community="community") + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 0.5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert 
pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 1)]) + + def test_custom_community_attribute_name(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 0.5), (1, 2, 0), (1, 3, 0)]) + + +class TestWithinInterCluster: + @classmethod + def setup_class(cls): + cls.delta = 0.001 + cls.func = staticmethod(nx.within_inter_cluster) + cls.test = partial( + _test_func, predict_func=cls.func, delta=cls.delta, community="community" + ) + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 2 / (1 + self.delta))]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 1 / self.delta)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert 
pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 2 / self.delta)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_inter_cluster_common_neighbor(self): + G = nx.complete_graph(4) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 2 / self.delta)]) + + def test_invalid_delta(self): + G = nx.complete_graph(3) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], 0) + assert pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], -0.5) + + def test_custom_community_attribute_name(self): + G = nx.complete_graph(4) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 1 / self.delta), (1, 2, 0), (1, 3, 0)]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py new file mode 100644 index 0000000..acb2336 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py @@ -0,0 +1,313 @@ +from itertools import chain, combinations, product + +import pytest + +import networkx as nx + +tree_all_pairs_lca = nx.tree_all_pairs_lowest_common_ancestor +all_pairs_lca = nx.all_pairs_lowest_common_ancestor + + +def get_pair(dictionary, n1, n2): + if (n1, n2) in dictionary: + return dictionary[n1, n2] + else: + return dictionary[n2, n1] + + +class TestTreeLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + 
gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + @staticmethod + def assert_has_same_pairs(d1, d2): + for (a, b) in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert get_pair(d1, a, b) == get_pair(d2, a, b) + + def test_tree_all_pairs_lowest_common_ancestor1(self): + """Specifying the root is optional.""" + assert dict(tree_all_pairs_lca(self.DG)) == self.ans + + def test_tree_all_pairs_lowest_common_ancestor2(self): + """Specifying only some pairs gives only those pairs.""" + test_pairs = [(0, 1), (0, 1), (1, 0)] + ans = dict(tree_all_pairs_lca(self.DG, 0, test_pairs)) + assert (0, 1) in ans and (1, 0) in ans + assert len(ans) == 2 + + def test_tree_all_pairs_lowest_common_ancestor3(self): + """Specifying no pairs same as specifying all.""" + all_pairs = chain(combinations(self.DG, 2), ((node, node) for node in self.DG)) + + ans = dict(tree_all_pairs_lca(self.DG, 0, all_pairs)) + self.assert_has_same_pairs(ans, self.ans) + + def test_tree_all_pairs_lowest_common_ancestor4(self): + """Gives the right answer.""" + ans = dict(tree_all_pairs_lca(self.DG)) + self.assert_has_same_pairs(self.gold, ans) + + def test_tree_all_pairs_lowest_common_ancestor5(self): + """Handles invalid input correctly.""" + empty_digraph = tree_all_pairs_lca(nx.DiGraph()) + pytest.raises(nx.NetworkXPointlessConcept, list, empty_digraph) + + bad_pairs_digraph = tree_all_pairs_lca(self.DG, pairs=[(-1, -2)]) + pytest.raises(nx.NodeNotFound, list, bad_pairs_digraph) + + def test_tree_all_pairs_lowest_common_ancestor6(self): + """Works on subtrees.""" + ans = dict(tree_all_pairs_lca(self.DG, 1)) + gold = { + pair: lca + for (pair, lca) in self.gold.items() + if all(n in (1, 3, 4) for n in pair) + } + self.assert_has_same_pairs(gold, ans) + + def test_tree_all_pairs_lowest_common_ancestor7(self): + """Works on disconnected nodes.""" + G = nx.DiGraph() + G.add_node(1) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G)) + + G.add_node(0) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G, 1)) + assert {(0, 0): 0} == dict(tree_all_pairs_lca(G, 0)) + + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lowest_common_ancestor8(self): + """Raises right errors if not a tree.""" + # Cycle + G = nx.DiGraph([(1, 2), (2, 1)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + # DAG + G = nx.DiGraph([(0, 2), (1, 2)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lowest_common_ancestor9(self): + """Test that pairs works correctly as a generator.""" + pairs = iter([(0, 1), (0, 1), (1, 0)]) + some_pairs = dict(tree_all_pairs_lca(self.DG, 0, pairs)) + assert (0, 1) in some_pairs and (1, 0) in some_pairs + assert len(some_pairs) == 2 + + def test_tree_all_pairs_lowest_common_ancestor10(self): + """Test that pairs not in the graph raises error.""" + lca = tree_all_pairs_lca(self.DG, 0, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + # check if node is None + lca = tree_all_pairs_lca(self.DG, None, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + + def test_tree_all_pairs_lowest_common_ancestor12(self): + """Test that tree routine bails on DAGs.""" + G = nx.DiGraph([(3, 4), (5, 4)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_not_implemented_for(self): + NNI = nx.NetworkXNotImplemented + G = 
nx.Graph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + G = nx.MultiDiGraph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + + def test_tree_all_pairs_lowest_common_ancestor13(self): + """Test that it works on non-empty trees with no LCAs.""" + G = nx.DiGraph() + G.add_node(3) + ans = list(tree_all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + +class TestDAGLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 8, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + def assert_lca_dicts_same(self, d1, d2, G=None): + """Checks if d1 and d2 contain the same pairs and + have a node at the same distance from root for each. 
+ If G is None use self.DG.""" + if G is None: + G = self.DG + root_distance = self.root_distance + else: + roots = [n for n, deg in G.in_degree if deg == 0] + assert len(roots) == 1 + root_distance = nx.shortest_path_length(G, source=roots[0]) + + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert ( + root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)] + ) + + def test_all_pairs_lowest_common_ancestor1(self): + """Produces the correct results.""" + self.assert_lca_dicts_same(dict(all_pairs_lca(self.DG)), self.gold) + + def test_all_pairs_lowest_common_ancestor2(self): + """Produces the correct results when all pairs given.""" + all_pairs = list(product(self.DG.nodes(), self.DG.nodes())) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lowest_common_ancestor3(self): + """Produces the correct results when all pairs given as a generator.""" + all_pairs = product(self.DG.nodes(), self.DG.nodes()) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lowest_common_ancestor4(self): + """Graph with two roots.""" + G = self.DG.copy() + G.add_edge(9, 10) + G.add_edge(9, 4) + gold = self.gold.copy() + gold[9, 9] = 9 + gold[9, 10] = 9 + gold[9, 4] = 9 + gold[9, 3] = 9 + gold[10, 4] = 9 + gold[10, 3] = 9 + gold[10, 10] = 10 + + testing = dict(all_pairs_lca(G)) + + G.add_edge(-1, 9) + G.add_edge(-1, 0) + self.assert_lca_dicts_same(testing, gold, G) + + def test_all_pairs_lowest_common_ancestor5(self): + """Test that pairs not in the graph raises error.""" + pytest.raises(nx.NodeNotFound, all_pairs_lca, self.DG, [(-1, -1)]) + + def test_all_pairs_lowest_common_ancestor6(self): + """Test that pairs with no LCA specified emits nothing.""" + G = self.DG.copy() + G.add_node(-1) + gen = all_pairs_lca(G, [(-1, -1), (-1, 0)]) + assert dict(gen) == {(-1, -1): -1} + + def test_all_pairs_lowest_common_ancestor7(self): + """Test that LCA on null graph bails.""" + pytest.raises(nx.NetworkXPointlessConcept, all_pairs_lca, nx.DiGraph()) + + def test_all_pairs_lowest_common_ancestor8(self): + """Test that LCA on non-dags bails.""" + pytest.raises(nx.NetworkXError, all_pairs_lca, nx.DiGraph([(3, 4), (4, 3)])) + + def test_all_pairs_lowest_common_ancestor9(self): + """Test that it works on non-empty graphs with no LCAs.""" + G = nx.DiGraph() + G.add_node(3) + ans = list(all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + def test_all_pairs_lowest_common_ancestor10(self): + """Test that it works on a small graph that previously revealed a bug gh-4942""" + G = nx.DiGraph([(0, 2), (1, 2), (2, 3)]) + ans = list(all_pairs_lca(G)) + assert len(ans) == 9 + + def test_lowest_common_ancestor1(self): + """Test that the one-pair function works on default.""" + G = nx.DiGraph([(0, 1), (2, 1)]) + sentinel = object() + assert nx.lowest_common_ancestor(G, 0, 2, default=sentinel) is sentinel + + def test_lowest_common_ancestor2(self): + """Test that the one-pair function works on identity.""" + G = nx.DiGraph() + G.add_node(3) + assert nx.lowest_common_ancestor(G, 3, 3) == 3 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_matching.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_matching.py new file mode 100644 index 0000000..b5a466d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_matching.py @@ -0,0 +1,572 @@ +import math +from itertools import permutations + +from 
pytest import raises + +import networkx as nx +from networkx.algorithms.matching import matching_dict_to_set +from networkx.utils import edges_equal + + +class TestMaxWeightMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.max_weight_matching` function. + + """ + + def test_trivial1(self): + """Empty graph""" + G = nx.Graph() + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_selfloop(self): + G = nx.Graph() + G.add_edge(0, 0, weight=100) + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({0: 1, 1: 0}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({0: 1, 1: 0}) + ) + + def test_two_path(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10) + G.add_edge("two", "three", weight=11) + assert edges_equal( + nx.max_weight_matching(G), + matching_dict_to_set({"three": "two", "two": "three"}), + ) + assert edges_equal( + nx.min_weight_matching(G), + matching_dict_to_set({"one": "two", "two": "one"}), + ) + + def test_path(self): + G = nx.Graph() + G.add_edge(1, 2, weight=5) + G.add_edge(2, 3, weight=11) + G.add_edge(3, 4, weight=5) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({2: 3, 3: 2}) + ) + assert edges_equal( + nx.max_weight_matching(G, 1), matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + assert edges_equal( + nx.min_weight_matching(G, 1), matching_dict_to_set({1: 2, 3: 4}) + ) + + def test_square(self): + G = nx.Graph() + G.add_edge(1, 4, weight=2) + G.add_edge(2, 3, weight=2) + G.add_edge(1, 2, weight=1) + G.add_edge(3, 4, weight=4) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3}) + ) + + def test_edge_attribute_name(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10, abcd=11) + G.add_edge("two", "three", weight=11, abcd=10) + assert edges_equal( + nx.max_weight_matching(G, weight="abcd"), + matching_dict_to_set({"one": "two", "two": "one"}), + ) + assert edges_equal( + nx.min_weight_matching(G, weight="abcd"), + matching_dict_to_set({"three": "two"}), + ) + + def test_floating_point_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=math.pi) + G.add_edge(2, 3, weight=math.exp(1)) + G.add_edge(1, 3, weight=3.0) + G.add_edge(1, 4, weight=math.sqrt(2.0)) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1}) + ) + + def test_negative_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=-2) + G.add_edge(2, 3, weight=1) + G.add_edge(2, 4, weight=-1) + G.add_edge(3, 4, weight=-6) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 2, 2: 1}) + ) + assert edges_equal( + nx.max_weight_matching(G, 1), matching_dict_to_set({1: 3, 2: 4, 3: 1, 4: 2}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + assert edges_equal( + nx.min_weight_matching(G, 1), matching_dict_to_set({1: 2, 3: 4}) + ) + + def test_s_blossom(self): + """Create S-blossom and use it for augmentation:""" + G = nx.Graph() + 
G.add_weighted_edges_from([(1, 2, 8), (1, 3, 9), (2, 3, 10), (3, 4, 7)]) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_weighted_edges_from([(1, 6, 5), (4, 5, 6)]) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_t_blossom(self): + """Create S-blossom, relabel as T-blossom, use for augmentation:""" + G = nx.Graph() + G.add_weighted_edges_from( + [(1, 2, 9), (1, 3, 8), (2, 3, 10), (1, 4, 5), (4, 5, 4), (1, 6, 3)] + ) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_edge(4, 5, weight=3) + G.add_edge(1, 6, weight=4) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.remove_edge(1, 6) + G.add_edge(3, 6, weight=4) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 6, 4: 5, 5: 4, 6: 3}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom(self): + """Create nested S-blossom, use for augmentation:""" + + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 9), + (1, 3, 9), + (2, 3, 10), + (2, 4, 8), + (3, 5, 8), + (4, 5, 10), + (5, 6, 6), + ] + ) + dict_format = {1: 3, 2: 4, 3: 1, 4: 2, 5: 6, 6: 5} + expected = {frozenset(e) for e in matching_dict_to_set(dict_format)} + answer = {frozenset(e) for e in nx.max_weight_matching(G)} + assert answer == expected + answer = {frozenset(e) for e in nx.min_weight_matching(G)} + assert answer == expected + + def test_nested_s_blossom_relabel(self): + """Create S-blossom, relabel as S, include in nested S-blossom:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 10), + (1, 7, 10), + (2, 3, 12), + (3, 4, 20), + (3, 5, 20), + (4, 5, 25), + (5, 6, 10), + (6, 7, 10), + (7, 8, 8), + ] + ) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3, 5: 6, 6: 5, 7: 8, 8: 7}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_expand(self): + """Create nested S-blossom, augment, expand recursively:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 8), + (1, 3, 8), + (2, 3, 10), + (2, 4, 12), + (3, 5, 12), + (4, 5, 14), + (4, 6, 12), + (5, 7, 12), + (6, 7, 14), + (7, 8, 12), + ] + ) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 5, 4: 6, 5: 3, 6: 4, 7: 8, 8: 7}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_blossom_relabel_expand(self): + """Create S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 23), + (1, 5, 22), + (1, 6, 15), + (2, 3, 25), + (3, 4, 22), + (4, 5, 25), + (4, 8, 14), + (5, 7, 13), + ] + ) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_relabel_expand(self): + """Create nested S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 19), + (1, 3, 20), + (1, 8, 8), + (2, 3, 25), + (2, 4, 18), + (3, 5, 18), + (4, 5, 13), + (4, 7, 7), + (5, 
6, 7), + ] + ) + answer = matching_dict_to_set({1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom1(self): + """Create blossom, relabel as T in more than one way, expand, + augment: + """ + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 5, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 50), + (1, 6, 30), + (3, 9, 35), + (4, 8, 35), + (5, 7, 26), + (9, 10, 5), + ] + ) + ansdict = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9} + answer = matching_dict_to_set(ansdict) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom2(self): + """Again but slightly different:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 5, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 50), + (1, 6, 30), + (3, 9, 35), + (4, 8, 26), + (5, 7, 40), + (9, 10, 5), + ] + ) + ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9} + answer = matching_dict_to_set(ans) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom_least_slack(self): + """Create blossom, relabel as T, expand such that a new + least-slack S-to-free dge is produced, augment: + """ + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 5, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 50), + (1, 6, 30), + (3, 9, 35), + (4, 8, 28), + (5, 7, 26), + (9, 10, 5), + ] + ) + ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9} + answer = matching_dict_to_set(ans) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom_augmenting(self): + """Create nested blossom, relabel as T in more than one way""" + # expand outer blossom such that inner blossom ends up on an + # augmenting path: + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 7, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 95), + (4, 6, 94), + (5, 6, 94), + (6, 7, 50), + (1, 8, 30), + (3, 11, 35), + (5, 9, 36), + (7, 10, 26), + (11, 12, 5), + ] + ) + ans = { + 1: 8, + 2: 3, + 3: 2, + 4: 6, + 5: 9, + 6: 4, + 7: 10, + 8: 1, + 9: 5, + 10: 7, + 11: 12, + 12: 11, + } + answer = matching_dict_to_set(ans) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom_expand_recursively(self): + """Create nested S-blossom, relabel as S, expand recursively:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 40), + (1, 3, 40), + (2, 3, 60), + (2, 4, 55), + (3, 5, 55), + (4, 5, 50), + (1, 8, 15), + (5, 7, 30), + (7, 6, 10), + (8, 10, 10), + (4, 9, 30), + ] + ) + ans = {1: 2, 2: 1, 3: 5, 4: 9, 5: 3, 6: 7, 7: 6, 8: 10, 9: 4, 10: 8} + answer = matching_dict_to_set(ans) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_wrong_graph_type(self): + error = nx.NetworkXNotImplemented + raises(error, nx.max_weight_matching, nx.MultiGraph()) + raises(error, nx.max_weight_matching, nx.MultiDiGraph()) + raises(error, nx.max_weight_matching, nx.DiGraph()) + raises(error, nx.min_weight_matching, nx.DiGraph()) + + +class TestIsMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_matching` function. 
+ + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_empty_matching(self): + G = nx.path_graph(4) + assert nx.is_matching(G, set()) + + def test_single_edge(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(1, 2)}) + + def test_edge_order(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(0, 1), (2, 3)}) + assert nx.is_matching(G, {(1, 0), (2, 3)}) + assert nx.is_matching(G, {(0, 1), (3, 2)}) + assert nx.is_matching(G, {(1, 0), (3, 2)}) + + def test_valid_matching(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(0, 1), (2, 3)}) + + def test_invalid_input(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # edge to node not in G + raises(error, nx.is_matching, G, {(0, 5), (2, 3)}) + # edge not a 2-tuple + raises(error, nx.is_matching, G, {(0, 1, 2), (2, 3)}) + raises(error, nx.is_matching, G, {(0,), (2, 3)}) + + def test_selfloops(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # selfloop for node not in G + raises(error, nx.is_matching, G, {(5, 5), (2, 3)}) + # selfloop edge not in G + assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)}) + # selfloop edge in G + G.add_edge(0, 0) + assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)}) + + def test_invalid_matching(self): + G = nx.path_graph(4) + assert not nx.is_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_invalid_edge(self): + G = nx.path_graph(4) + assert not nx.is_matching(G, {(0, 3), (1, 2)}) + raises(nx.NetworkXError, nx.is_matching, G, {(0, 55)}) + + G = nx.DiGraph(G.edges) + assert nx.is_matching(G, {(0, 1)}) + assert not nx.is_matching(G, {(1, 0)}) + + +class TestIsMaximalMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_maximal_matching` function. + + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {(0, 1), (2, 3)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_not_maximal(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1)}) + + +class TestIsPerfectMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_perfect_matching` function. + + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {(0, 1), (2, 3)}) + + def test_valid_not_path(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + G.add_edge(5, 2) + + assert nx.is_perfect_matching(G, {(1, 4), (0, 3), (5, 2)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_perfect_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_maximal_but_not_perfect(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + + assert not nx.is_perfect_matching(G, {(1, 4), (0, 3)}) + + +class TestMaximalMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.maximal_matching`. + + """ + + def test_valid_matching(self): + edges = [(1, 2), (1, 5), (2, 3), (2, 5), (3, 4), (3, 6), (5, 6)] + G = nx.Graph(edges) + matching = nx.maximal_matching(G) + assert nx.is_maximal_matching(G, matching) + + def test_single_edge_matching(self): + # In the star graph, any maximal matching has just one edge. 
+ G = nx.star_graph(5) + matching = nx.maximal_matching(G) + assert 1 == len(matching) + assert nx.is_maximal_matching(G, matching) + + def test_self_loops(self): + # Create the path graph with two self-loops. + G = nx.path_graph(3) + G.add_edges_from([(0, 0), (1, 1)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + # The matching should never include self-loops. + assert not any(u == v for u, v in matching) + assert nx.is_maximal_matching(G, matching) + + def test_ordering(self): + """Tests that a maximal matching is computed correctly + regardless of the order in which nodes are added to the graph. + + """ + for nodes in permutations(range(3)): + G = nx.Graph() + G.add_nodes_from(nodes) + G.add_edges_from([(0, 1), (0, 2)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + assert nx.is_maximal_matching(G, matching) + + def test_wrong_graph_type(self): + error = nx.NetworkXNotImplemented + raises(error, nx.maximal_matching, nx.MultiGraph()) + raises(error, nx.maximal_matching, nx.MultiDiGraph()) + raises(error, nx.maximal_matching, nx.DiGraph()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_max_weight_clique.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_max_weight_clique.py new file mode 100644 index 0000000..cebe37f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_max_weight_clique.py @@ -0,0 +1,181 @@ +"""Maximum weight clique test suite. + +""" + +import pytest + +import networkx as nx + + +class TestMaximumWeightClique: + def test_basic_cases(self): + def check_basic_case(graph_func, expected_weight, weight_accessor): + graph = graph_func() + clique, weight = nx.algorithms.max_weight_clique(graph, weight_accessor) + assert verify_clique( + graph, clique, weight, expected_weight, weight_accessor + ) + + for graph_func, (expected_weight, expected_size) in TEST_CASES.items(): + check_basic_case(graph_func, expected_weight, "weight") + check_basic_case(graph_func, expected_size, None) + + def test_key_error(self): + graph = two_node_graph() + with pytest.raises(KeyError): + nx.algorithms.max_weight_clique(graph, "non-existent-key") + + def test_error_on_non_integer_weight(self): + graph = two_node_graph() + graph.nodes[2]["weight"] = 1.5 + with pytest.raises(ValueError): + nx.algorithms.max_weight_clique(graph) + + def test_unaffected_by_self_loops(self): + graph = two_node_graph() + graph.add_edge(1, 1) + graph.add_edge(2, 2) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 30, "weight") + graph = three_node_independent_set() + graph.add_edge(1, 1) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 20, "weight") + + def test_30_node_prob(self): + G = nx.Graph() + G.add_nodes_from(range(1, 31)) + for i in range(1, 31): + G.nodes[i]["weight"] = i + 1 + # fmt: off + G.add_edges_from( + [ + (1, 12), (1, 13), (1, 15), (1, 16), (1, 18), (1, 19), (1, 20), + (1, 23), (1, 26), (1, 28), (1, 29), (1, 30), (2, 3), (2, 4), + (2, 5), (2, 8), (2, 9), (2, 10), (2, 14), (2, 17), (2, 18), + (2, 21), (2, 22), (2, 23), (2, 27), (3, 9), (3, 15), (3, 21), + (3, 22), (3, 23), (3, 24), (3, 27), (3, 28), (3, 29), (4, 5), + (4, 6), (4, 8), (4, 21), (4, 22), (4, 23), (4, 26), (4, 28), + (4, 30), (5, 6), (5, 8), (5, 9), (5, 13), (5, 14), (5, 15), + (5, 16), (5, 20), (5, 21), (5, 22), (5, 25), (5, 28), (5, 29), + (6, 7), (6, 8), (6, 13), (6, 17), (6, 18), (6, 19), (6, 
24), + (6, 26), (6, 27), (6, 28), (6, 29), (7, 12), (7, 14), (7, 15), + (7, 16), (7, 17), (7, 20), (7, 25), (7, 27), (7, 29), (7, 30), + (8, 10), (8, 15), (8, 16), (8, 18), (8, 20), (8, 22), (8, 24), + (8, 26), (8, 27), (8, 28), (8, 30), (9, 11), (9, 12), (9, 13), + (9, 14), (9, 15), (9, 16), (9, 19), (9, 20), (9, 21), (9, 24), + (9, 30), (10, 12), (10, 15), (10, 18), (10, 19), (10, 20), + (10, 22), (10, 23), (10, 24), (10, 26), (10, 27), (10, 29), + (10, 30), (11, 13), (11, 15), (11, 16), (11, 17), (11, 18), + (11, 19), (11, 20), (11, 22), (11, 29), (11, 30), (12, 14), + (12, 17), (12, 18), (12, 19), (12, 20), (12, 21), (12, 23), + (12, 25), (12, 26), (12, 30), (13, 20), (13, 22), (13, 23), + (13, 24), (13, 30), (14, 16), (14, 20), (14, 21), (14, 22), + (14, 23), (14, 25), (14, 26), (14, 27), (14, 29), (14, 30), + (15, 17), (15, 18), (15, 20), (15, 21), (15, 26), (15, 27), + (15, 28), (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), + (16, 29), (16, 30), (17, 18), (17, 21), (17, 22), (17, 25), + (17, 27), (17, 28), (17, 30), (18, 19), (18, 20), (18, 21), + (18, 22), (18, 23), (18, 24), (19, 20), (19, 22), (19, 23), + (19, 24), (19, 25), (19, 27), (19, 30), (20, 21), (20, 23), + (20, 24), (20, 26), (20, 28), (20, 29), (21, 23), (21, 26), + (21, 27), (21, 29), (22, 24), (22, 25), (22, 26), (22, 29), + (23, 25), (23, 30), (24, 25), (24, 26), (25, 27), (25, 29), + (26, 27), (26, 28), (26, 30), (28, 29), (29, 30), + ] + ) + # fmt: on + clique, weight = nx.algorithms.max_weight_clique(G) + assert verify_clique(G, clique, weight, 111, "weight") + + +# ############################ Utility functions ############################ +def verify_clique( + graph, clique, reported_clique_weight, expected_clique_weight, weight_accessor +): + for node1 in clique: + for node2 in clique: + if node1 == node2: + continue + if not graph.has_edge(node1, node2): + return False + + if weight_accessor is None: + clique_weight = len(clique) + else: + clique_weight = sum(graph.nodes[v]["weight"] for v in clique) + + if clique_weight != expected_clique_weight: + return False + if clique_weight != reported_clique_weight: + return False + + return True + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + graph.nodes[1]["weight"] = 10 + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + return graph + + +def three_node_independent_set(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + graph.nodes[4]["weight"] = 100 + graph.nodes[5]["weight"] = 200 + graph.nodes[6]["weight"] = 50 + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify expected weight of max weight clique 
+# and expected size of maximum clique +TEST_CASES = { + empty_graph: (0, 0), + one_node_graph: (10, 1), + two_node_graph: (30, 2), + three_node_clique: (35, 3), + three_node_independent_set: (20, 1), + disconnected: (300, 2), +} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_mis.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_mis.py new file mode 100644 index 0000000..c2f1eb1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_mis.py @@ -0,0 +1,62 @@ +""" +Tests for maximal (not maximum) independent sets. + +""" + +import random + +import pytest + +import networkx as nx + + +def test_random_seed(): + G = nx.empty_graph(5) + assert nx.maximal_independent_set(G, seed=1) == [1, 0, 3, 2, 4] + + +@pytest.mark.parametrize("graph", [nx.complete_graph(5), nx.complete_graph(55)]) +def test_K5(graph): + """Maximal independent set for complete graphs""" + assert all(nx.maximal_independent_set(graph, [n]) == [n] for n in graph) + + +def test_exceptions(): + """Bad input should raise exception.""" + G = nx.florentine_families_graph() + pytest.raises(nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Smith"]) + pytest.raises( + nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Salviati", "Pazzi"] + ) + # MaximalIndependentSet is not implemented for directed graphs + pytest.raises(nx.NetworkXNotImplemented, nx.maximal_independent_set, nx.DiGraph(G)) + + +def test_florentine_family(): + G = nx.florentine_families_graph() + indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"]) + assert set(indep) == { + "Medici", + "Bischeri", + "Castellani", + "Pazzi", + "Ginori", + "Lamberteschi", + } + + +def test_bipartite(): + G = nx.complete_bipartite_graph(12, 34) + indep = nx.maximal_independent_set(G, [4, 5, 9, 10]) + assert sorted(indep) == list(range(12)) + + +def test_random_graphs(): + """Generate 5 random graphs of different types and sizes and + make sure that all sets are independent and maximal.""" + for i in range(0, 50, 10): + G = nx.erdos_renyi_graph(i * 10 + 1, random.random()) + IS = nx.maximal_independent_set(G) + assert G.subgraph(IS).number_of_edges() == 0 + neighbors_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS)) + assert all(v in neighbors_of_MIS for v in set(G.nodes()).difference(IS)) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_moral.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_moral.py new file mode 100644 index 0000000..fc98c97 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_moral.py @@ -0,0 +1,15 @@ +import networkx as nx +from networkx.algorithms.moral import moral_graph + + +def test_get_moral_graph(): + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from([(1, 2), (3, 2), (4, 1), (4, 5), (6, 5), (7, 5)]) + H = moral_graph(graph) + assert not H.is_directed() + assert H.has_edge(1, 3) + assert H.has_edge(4, 6) + assert H.has_edge(6, 7) + assert H.has_edge(4, 7) + assert not H.has_edge(1, 5) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification.py new file mode 100644 index 0000000..ff99841 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification.py @@ -0,0 +1,140 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx
+from networkx.algorithms import node_classification + + +class TestHarmonicFunction: + def test_path_graph(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + predicted = node_classification.harmonic_function(G, label_name=label_name) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "B" + assert predicted[3] == "B" + + def test_no_labels(self): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + node_classification.harmonic_function(G) + + def test_no_nodes(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + node_classification.harmonic_function(G) + + def test_no_edges(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + node_classification.harmonic_function(G) + + def test_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + G.add_edge(0, 1) + G.add_edge(1, 2) + G.add_edge(2, 3) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + node_classification.harmonic_function(G) + + def test_one_labeled_node(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + predicted = node_classification.harmonic_function(G, label_name=label_name) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "A" + assert predicted[3] == "A" + + def test_nodes_all_labeled(self): + G = nx.karate_club_graph() + label_name = "club" + predicted = node_classification.harmonic_function(G, label_name=label_name) + for i in range(len(G)): + assert predicted[i] == G.nodes[i][label_name] + + def test_labeled_nodes_are_not_changed(self): + G = nx.karate_club_graph() + label_name = "club" + label_removed = {0, 1, 2, 3, 4, 5, 6, 7} + for i in label_removed: + del G.nodes[i][label_name] + predicted = node_classification.harmonic_function(G, label_name=label_name) + label_not_removed = set(list(range(len(G)))) - label_removed + for i in label_not_removed: + assert predicted[i] == G.nodes[i][label_name] + + +class TestLocalAndGlobalConsistency: + def test_path_graph(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + predicted = node_classification.local_and_global_consistency( + G, label_name=label_name + ) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "B" + assert predicted[3] == "B" + + def test_no_labels(self): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + node_classification.local_and_global_consistency(G) + + def test_no_nodes(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + node_classification.local_and_global_consistency(G) + + def test_no_edges(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + node_classification.local_and_global_consistency(G) + + def test_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + G.add_edge(0, 1) + G.add_edge(1, 2) + G.add_edge(2, 3) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + node_classification.harmonic_function(G) + + def test_one_labeled_node(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + predicted = node_classification.local_and_global_consistency( + G, label_name=label_name + ) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "A" + assert predicted[3] == "A" + + def 
test_nodes_all_labeled(self): + G = nx.karate_club_graph() + label_name = "club" + predicted = node_classification.local_and_global_consistency( + G, alpha=0, label_name=label_name + ) + for i in range(len(G)): + assert predicted[i] == G.nodes[i][label_name] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification_deprecations.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification_deprecations.py new file mode 100644 index 0000000..2d12561 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_node_classification_deprecations.py @@ -0,0 +1,41 @@ +"""TODO: Remove this test module for version 3.0.""" + + +import sys + +import pytest + +# NOTE: It is necessary to prevent previous imports in the test suite from +# "contaminating" the tests for the deprecation warnings by removing +# node_classification from sys.modules. + + +def test_hmn_deprecation_warning(): + sys.modules.pop("networkx.algorithms.node_classification", None) + with pytest.warns(DeprecationWarning): + from networkx.algorithms.node_classification import hmn + + +def test_lgc_deprecation_warning(): + sys.modules.pop("networkx.algorithms.node_classification", None) + with pytest.warns(DeprecationWarning): + from networkx.algorithms.node_classification import lgc + + +def test_no_warn_on_function_import(recwarn): + # Accessing the functions shouldn't raise any warning + sys.modules.pop("networkx.algorithms.node_classification", None) + from networkx.algorithms.node_classification import ( + harmonic_function, + local_and_global_consistency, + ) + + assert len(recwarn) == 0 + + +def test_no_warn_on_package_import(recwarn): + # Accessing the package shouldn't raise any warning + sys.modules.pop("networkx.algorithms.node_classification", None) + from networkx.algorithms import node_classification + + assert len(recwarn) == 0 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_non_randomness.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_non_randomness.py new file mode 100644 index 0000000..1f6de59 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_non_randomness.py @@ -0,0 +1,37 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") + + +@pytest.mark.parametrize( + "k, weight, expected", + [ + (None, None, 7.21), # infers 3 communities + (2, None, 11.7), + (None, "weight", 25.45), + (2, "weight", 38.8), + ], +) +def test_non_randomness(k, weight, expected): + G = nx.karate_club_graph() + np.testing.assert_almost_equal( + nx.non_randomness(G, k, weight)[0], expected, decimal=2 + ) + + +def test_non_connected(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_node(3) + with pytest.raises(nx.NetworkXException): + nx.non_randomness(G) + + +def test_self_loops(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 1) + with pytest.raises(nx.NetworkXError): + nx.non_randomness(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planar_drawing.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planar_drawing.py new file mode 100644 index 0000000..2a12c06 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planar_drawing.py @@ -0,0 +1,274 @@ +import math + +import pytest + +import networkx as nx +from networkx.algorithms.planar_drawing import triangulate_embedding + + +def test_graph1(): + embedding_data = {0: [1, 2, 3], 1: [2, 0], 2: [3, 0, 1], 3: [2, 0]} 
+ check_embedding_data(embedding_data) + + +def test_graph2(): + embedding_data = { + 0: [8, 6], + 1: [2, 6, 9], + 2: [8, 1, 7, 9, 6, 4], + 3: [9], + 4: [2], + 5: [6, 8], + 6: [9, 1, 0, 5, 2], + 7: [9, 2], + 8: [0, 2, 5], + 9: [1, 6, 2, 7, 3], + } + check_embedding_data(embedding_data) + + +def test_circle_graph(): + embedding_data = { + 0: [1, 9], + 1: [0, 2], + 2: [1, 3], + 3: [2, 4], + 4: [3, 5], + 5: [4, 6], + 6: [5, 7], + 7: [6, 8], + 8: [7, 9], + 9: [8, 0], + } + check_embedding_data(embedding_data) + + +def test_grid_graph(): + embedding_data = { + (0, 1): [(0, 0), (1, 1), (0, 2)], + (1, 2): [(1, 1), (2, 2), (0, 2)], + (0, 0): [(0, 1), (1, 0)], + (2, 1): [(2, 0), (2, 2), (1, 1)], + (1, 1): [(2, 1), (1, 2), (0, 1), (1, 0)], + (2, 0): [(1, 0), (2, 1)], + (2, 2): [(1, 2), (2, 1)], + (1, 0): [(0, 0), (2, 0), (1, 1)], + (0, 2): [(1, 2), (0, 1)], + } + check_embedding_data(embedding_data) + + +def test_one_node_graph(): + embedding_data = {0: []} + check_embedding_data(embedding_data) + + +def test_two_node_graph(): + embedding_data = {0: [1], 1: [0]} + check_embedding_data(embedding_data) + + +def test_three_node_graph(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1]} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph1(): + embedding_data = {0: [], 1: []} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph2(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1], 3: [4, 5], 4: [3, 5], 5: [3, 4]} + check_embedding_data(embedding_data) + + +def test_invalid_half_edge(): + with pytest.raises(nx.NetworkXException): + embedding_data = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2, 4], 4: [1, 2, 3]} + embedding = nx.PlanarEmbedding() + embedding.set_data(embedding_data) + nx.combinatorial_embedding_to_pos(embedding) + + +def test_triangulate_embedding1(): + embedding = nx.PlanarEmbedding() + embedding.add_node(1) + expected_embedding = {1: []} + check_triangulation(embedding, expected_embedding) + + +def test_triangulate_embedding2(): + embedding = nx.PlanarEmbedding() + embedding.connect_components(1, 2) + expected_embedding = {1: [2], 2: [1]} + check_triangulation(embedding, expected_embedding) + + +def check_triangulation(embedding, expected_embedding): + res_embedding, _ = triangulate_embedding(embedding, True) + assert ( + res_embedding.get_data() == expected_embedding + ), "Expected embedding incorrect" + res_embedding, _ = triangulate_embedding(embedding, False) + assert ( + res_embedding.get_data() == expected_embedding + ), "Expected embedding incorrect" + + +def check_embedding_data(embedding_data): + """Checks that the planar embedding of the input is correct""" + embedding = nx.PlanarEmbedding() + embedding.set_data(embedding_data) + pos_fully = nx.combinatorial_embedding_to_pos(embedding, False) + msg = "Planar drawing does not conform to the embedding (fully " "triangulation)" + assert planar_drawing_conforms_to_embedding(embedding, pos_fully), msg + check_edge_intersections(embedding, pos_fully) + pos_internally = nx.combinatorial_embedding_to_pos(embedding, True) + msg = "Planar drawing does not conform to the embedding (internal " "triangulation)" + assert planar_drawing_conforms_to_embedding(embedding, pos_internally), msg + check_edge_intersections(embedding, pos_internally) + + +def is_close(a, b, rel_tol=1e-09, abs_tol=0.0): + # Check if float numbers are basically equal, for python >=3.5 there is + # function for that in the standard library + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) + + +def 
point_in_between(a, b, p): + # checks if p is on the line between a and b + x1, y1 = a + x2, y2 = b + px, py = p + dist_1_2 = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + dist_1_p = math.sqrt((x1 - px) ** 2 + (y1 - py) ** 2) + dist_2_p = math.sqrt((x2 - px) ** 2 + (y2 - py) ** 2) + return is_close(dist_1_p + dist_2_p, dist_1_2) + + +def check_edge_intersections(G, pos): + """Check all edges in G for intersections. + + Raises an exception if an intersection is found. + + Parameters + ---------- + G : NetworkX graph + pos : dict + Maps every node to a tuple (x, y) representing its position + + """ + for a, b in G.edges(): + for c, d in G.edges(): + # Check if end points are different + if a != c and b != d and b != c and a != d: + x1, y1 = pos[a] + x2, y2 = pos[b] + x3, y3 = pos[c] + x4, y4 = pos[d] + determinant = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) + if determinant != 0: # the lines are not parallel + # calculate intersection point, see: + # https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection + # the whole numerator is divided by the determinant + px = ( + (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4) + ) / determinant + py = ( + (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4) + ) / determinant + + # Check if intersection lies between the points + if point_in_between(pos[a], pos[b], (px, py)) and point_in_between( + pos[c], pos[d], (px, py) + ): + msg = f"There is an intersection at {px},{py}" + raise nx.NetworkXException(msg) + + # Check overlap + msg = "A node lies on an edge connecting two other nodes" + if ( + point_in_between(pos[a], pos[b], pos[c]) + or point_in_between(pos[a], pos[b], pos[d]) + or point_in_between(pos[c], pos[d], pos[a]) + or point_in_between(pos[c], pos[d], pos[b]) + ): + raise nx.NetworkXException(msg) + # No edge intersection found + + +class Vector: + """Compare vectors by their angle without loss of precision + + All vectors in direction [0, 1] are the smallest. + The vectors grow in clockwise direction.
+ """ + + __slots__ = ["x", "y", "node", "quadrant"] + + def __init__(self, x, y, node): + self.x = x + self.y = y + self.node = node + if self.x >= 0 and self.y > 0: + self.quadrant = 1 + elif self.x > 0 and self.y <= 0: + self.quadrant = 2 + elif self.x <= 0 and self.y < 0: + self.quadrant = 3 + else: + self.quadrant = 4 + + def __eq__(self, other): + return self.quadrant == other.quadrant and self.x * other.y == self.y * other.x + + def __lt__(self, other): + if self.quadrant < other.quadrant: + return True + elif self.quadrant > other.quadrant: + return False + else: + return self.x * other.y < self.y * other.x + + def __ne__(self, other): + return not self == other + + def __le__(self, other): + return not other < self + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + +def planar_drawing_conforms_to_embedding(embedding, pos): + """Checks if pos conforms to the planar embedding + + Returns true iff the neighbors are actually oriented in the orientation + specified of the embedding + """ + for v in embedding: + nbr_vectors = [] + v_pos = pos[v] + for nbr in embedding[v]: + new_vector = Vector(pos[nbr][0] - v_pos[0], pos[nbr][1] - v_pos[1], nbr) + nbr_vectors.append(new_vector) + # Sort neighbors according to their phi angle + nbr_vectors.sort() + for idx, nbr_vector in enumerate(nbr_vectors): + cw_vector = nbr_vectors[(idx + 1) % len(nbr_vectors)] + ccw_vector = nbr_vectors[idx - 1] + if ( + embedding[v][nbr_vector.node]["cw"] != cw_vector.node + or embedding[v][nbr_vector.node]["ccw"] != ccw_vector.node + ): + return False + if cw_vector.node != nbr_vector.node and cw_vector == nbr_vector: + # Lines overlap + return False + if ccw_vector.node != nbr_vector.node and ccw_vector == nbr_vector: + # Lines overlap + return False + return True diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planarity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planarity.py new file mode 100644 index 0000000..675a5d9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_planarity.py @@ -0,0 +1,442 @@ +import pytest + +import networkx as nx +from networkx.algorithms.planarity import ( + check_planarity_recursive, + get_counterexample, + get_counterexample_recursive, +) + + +class TestLRPlanarity: + """Nose Unit tests for the :mod:`networkx.algorithms.planarity` module. + + Tests three things: + 1. Check that the result is correct + (returns planar if and only if the graph is actually planar) + 2. In case a counter example is returned: Check if it is correct + 3. In case an embedding is returned: Check if its actually an embedding + """ + + @staticmethod + def check_graph(G, is_planar=None): + """Raises an exception if the lr_planarity check returns a wrong result + + Parameters + ---------- + G : NetworkX graph + is_planar : bool + The expected result of the planarity check. + If set to None only counter example or embedding are verified. + + """ + + # obtain results of planarity check + is_planar_lr, result = nx.check_planarity(G, True) + is_planar_lr_rec, result_rec = check_planarity_recursive(G, True) + + if is_planar is not None: + # set a message for the assert + if is_planar: + msg = "Wrong planarity check result. Should be planar." + else: + msg = "Wrong planarity check result. Should be non-planar." 
+ + # check if the result is as expected + assert is_planar == is_planar_lr, msg + assert is_planar == is_planar_lr_rec, msg + + if is_planar_lr: + # check embedding + check_embedding(G, result) + check_embedding(G, result_rec) + else: + # check counter example + check_counterexample(G, result) + check_counterexample(G, result_rec) + + def test_simple_planar_graph(self): + e = [ + (1, 2), + (2, 3), + (3, 4), + (4, 6), + (6, 7), + (7, 1), + (1, 5), + (5, 2), + (2, 4), + (4, 5), + (5, 7), + ] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_planar_with_selfloop(self): + e = [ + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (1, 2), + (1, 3), + (1, 5), + (2, 5), + (2, 4), + (3, 4), + (3, 5), + (4, 5), + ] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_k3_3(self): + self.check_graph(nx.complete_bipartite_graph(3, 3), is_planar=False) + + def test_k5(self): + self.check_graph(nx.complete_graph(5), is_planar=False) + + def test_multiple_components_planar(self): + e = [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4)] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_multiple_components_non_planar(self): + G = nx.complete_graph(5) + # add another planar component to the non planar component + # G stays non planar + G.add_edges_from([(6, 7), (7, 8), (8, 6)]) + self.check_graph(G, is_planar=False) + + def test_non_planar_with_selfloop(self): + G = nx.complete_graph(5) + # add self loops + for i in range(5): + G.add_edge(i, i) + self.check_graph(G, is_planar=False) + + def test_non_planar1(self): + # tests a graph that has no subgraph directly isomorph to K5 or K3_3 + e = [ + (1, 5), + (1, 6), + (1, 7), + (2, 6), + (2, 3), + (3, 5), + (3, 7), + (4, 5), + (4, 6), + (4, 7), + ] + self.check_graph(nx.Graph(e), is_planar=False) + + def test_loop(self): + # test a graph with a selfloop + e = [(1, 2), (2, 2)] + G = nx.Graph(e) + self.check_graph(G, is_planar=True) + + def test_comp(self): + # test multiple component graph + e = [(1, 2), (3, 4)] + G = nx.Graph(e) + G.remove_edge(1, 2) + self.check_graph(G, is_planar=True) + + def test_goldner_harary(self): + # test goldner-harary graph (a maximal planar graph) + e = [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 7), + (1, 8), + (1, 10), + (1, 11), + (2, 3), + (2, 4), + (2, 6), + (2, 7), + (2, 9), + (2, 10), + (2, 11), + (3, 4), + (4, 5), + (4, 6), + (4, 7), + (5, 7), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 10), + (9, 10), + (10, 11), + ] + G = nx.Graph(e) + self.check_graph(G, is_planar=True) + + def test_planar_multigraph(self): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 2), (1, 2), (2, 3), (3, 1)]) + self.check_graph(G, is_planar=True) + + def test_non_planar_multigraph(self): + G = nx.MultiGraph(nx.complete_graph(5)) + G.add_edges_from([(1, 2)] * 5) + self.check_graph(G, is_planar=False) + + def test_planar_digraph(self): + G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (4, 1), (4, 2), (1, 4), (3, 2)]) + self.check_graph(G, is_planar=True) + + def test_non_planar_digraph(self): + G = nx.DiGraph(nx.complete_graph(5)) + G.remove_edge(1, 2) + G.remove_edge(4, 1) + self.check_graph(G, is_planar=False) + + def test_single_component(self): + # Test a graph with only a single node + G = nx.Graph() + G.add_node(1) + self.check_graph(G, is_planar=True) + + def test_graph1(self): + G = nx.OrderedGraph( + [ + (3, 10), + (2, 13), + (1, 13), + (7, 11), + (0, 8), + (8, 13), + (0, 2), + (0, 7), + (0, 10), + (1, 7), + ] + ) + self.check_graph(G, is_planar=True) + + def test_graph2(self): + G = nx.OrderedGraph( + [ + (1, 2), + (4, 
13), + (0, 13), + (4, 5), + (7, 10), + (1, 7), + (0, 3), + (2, 6), + (5, 6), + (7, 13), + (4, 8), + (0, 8), + (0, 9), + (2, 13), + (6, 7), + (3, 6), + (2, 8), + ] + ) + self.check_graph(G, is_planar=False) + + def test_graph3(self): + G = nx.OrderedGraph( + [ + (0, 7), + (3, 11), + (3, 4), + (8, 9), + (4, 11), + (1, 7), + (1, 13), + (1, 11), + (3, 5), + (5, 7), + (1, 3), + (0, 4), + (5, 11), + (5, 13), + ] + ) + self.check_graph(G, is_planar=False) + + def test_counterexample_planar(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample(G) + + def test_counterexample_planar_recursive(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample_recursive(G) + + +def check_embedding(G, embedding): + """Raises an exception if the combinatorial embedding is not correct + + Parameters + ---------- + G : NetworkX graph + embedding : a dict mapping nodes to a list of edges + This specifies the ordering of the outgoing edges from a node for + a combinatorial embedding + + Notes + ----- + Checks the following things: + - The type of the embedding is correct + - The nodes and edges match the original graph + - Every half edge has its matching opposite half edge + - No intersections of edges (checked by Euler's formula) + """ + + if not isinstance(embedding, nx.PlanarEmbedding): + raise nx.NetworkXException("Bad embedding. Not of type nx.PlanarEmbedding") + + # Check structure + embedding.check_structure() + + # Check that graphs are equivalent + + assert set(G.nodes) == set( + embedding.nodes + ), "Bad embedding. Nodes don't match the original graph." + + # Check that the edges are equal + g_edges = set() + for edge in G.edges: + if edge[0] != edge[1]: + g_edges.add((edge[0], edge[1])) + g_edges.add((edge[1], edge[0])) + assert g_edges == set( + embedding.edges + ), "Bad embedding. Edges don't match the original graph." + + +def check_counterexample(G, sub_graph): + """Raises an exception if the counterexample is wrong. + + Parameters + ---------- + G : NetworkX graph + subdivision_nodes : set + A set of nodes inducing a subgraph as a counterexample + """ + # 1. Create the sub graph + sub_graph = nx.Graph(sub_graph) + + # 2. Remove self loops + for u in sub_graph: + if sub_graph.has_edge(u, u): + sub_graph.remove_edge(u, u) + + # keep track of nodes we might need to contract + contract = list(sub_graph) + + # 3. Contract Edges + while len(contract) > 0: + contract_node = contract.pop() + if contract_node not in sub_graph: + # Node was already contracted + continue + degree = sub_graph.degree[contract_node] + # Check if we can remove the node + if degree == 2: + # Get the two neighbors + neighbors = iter(sub_graph[contract_node]) + u = next(neighbors) + v = next(neighbors) + # Save nodes for later + contract.append(u) + contract.append(v) + # Contract edge + sub_graph.remove_node(contract_node) + sub_graph.add_edge(u, v) + + # 4. 
Check for isomorphism with K5 or K3_3 graphs + if len(sub_graph) == 5: + if not nx.is_isomorphic(nx.complete_graph(5), sub_graph): + raise nx.NetworkXException("Bad counter example.") + elif len(sub_graph) == 6: + if not nx.is_isomorphic(nx.complete_bipartite_graph(3, 3), sub_graph): + raise nx.NetworkXException("Bad counter example.") + else: + raise nx.NetworkXException("Bad counter example.") + + +class TestPlanarEmbeddingClass: + def test_get_data(self): + embedding = self.get_star_embedding(3) + data = embedding.get_data() + data_cmp = {0: [2, 1], 1: [0], 2: [0]} + assert data == data_cmp + + def test_missing_edge_orientation(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_edge(1, 2) + embedding.add_edge(2, 1) + # Invalid structure because the orientation of the edge was not set + embedding.check_structure() + + def test_invalid_edge_orientation(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + embedding.add_half_edge_first(2, 1) + embedding.add_edge(1, 3) + embedding.check_structure() + + def test_missing_half_edge(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + # Invalid structure because other half edge is missing + embedding.check_structure() + + def test_not_fulfilling_euler_formula(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + for i in range(5): + for j in range(5): + if i != j: + embedding.add_half_edge_first(i, j) + embedding.check_structure() + + def test_missing_reference(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_cw(1, 2, 3) + + def test_connect_components(self): + embedding = nx.PlanarEmbedding() + embedding.connect_components(1, 2) + + def test_successful_face_traversal(self): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + embedding.add_half_edge_first(2, 1) + face = embedding.traverse_face(1, 2) + assert face == [1, 2] + + def test_unsuccessful_face_traversal(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_edge(1, 2, ccw=2, cw=3) + embedding.add_edge(2, 1, ccw=1, cw=3) + embedding.traverse_face(1, 2) + + @staticmethod + def get_star_embedding(n): + embedding = nx.PlanarEmbedding() + for i in range(1, n): + embedding.add_half_edge_first(0, i) + embedding.add_half_edge_first(i, 0) + return embedding diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_polynomials.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_polynomials.py new file mode 100644 index 0000000..a81d6a6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_polynomials.py @@ -0,0 +1,57 @@ +"""Unit tests for the :mod:`networkx.algorithms.polynomials` module.""" + +import pytest + +import networkx as nx + +sympy = pytest.importorskip("sympy") + + +# Mapping of input graphs to a string representation of their tutte polynomials +_test_tutte_graphs = { + nx.complete_graph(1): "1", + nx.complete_graph(4): "x**3 + 3*x**2 + 4*x*y + 2*x + y**3 + 3*y**2 + 2*y", + nx.cycle_graph(5): "x**4 + x**3 + x**2 + x + y", + nx.diamond_graph(): "x**3 + 2*x**2 + 2*x*y + x + y**2 + y", +} + +_test_chromatic_graphs = { + nx.complete_graph(1): "x", + nx.complete_graph(4): "x**4 - 6*x**3 + 11*x**2 - 6*x", + nx.cycle_graph(5): "x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 
4*x", + nx.diamond_graph(): "x**4 - 5*x**3 + 8*x**2 - 4*x", + nx.path_graph(5): "x**5 - 4*x**4 + 6*x**3 - 4*x**2 + x", +} + + +@pytest.mark.parametrize(("G", "expected"), _test_tutte_graphs.items()) +def test_tutte_polynomial(G, expected): + assert nx.tutte_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_tutte_graphs.keys()) +def test_tutte_polynomial_disjoint(G): + """Tutte polynomial factors into the Tutte polynomials of its components. + Verify this property with the disjoint union of two copies of the input graph. + """ + t_g = nx.tutte_polynomial(G) + H = nx.disjoint_union(G, G) + t_h = nx.tutte_polynomial(H) + assert sympy.simplify(t_g * t_g).equals(t_h) + + +@pytest.mark.parametrize(("G", "expected"), _test_chromatic_graphs.items()) +def test_chromatic_polynomial(G, expected): + assert nx.chromatic_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_chromatic_graphs.keys()) +def test_chromatic_polynomial_disjoint(G): + """Chromatic polynomial factors into the Chromatic polynomials of its + components. Verify this property with the disjoint union of two copies of + the input graph. + """ + x_g = nx.chromatic_polynomial(G) + H = nx.disjoint_union(G, G) + x_h = nx.chromatic_polynomial(H) + assert sympy.simplify(x_g * x_g).equals(x_h) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_reciprocity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_reciprocity.py new file mode 100644 index 0000000..2c5fc04 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_reciprocity.py @@ -0,0 +1,38 @@ +import pytest + +import networkx as nx + + +class TestReciprocity: + + # test overall reicprocity by passing whole graph + def test_reciprocity_digraph(self): + DG = nx.DiGraph([(1, 2), (2, 1)]) + reciprocity = nx.reciprocity(DG) + assert reciprocity == 1.0 + + # test empty graph's overall reciprocity which will throw an error + def test_overall_reciprocity_empty_graph(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph() + nx.overall_reciprocity(DG) + + # test for reciprocity for a list of nodes + def test_reciprocity_graph_nodes(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, [1, 2]) + expected_reciprocity = {1: 0.0, 2: 0.6666666666666666} + assert reciprocity == expected_reciprocity + + # test for reciprocity for a single node + def test_reciprocity_graph_node(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, 2) + assert reciprocity == 0.6666666666666666 + + # test for reciprocity for an isolated node + def test_reciprocity_graph_isolated_nodes(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2)]) + DG.add_node(4) + nx.reciprocity(DG, 4) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_regular.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_regular.py new file mode 100644 index 0000000..0c8e4e4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_regular.py @@ -0,0 +1,86 @@ +import pytest + +import networkx +import networkx as nx +import networkx.algorithms.regular as reg +import networkx.generators as gen + + +class TestKFactor: + def test_k_factor_trivial(self): + g = gen.cycle_graph(4) + f = reg.k_factor(g, 2) + assert g.edges == f.edges + + def test_k_factor1(self): + g = gen.grid_2d_graph(4, 4) + g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) 
+ for _, degree in g_kf.degree(): + assert degree == 2 + + def test_k_factor2(self): + g = gen.complete_graph(6) + g_kf = reg.k_factor(g, 3) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 3 + + def test_k_factor3(self): + g = gen.grid_2d_graph(4, 4) + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 3) + + def test_k_factor4(self): + g = gen.lattice.hexagonal_lattice_graph(4, 4) + # Perfect matching doesn't exist for 4,4 hexagonal lattice graph + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 2) + + def test_k_factor5(self): + g = gen.complete_graph(6) + # small k to exercise SmallKGadget + g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 2 + + +class TestIsRegular: + def test_is_regular1(self): + g = gen.cycle_graph(4) + assert reg.is_regular(g) + + def test_is_regular2(self): + g = gen.complete_graph(5) + assert reg.is_regular(g) + + def test_is_regular3(self): + g = gen.lollipop_graph(5, 5) + assert not reg.is_regular(g) + + def test_is_regular4(self): + g = nx.DiGraph() + g.add_edges_from([(0, 1), (1, 2), (2, 0)]) + assert reg.is_regular(g) + + +class TestIsKRegular: + def test_is_k_regular1(self): + g = gen.cycle_graph(4) + assert reg.is_k_regular(g, 2) + assert not reg.is_k_regular(g, 3) + + def test_is_k_regular2(self): + g = gen.complete_graph(5) + assert reg.is_k_regular(g, 4) + assert not reg.is_k_regular(g, 3) + assert not reg.is_k_regular(g, 6) + + def test_is_k_regular3(self): + g = gen.lollipop_graph(5, 5) + assert not reg.is_k_regular(g, 5) + assert not reg.is_k_regular(g, 6) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_richclub.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_richclub.py new file mode 100644 index 0000000..1ed42d4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_richclub.py @@ -0,0 +1,86 @@ +import pytest + +import networkx as nx + + +def test_richclub(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rc = nx.richclub.rich_club_coefficient(G, normalized=False) + assert rc == {0: 12.0 / 30, 1: 8.0 / 12} + + # test single value + rc0 = nx.richclub.rich_club_coefficient(G, normalized=False)[0] + assert rc0 == 12.0 / 30.0 + + +def test_richclub_seed(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2, seed=1) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub_normalized(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub2(): + T = nx.balanced_tree(2, 10) + rc = nx.richclub.rich_club_coefficient(T, normalized=False) + assert rc == { + 0: 4092 / (2047 * 2046.0), + 1: (2044.0 / (1023 * 1022)), + 2: (2040.0 / (1022 * 1021)), + } + + +def test_richclub3(): + # tests edgecase + G = nx.karate_club_graph() + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == { + 0: 156.0 / 1122, + 1: 154.0 / 1056, + 2: 110.0 / 462, + 3: 78.0 / 240, + 4: 44.0 / 90, + 5: 22.0 / 42, + 6: 10.0 / 20, + 7: 10.0 / 20, + 8: 10.0 / 20, + 9: 6.0 / 12, + 10: 2.0 / 6, + 11: 2.0 / 6, + 12: 0.0, + 13: 0.0, + 14: 0.0, + 15: 0.0, + } + + +def test_richclub4(): + G = nx.Graph() + G.add_edges_from( + [(0, 1), (0, 2), (0, 3), (0, 4), (4, 5), (5, 9), (6, 9), (7, 9), (8, 9)] + ) + rc = 
nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 18 / 90.0, 1: 6 / 12.0, 2: 0.0, 3: 0.0} + + +def test_richclub_exception(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + nx.rich_club_coefficient(G) + + +def test_rich_club_exception2(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.rich_club_coefficient(G) + + +# def test_richclub2_normalized(): +# T = nx.balanced_tree(2,10) +# rcNorm = nx.richclub.rich_club_coefficient(T,Q=2) +# assert_true(rcNorm[0] ==1.0 and rcNorm[1] < 0.9 and rcNorm[2] < 0.9) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_similarity.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_similarity.py new file mode 100644 index 0000000..9b620de --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_similarity.py @@ -0,0 +1,920 @@ +import pytest + +import networkx as nx +from networkx.algorithms.similarity import ( + graph_edit_distance, + optimal_edit_paths, + optimize_graph_edit_distance, +) +from networkx.generators.classic import ( + circular_ladder_graph, + cycle_graph, + path_graph, + wheel_graph, +) + + +def nmatch(n1, n2): + return n1 == n2 + + +def ematch(e1, e2): + return e1 == e2 + + +def getCanonical(): + G = nx.Graph() + G.add_node("A", label="A") + G.add_node("B", label="B") + G.add_node("C", label="C") + G.add_node("D", label="D") + G.add_edge("A", "B", label="a-b") + G.add_edge("B", "C", label="b-c") + G.add_edge("B", "D", label="b-d") + return G + + +class TestSimilarity: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_graph_edit_distance_roots_and_timeout(self): + G0 = nx.star_graph(5) + G1 = G0.copy() + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2]) + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2, 3, 4]) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 3)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(3, 9)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 9)) + assert graph_edit_distance(G0, G1, roots=(1, 2)) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1)) == 8 + assert graph_edit_distance(G0, G1, roots=(1, 2), timeout=5) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=5) == 8 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=0.0001) is None + # test raise on 0 timeout + pytest.raises(nx.NetworkXError, graph_edit_distance, G0, G1, timeout=0) + + def test_graph_edit_distance(self): + G0 = nx.Graph() + G1 = path_graph(6) + G2 = cycle_graph(6) + G3 = wheel_graph(7) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 11 + assert graph_edit_distance(G1, G0) == 11 + assert graph_edit_distance(G0, G2) == 12 + assert graph_edit_distance(G2, G0) == 12 + assert graph_edit_distance(G0, G3) == 19 + assert graph_edit_distance(G3, G0) == 19 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 1 + assert graph_edit_distance(G2, G1) == 1 + assert graph_edit_distance(G1, G3) == 8 + assert graph_edit_distance(G3, G1) == 8 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 7 + assert graph_edit_distance(G3, G2) == 7 + + assert graph_edit_distance(G3, G3) == 0 + + def test_graph_edit_distance_node_match(self): + G1 = cycle_graph(5) + G2 = cycle_graph(5) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if 
n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, node_match=lambda n1, n2: n1["color"] == n2["color"] + ) + == 1 + ) + + def test_graph_edit_distance_edge_match(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, edge_match=lambda e1, e2: e1["color"] == e2["color"] + ) + == 2 + ) + + def test_graph_edit_distance_node_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + + def node_subst_cost(uattr, vattr): + if uattr["color"] == vattr["color"]: + return 1 + else: + return 10 + + def node_del_cost(attr): + if attr["color"] == "blue": + return 20 + else: + return 50 + + def node_ins_cost(attr): + if attr["color"] == "blue": + return 40 + else: + return 100 + + assert ( + graph_edit_distance( + G1, + G2, + node_subst_cost=node_subst_cost, + node_del_cost=node_del_cost, + node_ins_cost=node_ins_cost, + ) + == 6 + ) + + def test_graph_edit_distance_edge_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + + def edge_subst_cost(gattr, hattr): + if gattr["color"] == hattr["color"]: + return 0.01 + else: + return 0.1 + + def edge_del_cost(attr): + if attr["color"] == "blue": + return 0.2 + else: + return 0.5 + + def edge_ins_cost(attr): + if attr["color"] == "blue": + return 0.4 + else: + return 1.0 + + assert ( + graph_edit_distance( + G1, + G2, + edge_subst_cost=edge_subst_cost, + edge_del_cost=edge_del_cost, + edge_ins_cost=edge_ins_cost, + ) + == 0.23 + ) + + def test_graph_edit_distance_upper_bound(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + assert graph_edit_distance(G1, G2, upper_bound=5) is None + assert graph_edit_distance(G1, G2, upper_bound=24) == 22 + assert graph_edit_distance(G1, G2) == 22 + + def test_optimal_edit_paths(self): + G1 = path_graph(3) + G2 = cycle_graph(3) + paths, cost = optimal_edit_paths(G1, G2) + assert cost == 1 + assert len(paths) == 6 + + def canonical(vertex_path, edge_path): + return ( + tuple(sorted(vertex_path)), + tuple(sorted(edge_path, key=lambda x: (None in x, x))), + ) + + expected_paths = [ + ( + [(0, 0), (1, 1), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (1, 2)), (None, (0, 2))], + ), + ( + [(0, 0), (1, 2), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (1, 2)), (None, (0, 1))], + ), + ( + [(0, 1), (1, 0), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (0, 2)), (None, (1, 2))], + ), + ( + [(0, 1), (1, 2), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 2)), (None, (0, 1))], + ), + ( + [(0, 2), (1, 0), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (0, 1)), (None, (1, 2))], + ), + ( + [(0, 2), (1, 1), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 1)), (None, (0, 2))], + ), + ] + assert {canonical(*p) for p in paths} == {canonical(*p) for p in expected_paths} + + def test_optimize_graph_edit_distance(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + bestcost = 1000 + for cost in 
optimize_graph_edit_distance(G1, G2): + assert cost < bestcost + bestcost = cost + assert bestcost == 22 + + # def test_graph_edit_distance_bigger(self): + # G1 = circular_ladder_graph(12) + # G2 = circular_ladder_graph(16) + # assert_equal(graph_edit_distance(G1, G2), 22) + + def test_selfloops(self): + G0 = nx.Graph() + G1 = nx.Graph() + G1.add_edges_from((("A", "A"), ("A", "B"))) + G2 = nx.Graph() + G2.add_edges_from((("A", "B"), ("B", "B"))) + G3 = nx.Graph() + G3.add_edges_from((("A", "A"), ("A", "B"), ("B", "B"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 4 + assert graph_edit_distance(G1, G0) == 4 + assert graph_edit_distance(G0, G2) == 4 + assert graph_edit_distance(G2, G0) == 4 + assert graph_edit_distance(G0, G3) == 5 + assert graph_edit_distance(G3, G0) == 5 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 0 + assert graph_edit_distance(G2, G1) == 0 + assert graph_edit_distance(G1, G3) == 1 + assert graph_edit_distance(G3, G1) == 1 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 1 + assert graph_edit_distance(G3, G2) == 1 + + assert graph_edit_distance(G3, G3) == 0 + + def test_digraph(self): + G0 = nx.DiGraph() + G1 = nx.DiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("D", "A"))) + G2 = nx.DiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("A", "D"))) + G3 = nx.DiGraph() + G3.add_edges_from((("A", "B"), ("A", "C"), ("B", "D"), ("C", "D"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 8 + assert graph_edit_distance(G1, G0) == 8 + assert graph_edit_distance(G0, G2) == 8 + assert graph_edit_distance(G2, G0) == 8 + assert graph_edit_distance(G0, G3) == 8 + assert graph_edit_distance(G3, G0) == 8 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 2 + assert graph_edit_distance(G2, G1) == 2 + assert graph_edit_distance(G1, G3) == 4 + assert graph_edit_distance(G3, G1) == 4 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 2 + assert graph_edit_distance(G3, G2) == 2 + + assert graph_edit_distance(G3, G3) == 0 + + def test_multigraph(self): + G0 = nx.MultiGraph() + G1 = nx.MultiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"))) + G2 = nx.MultiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("B", "C"), ("A", "C"))) + G3 = nx.MultiGraph() + G3.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"), ("A", "C"), ("A", "C"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 6 + assert graph_edit_distance(G1, G0) == 6 + assert graph_edit_distance(G0, G2) == 7 + assert graph_edit_distance(G2, G0) == 7 + assert graph_edit_distance(G0, G3) == 8 + assert graph_edit_distance(G3, G0) == 8 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 1 + assert graph_edit_distance(G2, G1) == 1 + assert graph_edit_distance(G1, G3) == 2 + assert graph_edit_distance(G3, G1) == 2 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 1 + assert graph_edit_distance(G3, G2) == 1 + + assert graph_edit_distance(G3, G3) == 0 + + def test_multidigraph(self): + G1 = nx.MultiDiGraph() + G1.add_edges_from( + ( + ("hardware", "kernel"), + ("kernel", "hardware"), + ("kernel", "userspace"), + ("userspace", "kernel"), + ) + ) + G2 = nx.MultiDiGraph() + G2.add_edges_from( + ( + ("winter", "spring"), + ("spring", "summer"), + ("summer", "autumn"), + ("autumn", 
"winter"), + ) + ) + + assert graph_edit_distance(G1, G2) == 5 + assert graph_edit_distance(G2, G1) == 5 + + # by https://github.com/jfbeaumont + def testCopy(self): + G = nx.Graph() + G.add_node("A", label="A") + G.add_node("B", label="B") + G.add_edge("A", "B", label="a-b") + assert ( + graph_edit_distance(G, G.copy(), node_match=nmatch, edge_match=ematch) == 0 + ) + + def testSame(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 0 + + def testOneEdgeLabelDiff(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="bad") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneNodeLabelDiff(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="Z") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraNode(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + G2.add_node("C", label="C") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraEdge(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_node("C", label="C") + G1.add_node("C", label="C") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("A", "C", label="a-c") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraNodeAndEdge(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("A", "C", label="a-c") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph1(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "D", label="b-d") + G2.add_edge("D", "E", label="d-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 3 + + def testGraph2(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + G2.add_edge("C", "E", label="c-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 4 + + def testGraph3(self): + G1 = getCanonical() + G2 = nx.Graph() + 
G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_node("F", label="F") + G2.add_node("G", label="G") + G2.add_edge("A", "C", label="a-c") + G2.add_edge("A", "D", label="a-d") + G2.add_edge("D", "E", label="d-e") + G2.add_edge("D", "F", label="d-f") + G2.add_edge("D", "G", label="d-g") + G2.add_edge("E", "B", label="e-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 12 + + def testGraph4(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_a(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("A", "D", label="a-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_b(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("B", "D", label="bad") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + # note: nx.simrank_similarity_numpy not included because returns np.array + simrank_algs = [nx.simrank_similarity, nx.similarity._simrank_similarity_python] + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_no_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + }, + 1: { + 0: 0.3951219505902448, + 1: 1, + 2: 0.3951219505902449, + 3: 0.5707317069281646, + 4: 0.5707317069281646, + }, + 2: { + 0: 0.5707317069281646, + 1: 0.3951219505902449, + 2: 1, + 3: 0.3951219505902449, + 4: 0.5707317069281646, + }, + 3: { + 0: 0.5707317069281646, + 1: 0.5707317069281646, + 2: 0.3951219505902449, + 3: 1, + 4: 0.3951219505902449, + }, + 4: { + 0: 0.3951219505902449, + 1: 0.5707317069281646, + 2: 0.5707317069281646, + 3: 0.3951219505902449, + 4: 1, + }, + } + actual = simrank_similarity(G) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = { + 0: {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443}, + 1: {0: 0.0, 1: 1, 2: 0.4135512472705618, 3: 0.0, 4: 0.10586911930126384}, + 2: { + 0: 0.1323363991265798, + 1: 0.4135512472705618, + 2: 1, + 3: 0.04234764772050554, + 4: 0.08822426608438655, + }, + 3: {0: 0.0, 1: 0.0, 2: 0.04234764772050554, 3: 1, 4: 0.3308409978164495}, + 4: { + 0: 0.03387811817640443, + 1: 0.10586911930126384, + 2: 0.08822426608438655, + 3: 0.3308409978164495, + 
4: 1, + }, + } + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + } + actual = simrank_similarity(G, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443} + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_noninteger_nodes(self, simrank_similarity): + G = nx.cycle_graph(5) + G = nx.relabel_nodes(G, dict(enumerate("abcde"))) + expected = { + "a": 1, + "b": 0.3951219505902448, + "c": 0.5707317069281646, + "d": 0.5707317069281646, + "e": 0.3951219505902449, + } + actual = simrank_similarity(G, source="a") + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + node_labels = dict(enumerate(nx.get_node_attributes(G, "label").values())) + G = nx.relabel_nodes(G, node_labels) + + expected = { + "Univ": 1, + "ProfA": 0.0, + "ProfB": 0.1323363991265798, + "StudentA": 0.0, + "StudentB": 0.03387811817640443, + } + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8, source="Univ") + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_and_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = 1 + actual = simrank_similarity(G, source=0, target=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = 0.1323363991265798 + # Use the importance_factor from the paper to get the same numbers. + # Use the pair (0,2) because (0,0) and (0,1) have trivial results. 
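+        # Recall the SimRank recurrence from the cited paper: s(a, a) = 1 and,
+        # for a != b, s(a, b) = C / (|I(a)| * |I(b)|) * sum over in-neighbor
+        # pairs (i, j) of s(i, j), where I(x) is the set of in-neighbors of x
+        # and C is the decay factor passed here as importance_factor (0.8).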
+ actual = simrank_similarity(G, importance_factor=0.8, source=0, target=2) + assert expected == pytest.approx(actual, abs=1e-5) + + @pytest.mark.parametrize("alg", simrank_algs) + def test_simrank_max_iterations(self, alg): + G = nx.cycle_graph(5) + pytest.raises(nx.ExceededMaxIterations, alg, G, max_iterations=10) + + def test_simrank_between_versions(self): + G = nx.cycle_graph(5) + # _python tolerance 1e-4 + expected_python_tol4 = { + 0: 1, + 1: 0.394512499239852, + 2: 0.5703550452791322, + 3: 0.5703550452791323, + 4: 0.394512499239852, + } + # _numpy tolerance 1e-4 + expected_numpy_tol4 = { + 0: 1.0, + 1: 0.3947180735764555, + 2: 0.570482097206368, + 3: 0.570482097206368, + 4: 0.3947180735764555, + } + actual = nx.simrank_similarity(G, source=0) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_python_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-3) + + actual = nx.similarity._simrank_similarity_python(G, source=0) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_numpy_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-3) + + def test_simrank_numpy_no_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + ], + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G, source=0) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_and_target(self): + G = nx.cycle_graph(5) + expected = 1.0 + actual = nx.similarity._simrank_similarity_numpy(G, source=0, target=0) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_panther_similarity_unweighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 4) + expected = {3: 0.5, 2: 0.5, 1: 0.5, 4: 0.125} + sim = nx.panther_similarity(G, 0, path_length=2) + assert sim == expected + + def test_panther_similarity_weighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge("v1", "v2", weight=5) + G.add_edge("v1", "v3", weight=1) + G.add_edge("v1", "v4", weight=2) + G.add_edge("v2", "v3", weight=0.1) + G.add_edge("v3", "v5", weight=1) + expected = {"v3": 0.75, "v4": 0.5, "v2": 0.5, "v5": 0.25} + sim = nx.panther_similarity(G, "v1", path_length=2) + assert sim == expected + + def test_generate_random_paths_unweighted(self): + np.random.seed(42) + + index_map = {} + num_paths = 10 + path_length = 2 + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 
4) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map + ) + expected_paths = [ + [3, 0, 3], + [4, 2, 1], + [2, 1, 0], + [2, 0, 3], + [3, 0, 1], + [3, 0, 1], + [4, 2, 0], + [2, 1, 0], + [3, 0, 2], + [2, 1, 2], + ] + expected_map = { + 0: {0, 2, 3, 4, 5, 6, 7, 8}, + 1: {1, 2, 4, 5, 7, 9}, + 2: {1, 2, 3, 6, 7, 8, 9}, + 3: {0, 3, 4, 5, 8}, + 4: {1, 6}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_generate_random_paths_weighted(self): + np.random.seed(42) + + index_map = {} + num_paths = 10 + path_length = 6 + G = nx.Graph() + G.add_edge("a", "b", weight=0.6) + G.add_edge("a", "c", weight=0.2) + G.add_edge("c", "d", weight=0.1) + G.add_edge("c", "e", weight=0.7) + G.add_edge("c", "f", weight=0.9) + G.add_edge("a", "d", weight=0.3) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map + ) + + expected_paths = [ + ["d", "c", "f", "c", "d", "a", "b"], + ["e", "c", "f", "c", "f", "c", "e"], + ["d", "a", "b", "a", "b", "a", "c"], + ["b", "a", "d", "a", "b", "a", "b"], + ["d", "a", "b", "a", "b", "a", "d"], + ["d", "a", "b", "a", "b", "a", "c"], + ["d", "a", "b", "a", "b", "a", "b"], + ["f", "c", "f", "c", "f", "c", "e"], + ["d", "a", "d", "a", "b", "a", "b"], + ["e", "c", "f", "c", "e", "c", "d"], + ] + expected_map = { + "d": {0, 2, 3, 4, 5, 6, 8, 9}, + "c": {0, 1, 2, 5, 7, 9}, + "f": {0, 1, 9, 7}, + "a": {0, 2, 3, 4, 5, 6, 8}, + "b": {0, 2, 3, 4, 5, 6, 8}, + "e": {1, 9, 7}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_symmetry_with_custom_matching(self): + print("G2 is edge (a,b) and G3 is edge (a,a)") + print("but node order for G2 is (a,b) while for G3 it is (b,a)") + + a, b = "A", "B" + G2 = nx.Graph() + G2.add_nodes_from((a, b)) + G2.add_edges_from([(a, b)]) + G3 = nx.Graph() + G3.add_nodes_from((b, a)) + G3.add_edges_from([(a, a)]) + for G in (G2, G3): + for n in G: + G.nodes[n]["attr"] = n + for e in G.edges: + G.edges[e]["attr"] = e + match = lambda x, y: x == y + + print("Starting G2 to G3 GED calculation") + assert nx.graph_edit_distance(G2, G3, node_match=match, edge_match=match) == 1 + + print("Starting G3 to G2 GED calculation") + assert nx.graph_edit_distance(G3, G2, node_match=match, edge_match=match) == 1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_simple_paths.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_simple_paths.py new file mode 100644 index 0000000..08348b9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_simple_paths.py @@ -0,0 +1,765 @@ +import random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.simple_paths import ( + _bidirectional_dijkstra, + _bidirectional_shortest_path, +) +from networkx.utils import arbitrary_element, pairwise + + +class TestIsSimplePath: + """Unit tests for the + :func:`networkx.algorithms.simple_paths.is_simple_path` function. + + """ + + def test_empty_list(self): + """Tests that the empty list is not a valid path, since there + should be a one-to-one correspondence between paths as lists of + nodes and paths as lists of edges. + + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, []) + + def test_trivial_path(self): + """Tests that the trivial path, a path of length one, is + considered a simple path in a graph. 
+ + """ + G = nx.trivial_graph() + assert nx.is_simple_path(G, [0]) + + def test_trivial_nonpath(self): + """Tests that a list whose sole element is an object not in the + graph is not considered a simple path. + + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, ["not a node"]) + + def test_simple_path(self): + G = nx.path_graph(2) + assert nx.is_simple_path(G, [0, 1]) + + def test_non_simple_path(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 1, 0]) + + def test_cycle(self): + G = nx.cycle_graph(3) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_missing_node(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 2]) + + def test_directed_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.is_simple_path(G, [0, 1, 2]) + + def test_directed_non_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert not nx.is_simple_path(G, [2, 1, 0]) + + def test_directed_cycle(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_multigraph(self): + G = nx.MultiGraph([(0, 1), (0, 1)]) + assert nx.is_simple_path(G, [0, 1]) + + def test_multidigraph(self): + G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 0), (1, 0)]) + assert nx.is_simple_path(G, [0, 1]) + + +# Tests for all_simple_paths +def test_all_simple_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3)} + + +def test_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 2, 3)} + + +def test_all_simple_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 3)} + + +def test_all_simple_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 3)} + + +def test_all_simple_paths_source_target(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 1, 1) + assert list(paths) == [] + + +def test_all_simple_paths_cutoff(): + G = nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {(0, 1)} + paths = nx.all_simple_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {(0, 1), (0, 2, 1), (0, 3, 1)} + + +def test_all_simple_paths_on_non_trivial_graph(): + 
"""you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + (1, 5, 4, 2, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {(1, 2), (1, 3), (1, 2, 3)} + + +def test_all_simple_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + paths = nx.all_simple_paths(G, 1, 1) + assert list(paths) == [] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2), (1, 10, 2)} + + +def test_all_simple_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2)} + + +def test_all_simple_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths = nx.all_simple_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {(1, 2, 3)} + + +def test_all_simple_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_paths_corner_cases(): + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 0)) == [] + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def hamiltonian_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_paths(G, source, target): + if len(path) == n: + yield path + + +def test_hamiltonian_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = [list(p) for p in hamiltonian_path(G, 0)] + exact = [[0] + list(p) for p in permutations([1, 2, 3], 3)] + assert sorted(paths) == sorted(exact) + + +def test_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=0) + assert list(list(p) for p in paths) == [] + paths = nx.all_simple_paths(nx.MultiGraph(G), 0, 3, cutoff=0) + assert list(list(p) for p in paths) == [] + + +def test_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 0, 3)) + + +def test_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for all_simple_edge_paths +def test_all_simple_edge_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = 
nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_source_target(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [] + + +def test_all_simple_edge_paths_cutoff(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {((0, 1),)} + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {((0, 1),), ((0, 2), (2, 1)), ((0, 3), (3, 1))} + + +def test_all_simple_edge_paths_on_non_trivial_graph(): + """you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_edge_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), (3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + ((1, 5), (5, 4), (4, 2), (2, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), (3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {((1, 2),), ((1, 3),), ((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_edge_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == { + ((1, 2, 0),), + ((1, 2, 1),), + ((1, 10, 0), (10, 2, 0)), + } + + +def test_all_simple_edge_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_edge_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {((1, 2, 0),), ((1, 2, 1),)} + + +def test_all_simple_edge_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths 
= nx.all_simple_edge_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_edge_paths_corner_cases(): + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 0)) == [] + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_edge_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def hamiltonian_edge_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_edge_paths(G, source, target): + if len(path) == n - 1: + yield path + + +def test_hamiltonian__edge_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = hamiltonian_edge_path(G, 0) + exact = [list(pairwise([0] + list(p))) for p in permutations([1, 2, 3], 3)] + assert sorted(exact) == [p for p in sorted(paths)] + + +def test_edge_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=0) + assert list(list(p) for p in paths) == [] + paths = nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3, cutoff=0) + assert list(list(p) for p in paths) == [] + + +def test_edge_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3)) + + +def test_edge_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_edge_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for shortest_simple_paths +def test_shortest_simple_paths(): + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 7, 8, 12] + assert [len(path) for path in nx.shortest_simple_paths(G, 1, 12)] == sorted( + len(path) for path in nx.all_simple_paths(G, 1, 12) + ) + + +def test_shortest_simple_paths_directed(): + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3) + assert [path for path in paths] == [[0, 1, 2, 3]] + + +def test_shortest_simple_paths_directed_with_weight_fucntion(): + def cost(u, v, x): + return 1 + + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 7, 8, 12] + assert [ + len(path) for path in nx.shortest_simple_paths(G, 1, 12, weight=cost) + ] == sorted(len(path) for path in nx.all_simple_paths(G, 1, 12)) + + +def test_shortest_simple_paths_with_weight_fucntion(): + def cost(u, v, x): + return 1 + + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3, weight=cost) + assert [path for path in paths] == [[0, 1, 2, 3]] + + +def test_Greg_Bernstein(): + g1 = nx.Graph() + g1.add_nodes_from(["N0", "N1", "N2", "N3", "N4"]) + g1.add_edge("N4", "N1", weight=10.0, capacity=50, name="L5") + g1.add_edge("N4", "N0", weight=7.0, capacity=40, name="L4") + g1.add_edge("N0", "N1", weight=10.0, capacity=45, name="L1") + g1.add_edge("N3", "N0", weight=10.0, capacity=50, name="L0") + g1.add_edge("N2", "N3", weight=12.0, capacity=30, name="L2") + g1.add_edge("N1", "N2", weight=15.0, capacity=42, name="L3") + solution = [["N1", "N0", "N3"], ["N1", "N2", "N3"], ["N1", "N4", "N0", "N3"]] + result = 
list(nx.shortest_simple_paths(g1, "N1", "N3", weight="weight")) + assert result == solution + + +def test_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + +def test_directed_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + G = G.to_directed() + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + +def test_weighted_shortest_simple_path_issue2427(): + G = nx.Graph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.Graph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_directed_weighted_shortest_simple_path_issue2427(): + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_weight_name(): + G = nx.cycle_graph(7) + nx.set_edge_attributes(G, 1, "weight") + nx.set_edge_attributes(G, 1, "foo") + G.adj[1][2]["foo"] = 7 + paths = list(nx.shortest_simple_paths(G, 0, 3, weight="foo")) + solution = [[0, 6, 5, 4, 3], [0, 1, 2, 3]] + assert paths == solution + + +def test_ssp_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_ssp_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_multigraph(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_source_missing2(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_bidirectional_shortest_path_restricted_cycle(): + cycle = nx.cycle_graph(7) + length, path = _bidirectional_shortest_path(cycle, 0, 3) + assert path == [0, 1, 2, 3] + length, path = _bidirectional_shortest_path(cycle, 0, 3, ignore_nodes=[1]) + assert path == [0, 6, 5, 4, 3] + + +def 
test_bidirectional_shortest_path_restricted_wheel(): + wheel = nx.wheel_graph(6) + length, path = _bidirectional_shortest_path(wheel, 1, 3) + assert path in [[1, 0, 3], [1, 2, 3]] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0]) + assert path == [1, 2, 3] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0, 2]) + assert path == [1, 5, 4, 3] + length, path = _bidirectional_shortest_path( + wheel, 1, 3, ignore_edges=[(1, 0), (5, 0), (2, 3)] + ) + assert path in [[1, 2, 0, 3], [1, 5, 4, 3]] + + +def test_bidirectional_shortest_path_restricted_directed_cycle(): + directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + length, path = _bidirectional_shortest_path(directed_cycle, 0, 3) + assert path == [0, 1, 2, 3] + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_nodes=[1], + ) + length, path = _bidirectional_shortest_path( + directed_cycle, 0, 3, ignore_edges=[(2, 1)] + ) + assert path == [0, 1, 2, 3] + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_edges=[(1, 2)], + ) + + +def test_bidirectional_shortest_path_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2]) + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1] + ) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[2] + ) + G = nx.Graph() + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + nx.add_path(G, [3, 2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1, 2] + ) + + +def validate_path(G, s, t, soln_len, path): + assert path[0] == s + assert path[-1] == t + assert soln_len == sum( + G[u][v].get("weight", 1) for u, v in zip(path[:-1], path[1:]) + ) + + +def validate_length_path(G, s, t, soln_len, length, path): + assert soln_len == length + validate_path(G, s, t, length, path) + + +def test_bidirectional_dijksta_restricted(): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + validate_length_path(XG, "s", "v", 9, *_bidirectional_dijkstra(XG, "s", "v")) + validate_length_path( + XG, "s", "v", 10, *_bidirectional_dijkstra(XG, "s", "v", ignore_nodes=["u"]) + ) + validate_length_path( + XG, + "s", + "v", + 11, + *_bidirectional_dijkstra(XG, "s", "v", ignore_edges=[("s", "x")]) + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG, + "s", + "v", + ignore_nodes=["u"], + ignore_edges=[("s", "x")], + ) + validate_length_path(XG3, 0, 3, 15, *_bidirectional_dijkstra(XG3, 0, 3)) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_nodes=[1]) + ) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_edges=[(2, 3)]) + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG3, + 0, + 3, + ignore_nodes=[1], + ignore_edges=[(5, 4)], + ) + + +def test_bidirectional_dijkstra_no_path(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + _bidirectional_dijkstra(G, 1, 6) + + +def test_bidirectional_dijkstra_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 
2, 10]) + nx.add_path(G, [1, 3, 10]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1, 2] + ) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smallworld.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smallworld.py new file mode 100644 index 0000000..42ede0e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smallworld.py @@ -0,0 +1,70 @@ +import pytest + +pytest.importorskip("numpy") + +import random + +import networkx as nx +from networkx import lattice_reference, omega, random_reference, sigma + +rng = 42 + + +def test_random_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = random_reference(G, niter=1, seed=rng) + C = nx.average_clustering(G) + Cr = nx.average_clustering(Gr) + assert C > Cr + + with pytest.raises(nx.NetworkXError): + next(random_reference(nx.Graph())) + with pytest.raises(nx.NetworkXNotImplemented): + next(random_reference(nx.DiGraph())) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = random_reference(H, niter=1, seed=rng) + + +def test_lattice_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gl = lattice_reference(G, niter=1, seed=rng) + L = nx.average_shortest_path_length(G) + Ll = nx.average_shortest_path_length(Gl) + assert Ll > L + + pytest.raises(nx.NetworkXError, lattice_reference, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph()) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = lattice_reference(H, niter=1) + + +def test_sigma(): + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + sigmas = sigma(Gs, niter=1, nrand=2, seed=rng) + sigmar = sigma(Gr, niter=1, nrand=2, seed=rng) + assert sigmar < sigmas + + +def test_omega(): + Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + omegal = omega(Gl, niter=1, nrand=1, seed=rng) + omegar = omega(Gr, niter=1, nrand=1, seed=rng) + omegas = omega(Gs, niter=1, nrand=1, seed=rng) + assert omegal < omegas and omegas < omegar + + # Test that omega lies within the [-1, 1] bounds + G_barbell = nx.barbell_graph(5, 1) + G_karate = nx.karate_club_graph() + + omega_barbell = nx.omega(G_barbell) + omega_karate = nx.omega(G_karate, nrand=2) + + omegas = (omegal, omegar, omegas, omega_barbell, omega_karate) + + for o in omegas: + assert -1 <= o <= 1 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smetric.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smetric.py new file mode 100644 index 0000000..b6c4570 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_smetric.py @@ -0,0 +1,22 @@ +import pytest + +import networkx as nx + + +def test_smetric(): + g = nx.Graph() + g.add_edge(1, 2) + g.add_edge(2, 3) + g.add_edge(2, 4) + g.add_edge(1, 4) + sm = nx.s_metric(g, normalized=False) + assert sm == 19.0 + + +# smNorm = nx.s_metric(g,normalized=True) +# assert_equal(smNorm, 0.95) + + +def test_normalized(): + with pytest.raises(nx.NetworkXError): + sm = nx.s_metric(nx.Graph(), normalized=True) diff --git 
a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_sparsifiers.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_sparsifiers.py new file mode 100644 index 0000000..78cabce --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_sparsifiers.py @@ -0,0 +1,137 @@ +"""Unit tests for the sparsifier computation functions.""" +import pytest + +import networkx as nx +from networkx.utils import py_random_state + +_seed = 2 + + +def _test_spanner(G, spanner, stretch, weight=None): + """Test whether a spanner is valid. + + This function tests whether the given spanner is a subgraph of the + given graph G with the same node set. It also tests for all shortest + paths whether they adhere to the given stretch. + + Parameters + ---------- + G : NetworkX graph + The original graph for which the spanner was constructed. + + spanner : NetworkX graph + The spanner to be tested. + + stretch : float + The proclaimed stretch of the spanner. + + weight : object + The edge attribute to use as distance. + """ + # check node set + assert set(G.nodes()) == set(spanner.nodes()) + + # check edge set and weights + for u, v in spanner.edges(): + assert G.has_edge(u, v) + if weight: + assert spanner[u][v][weight] == G[u][v][weight] + + # check connectivity and stretch + original_length = dict(nx.shortest_path_length(G, weight=weight)) + spanner_length = dict(nx.shortest_path_length(spanner, weight=weight)) + for u in G.nodes(): + for v in G.nodes(): + if u in original_length and v in original_length[u]: + assert spanner_length[u][v] <= stretch * original_length[u][v] + + +@py_random_state(1) +def _assign_random_weights(G, seed=None): + """Assigns random weights to the edges of a graph. + + Parameters + ---------- + + G : NetworkX graph + The original graph for which the spanner was constructed. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ """ + for u, v in G.edges(): + G[u][v]["weight"] = seed.random() + + +def test_spanner_trivial(): + """Test a trivial spanner with stretch 1.""" + G = nx.complete_graph(20) + spanner = nx.spanner(G, 1, seed=_seed) + + for u, v in G.edges: + assert spanner.has_edge(u, v) + + +def test_spanner_unweighted_complete_graph(): + """Test spanner construction on a complete unweighted graph.""" + G = nx.complete_graph(20) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_weighted_complete_graph(): + """Test spanner construction on a complete weighted graph.""" + G = nx.complete_graph(20) + _assign_random_weights(G, seed=_seed) + + spanner = nx.spanner(G, 4, weight="weight", seed=_seed) + _test_spanner(G, spanner, 4, weight="weight") + + spanner = nx.spanner(G, 10, weight="weight", seed=_seed) + _test_spanner(G, spanner, 10, weight="weight") + + +def test_spanner_unweighted_gnp_graph(): + """Test spanner construction on an unweighted gnp graph.""" + G = nx.gnp_random_graph(20, 0.4, seed=_seed) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_weighted_gnp_graph(): + """Test spanner construction on an weighted gnp graph.""" + G = nx.gnp_random_graph(20, 0.4, seed=_seed) + _assign_random_weights(G, seed=_seed) + + spanner = nx.spanner(G, 4, weight="weight", seed=_seed) + _test_spanner(G, spanner, 4, weight="weight") + + spanner = nx.spanner(G, 10, weight="weight", seed=_seed) + _test_spanner(G, spanner, 10, weight="weight") + + +def test_spanner_unweighted_disconnected_graph(): + """Test spanner construction on a disconnected graph.""" + G = nx.disjoint_union(nx.complete_graph(10), nx.complete_graph(10)) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_invalid_stretch(): + """Check whether an invalid stretch is caught.""" + with pytest.raises(ValueError): + G = nx.empty_graph() + nx.spanner(G, 0) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_structuralholes.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_structuralholes.py new file mode 100644 index 0000000..53a4e88 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_structuralholes.py @@ -0,0 +1,135 @@ +"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module.""" +import math + +import pytest + +import networkx as nx + + +class TestStructuralHoles: + """Unit tests for computing measures of structural holes. + + The expected values for these functions were originally computed using the + proprietary software `UCINET`_ and the free software `IGraph`_ , and then + computed by hand to make sure that the results are correct. + + .. _UCINET: https://sites.google.com/site/ucinetsoftware/home + .. 
_IGraph: http://igraph.org/ + + """ + + def setup(self): + self.D = nx.DiGraph() + self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1} + # Example from http://www.analytictech.com/connections/v20(1)/holes.htm + self.G = nx.Graph() + self.G.add_edges_from( + [ + ("A", "B"), + ("A", "F"), + ("A", "G"), + ("A", "E"), + ("E", "G"), + ("F", "G"), + ("B", "G"), + ("B", "D"), + ("D", "G"), + ("G", "C"), + ] + ) + self.G_weights = { + ("A", "B"): 2, + ("A", "F"): 3, + ("A", "G"): 5, + ("A", "E"): 2, + ("E", "G"): 8, + ("F", "G"): 3, + ("B", "G"): 4, + ("B", "D"): 1, + ("D", "G"): 3, + ("G", "C"): 10, + } + + def test_constraint_directed(self): + constraint = nx.constraint(self.D) + assert constraint[0] == pytest.approx(1.003, abs=1e-3) + assert constraint[1] == pytest.approx(1.003, abs=1e-3) + assert constraint[2] == pytest.approx(1.389, abs=1e-3) + + def test_effective_size_directed(self): + effective_size = nx.effective_size(self.D) + assert effective_size[0] == pytest.approx(1.167, abs=1e-3) + assert effective_size[1] == pytest.approx(1.167, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + constraint = nx.constraint(D, weight="weight") + assert constraint[0] == pytest.approx(0.840, abs=1e-3) + assert constraint[1] == pytest.approx(1.143, abs=1e-3) + assert constraint[2] == pytest.approx(1.378, abs=1e-3) + + def test_effective_size_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + effective_size = nx.effective_size(D, weight="weight") + assert effective_size[0] == pytest.approx(1.567, abs=1e-3) + assert effective_size[1] == pytest.approx(1.083, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_undirected(self): + constraint = nx.constraint(self.G) + assert constraint["G"] == pytest.approx(0.400, abs=1e-3) + assert constraint["A"] == pytest.approx(0.595, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_undirected_borgatti(self): + effective_size = nx.effective_size(self.G) + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_effective_size_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, 1, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + constraint = nx.constraint(G, weight="weight") + assert constraint["G"] == pytest.approx(0.299, abs=1e-3) + assert constraint["A"] == pytest.approx(0.795, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert effective_size["G"] == pytest.approx(5.47, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.47, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_isolated(self): + G = self.G.copy() + 
G.add_node(1) + constraint = nx.constraint(G) + assert math.isnan(constraint[1]) + + def test_effective_size_isolated(self): + G = self.G.copy() + G.add_node(1) + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert math.isnan(effective_size[1]) + + def test_effective_size_borgatti_isolated(self): + G = self.G.copy() + G.add_node(1) + effective_size = nx.effective_size(G) + assert math.isnan(effective_size[1]) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_summarization.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_summarization.py new file mode 100644 index 0000000..c951b86 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_summarization.py @@ -0,0 +1,641 @@ +""" +Unit tests for dedensification and graph summarization +""" +import pytest + +import networkx as nx + + +class TestDirectedDedensification: + def build_original_graph(self): + original_matrix = [ + ("1", "BC"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.DiGraph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", "BC"), + ("2", ["ABC"]), + ("3", ["A", "B", "6"]), + ("4", ["ABC"]), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ("ABC", "ABC"), + ] + compressed_graph = nx.DiGraph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_empty(self): + """ + Verify that an empty directed graph results in no compressor nodes + """ + G = nx.DiGraph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + @staticmethod + def densify(G, compressor_nodes, copy=True): + """ + Reconstructs the original graph from a dedensified, directed graph + + Parameters + ---------- + G: dedensified graph + A networkx graph + compressor_nodes: iterable + Iterable of compressor nodes in the dedensified graph + inplace: bool, optional (default: False) + Indicates if densification should be done inplace + + Returns + ------- + G: graph + A densified networkx graph + """ + if copy: + G = G.copy() + for compressor_node in compressor_nodes: + all_neighbors = set(nx.all_neighbors(G, compressor_node)) + out_neighbors = set(G.neighbors(compressor_node)) + for out_neighbor in out_neighbors: + G.remove_edge(compressor_node, out_neighbor) + in_neighbors = all_neighbors - out_neighbors + for in_neighbor in in_neighbors: + G.remove_edge(in_neighbor, compressor_node) + for out_neighbor in out_neighbors: + G.add_edge(in_neighbor, out_neighbor) + G.remove_node(compressor_node) + return G + + def setup_method(self): + self.c_nodes = ("ABC",) + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced the correct edges to/from compressor + nodes in a directed graph + """ + G = self.build_original_graph() + compressed_G = self.build_compressed_graph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + for s, t in compressed_graph.edges(): + o_s = "".join(sorted(s)) + o_t = "".join(sorted(t)) + compressed_graph_exists = compressed_graph.has_edge(s, t) + verified_compressed_exists = compressed_G.has_edge(o_s, o_t) + assert compressed_graph_exists == verified_compressed_exists + assert len(c_nodes) == len(self.c_nodes) + + def 
test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of comrpessor nodes + in a directed graph + """ + G = self.build_original_graph() + original_edge_count = len(G.edges()) + c_G, c_nodes = nx.dedensify(G, threshold=2) + compressed_edge_count = len(c_G.edges()) + assert compressed_edge_count <= original_edge_count + compressed_G = self.build_compressed_graph() + assert compressed_edge_count == len(compressed_G.edges()) + + def test_densify_edges(self): + """ + Verifies that densification produces the correct edges from the + original directed graph + """ + compressed_G = self.build_compressed_graph() + original_graph = self.densify(compressed_G, self.c_nodes, copy=True) + G = self.build_original_graph() + for s, t in G.edges(): + assert G.has_edge(s, t) == original_graph.has_edge(s, t) + + def test_densify_edge_count(self): + """ + Verifies that densification produces the correct number of edges in the + original directed graph + """ + compressed_G = self.build_compressed_graph() + compressed_edge_count = len(compressed_G.edges()) + original_graph = self.densify(compressed_G, self.c_nodes) + original_edge_count = len(original_graph.edges()) + assert compressed_edge_count <= original_edge_count + G = self.build_original_graph() + assert original_edge_count == len(G.edges()) + + +class TestUnDirectedDedensification: + def build_original_graph(self): + """ + Builds graph shown in the original research paper + """ + original_matrix = [ + ("1", "CB"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.Graph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def test_empty(self): + """ + Verify that an empty undirected graph results in no compressor nodes + """ + G = nx.Graph() + compressed_G, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + def setup_method(self): + self.c_nodes = ("6AB", "ABC") + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", ["B", "C"]), + ("2", ["ABC"]), + ("3", ["6AB"]), + ("4", ["ABC"]), + ("5", ["6AB"]), + ("6", ["6AB", "A"]), + ("A", ["6AB", "ABC"]), + ("B", ["ABC", "6AB"]), + ("C", ["ABC"]), + ] + compressed_graph = nx.Graph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced correct compressor nodes and the + correct edges to/from the compressor nodes in an undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2) + v_compressed_G = self.build_compressed_graph() + for s, t in c_G.edges(): + o_s = "".join(sorted(s)) + o_t = "".join(sorted(t)) + has_compressed_edge = c_G.has_edge(s, t) + verified_has_compressed_edge = v_compressed_G.has_edge(o_s, o_t) + assert has_compressed_edge == verified_has_compressed_edge + assert len(c_nodes) == len(self.c_nodes) + + def test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of edges in an + undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2, copy=True) + compressed_edge_count = len(c_G.edges()) + verified_original_edge_count = len(G.edges()) + assert compressed_edge_count <= verified_original_edge_count + verified_compressed_G = self.build_compressed_graph() + verified_compressed_edge_count = 
len(verified_compressed_G.edges()) + assert compressed_edge_count == verified_compressed_edge_count + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_summarization_empty(graph_type): + G = graph_type() + summary_graph = nx.snap_aggregation(G, node_attributes=("color",)) + assert nx.is_isomorphic(summary_graph, G) + + +class AbstractSNAP: + node_attributes = ("color",) + + def build_original_graph(self): + pass + + def build_summary_graph(self): + pass + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes, relationship_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def deterministic_labels(self, G): + node_labels = list(G.nodes) + node_labels = sorted(node_labels, key=lambda n: sorted(G.nodes[n]["group"])[0]) + node_labels.sort() + + label_mapping = dict() + for index, node in enumerate(node_labels): + label = "Supernode-%s" % index + label_mapping[node] = label + + return nx.relabel_nodes(G, label_mapping) + + +class TestSNAPNoEdgeTypes(AbstractSNAP): + relationship_attributes = () + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def build_original_graph(self): + nodes = { + "A": dict(color="Red"), + "B": dict(color="Red"), + "C": dict(color="Red"), + "D": dict(color="Red"), + "E": dict(color="Blue"), + "F": dict(color="Blue"), + "G": dict(color="Blue"), + "H": dict(color="Blue"), + "I": dict(color="Yellow"), + "J": dict(color="Yellow"), + "K": dict(color="Yellow"), + "L": dict(color="Yellow"), + } + edges = [ + ("A", "B"), + ("A", "C"), + ("A", "E"), + ("A", "I"), + ("B", "D"), + ("B", "J"), + ("B", "F"), + ("C", "G"), + ("D", "H"), + ("I", "J"), + ("J", "K"), + ("I", "L"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": dict(color="Red"), + "Supernode-1": dict(color="Red"), + "Supernode-2": dict(color="Blue"), + "Supernode-3": dict(color="Blue"), + "Supernode-4": dict(color="Yellow"), + "Supernode-5": dict(color="Yellow"), + } + edges = [ + ("Supernode-0", "Supernode-0"), + ("Supernode-0", "Supernode-1"), + ("Supernode-0", "Supernode-2"), + ("Supernode-0", "Supernode-4"), + ("Supernode-1", "Supernode-3"), + ("Supernode-4", "Supernode-4"), + ("Supernode-4", "Supernode-5"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirected(AbstractSNAP): + def build_original_graph(self): + nodes = 
{ + "A": dict(color="Red"), + "B": dict(color="Red"), + "C": dict(color="Red"), + "D": dict(color="Red"), + "E": dict(color="Blue"), + "F": dict(color="Blue"), + "G": dict(color="Blue"), + "H": dict(color="Blue"), + "I": dict(color="Yellow"), + "J": dict(color="Yellow"), + "K": dict(color="Yellow"), + "L": dict(color="Yellow"), + } + edges = [ + ("A", "B", "Strong"), + ("A", "C", "Weak"), + ("A", "E", "Strong"), + ("A", "I", "Weak"), + ("B", "D", "Weak"), + ("B", "J", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Weak"), + ("D", "H", "Weak"), + ("I", "J", "Strong"), + ("J", "K", "Strong"), + ("I", "L", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": dict(color="Red"), + "Supernode-1": dict(color="Red"), + "Supernode-2": dict(color="Blue"), + "Supernode-3": dict(color="Blue"), + "Supernode-4": dict(color="Yellow"), + "Supernode-5": dict(color="Yellow"), + } + edges = [ + ("Supernode-0", "Supernode-0", "Strong"), + ("Supernode-0", "Supernode-1", "Weak"), + ("Supernode-0", "Supernode-2", "Strong"), + ("Supernode-0", "Supernode-4", "Weak"), + ("Supernode-1", "Supernode-3", "Weak"), + ("Supernode-4", "Supernode-4", "Strong"), + ("Supernode-4", "Supernode-5", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, types=[dict(type=type)]) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirected(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": dict(color="Red"), + "B": dict(color="Red"), + "C": dict(color="Green"), + "D": dict(color="Green"), + "E": dict(color="Blue"), + "F": dict(color="Blue"), + "G": dict(color="Yellow"), + "H": dict(color="Yellow"), + } + edges = [ + ("A", "C", "Strong"), + ("A", "E", "Strong"), + ("A", "F", "Weak"), + ("B", "D", "Strong"), + ("B", "E", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Strong"), + ("C", "F", "Strong"), + ("D", "E", "Strong"), + ("D", "H", "Strong"), + ("G", "E", "Strong"), + ("H", "F", "Strong"), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": dict(color="Red"), + "Supernode-1": dict(color="Green"), + "Supernode-2": dict(color="Blue"), + "Supernode-3": dict(color="Yellow"), + } + edges = [ + ("Supernode-0", "Supernode-1", [{"type": "Strong"}]), + ("Supernode-0", "Supernode-2", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-1", "Supernode-2", [{"type": "Strong"}]), + ("Supernode-1", "Supernode-3", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-2", [{"type": "Strong"}]), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + G.add_edge(source, target, types=types) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + 
} + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": dict(color="Red"), + "B": dict(color="Red"), + "C": dict(color="Red"), + "D": dict(color="Blue"), + "E": dict(color="Blue"), + "F": dict(color="Blue"), + "G": dict(color="Yellow"), + "H": dict(color="Yellow"), + "I": dict(color="Yellow"), + } + edges = [ + ("A", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak", "Strong"]), + ("D", "I", ["Strong"]), + ("E", "H", ["Strong"]), + ("F", "G", ["Weak"]), + ("I", "G", ["Weak", "Strong"]), + ("I", "H", ["Weak", "Strong"]), + ("G", "H", ["Weak", "Strong"]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": dict(color="Red"), + "Supernode-1": dict(color="Blue"), + "Supernode-2": dict(color="Yellow"), + "Supernode-3": dict(color="Blue"), + "Supernode-4": dict(color="Yellow"), + "Supernode-5": dict(color="Red"), + } + edges = [ + ("Supernode-1", "Supernode-2", [{"type": "Weak"}]), + ("Supernode-2", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-3", "Supernode-4", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-5", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-4", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": dict(color="Red"), + "B": dict(color="Red"), + "C": dict(color="Green"), + "D": dict(color="Green"), + "E": dict(color="Blue"), + "F": dict(color="Blue"), + "G": dict(color="Yellow"), + "H": dict(color="Yellow"), + } + edges = [ + ("A", "C", ["Weak", "Strong"]), + ("A", "E", ["Strong"]), + ("A", "F", ["Weak"]), + ("B", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak"]), + ("B", "F", ["Strong"]), + ("C", "G", ["Weak", "Strong"]), + ("C", "F", ["Strong"]), + ("D", "E", ["Strong"]), + ("D", "H", ["Weak", "Strong"]), + ("G", "E", ["Strong"]), + ("H", "F", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": dict(color="Red"), + "Supernode-1": dict(color="Blue"), + "Supernode-2": dict(color="Yellow"), + "Supernode-3": dict(color="Blue"), + } + edges = [ + ("Supernode-0", "Supernode-1", ["Weak", "Strong"]), + ("Supernode-0", "Supernode-2", ["Weak", "Strong"]), + ("Supernode-1", "Supernode-2", ["Strong"]), + ("Supernode-1", "Supernode-3", ["Weak", "Strong"]), + ("Supernode-3", "Supernode-2", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes 
= { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_swap.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_swap.py new file mode 100644 index 0000000..9982b95 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_swap.py @@ -0,0 +1,94 @@ +import pytest + +import networkx as nx + +# import random +# random.seed(0) + + +def test_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_double_edge_swap_seed(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40, seed=1) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_low_window_threshold(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, _window_threshold=0, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star(): + # Testing ui==xi in connected_double_edge_swap + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star_low_window_threshold(): + # Testing ui==xi in connected_double_edge_swap with low window threshold + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, _window_threshold=0, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(3)) + + +def test_double_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(10), nswap=1, max_tries=0) + + +def test_double_edge_directed(): + graph = nx.DiGraph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXError, match="not defined for directed graphs."): + G = nx.double_edge_swap(graph) + + +def test_double_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.double_edge_swap(nx.complete_graph(4), nswap=1, max_tries=5) + + +def test_connected_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.connected_double_edge_swap(nx.path_graph(3)) + + +def test_connected_double_edge_swap_not_connected(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(3) + nx.add_path(G, [10, 11, 12]) + G = nx.connected_double_edge_swap(G) + + +def test_degree_seq_c4(): + G = nx.cycle_graph(4) + degrees = sorted(d for n, d in G.degree()) + G = nx.double_edge_swap(G, 1, 100) + assert degrees == sorted(d for n, d in G.degree()) diff --git 
a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_threshold.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_threshold.py new file mode 100644 index 0000000..c06784a --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_threshold.py @@ -0,0 +1,269 @@ +""" +Threshold Graphs +================ +""" + +import pytest + +import networkx as nx +import networkx.algorithms.threshold as nxt +from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic + +cnlti = nx.convert_node_labels_to_integers + + +class TestGeneratorThreshold: + def test_threshold_sequence_graph_test(self): + G = nx.star_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence(list(d for n, d in G.degree())) + + G = nx.complete_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence(list(d for n, d in G.degree())) + + deg = [3, 2, 2, 1, 1, 1] + assert not nxt.is_threshold_sequence(deg) + + deg = [3, 2, 2, 1] + assert nxt.is_threshold_sequence(deg) + + G = nx.generators.havel_hakimi_graph(deg) + assert nxt.is_threshold_graph(G) + + def test_creation_sequences(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + + with pytest.raises(ValueError): + nxt.creation_sequence(deg, with_labels=True, compact=True) + + cs0 = nxt.creation_sequence(deg) + H0 = nxt.threshold_graph(cs0) + assert "".join(cs0) == "ddid" + + cs1 = nxt.creation_sequence(deg, with_labels=True) + H1 = nxt.threshold_graph(cs1) + assert cs1 == [(1, "d"), (2, "d"), (3, "i"), (0, "d")] + + cs2 = nxt.creation_sequence(deg, compact=True) + H2 = nxt.threshold_graph(cs2) + assert cs2 == [2, 1, 1] + assert "".join(nxt.uncompact(cs2)) == "ddid" + assert graph_could_be_isomorphic(H0, G) + assert graph_could_be_isomorphic(H0, H1) + assert graph_could_be_isomorphic(H0, H2) + + def test_make_compact(self): + assert nxt.make_compact(["d", "d", "d", "i", "d", "d"]) == [3, 1, 2] + assert nxt.make_compact([3, 1, 2]) == [3, 1, 2] + assert pytest.raises(TypeError, nxt.make_compact, [3.0, 1.0, 2.0]) + + def test_uncompact(self): + assert nxt.uncompact([3, 1, 2]) == ["d", "d", "d", "i", "d", "d"] + assert nxt.uncompact(["d", "d", "i", "d"]) == ["d", "d", "i", "d"] + assert nxt.uncompact( + nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + ) == nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + assert pytest.raises(TypeError, nxt.uncompact, [3.0, 1.0, 2.0]) + + def test_creation_sequence_to_weights(self): + assert nxt.creation_sequence_to_weights([3, 1, 2]) == [ + 0.5, + 0.5, + 0.5, + 0.25, + 0.75, + 0.75, + ] + assert pytest.raises( + TypeError, nxt.creation_sequence_to_weights, [3.0, 1.0, 2.0] + ) + + def test_weights_to_creation_sequence(self): + deg = [3, 2, 2, 1] + with pytest.raises(ValueError): + nxt.weights_to_creation_sequence(deg, with_labels=True, compact=True) + assert nxt.weights_to_creation_sequence(deg, with_labels=True) == [ + (3, "d"), + (1, "d"), + (2, "d"), + (0, "d"), + ] + assert nxt.weights_to_creation_sequence(deg, compact=True) == [4] + + def test_find_alternating_4_cycle(self): + G = nx.Graph() + G.add_edge(1, 2) + assert not nxt.find_alternating_4_cycle(G) + + def test_shortest_path(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + cs1 = nxt.creation_sequence(deg, with_labels=True) + for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3), (3, 1), (1, 2), (2, 3)]: + assert nxt.shortest_path(cs1, n, m) == nx.shortest_path(G, n, m) + + spl = nxt.shortest_path_length(cs1, 3) + 
spl2 = nxt.shortest_path_length([t for v, t in cs1], 2) + assert spl == spl2 + + spld = {} + for j, pl in enumerate(spl): + n = cs1[j][0] + spld[n] = pl + assert spld == nx.single_source_shortest_path_length(G, 3) + + assert nxt.shortest_path(["d", "d", "d", "i", "d", "d"], 1, 2) == [1, 2] + assert nxt.shortest_path([3, 1, 2], 1, 2) == [1, 2] + assert pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1, 2) + assert pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], "a", 2) + assert pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], 1, "b") + assert nxt.shortest_path([3, 1, 2], 1, 1) == [1] + + def test_shortest_path_length(self): + assert nxt.shortest_path_length([3, 1, 2], 1) == [1, 0, 1, 2, 1, 1] + assert nxt.shortest_path_length(["d", "d", "d", "i", "d", "d"], 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + assert nxt.shortest_path_length(("d", "d", "d", "i", "d", "d"), 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + assert pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1) + + def test_random_threshold_sequence(self): + assert len(nxt.random_threshold_sequence(10, 0.5)) == 10 + assert nxt.random_threshold_sequence(10, 0.5, seed=42) == [ + "d", + "i", + "d", + "d", + "d", + "i", + "i", + "i", + "d", + "d", + ] + assert pytest.raises(ValueError, nxt.random_threshold_sequence, 10, 1.5) + + def test_right_d_threshold_sequence(self): + assert nxt.right_d_threshold_sequence(3, 2) == ["d", "i", "d"] + assert pytest.raises(ValueError, nxt.right_d_threshold_sequence, 2, 3) + + def test_left_d_threshold_sequence(self): + assert nxt.left_d_threshold_sequence(3, 2) == ["d", "i", "d"] + assert pytest.raises(ValueError, nxt.left_d_threshold_sequence, 2, 3) + + def test_weights_thresholds(self): + wseq = [3, 4, 3, 3, 5, 6, 5, 4, 5, 6] + cs = nxt.weights_to_creation_sequence(wseq, threshold=10) + wseq = nxt.creation_sequence_to_weights(cs) + cs2 = nxt.weights_to_creation_sequence(wseq) + assert cs == cs2 + + wseq = nxt.creation_sequence_to_weights(nxt.uncompact([3, 1, 2, 3, 3, 2, 3])) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights([3, 1, 2, 3, 3, 2, 3]) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights(list(enumerate("ddidiiidididi"))) + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididi") + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididid") + ws = [s / 12 for s in [6, 6, 5, 7, 4, 4, 4, 8, 3, 9, 2, 10, 1, 11]] + assert sum(abs(c - d) for c, d in zip(wseq, ws)) < 1e-14 + + def test_finding_routines(self): + G = nx.Graph({1: [2], 2: [3], 3: [4], 4: [5], 5: [6]}) + G.add_edge(2, 4) + G.add_edge(2, 5) + G.add_edge(2, 7) + G.add_edge(3, 6) + G.add_edge(4, 6) + + # Alternating 4 cycle + assert nxt.find_alternating_4_cycle(G) == [1, 2, 3, 6] + + # Threshold graph + TG = nxt.find_threshold_graph(G) + assert nxt.is_threshold_graph(TG) + assert sorted(TG.nodes()) == [1, 2, 3, 4, 5, 7] + + cs = nxt.creation_sequence(dict(TG.degree()), with_labels=True) + assert nxt.find_creation_sequence(G) == cs + + def test_fast_versions_properties_threshold_graphs(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + assert nxt.density("ddiiddid") == nx.density(G) + assert sorted(nxt.degree_sequence(cs)) == sorted(d for n, d in G.degree()) + + ts = 
nxt.triangle_sequence(cs) + assert ts == list(nx.triangles(G).values()) + assert sum(ts) // 3 == nxt.triangles(cs) + + c1 = nxt.cluster_sequence(cs) + c2 = list(nx.clustering(G).values()) + assert sum(abs(c - d) for c, d in zip(c1, c2)) == pytest.approx(0, abs=1e-7) + + b1 = nx.betweenness_centrality(G).values() + b2 = nxt.betweenness_sequence(cs) + assert sum(abs(c - d) for c, d in zip(b1, b2)) < 1e-14 + + assert nxt.eigenvalues(cs) == [0, 1, 3, 3, 5, 7, 7, 8] + + # Degree Correlation + assert abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12 + assert nxt.degree_correlation("diiiddi") == -0.8 + assert nxt.degree_correlation("did") == -1.0 + assert nxt.degree_correlation("ddd") == 1.0 + assert nxt.eigenvalues("dddiii") == [0, 0, 0, 0, 3, 3] + assert nxt.eigenvalues("dddiiid") == [0, 1, 1, 1, 4, 4, 7] + + def test_tg_creation_routines(self): + s = nxt.left_d_threshold_sequence(5, 7) + s = nxt.right_d_threshold_sequence(5, 7) + s1 = nxt.swap_d(s, 1.0, 1.0) + s1 = nxt.swap_d(s, 1.0, 1.0, seed=1) + + def test_eigenvectors(self): + np = pytest.importorskip("numpy") + eigenval = np.linalg.eigvals + pytest.importorskip("scipy") + + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + (tgeval, tgevec) = nxt.eigenvectors(cs) + np.testing.assert_allclose([np.dot(lv, lv) for lv in tgevec], 1.0, rtol=1e-9) + lapl = nx.laplacian_matrix(G) + + def test_create_using(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + assert pytest.raises( + nx.exception.NetworkXError, + nxt.threshold_graph, + cs, + create_using=nx.DiGraph(), + ) + MG = nxt.threshold_graph(cs, create_using=nx.MultiGraph()) + assert sorted(MG.edges()) == sorted(G.edges()) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_tournament.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_tournament.py new file mode 100644 index 0000000..0a88b42 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_tournament.py @@ -0,0 +1,162 @@ +"""Unit tests for the :mod:`networkx.algorithms.tournament` module.""" +from itertools import combinations + +import pytest + +from networkx import DiGraph +from networkx.algorithms.tournament import ( + hamiltonian_path, + index_satisfying, + is_reachable, + is_strongly_connected, + is_tournament, + random_tournament, + score_sequence, + tournament_matrix, +) + + +def test_condition_not_satisfied(): + condition = lambda x: x > 0 + iter_in = [0] + assert index_satisfying(iter_in, condition) == 1 + + +def test_empty_iterable(): + condition = lambda x: x > 0 + with pytest.raises(ValueError): + index_satisfying([], condition) + + +def test_is_tournament(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + assert is_tournament(G) + + +def test_self_loops(): + """A tournament must have no self-loops.""" + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(0, 0) + assert not is_tournament(G) + + +def test_missing_edges(): + """A tournament must not have any pair of nodes without at least + one edge joining the pair. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3)]) + assert not is_tournament(G) + + +def test_bidirectional_edges(): + """A tournament must not have any pair of nodes with greater + than one edge joining the pair. 
+ + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(1, 0) + assert not is_tournament(G) + + +def test_graph_is_tournament(): + for _ in range(10): + G = random_tournament(5) + assert is_tournament(G) + + +def test_graph_is_tournament_seed(): + for _ in range(10): + G = random_tournament(5, seed=1) + assert is_tournament(G) + + +def test_graph_is_tournament_one_node(): + G = random_tournament(1) + assert is_tournament(G) + + +def test_graph_is_tournament_zero_node(): + G = random_tournament(0) + assert is_tournament(G) + + +def test_hamiltonian_empty_graph(): + path = hamiltonian_path(DiGraph()) + assert len(path) == 0 + + +def test_path_is_hamiltonian(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + + +def test_hamiltonian_cycle(): + """Tests that :func:`networkx.tournament.hamiltonian_path` + returns a Hamiltonian cycle when provided a strongly connected + tournament. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + assert path[0] in G[path[-1]] + + +def test_score_sequence_edge(): + G = DiGraph([(0, 1)]) + assert score_sequence(G) == [0, 1] + + +def test_score_sequence_triangle(): + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert score_sequence(G) == [1, 1, 1] + + +def test_tournament_matrix(): + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + npt = np.testing + G = DiGraph([(0, 1)]) + m = tournament_matrix(G) + npt.assert_array_equal(m.todense(), np.array([[0, 1], [-1, 0]])) + + +def test_reachable_pair(): + """Tests for a reachable pair of nodes.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_reachable(G, 0, 2) + + +def test_same_node_is_reachable(): + """Tests that a node is always reachable from it.""" + # G is an arbitrary tournament on ten nodes. 
+ G = DiGraph(sorted(p) for p in combinations(range(10), 2)) + assert all(is_reachable(G, v, v) for v in G) + + +def test_unreachable_pair(): + """Tests for an unreachable pair of nodes.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_reachable(G, 1, 0) + + +def test_is_strongly_connected(): + """Tests for a strongly connected tournament.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_strongly_connected(G) + + +def test_not_strongly_connected(): + """Tests for a tournament that is not strongly connected.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_strongly_connected(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_triads.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_triads.py new file mode 100644 index 0000000..446c2db --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_triads.py @@ -0,0 +1,272 @@ +"""Tests for the :mod:`networkx.algorithms.triads` module.""" + +import itertools +from collections import defaultdict +from random import sample + +import pytest + +import networkx as nx + + +def test_triadic_census(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = nx.triadic_census(G) + assert expected == actual + + +def test_is_triad(): + """Tests the is_triad function""" + G = nx.karate_club_graph() + G = G.to_directed() + for i in range(100): + nodes = sample(sorted(G.nodes()), 3) + G2 = G.subgraph(nodes) + assert nx.is_triad(G2) + + +def test_all_triplets(): + """Tests the all_triplets function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = [ + f"{i},{j},{k}" + for i in range(7) + for j in range(i + 1, 7) + for k in range(j + 1, 7) + ] + expected = [set(x.split(",")) for x in expected] + actual = list(set(x) for x in nx.all_triplets(G)) + assert all([any([s1 == s2 for s1 in expected]) for s2 in actual]) + + +def test_all_triads(): + """Tests the all_triplets function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = [ + f"{i},{j},{k}" + for i in range(7) + for j in range(i + 1, 7) + for k in range(j + 1, 7) + ] + expected = [G.subgraph(x.split(",")) for x in expected] + actual = list(nx.all_triads(G)) + assert all(any([nx.is_isomorphic(G1, G2) for G1 in expected]) for G2 in actual) + + +def test_triad_type(): + """Tests the triad_type function.""" + # 0 edges (1 type) + G = nx.DiGraph({0: [], 1: [], 2: []}) + assert nx.triad_type(G) == "003" + # 1 edge (1 type) + G = nx.DiGraph({0: [1], 1: [], 2: []}) + assert nx.triad_type(G) == "012" + # 2 edges (4 types) + G = nx.DiGraph([(0, 1), (0, 2)]) + assert nx.triad_type(G) == "021D" + G = nx.DiGraph({0: [1], 1: [0], 2: []}) + assert nx.triad_type(G) == "102" + G = nx.DiGraph([(0, 1), (2, 1)]) + assert nx.triad_type(G) == "021U" + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.triad_type(G) == "021C" + # 3 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1)]) + assert nx.triad_type(G) == "111D" + G = nx.DiGraph([(0, 1), (1, 0), (1, 2)]) + assert nx.triad_type(G) == "111U" + G = nx.DiGraph([(0, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) == "030T" + G = 
nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert nx.triad_type(G) == "030C" + # 4 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (0, 2)]) + assert nx.triad_type(G) == "201" + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (2, 1)]) + assert nx.triad_type(G) == "120D" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (1, 2)]) + assert nx.triad_type(G) == "120U" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (2, 1)]) + assert nx.triad_type(G) == "120C" + # 5 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) == "210" + # 6 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (1, 2), (2, 1), (0, 2), (2, 0)]) + assert nx.triad_type(G) == "300" + + +def test_triads_by_type(): + """Tests the all_triplets function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + all_triads = nx.all_triads(G) + expected = defaultdict(list) + for triad in all_triads: + name = nx.triad_type(triad) + expected[name].append(triad) + actual = nx.triads_by_type(G) + assert set(actual.keys()) == set(expected.keys()) + for tri_type, actual_Gs in actual.items(): + expected_Gs = expected[tri_type] + for a in actual_Gs: + assert any(nx.is_isomorphic(a, e) for e in expected_Gs) + + +def test_random_triad(): + """Tests the random_triad function""" + G = nx.karate_club_graph() + G = G.to_directed() + for i in range(100): + assert nx.is_triad(nx.random_triad(G)) + + +def test_triadic_census_short_path_nodelist(): + G = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for nl in ["a", "b", "c", "ab", "ac", "bc", "abc"]: + triad_census = nx.triadic_census(G, nodelist=nl) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_correct_nodelist_values(): + G = nx.path_graph(5, create_using=nx.DiGraph) + msg = r"nodelist includes duplicate nodes or nodes not in G" + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, 2, 3]) + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, "a", 3]) + + +def test_triadic_census_tiny_graphs(): + tc = nx.triadic_census(nx.empty_graph(0, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(1, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(2, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.DiGraph([(1, 2)])) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + +def test_triadic_census_selfloops(): + GG = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for n in GG: + G = GG.copy() + G.add_edge(n, n) + tc = nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + GG = nx.path_graph("abcde", create_using=nx.DiGraph) + tbt = nx.triads_by_type(GG) + for n in GG: + GG.add_edge(n, n) + tc = nx.triadic_census(GG) + assert tc == {tt: len(tbt[tt]) for tt in tc} + + +def test_triadic_census_four_path(): + G = nx.path_graph("abcd", create_using=nx.DiGraph) + expected = {"012": 2, "021C": 2} + triad_census = nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_four_path_nodelist(): + G = nx.path_graph("abcd", create_using=nx.DiGraph) + expected_end = {"012": 2, "021C": 1} + expected_mid = {"012": 1, "021C": 2} + a_triad_census = 
nx.triadic_census(G, nodelist=["a"]) + assert expected_end == {typ: cnt for typ, cnt in a_triad_census.items() if cnt > 0} + b_triad_census = nx.triadic_census(G, nodelist=["b"]) + assert expected_mid == {typ: cnt for typ, cnt in b_triad_census.items() if cnt > 0} + c_triad_census = nx.triadic_census(G, nodelist=["c"]) + assert expected_mid == {typ: cnt for typ, cnt in c_triad_census.items() if cnt > 0} + d_triad_census = nx.triadic_census(G, nodelist=["d"]) + assert expected_end == {typ: cnt for typ, cnt in d_triad_census.items() if cnt > 0} + + +def test_triadic_census_nodelist(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = {k: 0 for k in expected} + for node in G.nodes(): + node_triad_census = nx.triadic_census(G, nodelist=[node]) + for triad_key in expected: + actual[triad_key] += node_triad_census[triad_key] + # Divide all counts by 3 + for k, v in actual.items(): + actual[k] //= 3 + assert expected == actual + + +@pytest.mark.parametrize("N", [5, 10]) +def test_triandic_census_on_random_graph(N): + G = nx.binomial_graph(N, 0.3, directed=True, seed=42) + tc1 = nx.triadic_census(G) + tbt = nx.triads_by_type(G) + tc2 = {tt: len(tbt[tt]) for tt in tc1} + assert tc1 == tc2 + + for n in G: + tc1 = nx.triadic_census(G, nodelist={n}) + tc2 = {tt: sum(1 for t in tbt.get(tt, []) if n in t) for tt in tc1} + assert tc1 == tc2 + + for ns in itertools.combinations(G, 2): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 + + for ns in itertools.combinations(G, 3): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_vitality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_vitality.py new file mode 100644 index 0000000..248206e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_vitality.py @@ -0,0 +1,41 @@ +import networkx as nx + + +class TestClosenessVitality: + def test_unweighted(self): + G = nx.cycle_graph(3) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 2, 1: 2, 2: 2} + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_unweighted_digraph(self): + G = nx.DiGraph(nx.cycle_graph(3)) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_weighted_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 8, 1: 8, 2: 8} + + def test_weighted_multidigraph(self): + G = nx.MultiDiGraph() + nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 8, 1: 8, 2: 8} + + def test_disconnecting_graph(self): + """Tests that the closeness vitality of a node whose removal + disconnects the graph is 
negative infinity. + + """ + G = nx.path_graph(3) + assert nx.closeness_vitality(G, node=1) == -float("inf") diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_voronoi.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_voronoi.py new file mode 100644 index 0000000..3269ae6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_voronoi.py @@ -0,0 +1,103 @@ +import networkx as nx +from networkx.utils import pairwise + + +class TestVoronoiCells: + """Unit tests for the Voronoi cells function.""" + + def test_isolates(self): + """Tests that a graph with isolated nodes has all isolates in + one block of the partition. + + """ + G = nx.empty_graph(5) + cells = nx.voronoi_cells(G, {0, 2, 4}) + expected = {0: {0}, 2: {2}, 4: {4}, "unreachable": {1, 3}} + assert expected == cells + + def test_undirected_unweighted(self): + G = nx.cycle_graph(6) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 5}, 3: {2, 3, 4}} + assert expected == cells + + def test_directed_unweighted(self): + # This is the singly-linked directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 2}, 3: {3, 4, 5}} + assert expected == cells + + def test_directed_inward(self): + """Tests that reversing the graph gives the "inward" Voronoi + partition. + + """ + # This is the singly-linked reverse directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + G = G.reverse(copy=False) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 4, 5}, 3: {1, 2, 3}} + assert expected == cells + + def test_undirected_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_directed_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1), (3, 2, 1), (2, 1, 1)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multigraph_unweighted(self): + """Tests that the Voronoi cells for a multigraph are the same as + for a simple graph. + + """ + edges = [(0, 1), (1, 2), (2, 3)] + G = nx.MultiGraph(2 * edges) + H = nx.Graph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multidigraph_unweighted(self): + # This is the twice-singly-linked directed cycle graph on six nodes. 
+ edges = list(pairwise(range(6), cyclic=True)) + G = nx.MultiDiGraph(2 * edges) + H = nx.DiGraph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multigraph_weighted(self): + edges = [(0, 1, 10), (0, 1, 10), (1, 2, 1), (1, 2, 100), (2, 3, 1), (2, 3, 100)] + G = nx.MultiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multidigraph_weighted(self): + edges = [ + (0, 1, 10), + (0, 1, 10), + (1, 2, 1), + (2, 3, 1), + (3, 2, 10), + (3, 2, 1), + (2, 1, 10), + (2, 1, 1), + ] + G = nx.MultiDiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_wiener.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_wiener.py new file mode 100644 index 0000000..1cb4040 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tests/test_wiener.py @@ -0,0 +1,66 @@ +"""Unit tests for the :mod:`networkx.algorithms.wiener` module.""" + + +from networkx import DiGraph, complete_graph, empty_graph, path_graph, wiener_index + + +class TestWienerIndex: + """Unit tests for computing the Wiener index of a graph.""" + + def test_disconnected_graph(self): + """Tests that the Wiener index of a disconnected graph is + positive infinity. + + """ + assert wiener_index(empty_graph(2)) == float("inf") + + def test_directed(self): + """Tests that each pair of nodes in the directed graph is + counted once when computing the Wiener index. + + """ + G = complete_graph(3) + H = DiGraph(G) + assert (2 * wiener_index(G)) == wiener_index(H) + + def test_complete_graph(self): + """Tests that the Wiener index of the complete graph is simply + the number of edges. + + """ + n = 10 + G = complete_graph(n) + assert wiener_index(G) == (n * (n - 1) / 2) + + def test_path_graph(self): + """Tests that the Wiener index of the path graph is correctly + computed. + + """ + # In P_n, there are n - 1 pairs of vertices at distance one, n - + # 2 pairs at distance two, n - 3 at distance three, ..., 1 at + # distance n - 1, so the Wiener index should be + # + # 1 * (n - 1) + 2 * (n - 2) + ... + (n - 2) * 2 + (n - 1) * 1 + # + # For example, in P_5, + # + # 1 * 4 + 2 * 3 + 3 * 2 + 4 * 1 = 2 (1 * 4 + 2 * 3) + # + # and in P_6, + # + # 1 * 5 + 2 * 4 + 3 * 3 + 4 * 2 + 5 * 1 = 2 (1 * 5 + 2 * 4) + 3 * 3 + # + # assuming n is *odd*, this gives the formula + # + # 2 \sum_{i = 1}^{(n - 1) / 2} [i * (n - i)] + # + # assuming n is *even*, this gives the formula + # + # 2 \sum_{i = 1}^{n / 2} [i * (n - i)] - (n / 2) ** 2 + # + n = 9 + G = path_graph(n) + expected = 2 * sum(i * (n - i) for i in range(1, (n // 2) + 1)) + actual = wiener_index(G) + assert expected == actual diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/threshold.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/threshold.py new file mode 100644 index 0000000..5c50394 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/threshold.py @@ -0,0 +1,974 @@ +""" +Threshold Graphs - Creation, manipulation and identification. +""" +from math import sqrt + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["is_threshold_graph", "find_threshold_graph"] + + +def is_threshold_graph(G): + """ + Returns `True` if `G` is a threshold graph. 
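The closed form asserted in `test_path_graph` above can be cross-checked by brute force. A small sketch, not part of the diff, using the odd case n = 9 from the test:

```python
import networkx as nx

n = 9  # odd, so W(P_n) = 2 * sum_{i=1}^{(n-1)/2} i * (n - i)
G = nx.path_graph(n)
brute = sum(nx.shortest_path_length(G, u, v) for u in G for v in G) / 2
formula = 2 * sum(i * (n - i) for i in range(1, (n // 2) + 1))
assert brute == formula == nx.wiener_index(G)  # all equal 120 for n = 9
```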
+ + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph` + + Returns + ------- + bool + `True` if `G` is a threshold graph, `False` otherwise. + + Examples + -------- + >>> from networkx.algorithms.threshold import is_threshold_graph + >>> G = nx.path_graph(3) + >>> is_threshold_graph(G) + True + >>> G = nx.barbell_graph(3, 3) + >>> is_threshold_graph(G) + False + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return is_threshold_sequence(list(d for n, d in G.degree())) + + +def is_threshold_sequence(degree_sequence): + """ + Returns True if the sequence is a threshold degree seqeunce. + + Uses the property that a threshold graph must be constructed by + adding either dominating or isolated nodes. Thus, it can be + deconstructed iteratively by removing a node of degree zero or a + node that connects to the remaining nodes. If this deconstruction + failes then the sequence is not a threshold sequence. + """ + ds = degree_sequence[:] # get a copy so we don't destroy original + ds.sort() + while ds: + if ds[0] == 0: # if isolated node + ds.pop(0) # remove it + continue + if ds[-1] != len(ds) - 1: # is the largest degree node dominating? + return False # no, not a threshold degree sequence + ds.pop() # yes, largest is the dominating node + ds = [d - 1 for d in ds] # remove it and decrement all degrees + return True + + +def creation_sequence(degree_sequence, with_labels=False, compact=False): + """ + Determines the creation sequence for the given threshold degree sequence. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present when it + is added. The first node added is by convention 'd'. + This list can be converted to a string if desired using "".join(cs) + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. + + Returns None if the sequence is not a threshold sequence + """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(degree_sequence, dict): # labeled degree seqeunce + ds = [[degree, label] for (label, degree) in degree_sequence.items()] + else: + ds = [[d, i] for i, d in enumerate(degree_sequence)] + ds.sort() + cs = [] # creation sequence + while ds: + if ds[0][0] == 0: # isolated node + (d, v) = ds.pop(0) + if len(ds) > 0: # make sure we start with a d + cs.insert(0, (v, "i")) + else: + cs.insert(0, (v, "d")) + continue + if ds[-1][0] != len(ds) - 1: # Not dominating node + return None # not a threshold degree sequence + (d, v) = ds.pop() + cs.insert(0, (v, "d")) + ds = [[d[0] - 1, d[1]] for d in ds] # decrement due to removing node + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + +def make_compact(creation_sequence): + """ + Returns the creation sequence in a compact form + that is the number of 'i's and 'd's alternating. 
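The three output forms described in the `creation_sequence` docstring above, shown on one degree sequence. A sketch, not part of the diff; the degree sequence [2, 2, 2, 4, 4] is an arbitrary example that realizes the sequence d,i,i,d,d:

```python
from networkx.algorithms.threshold import creation_sequence

degs = [2, 2, 2, 4, 4]  # a threshold degree sequence
print(creation_sequence(degs))                    # ['d', 'i', 'i', 'd', 'd']
print(creation_sequence(degs, with_labels=True))  # [(node, 'd' or 'i'), ...]
print(creation_sequence(degs, compact=True))      # run lengths: [1, 2, 2]
```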
+ + Examples + -------- + >>> from networkx.algorithms.threshold import make_compact + >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"]) + [1, 2, 2, 3] + >>> make_compact(["d", "d", "d", "i", "d", "d"]) + [3, 1, 2] + + Notice that the first number is the first vertex + to be used for construction and so is always 'd'. + + Labeled creation sequences lose their labels in the + compact representation. + + >>> make_compact([3, 1, 2]) + [3, 1, 2] + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + cs = creation_sequence[:] + elif isinstance(first, tuple): # labeled creation sequence + cs = [s[1] for s in creation_sequence] + elif isinstance(first, int): # compact creation sequence + return creation_sequence + else: + raise TypeError("Not a valid creation sequence type") + + ccs = [] + count = 1 # count the run lengths of d's or i's. + for i in range(1, len(cs)): + if cs[i] == cs[i - 1]: + count += 1 + else: + ccs.append(count) + count = 1 + ccs.append(count) # don't forget the last one + return ccs + + +def uncompact(creation_sequence): + """ + Converts a compact creation sequence for a threshold + graph to a standard creation sequence (unlabeled). + If the creation_sequence is already standard, return it. + See creation_sequence. + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + return creation_sequence + elif isinstance(first, tuple): # labeled creation sequence + return creation_sequence + elif isinstance(first, int): # compact creation sequence + ccscopy = creation_sequence[:] + else: + raise TypeError("Not a valid creation sequence type") + cs = [] + while ccscopy: + cs.extend(ccscopy.pop(0) * ["d"]) + if ccscopy: + cs.extend(ccscopy.pop(0) * ["i"]) + return cs + + +def creation_sequence_to_weights(creation_sequence): + """ + Returns a list of node weights which create the threshold + graph designated by the creation sequence. The weights + are scaled so that the threshold is 1.0. The order of the + nodes is the same as that in the creation sequence. + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + if isinstance(creation_sequence, list): + wseq = creation_sequence[:] + else: + wseq = list(creation_sequence) # string like 'ddidid' + elif isinstance(first, tuple): # labeled creation sequence + wseq = [v[1] for v in creation_sequence] + elif isinstance(first, int): # compact creation sequence + wseq = uncompact(creation_sequence) + else: + raise TypeError("Not a valid creation sequence type") + # pass through twice--first backwards + wseq.reverse() + w = 0 + prev = "i" + for j, s in enumerate(wseq): + if s == "i": + wseq[j] = w + prev = s + elif prev == "i": + prev = s + w += 1 + wseq.reverse() # now pass through forwards + for j, s in enumerate(wseq): + if s == "d": + wseq[j] = w + prev = s + elif prev == "d": + prev = s + w += 1 + # Now scale weights + if prev == "d": + w += 1 + wscale = 1 / w + return [ww * wscale for ww in wseq] + # return wseq + + +def weights_to_creation_sequence( + weights, threshold=1, with_labels=False, compact=False +): + """ + Returns a creation sequence for a threshold graph + determined by the weights and threshold given as input. + If the sum of two node weights is greater than the + threshold value, an edge is created between these nodes. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. 
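The two conversions above, `creation_sequence_to_weights` and `weights_to_creation_sequence`, are inverse views of the same graph, so a round trip should reproduce the sequence. A minimal sketch, not part of the diff:

```python
from networkx.algorithms.threshold import (
    creation_sequence_to_weights,
    weights_to_creation_sequence,
)

cs = ["d", "i", "i", "d", "d"]
w = creation_sequence_to_weights(cs)  # scaled so the threshold is 1.0
assert weights_to_creation_sequence(w) == cs  # round trip recovers the sequence
```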
+ Dominating vertices are connected to all vertices present + when it is added. The first node added is by convention 'd'. + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. + """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(weights, dict): # labeled weights + wseq = [[w, label] for (label, w) in weights.items()] + else: + wseq = [[w, i] for i, w in enumerate(weights)] + wseq.sort() + cs = [] # creation sequence + cutoff = threshold - wseq[-1][0] + while wseq: + if wseq[0][0] < cutoff: # isolated node + (w, label) = wseq.pop(0) + cs.append((label, "i")) + else: + (w, label) = wseq.pop() + cs.append((label, "d")) + cutoff = threshold - wseq[-1][0] + if len(wseq) == 1: # make sure we start with a d + (w, label) = wseq.pop() + cs.append((label, "d")) + # put in correct order + cs.reverse() + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + +# Manipulating NetworkX.Graphs in context of threshold graphs +def threshold_graph(creation_sequence, create_using=None): + """ + Create a threshold graph from the creation sequence or compact + creation_sequence. + + The input sequence can be a + + creation sequence (e.g. ['d','i','d','d','d','i']) + labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')]) + compact creation sequence (e.g. [2,1,1,2,0]) + + Use cs=creation_sequence(degree_sequence,labeled=True) + to convert a degree sequence to a creation sequence. + + Returns None if the sequence is not valid + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + ci = list(enumerate(creation_sequence)) + elif isinstance(first, tuple): # labeled creation sequence + ci = creation_sequence[:] + elif isinstance(first, int): # compact creation sequence + cs = uncompact(creation_sequence) + ci = list(enumerate(cs)) + else: + print("not a valid creation sequence type") + return None + + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + G.name = "Threshold Graph" + + # add nodes and edges + # if type is 'i' just add nodea + # if type is a d connect to everything previous + while ci: + (v, node_type) = ci.pop(0) + if node_type == "d": # dominating type, connect to all existing nodes + # We use `for u in list(G):` instead of + # `for u in G:` because we edit the graph `G` in + # the loop. Hence using an iterator will result in + # `RuntimeError: dictionary changed size during iteration` + for u in list(G): + G.add_edge(v, u) + G.add_node(v) + return G + + +def find_alternating_4_cycle(G): + """ + Returns False if there aren't any alternating 4 cycles. + Otherwise returns the cycle as [a,b,c,d] where (a,b) + and (c,d) are edges and (a,c) and (b,d) are not. 
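Putting `threshold_graph` together with the recognition test from earlier in the file. A sketch, not part of the diff; the sequence is the same illustrative d,i,i,d,d as above:

```python
from networkx.algorithms.threshold import is_threshold_graph, threshold_graph

G = threshold_graph(["d", "i", "i", "d", "d"])
assert is_threshold_graph(G)
# the two 'd's added last dominate everything that was present before them
assert sorted(d for _, d in G.degree()) == [2, 2, 2, 4, 4]
```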
+ """ + for (u, v) in G.edges(): + for w in G.nodes(): + if not G.has_edge(u, w) and u != w: + for x in G.neighbors(w): + if not G.has_edge(v, x) and v != x: + return [u, v, w, x] + return False + + +def find_threshold_graph(G, create_using=None): + """ + Returns a threshold subgraph that is close to largest in `G`. + + The threshold graph will contain the largest degree node in G. + + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, or `MultiDiGraph` + create_using : NetworkX graph class or `None` (default), optional + Type of graph to use when constructing the threshold graph. + If `None`, infer the appropriate graph type from the input. + + Returns + ------- + graph : + A graph instance representing the threshold graph + + Examples + -------- + >>> from networkx.algorithms.threshold import find_threshold_graph + >>> G = nx.barbell_graph(3, 3) + >>> T = find_threshold_graph(G) + >>> T.nodes # may vary + NodeView((7, 8, 5, 6)) + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return threshold_graph(find_creation_sequence(G), create_using) + + +def find_creation_sequence(G): + """ + Find a threshold subgraph that is close to largest in G. + Returns the labeled creation sequence of that threshold graph. + """ + cs = [] + # get a local pointer to the working part of the graph + H = G + while H.order() > 0: + # get new degree sequence on subgraph + dsdict = dict(H.degree()) + ds = [(d, v) for v, d in dsdict.items()] + ds.sort() + # Update threshold graph nodes + if ds[-1][0] == 0: # all are isolated + cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"])) + break # Done! + # pull off isolated nodes + while ds[0][0] == 0: + (d, iso) = ds.pop(0) + cs.append((iso, "i")) + # find new biggest node + (d, bigv) = ds.pop() + # add edges of star to t_g + cs.append((bigv, "d")) + # form subgraph of neighbors of big node + H = H.subgraph(H.neighbors(bigv)) + cs.reverse() + return cs + + +# Properties of Threshold Graphs +def triangles(creation_sequence): + """ + Compute number of triangles in the threshold graph with the + given creation sequence. + """ + # shortcut algorithm that doesn't require computing number + # of triangles at each node. + cs = creation_sequence # alias + dr = cs.count("d") # number of d's in sequence + ntri = dr * (dr - 1) * (dr - 2) / 6 # number of triangles in clique of nd d's + # now add dr choose 2 triangles for every 'i' in sequence where + # dr is the number of d's to the right of the current i + for i, typ in enumerate(cs): + if typ == "i": + ntri += dr * (dr - 1) / 2 + else: + dr -= 1 + return ntri + + +def triangle_sequence(creation_sequence): + """ + Return triangle sequence for the given threshold graph creation sequence. 
+ + """ + cs = creation_sequence + seq = [] + dr = cs.count("d") # number of d's to the right of the current pos + dcur = (dr - 1) * (dr - 2) // 2 # number of triangles through a node of clique dr + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + for i, sym in enumerate(cs): + if sym == "d": + drun += 1 + tri = dcur + (dr - 1) * irun # new triangles at this d + else: # cs[i]="i": + if prevsym == "d": # new string of i's + dcur += (dr - 1) * irun # accumulate shared shortest paths + irun = 0 # reset i run counter + dr -= drun # reduce number of d's to right + drun = 0 # reset d run counter + irun += 1 + tri = dr * (dr - 1) // 2 # new triangles at this i + seq.append(tri) + prevsym = sym + return seq + + +def cluster_sequence(creation_sequence): + """ + Return cluster sequence for the given threshold graph creation sequence. + """ + triseq = triangle_sequence(creation_sequence) + degseq = degree_sequence(creation_sequence) + cseq = [] + for i, deg in enumerate(degseq): + tri = triseq[i] + if deg <= 1: # isolated vertex or single pair gets cc 0 + cseq.append(0) + continue + max_size = (deg * (deg - 1)) // 2 + cseq.append(tri / max_size) + return cseq + + +def degree_sequence(creation_sequence): + """ + Return degree sequence for the threshold graph with the given + creation sequence + """ + cs = creation_sequence # alias + seq = [] + rd = cs.count("d") # number of d to the right + for i, sym in enumerate(cs): + if sym == "d": + rd -= 1 + seq.append(rd + i) + else: + seq.append(rd) + return seq + + +def density(creation_sequence): + """ + Return the density of the graph with this creation_sequence. + The density is the fraction of possible edges present. + """ + N = len(creation_sequence) + two_size = sum(degree_sequence(creation_sequence)) + two_possible = N * (N - 1) + den = two_size / two_possible + return den + + +def degree_correlation(creation_sequence): + """ + Return the degree-degree correlation over all edges. + """ + cs = creation_sequence + s1 = 0 # deg_i*deg_j + s2 = 0 # deg_i^2+deg_j^2 + s3 = 0 # deg_i+deg_j + m = 0 # number of edges + rd = cs.count("d") # number of d nodes to the right + rdi = [i for i, sym in enumerate(cs) if sym == "d"] # index of "d"s + ds = degree_sequence(cs) + for i, sym in enumerate(cs): + if sym == "d": + if i != rdi[0]: + print("Logic error in degree_correlation", i, rdi) + raise ValueError + rdi.pop(0) + degi = ds[i] + for dj in rdi: + degj = ds[dj] + s1 += degj * degi + s2 += degi**2 + degj**2 + s3 += degi + degj + m += 1 + denom = 2 * m * s2 - s3 * s3 + numer = 4 * m * s1 - s3 * s3 + if denom == 0: + if numer == 0: + return 1 + raise ValueError(f"Zero Denominator but Numerator is {numer}") + return numer / denom + + +def shortest_path(creation_sequence, u, v): + """ + Find the shortest path between u and v in a + threshold graph G with the given creation_sequence. + + For an unlabeled creation_sequence, the vertices + u and v must be integers in (0,len(sequence)) referring + to the position of the desired vertices in the sequence. + + For a labeled creation_sequence, u and v are labels of veritices. + + Use cs=creation_sequence(degree_sequence,with_labels=True) + to convert a degree sequence to a creation sequence. + + Returns a list of vertices from u to v. 
+ Example: if they are neighbors, it returns [u,v] + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))] + elif isinstance(first, tuple): # labeled creation sequence + cs = creation_sequence[:] + elif isinstance(first, int): # compact creation sequence + ci = uncompact(creation_sequence) + cs = [(i, ci[i]) for i in range(len(ci))] + else: + raise TypeError("Not a valid creation sequence type") + + verts = [s[0] for s in cs] + if v not in verts: + raise ValueError(f"Vertex {v} not in graph from creation_sequence") + if u not in verts: + raise ValueError(f"Vertex {u} not in graph from creation_sequence") + # Done checking + if u == v: + return [u] + + uindex = verts.index(u) + vindex = verts.index(v) + bigind = max(uindex, vindex) + if cs[bigind][1] == "d": + return [u, v] + # must be that cs[bigind][1]=='i' + cs = cs[bigind:] + while cs: + vert = cs.pop() + if vert[1] == "d": + return [u, vert[0], v] + # All after u are type 'i' so no connection + return -1 + + +def shortest_path_length(creation_sequence, i): + """ + Return the shortest path length from indicated node to + every other node for the threshold graph with the given + creation sequence. + Node is indicated by index i in creation_sequence unless + creation_sequence is labeled in which case, i is taken to + be the label of the node. + + Paths lengths in threshold graphs are at most 2. + Length to unreachable nodes is set to -1. + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + if isinstance(creation_sequence, list): + cs = creation_sequence[:] + else: + cs = list(creation_sequence) + elif isinstance(first, tuple): # labeled creation sequence + cs = [v[1] for v in creation_sequence] + i = [v[0] for v in creation_sequence].index(i) + elif isinstance(first, int): # compact creation sequence + cs = uncompact(creation_sequence) + else: + raise TypeError("Not a valid creation sequence type") + + # Compute + N = len(cs) + spl = [2] * N # length 2 to every node + spl[i] = 0 # except self which is 0 + # 1 for all d's to the right + for j in range(i + 1, N): + if cs[j] == "d": + spl[j] = 1 + if cs[i] == "d": # 1 for all nodes to the left + for j in range(i): + spl[j] = 1 + # and -1 for any trailing i to indicate unreachable + for j in range(N - 1, 0, -1): + if cs[j] == "d": + break + spl[j] = -1 + return spl + + +def betweenness_sequence(creation_sequence, normalized=True): + """ + Return betweenness for the threshold graph with the given creation + sequence. The result is unscaled. To scale the values + to the iterval [0,1] divide by (n-1)*(n-2). 
+ """ + cs = creation_sequence + seq = [] # betweenness + lastchar = "d" # first node is always a 'd' + dr = float(cs.count("d")) # number of d's to the right of curren pos + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + dlast = 0.0 # betweenness of last d + for i, c in enumerate(cs): + if c == "d": # cs[i]=="d": + # betweennees = amt shared with eariler d's and i's + # + new isolated nodes covered + # + new paths to all previous nodes + b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr + drun += 1 # update counter + else: # cs[i]="i": + if lastchar == "d": # if this is a new run of i's + dlast = b # accumulate betweenness + dr -= drun # update number of d's to the right + drun = 0 # reset d counter + irun = 0 # reset i counter + b = 0 # isolated nodes have zero betweenness + irun += 1 # add another i to the run + seq.append(float(b)) + lastchar = c + + # normalize by the number of possible shortest paths + if normalized: + order = len(cs) + scale = 1.0 / ((order - 1) * (order - 2)) + seq = [s * scale for s in seq] + + return seq + + +def eigenvectors(creation_sequence): + """ + Return a 2-tuple of Laplacian eigenvalues and eigenvectors + for the threshold network with creation_sequence. + The first value is a list of eigenvalues. + The second value is a list of eigenvectors. + The lists are in the same order so corresponding eigenvectors + and eigenvalues are in the same position in the two lists. + + Notice that the order of the eigenvalues returned by eigenvalues(cs) + may not correspond to the order of these eigenvectors. + """ + ccs = make_compact(creation_sequence) + N = sum(ccs) + vec = [0] * N + val = vec[:] + # get number of type d nodes to the right (all for first node) + dr = sum(ccs[::2]) + + nn = ccs[0] + vec[0] = [1.0 / sqrt(N)] * N + val[0] = 0 + e = dr + dr -= nn + type_d = True + i = 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(dd * dd + i) + vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + if len(ccs) == 1: + return (val, vec) + for nn in ccs[1:]: + scale = 1.0 / sqrt(nn * i * (i + nn)) + vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn) + # find eigenvalue + type_d = not type_d + if type_d: + e = i + dr + dr -= nn + else: + e = dr + val[i] = e + st = i + i += 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(i - st + dd * dd) + vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + return (val, vec) + + +def spectral_projection(u, eigenpairs): + """ + Returns the coefficients of each eigenvector + in a projection of the vector u onto the normalized + eigenvectors which are contained in eigenpairs. + + eigenpairs should be a list of two objects. The + first is a list of eigenvalues and the second a list + of eigenvectors. The eigenvectors should be lists. + + There's not a lot of error checking on lengths of + arrays, etc. so be careful. + """ + coeff = [] + evect = eigenpairs[1] + for ev in evect: + c = sum(evv * uv for (evv, uv) in zip(ev, u)) + coeff.append(c) + return coeff + + +def eigenvalues(creation_sequence): + """ + Return sequence of eigenvalues of the Laplacian of the threshold + graph for the given creation_sequence. + + Based on the Ferrer's diagram method. The spectrum is integral + and is the conjugate of the degree sequence. 
+
+    See::
+
+      @Article{degree-merris-1994,
+       author = {Russel Merris},
+       title = {Degree maximal graphs are Laplacian integral},
+       journal = {Linear Algebra Appl.},
+       year = {1994},
+       volume = {199},
+       pages = {381--389},
+      }
+
+    """
+    degseq = degree_sequence(creation_sequence)
+    degseq.sort()
+    eiglist = []  # zero is always one eigenvalue
+    eig = 0
+    row = len(degseq)
+    bigdeg = degseq.pop()
+    while row:
+        if bigdeg < row:
+            eiglist.append(eig)
+            row -= 1
+        else:
+            eig += 1
+            if degseq:
+                bigdeg = degseq.pop()
+            else:
+                bigdeg = 0
+    return eiglist
+
+
+# Threshold graph creation routines
+
+
+@py_random_state(2)
+def random_threshold_sequence(n, p, seed=None):
+    """
+    Create a random threshold sequence of size n.
+    A creation sequence is built by randomly choosing d's with
+    probability p and i's with probability 1-p.
+
+    s=nx.random_threshold_sequence(10,0.5)
+
+    returns a threshold sequence of length 10 with equal
+    probability of an i or a d at each position.
+
+    A "random" threshold graph can be built with
+
+    G=nx.threshold_graph(s)
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    """
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in [0,1]")
+
+    cs = ["d"]  # threshold sequences always start with a d
+    for i in range(1, n):
+        if seed.random() < p:
+            cs.append("d")
+        else:
+            cs.append("i")
+    return cs
+
+
+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
+def right_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    FIXME: describe algorithm
+
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case m > n - 1
+    ind = n - 1
+    sum = n - 1
+    while sum < m:
+        cs[ind] = "d"
+        ind -= 1
+        sum += ind
+    ind = m - (sum - ind)
+    cs[ind] = "d"
+    return cs
+
+
+def left_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    FIXME: describe algorithm
+
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # Connected case when M > N - 1
+    cs[n - 1] = "d"
+    sum = n - 1
+    ind = 1
+    while sum < m:
+        cs[ind] = "d"
+        sum += ind
+        ind += 1
+    if sum > m:  # be sure not to change the first vertex
+        cs[sum - m] = "i"
+    return cs
+
+
+@py_random_state(3)
+def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
+    """
+    Perform a "swap" operation on a threshold sequence.
+
+    The swap preserves the number of nodes and edges
+    in the graph for the given sequence.
+    The resulting sequence is still a threshold sequence.
+
+    Perform one split and one combine operation on the
+    'd's of a creation sequence for a threshold graph.
+    This operation maintains the number of nodes and edges
+    in the graph, but shifts the edges from node to node
+    maintaining the threshold quality of the graph.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+ """ + # preprocess the creation sequence + dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"] + # split + if seed.random() < p_split: + choice = seed.choice(dlist) + split_to = seed.choice(range(choice)) + flip_side = choice - split_to + if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i": + cs[choice] = "i" + cs[split_to] = "d" + cs[flip_side] = "d" + dlist.remove(choice) + # don't add or combine may reverse this action + # dlist.extend([split_to,flip_side]) + # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side) + # combine + if seed.random() < p_combine and dlist: + first_choice = seed.choice(dlist) + second_choice = seed.choice(dlist) + target = first_choice + second_choice + if target >= len(cs) or cs[target] == "d" or first_choice == second_choice: + return cs + # OK to combine + cs[first_choice] = "i" + cs[second_choice] = "i" + cs[target] = "d" + # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target) + + return cs diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tournament.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tournament.py new file mode 100644 index 0000000..278a1c4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tournament.py @@ -0,0 +1,391 @@ +"""Functions concerning tournament graphs. + +A `tournament graph`_ is a complete oriented graph. In other words, it +is a directed graph in which there is exactly one directed edge joining +each pair of distinct nodes. For each function in this module that +accepts a graph as input, you must provide a tournament graph. The +responsibility is on the caller to ensure that the graph is a tournament +graph. + +To access the functions in this module, you must access them through the +:mod:`networkx.algorithms.tournament` module:: + + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> tournament.is_tournament(G) + True + +.. _tournament graph: https://en.wikipedia.org/wiki/Tournament_%28graph_theory%29 + +""" +from itertools import combinations + +import networkx as nx +from networkx.algorithms.simple_paths import is_simple_path as is_path +from networkx.utils import arbitrary_element, not_implemented_for, py_random_state + +__all__ = [ + "hamiltonian_path", + "is_reachable", + "is_strongly_connected", + "is_tournament", + "random_tournament", + "score_sequence", +] + + +def index_satisfying(iterable, condition): + """Returns the index of the first element in `iterable` that + satisfies the given condition. + + If no such element is found (that is, when the iterable is + exhausted), this returns the length of the iterable (that is, one + greater than the last index of the iterable). + + `iterable` must not be empty. If `iterable` is empty, this + function raises :exc:`ValueError`. + + """ + # Pre-condition: iterable must not be empty. + for i, x in enumerate(iterable): + if condition(x): + return i + # If we reach the end of the iterable without finding an element + # that satisfies the condition, return the length of the iterable, + # which is one greater than the index of its last element. If the + # iterable was empty, `i` will not be defined, so we raise an + # exception. + try: + return i + 1 + except NameError as err: + raise ValueError("iterable must be non-empty") from err + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def is_tournament(G): + """Returns True if and only if `G` is a tournament. 
+ + A tournament is a directed graph, with neither self-loops nor + multi-edges, in which there is exactly one directed edge joining + each pair of distinct nodes. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the given graph is a tournament graph. + + Examples + -------- + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> tournament.is_tournament(G) + True + + Notes + ----- + Some definitions require a self-loop on each node, but that is not + the convention used here. + + """ + # In a tournament, there is exactly one directed edge joining each pair. + return ( + all((v in G[u]) ^ (u in G[v]) for u, v in combinations(G, 2)) + and nx.number_of_selfloops(G) == 0 + ) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def hamiltonian_path(G): + """Returns a Hamiltonian path in the given tournament graph. + + Each tournament has a Hamiltonian path. If furthermore, the + tournament is strongly connected, then the returned Hamiltonian path + is a Hamiltonian cycle (by joining the endpoints of the path). + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + path : list + A list of nodes which form a Hamiltonian path in `G`. + + Examples + -------- + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]) + >>> tournament.hamiltonian_path(G) + [0, 1, 2, 3] + + Notes + ----- + This is a recursive implementation with an asymptotic running time + of $O(n^2)$, ignoring multiplicative polylogarithmic factors, where + $n$ is the number of nodes in the graph. + + """ + if len(G) == 0: + return [] + if len(G) == 1: + return [arbitrary_element(G)] + v = arbitrary_element(G) + hampath = hamiltonian_path(G.subgraph(set(G) - {v})) + # Get the index of the first node in the path that does *not* have + # an edge to `v`, then insert `v` before that node. + index = index_satisfying(hampath, lambda u: v not in G[u]) + hampath.insert(index, v) + return hampath + + +@py_random_state(1) +def random_tournament(n, seed=None): + r"""Returns a random tournament graph on `n` nodes. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : DiGraph + A tournament on `n` nodes, with exactly one directed edge joining + each pair of distinct nodes. + + Notes + ----- + This algorithm adds, for each pair of distinct nodes, an edge with + uniformly random orientation. In other words, `\binom{n}{2}` flips + of an unbiased coin decide the orientations of the edges in the + graph. + + """ + # Flip an unbiased coin for each pair of distinct nodes. + coins = (seed.random() for i in range((n * (n - 1)) // 2)) + pairs = combinations(range(n), 2) + edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins)) + return nx.DiGraph(edges) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def score_sequence(G): + """Returns the score sequence for the given tournament graph. + + The score sequence is the sorted list of the out-degrees of the + nodes of the graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + list + A sorted list of the out-degrees of the nodes of `G`. 
+ + Examples + -------- + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(1, 0), (1, 3), (0, 2), (0, 3), (2, 1), (3, 2)]) + >>> tournament.score_sequence(G) + [1, 1, 2, 2] + + """ + return sorted(d for v, d in G.out_degree()) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def tournament_matrix(G): + r"""Returns the tournament matrix for the given tournament graph. + + This function requires SciPy. + + The *tournament matrix* of a tournament graph with edge set *E* is + the matrix *T* defined by + + .. math:: + + T_{i j} = + \begin{cases} + +1 & \text{if } (i, j) \in E \\ + -1 & \text{if } (j, i) \in E \\ + 0 & \text{if } i == j. + \end{cases} + + An equivalent definition is `T = A - A^T`, where *A* is the + adjacency matrix of the graph `G`. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + SciPy sparse matrix + The tournament matrix of the tournament graph `G`. + + Raises + ------ + ImportError + If SciPy is not available. + + """ + A = nx.adjacency_matrix(G) + return A - A.T + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def is_reachable(G, s, t): + """Decides whether there is a path from `s` to `t` in the + tournament. + + This function is more theoretically efficient than the reachability + checks than the shortest path algorithms in + :mod:`networkx.algorithms.shortest_paths`. + + The given graph **must** be a tournament, otherwise this function's + behavior is undefined. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + s : node + A node in the graph. + + t : node + A node in the graph. + + Returns + ------- + bool + Whether there is a path from `s` to `t` in `G`. + + Examples + -------- + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)]) + >>> tournament.is_reachable(G, 1, 3) + True + >>> tournament.is_reachable(G, 3, 2) + False + + Notes + ----- + Although this function is more theoretically efficient than the + generic shortest path functions, a speedup requires the use of + parallelism. Though it may in the future, the current implementation + does not use parallelism, thus you may not see much of a speedup. + + This algorithm comes from [1]. + + References + ---------- + .. [1] Tantau, Till. + "A note on the complexity of the reachability problem for + tournaments." + *Electronic Colloquium on Computational Complexity*. 2001. + + """ + + def two_neighborhood(G, v): + """Returns the set of nodes at distance at most two from `v`. + + `G` must be a graph and `v` a node in that graph. + + The returned set includes the nodes at distance zero (that is, + the node `v` itself), the nodes at distance one (that is, the + out-neighbors of `v`), and the nodes at distance two. + + """ + # TODO This is trivially parallelizable. + return { + x for x in G if x == v or x in G[v] or any(is_path(G, [v, z, x]) for z in G) + } + + def is_closed(G, nodes): + """Decides whether the given set of nodes is closed. + + A set *S* of nodes is *closed* if for each node *u* in the graph + not in *S* and for each node *v* in *S*, there is an edge from + *u* to *v*. + + """ + # TODO This is trivially parallelizable. + return all(v in G[u] for u in set(G) - nodes for v in nodes) + + # TODO This is trivially parallelizable. 
+ neighborhoods = [two_neighborhood(G, v) for v in G] + return all(not (is_closed(G, S) and s in S and t not in S) for S in neighborhoods) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +def is_strongly_connected(G): + """Decides whether the given tournament is strongly connected. + + This function is more theoretically efficient than the + :func:`~networkx.algorithms.components.is_strongly_connected` + function. + + The given graph **must** be a tournament, otherwise this function's + behavior is undefined. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the tournament is strongly connected. + + Examples + -------- + >>> from networkx.algorithms import tournament + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (3, 0)]) + >>> tournament.is_strongly_connected(G) + True + >>> G.remove_edge(1, 3) + >>> tournament.is_strongly_connected(G) + False + + Notes + ----- + Although this function is more theoretically efficient than the + generic strong connectivity function, a speedup requires the use of + parallelism. Though it may in the future, the current implementation + does not use parallelism, thus you may not see much of a speedup. + + This algorithm comes from [1]. + + References + ---------- + .. [1] Tantau, Till. + "A note on the complexity of the reachability problem for + tournaments." + *Electronic Colloquium on Computational Complexity*. 2001. + + + """ + # TODO This is trivially parallelizable. + return all(is_reachable(G, u, v) for u in G for v in G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/__init__.py new file mode 100644 index 0000000..93e6cdd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/__init__.py @@ -0,0 +1,5 @@ +from .beamsearch import * +from .breadth_first_search import * +from .depth_first_search import * +from .edgedfs import * +from .edgebfs import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/beamsearch.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/beamsearch.py new file mode 100644 index 0000000..8a112c2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/beamsearch.py @@ -0,0 +1,104 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" + +from .breadth_first_search import generic_bfs_edges + +__all__ = ["bfs_beam_edges"] + + +def bfs_beam_edges(G, source, value, width=None): + """Iterates over edges in a beam search. + + The beam search is a generalized breadth-first search in which only + the "best" *w* neighbors of the current node are enqueued, where *w* + is the beam width and "best" is an application-specific + heuristic. In general, a beam search with a small beam width might + not visit each node in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + value : function + A function that takes a node of the graph as input and returns a + real number indicating how "good" it is. A higher value means it + is more likely to be visited sooner during the search. When + visiting a new node, only the `width` neighbors with the highest + `value` are enqueued (in decreasing order of `value`). 
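On an actual tournament, the closed-set criterion behind `is_reachable` and `is_strongly_connected` above must agree with the generic component-based test. A sketch, not part of the diff; the sizes and seeds are arbitrary:

```python
import networkx as nx
from networkx.algorithms import tournament

for seed in range(5):
    G = tournament.random_tournament(5, seed=seed)
    assert tournament.is_strongly_connected(G) == nx.is_strongly_connected(G)
```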
+ + width : int (default = None) + The beam width for the search. This is the number of neighbors + (ordered by `value`) to enqueue when visiting each new node. + + Yields + ------ + edge + Edges in the beam search starting from `source`, given as a pair + of nodes. + + Examples + -------- + To give nodes with, for example, a higher centrality precedence + during the search, set the `value` function to return the centrality + value of the node: + + >>> G = nx.karate_club_graph() + >>> centrality = nx.eigenvector_centrality(G) + >>> source = 0 + >>> width = 5 + >>> for u, v in nx.bfs_beam_edges(G, source, centrality.get, width): + ... print((u, v)) + ... + (0, 2) + (0, 1) + (0, 8) + (0, 13) + (0, 3) + (2, 32) + (1, 30) + (8, 33) + (3, 7) + (32, 31) + (31, 28) + (31, 25) + (25, 23) + (25, 24) + (23, 29) + (23, 27) + (29, 26) + """ + + if width is None: + width = len(G) + + def successors(v): + """Returns a list of the best neighbors of a node. + + `v` is a node in the graph `G`. + + The "best" neighbors are chosen according to the `value` + function (higher is better). Only the `width` best neighbors of + `v` are returned. + + The list returned by this function is in decreasing value as + measured by the `value` function. + + """ + # TODO The Python documentation states that for small values, it + # is better to use `heapq.nlargest`. We should determine the + # threshold at which its better to use `heapq.nlargest()` + # instead of `sorted()[:]` and apply that optimization here. + # + # If `width` is greater than the number of neighbors of `v`, all + # neighbors are returned by the semantics of slicing in + # Python. This occurs in the special case that the user did not + # specify a `width`: in this case all neighbors are always + # returned, so this is just a (slower) implementation of + # `bfs_edges(G, source)` but with a sorted enqueue step. + return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width]) + + yield from generic_bfs_edges(G, source, successors) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/breadth_first_search.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/breadth_first_search.py new file mode 100644 index 0000000..a68dbfe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/breadth_first_search.py @@ -0,0 +1,420 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" +from collections import deque + +import networkx as nx + +__all__ = [ + "bfs_edges", + "bfs_tree", + "bfs_predecessors", + "bfs_successors", + "descendants_at_distance", +] + + +def generic_bfs_edges(G, source, neighbors=None, depth_limit=None, sort_neighbors=None): + """Iterate over edges in a breadth-first search. + + The breadth-first search begins at `source` and enqueues the + neighbors of newly visited nodes specified by the `neighbors` + function. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + neighbors : function + A function that takes a newly visited node of the graph as input + and returns an *iterator* (not just a list) of nodes that are + neighbors of that node. If not specified, this is just the + ``G.neighbors`` method, but in general it can be any function + that returns an iterator over some or all of the neighbors of a + given node, in any order. 
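A degenerate but instructive case of `bfs_beam_edges` above: with `width=1` the search keeps only the single best unvisited neighbor at each step. A sketch, not part of the diff:

```python
import networkx as nx

# With width=1 the beam keeps only the highest-valued neighbor, so node 1 of
# this 4-cycle is never reached, illustrating the docstring's caveat that a
# narrow beam may not visit every node.
G = nx.cycle_graph(4)
edges = list(nx.bfs_beam_edges(G, source=0, value=lambda n: n, width=1))
assert edges == [(0, 3), (3, 2)]
```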
+ + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Yields + ------ + edge + Edges in the breadth-first search starting from `source`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> print(list(nx.bfs_edges(G, 0))) + [(0, 1), (1, 2)] + >>> print(list(nx.bfs_edges(G, source=0, depth_limit=1))) + [(0, 1)] + + Notes + ----- + This implementation is from `PADS`_, which was in the public domain + when it was first accessed in July, 2004. The modifications + to allow depth limits are based on the Wikipedia article + "`Depth-limited-search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + """ + if callable(sort_neighbors): + _neighbors = neighbors + neighbors = lambda node: iter(sort_neighbors(_neighbors(node))) + + visited = {source} + if depth_limit is None: + depth_limit = len(G) + queue = deque([(source, depth_limit, neighbors(source))]) + while queue: + parent, depth_now, children = queue[0] + try: + child = next(children) + if child not in visited: + yield parent, child + visited.add(child) + if depth_now > 1: + queue.append((child, depth_now - 1, neighbors(child))) + except StopIteration: + queue.popleft() + + +def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Iterate over edges in a breadth-first-search starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the breadth-first search. + + Examples + -------- + To get the edges in a breadth-first search:: + + >>> G = nx.path_graph(3) + >>> list(nx.bfs_edges(G, 0)) + [(0, 1), (1, 2)] + >>> list(nx.bfs_edges(G, source=0, depth_limit=1)) + [(0, 1)] + + To get the nodes in a breadth-first search order:: + + >>> G = nx.path_graph(3) + >>> root = 2 + >>> edges = nx.bfs_edges(G, root) + >>> nodes = [root] + [v for u, v in edges] + >>> nodes + [2, 1, 0] + + Notes + ----- + The naming of this function is very similar to + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`. The difference + is that ``edge_bfs`` yields edges even if they extend back to an already + explored node while this generator yields the edges of the tree that results + from a breadth-first-search (BFS) so no edges are reported if they extend + to already explored nodes. That means ``edge_bfs`` reports all edges while + ``bfs_edges`` only reports those traversed by a node-based BFS. Yet another + description is that ``bfs_edges`` reports the edges traversed during BFS + while ``edge_bfs`` reports all edges in the order they are explored. + + Based on the breadth-first search implementation in PADS [1]_ + by D. Eppstein, July 2004; with modifications to allow depth limits + as described in [2]_. + + References + ---------- + .. 
[1] http://www.ics.uci.edu/~eppstein/PADS/BFS.py. + .. [2] https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs` + + """ + if reverse and G.is_directed(): + successors = G.predecessors + else: + successors = G.neighbors + yield from generic_bfs_edges(G, source, successors, depth_limit, sort_neighbors) + + +def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Returns an oriented tree constructed from of a breadth-first-search + starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + T: NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(3) + >>> print(list(nx.bfs_tree(G, 1).edges())) + [(1, 0), (1, 2)] + >>> H = nx.Graph() + >>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(H, [2, 7, 8, 9, 10]) + >>> print(sorted(list(nx.bfs_tree(H, source=3, depth_limit=3).edges()))) + [(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)] + + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_tree + bfs_edges + edge_bfs + """ + T = nx.DiGraph() + T.add_node(source) + edges_gen = bfs_edges( + G, + source, + reverse=reverse, + depth_limit=depth_limit, + sort_neighbors=sort_neighbors, + ) + T.add_edges_from(edges_gen) + return T + + +def bfs_predecessors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of predecessors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + pred: iterator + (node, predecessor) iterator where `predecessor` is the predecessor of + `node` in a breadth first search starting from `source`. 
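The `depth_limit` parameter threaded through these helpers prunes the search at a fixed radius from the source. A sketch, not part of the diff:

```python
import networkx as nx

G = nx.path_graph(6)
T = nx.bfs_tree(G, source=2, depth_limit=2)
assert set(T) == {0, 1, 2, 3, 4}  # exactly the nodes within 2 hops of node 2
assert sorted(T.edges()) == [(1, 0), (2, 1), (2, 3), (3, 4)]
```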
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> print(dict(nx.bfs_predecessors(G, 0))) + {1: 0, 2: 1} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> print(dict(nx.bfs_predecessors(H, 0))) + {1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2} + >>> M = nx.Graph() + >>> nx.add_path(M, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(M, [2, 7, 8, 9, 10]) + >>> print(sorted(nx.bfs_predecessors(M, source=1, depth_limit=3))) + [(0, 1), (2, 1), (3, 2), (4, 3), (7, 2), (8, 7)] + >>> N = nx.DiGraph() + >>> nx.add_path(N, [0, 1, 2, 3, 4, 7]) + >>> nx.add_path(N, [3, 5, 6, 7]) + >>> print(sorted(nx.bfs_predecessors(N, source=2))) + [(3, 2), (4, 3), (5, 3), (6, 5), (7, 4)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + bfs_edges + edge_bfs + """ + for s, t in bfs_edges( + G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ): + yield (t, s) + + +def bfs_successors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of successors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + succ: iterator + (node, successors) iterator where `successors` is the non-empty list of + successors of `node` in a breadth first search from `source`. + To appear in the iterator, `node` must have successors. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> print(dict(nx.bfs_successors(G, 0))) + {0: [1], 1: [2]} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> print(dict(nx.bfs_successors(H, 0))) + {0: [1, 2], 1: [3, 4], 2: [5, 6]} + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(G, [2, 7, 8, 9, 10]) + >>> print(dict(nx.bfs_successors(G, source=1, depth_limit=3))) + {1: [0, 2], 2: [3, 7], 3: [4], 7: [8]} + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5]) + >>> print(dict(nx.bfs_successors(G, source=3))) + {3: [4], 4: [5]} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004.The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + bfs_edges + edge_bfs + """ + parent = source + children = [] + for p, c in bfs_edges( + G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ): + if p == parent: + children.append(c) + continue + yield (parent, children) + children = [c] + parent = p + yield (parent, children) + + +def descendants_at_distance(G, source, distance): + """Returns all nodes at a fixed `distance` from `source` in `G`. 
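`bfs_predecessors` and `bfs_successors` expose the same BFS tree read from its two ends, so every (child, parent) pair from one appears reversed in the other. A sketch, not part of the diff:

```python
import networkx as nx

G = nx.balanced_tree(2, 2)  # complete binary tree with 7 nodes
pred = dict(nx.bfs_predecessors(G, 0))
succ = dict(nx.bfs_successors(G, 0))
assert all(child in succ[parent] for child, parent in pred.items())
```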
+ + Parameters + ---------- + G : NetworkX graph + A graph + source : node in `G` + distance : the distance of the wanted nodes from `source` + + Returns + ------- + set() + The descendants of `source` in `G` at the given `distance` from `source` + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.descendants_at_distance(G, 2, 2) + {0, 4} + >>> H = nx.DiGraph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> nx.descendants_at_distance(H, 0, 2) + {3, 4, 5, 6} + >>> nx.descendants_at_distance(H, 5, 0) + {5} + >>> nx.descendants_at_distance(H, 5, 1) + set() + """ + if not G.has_node(source): + raise nx.NetworkXError(f"The node {source} is not in the graph.") + current_distance = 0 + current_layer = {source} + visited = {source} + + # this is basically BFS, except that the current layer only stores the nodes at + # current_distance from source at each iteration + while current_distance < distance: + next_layer = set() + for node in current_layer: + for child in G[node]: + if child not in visited: + visited.add(child) + next_layer.add(child) + current_layer = next_layer + current_distance += 1 + + return current_layer diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/depth_first_search.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/depth_first_search.py new file mode 100644 index 0000000..0ccca4f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/depth_first_search.py @@ -0,0 +1,443 @@ +"""Basic algorithms for depth-first searching the nodes of a graph.""" +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "dfs_edges", + "dfs_tree", + "dfs_predecessors", + "dfs_successors", + "dfs_preorder_nodes", + "dfs_postorder_nodes", + "dfs_labeled_edges", +] + + +def dfs_edges(G, source=None, depth_limit=None): + """Iterate over edges in a depth-first-search (DFS). + + Perform a depth-first-search over the nodes of `G` and yield + the edges in order. This may not generate all edges in `G` + (see `~networkx.algorithms.traversal.edgedfs.edge_dfs`). + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and yield edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the depth-first-search. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4)] + >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) + [(0, 1), (1, 2)] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in PADS [1]_, with modifications + to allow depth limits based on the Wikipedia article + "Depth-limited search" [2]_. + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS + .. 
[2] https://en.wikipedia.org/wiki/Depth-limited_search + """ + if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + visited = set() + if depth_limit is None: + depth_limit = len(G) + for start in nodes: + if start in visited: + continue + visited.add(start) + stack = [(start, depth_limit, iter(G[start]))] + while stack: + parent, depth_now, children = stack[-1] + try: + child = next(children) + if child not in visited: + yield parent, child + visited.add(child) + if depth_now > 1: + stack.append((child, depth_now - 1, iter(G[child]))) + except StopIteration: + stack.pop() + + +def dfs_tree(G, source=None, depth_limit=None): + """Returns oriented tree constructed from a depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + T : NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(5) + >>> T = nx.dfs_tree(G, source=0, depth_limit=2) + >>> list(T.edges()) + [(0, 1), (1, 2)] + >>> T = nx.dfs_tree(G, source=0) + >>> list(T.edges()) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + T = nx.DiGraph() + if source is None: + T.add_nodes_from(G) + else: + T.add_node(source) + T.add_edges_from(dfs_edges(G, source, depth_limit)) + return T + + +def dfs_predecessors(G, source=None, depth_limit=None): + """Returns dictionary of predecessors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.dfs_predecessors(G, source=0) + {1: 0, 2: 1, 3: 2} + >>> nx.dfs_predecessors(G, source=0, depth_limit=2) + {1: 0, 2: 1} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + return {t: s for s, t in dfs_edges(G, source, depth_limit)} + + +def dfs_successors(G, source=None, depth_limit=None): + """Returns dictionary of successors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of successor nodes as values. 
+ + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dfs_successors(G, source=0) + {0: [1], 1: [2], 2: [3], 3: [4]} + >>> nx.dfs_successors(G, source=0, depth_limit=2) + {0: [1], 1: [2]} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + d = defaultdict(list) + for s, t in dfs_edges(G, source=source, depth_limit=depth_limit): + d[s].append(t) + return dict(d) + + +def dfs_postorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search post-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search post-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_postorder_nodes(G, source=0)) + [4, 3, 2, 1, 0] + >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) + [1, 0] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == "reverse") + + +def dfs_preorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search pre-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return nodes in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search pre-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_preorder_nodes(G, source=0)) + [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) + [0, 1, 2] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. 
_Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_postorder_nodes + dfs_labeled_edges + bfs_edges + """ + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == "forward") + + +def dfs_labeled_edges(G, source=None, depth_limit=None): + """Iterate over edges in a depth-first-search (DFS) labeled by type. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + edges: generator + A generator of triples of the form (*u*, *v*, *d*), where (*u*, + *v*) is the edge being explored in the depth-first search and *d* + is one of the strings 'forward', 'nontree', or 'reverse'. A + 'forward' edge is one in which *u* has been visited but *v* has + not. A 'nontree' edge is one in which both *u* and *v* have been + visited but the edge is not in the DFS tree. A 'reverse' edge is + on in which both *u* and *v* have been visited and the edge is in + the DFS tree. + + Examples + -------- + + The labels reveal the complete transcript of the depth-first search + algorithm in more detail than, for example, :func:`dfs_edges`:: + + >>> from pprint import pprint + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)]) + >>> pprint(list(nx.dfs_labeled_edges(G, source=0))) + [(0, 0, 'forward'), + (0, 1, 'forward'), + (1, 2, 'forward'), + (2, 1, 'nontree'), + (1, 2, 'reverse'), + (0, 1, 'reverse'), + (0, 0, 'reverse')] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_postorder_nodes + """ + # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + # by D. Eppstein, July 2004. 
+ if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + visited = set() + if depth_limit is None: + depth_limit = len(G) + for start in nodes: + if start in visited: + continue + yield start, start, "forward" + visited.add(start) + stack = [(start, depth_limit, iter(G[start]))] + while stack: + parent, depth_now, children = stack[-1] + try: + child = next(children) + if child in visited: + yield parent, child, "nontree" + else: + yield parent, child, "forward" + visited.add(child) + if depth_now > 1: + stack.append((child, depth_now - 1, iter(G[child]))) + except StopIteration: + stack.pop() + if stack: + yield stack[-1][0], parent, "reverse" + yield start, start, "reverse" diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgebfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgebfs.py new file mode 100644 index 0000000..4e1c541 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgebfs.py @@ -0,0 +1,176 @@ +""" +============================= +Breadth First Search on Edges +============================= + +Algorithms for a breadth-first traversal of edges in a graph. + +""" +from collections import deque + +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_bfs"] + + +def edge_bfs(G, source=None, orientation=None): + """A directed, breadth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a breadth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the breadth-first-search. + For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. 
+ + Examples + -------- + >>> nodes = [0, 1, 2, 3] + >>> edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_bfs(nx.Graph(edges), nodes)) + [(0, 1), (0, 2), (1, 2), (1, 3)] + + >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes)) + [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_bfs(nx.MultiGraph(edges), nodes)) + [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)] + + >>> list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)] + + >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 'forward'), (1, 0, 'reverse'), (2, 0, 'reverse'), (2, 1, 'reverse'), (3, 1, 'reverse')] + + >>> list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 0, 'forward'), (1, 0, 0, 'reverse'), (1, 0, 1, 'reverse'), (2, 0, 0, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')] + + Notes + ----- + The goal of this function is to visit edges. It differs from the more + familiar breadth-first-search of nodes, as provided by + :func:`networkx.algorithms.traversal.breadth_first_search.bfs_edges`, in + that it does not stop once every node has been visited. In a directed graph + with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited + if not for the functionality provided by this function. + + The naming of this function is very similar to bfs_edges. The difference + is that 'edge_bfs' yields edges even if they extend back to an already + explored node while 'bfs_edges' yields the edges of the tree that results + from a breadth-first-search (BFS) so no edges are reported if they extend + to already explored nodes. That means 'edge_bfs' reports all edges while + 'bfs_edges' only report those traversed by a node-based BFS. Yet another + description is that 'bfs_edges' reports the edges traversed during BFS + while 'edge_bfs' reports all edges in the order they are explored. 
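    The difference described above can be checked on the three-edge digraph named
    in the Notes; the following doctest-style lines are an editorial sketch added
    for illustration, not part of the upstream Examples:

    >>> import networkx as nx
    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)])
    >>> list(nx.bfs_edges(G, 0))
    [(0, 1), (1, 2)]
    >>> list(nx.edge_bfs(G, 0))
    [(0, 1), (1, 2), (2, 1)]

    bfs_edges stops once every node has been reached, so the back edge (2, 1) is
    never reported, while edge_bfs continues until every edge has been yielded.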
+ + See Also + -------- + bfs_edges + bfs_tree + edge_dfs + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + if directed: + neighbors = G.successors + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + neighbors = G.neighbors + + def edge_id(edge): + return (frozenset(edge[:2]),) + edge[2:] + + check_reverse = directed and orientation in ("reverse", "ignore") + + # start BFS + visited_nodes = {n for n in nodes} + visited_edges = set() + queue = deque([(n, edges_from(n)) for n in nodes]) + while queue: + parent, children_edges = queue.popleft() + for edge in children_edges: + if check_reverse and edge[-1] == REVERSE: + child = edge[0] + else: + child = edge[1] + if child not in visited_nodes: + visited_nodes.add(child) + queue.append((child, edges_from(child))) + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + yield edge diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgedfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgedfs.py new file mode 100644 index 0000000..8a4ba83 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/edgedfs.py @@ -0,0 +1,174 @@ +""" +=========================== +Depth First Search on Edges +=========================== + +Algorithms for a depth-first traversal of edges in a graph. + +""" +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_dfs"] + + +def edge_dfs(G, source=None, orientation=None): + """A directed, depth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a depth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the depth-first traversal. 
+ For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Examples + -------- + >>> nodes = [0, 1, 2, 3] + >>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.Graph(edges), nodes)) + [(0, 1), (1, 2), (1, 3)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes)) + [(0, 1), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + + >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')] + + >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 0, 'forward'), (1, 0, 0, 'forward'), (1, 0, 1, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')] + + Notes + ----- + The goal of this function is to visit edges. It differs from the more + familiar depth-first traversal of nodes, as provided by + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in + that it does not stop once every node has been visited. In a directed graph + with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited + if not for the functionality provided by this function. + + See Also + -------- + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + # set up formation of edge_id to easily look up if edge already returned + if directed: + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + + def edge_id(edge): + # single id for undirected requires frozenset on nodes + return (frozenset(edge[:2]),) + edge[2:] + + # Basic setup + check_reverse = directed and orientation in ("reverse", "ignore") + + visited_edges = set() + visited_nodes = set() + edges = {} + + # start DFS + for start_node in nodes: + stack = [start_node] + while stack: + current_node = stack[-1] + if current_node not in visited_nodes: + edges[current_node] = edges_from(current_node) + visited_nodes.add(current_node) + + try: + edge = next(edges[current_node]) + except StopIteration: + # No more edges from the current node. 
+ stack.pop() + else: + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + # Mark the traversed "to" node as to-be-explored. + if check_reverse and edge[-1] == REVERSE: + stack.append(edge[0]) + else: + stack.append(edge[1]) + yield edge diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py new file mode 100644 index 0000000..249cc2f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py @@ -0,0 +1,27 @@ +"""Unit tests for the beam search functions.""" + +import networkx as nx + + +def identity(x): + return x + + +class TestBeamSearch: + """Unit tests for the beam search function.""" + + def test_narrow(self): + """Tests that a narrow beam width may cause an incomplete search.""" + # In this search, we enqueue only the neighbor 3 at the first + # step, then only the neighbor 2 at the second step. Once at + # node 2, the search chooses node 3, since it has a higher value + # that node 1, but node 3 has already been visited, so the + # search terminates. + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, 0, identity, width=1) + assert list(edges) == [(0, 3), (3, 2)] + + def test_wide(self): + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, 0, identity, width=2) + assert list(edges) == [(0, 3), (0, 1), (3, 2)] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_bfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_bfs.py new file mode 100644 index 0000000..2ae42b9 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_bfs.py @@ -0,0 +1,119 @@ +from functools import partial + +import pytest + +import networkx as nx + + +class TestBFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + cls.G = G + + def test_successor(self): + assert dict(nx.bfs_successors(self.G, source=0)) == {0: [1], 1: [2, 3], 2: [4]} + + def test_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2} + + def test_bfs_tree(self): + T = nx.bfs_tree(self.G, source=0) + assert sorted(T.nodes()) == sorted(self.G.nodes()) + assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges_reverse(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + edges = nx.bfs_edges(D, source=4, reverse=True) + assert list(edges) == [(4, 2), (4, 3), (2, 1), (1, 0)] + + def test_bfs_edges_sorting(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (0, 2), (1, 4), (1, 3), (2, 5)]) + sort_desc = partial(sorted, reverse=True) + edges_asc = nx.bfs_edges(D, source=0, sort_neighbors=sorted) + edges_desc = nx.bfs_edges(D, source=0, sort_neighbors=sort_desc) + assert list(edges_asc) == [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)] + assert list(edges_desc) == [(0, 2), (0, 1), (2, 5), (1, 4), (1, 3)] + + def test_bfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = 
nx.bfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + + def test_descendants_at_distance(self): + for distance, descendants in enumerate([{0}, {1}, {2, 3}, {4}]): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + + def test_descendants_at_distance_missing_source(self): + with pytest.raises(nx.NetworkXError): + nx.descendants_at_distance(self.G, "abc", 0) + + +class TestBreadthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_limited_bfs_successor(self): + assert dict(nx.bfs_successors(self.G, source=1, depth_limit=3)) == { + 1: [0, 2], + 2: [3, 7], + 3: [4], + 7: [8], + } + result = { + n: sorted(s) for n, s in nx.bfs_successors(self.D, source=7, depth_limit=2) + } + assert result == {8: [9], 2: [3], 7: [2, 8]} + + def test_limited_bfs_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=1, depth_limit=3)) == { + 0: 1, + 2: 1, + 3: 2, + 4: 3, + 7: 2, + 8: 7, + } + assert dict(nx.bfs_predecessors(self.D, source=7, depth_limit=2)) == { + 2: 7, + 3: 2, + 8: 7, + 9: 8, + } + + def test_limited_bfs_tree(self): + T = nx.bfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_limited_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (9, 10), (8, 7), (7, 2), (2, 1), (2, 3)] + + def test_limited_descendants_at_distance(self): + for distance, descendants in enumerate( + [{0}, {1}, {2}, {3, 7}, {4, 8}, {5, 9}, {6, 10}] + ): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + for distance, descendants in enumerate([{2}, {3, 7}, {8}, {9}, {10}]): + assert nx.descendants_at_distance(self.D, 2, distance) == descendants diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_dfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_dfs.py new file mode 100644 index 0000000..7652809 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_dfs.py @@ -0,0 +1,152 @@ +import networkx as nx + + +class TestDFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + cls.G = G + # simple graph, disconnected + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + cls.D = D + + def test_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0)) == [0, 1, 2, 4, 3] + assert list(nx.dfs_preorder_nodes(self.D)) == [0, 1, 2, 3] + assert list(nx.dfs_preorder_nodes(self.D, source=2)) == [2, 3] + + def test_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=0)) == [4, 2, 3, 1, 0] + assert list(nx.dfs_postorder_nodes(self.D)) == [1, 0, 3, 2] + assert list(nx.dfs_postorder_nodes(self.D, source=0)) == [1, 0] + + def test_successor(self): + assert nx.dfs_successors(self.G, source=0) == {0: [1], 1: [2, 3], 2: [4]} + assert nx.dfs_successors(self.G, source=1) == {0: [3, 4], 1: [0], 4: [2]} + assert nx.dfs_successors(self.D) == {0: [1], 2: [3]} + assert nx.dfs_successors(self.D, source=1) == {1: [0]} + + def test_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2} + assert nx.dfs_predecessors(self.D) == {1: 0, 3: 
2} + + def test_dfs_tree(self): + exp_nodes = sorted(self.G.nodes()) + exp_edges = [(0, 1), (1, 2), (1, 3), (2, 4)] + # Search from first node + T = nx.dfs_tree(self.G, source=0) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None + T = nx.dfs_tree(self.G, source=None) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None is the default + T = nx.dfs_tree(self.G) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + + def test_dfs_edges(self): + edges = nx.dfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (2, 4), (1, 3)] + edges = nx.dfs_edges(self.D) + assert list(edges) == [(0, 1), (2, 3)] + + def test_dfs_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=0)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (1, 2), (2, 4), (1, 3)] + + def test_dfs_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (2, 2), (2, 3)] + + def test_dfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.dfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + T = nx.dfs_tree(G, source=None) + assert sorted(T.nodes()) == [1, 2] + assert sorted(T.edges()) == [] + + +class TestDepthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_dls_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0, depth_limit=2)) == [0, 1, 2] + assert list(nx.dfs_preorder_nodes(self.D, source=1, depth_limit=2)) == ([1, 0]) + + def test_dls_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=3, depth_limit=3)) == [ + 1, + 7, + 2, + 5, + 4, + 3, + ] + assert list(nx.dfs_postorder_nodes(self.D, source=2, depth_limit=2)) == ( + [3, 7, 2] + ) + + def test_dls_successor(self): + result = nx.dfs_successors(self.G, source=4, depth_limit=3) + assert {n: set(v) for n, v in result.items()} == { + 2: {1, 7}, + 3: {2}, + 4: {3, 5}, + 5: {6}, + } + result = nx.dfs_successors(self.D, source=7, depth_limit=2) + assert {n: set(v) for n, v in result.items()} == {8: {9}, 2: {3}, 7: {8, 2}} + + def test_dls_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0, depth_limit=3) == { + 1: 0, + 2: 1, + 3: 2, + 7: 2, + } + assert nx.dfs_predecessors(self.D, source=2, depth_limit=3) == { + 8: 7, + 9: 8, + 3: 2, + 7: 2, + } + + def test_dls_tree(self): + T = nx.dfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_dls_edges(self): + edges = nx.dfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (8, 7), (7, 2), (2, 1), (2, 3), (9, 10)] + + def test_dls_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(5, 5), (5, 4), (5, 6)] + + def test_dls_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(6, 6), (6, 5), (5, 4)] diff --git 
a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py new file mode 100644 index 0000000..1bf3fae --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py @@ -0,0 +1,147 @@ +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +class TestEdgeBFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(nx.edge_bfs(G)) + assert edges == [] + + def test_graph_single_source(self): + G = nx.Graph(self.edges) + G.add_edge(4, 5) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = nx.edge_bfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="original")) + x_ = [ + (0, 1, FORWARD), + (1, 0, FORWARD), + (2, 0, FORWARD), + (2, 1, FORWARD), + (3, 1, FORWARD), + ] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, REVERSE), + (2, 0, REVERSE), + (0, 1, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, FORWARD), + (1, 0, REVERSE), + (2, 0, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py new file mode 100644 index 0000000..7c1967c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.algorithms import edge_dfs +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + +# These tests can fail with hash randomization. The easiest and clearest way +# to write these unit tests is for the edges to be output in an expected total +# order, but we cannot guarantee the order amongst outgoing edges from a node, +# unless each class uses an ordered data structure for neighbors. This is +# painful to do with the current API. The alternative is that the tests are +# written (IMO confusingly) so that there is not a total order over the edges, +# but only a partial order. Due to the small size of the graphs, hopefully +# failures due to hash randomization will not occur. For an example of how +# this can fail, see TestEdgeDFS.test_multigraph. 
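# One way to express the "partial order" style of assertion described above is
# sketched here; the helpers are hypothetical (editorial illustration, not part of
# this test suite) and reuse the frozenset normalization that edge_dfs itself
# applies to undirected edges.
from collections import Counter


def _undirected_edge_id(edge):
    # unordered endpoints plus any remaining entries (e.g. the multigraph key)
    return (frozenset(edge[:2]),) + edge[2:]


def assert_grouped_edge_order(observed, expected_groups):
    # each group holds edges whose relative order is not guaranteed;
    # the groups themselves must appear in the given order
    it = iter(observed)
    for group in expected_groups:
        chunk = [next(it) for _ in group]
        assert Counter(map(_undirected_edge_id, chunk)) == Counter(
            map(_undirected_edge_id, group)
        )


# e.g. for test_multigraph below:
# assert_grouped_edge_order(x, [[(0, 1, 0), (1, 0, 1), (0, 1, 2)], [(1, 2, 0), (1, 3, 0)]])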
+ + +class TestEdgeDFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(edge_dfs(G)) + assert edges == [] + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = edge_dfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, FORWARD), (3, 1, FORWARD)] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, FORWARD), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/__init__.py new file mode 100644 index 0000000..7120d4b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/__init__.py @@ -0,0 +1,6 @@ +from .branchings import * +from .coding import * +from .mst import * +from .recognition import * +from .operations import * +from .decomposition import * diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/branchings.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/branchings.py new file mode 100644 index 0000000..8f8602b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/branchings.py @@ -0,0 +1,1048 @@ +""" +Algorithms for finding optimum branchings and spanning arborescences. + +This implementation is based on: + + J. Edmonds, Optimum branchings, J. Res. Natl. Bur. Standards 71B (1967), + 233–240. URL: http://archive.org/details/jresv71Bn4p233 + +""" +# TODO: Implement method from Gabow, Galil, Spence and Tarjan: +# +# @article{ +# year={1986}, +# issn={0209-9683}, +# journal={Combinatorica}, +# volume={6}, +# number={2}, +# doi={10.1007/BF02579168}, +# title={Efficient algorithms for finding minimum spanning trees in +# undirected and directed graphs}, +# url={https://doi.org/10.1007/BF02579168}, +# publisher={Springer-Verlag}, +# keywords={68 B 15; 68 C 05}, +# author={Gabow, Harold N. 
and Galil, Zvi and Spencer, Thomas and Tarjan, +# Robert E.}, +# pages={109-122}, +# language={English} +# } +import string +from dataclasses import dataclass, field +from enum import Enum +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import py_random_state + +from .recognition import is_arborescence, is_branching + +__all__ = [ + "branching_weight", + "greedy_branching", + "maximum_branching", + "minimum_branching", + "maximum_spanning_arborescence", + "minimum_spanning_arborescence", + "ArborescenceIterator", + "Edmonds", +] + +KINDS = {"max", "min"} + +STYLES = { + "branching": "branching", + "arborescence": "arborescence", + "spanning arborescence": "arborescence", +} + +INF = float("inf") + + +@py_random_state(1) +def random_string(L=15, seed=None): + return "".join([seed.choice(string.ascii_letters) for n in range(L)]) + + +def _min_weight(weight): + return -weight + + +def _max_weight(weight): + return weight + + +def branching_weight(G, attr="weight", default=1): + """ + Returns the total weight of a branching. + + You must access this function through the networkx.algorithms.tree module. + + Parameters + ---------- + G : DiGraph + The directed graph. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + + Returns + ------- + weight: int or float + The total weight of the branching. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 4), (2, 3, 3), (3, 4, 2)]) + >>> nx.tree.branching_weight(G) + 11 + + """ + return sum(edge[2].get(attr, default) for edge in G.edges(data=True)) + + +@py_random_state(4) +def greedy_branching(G, attr="weight", default=1, kind="max", seed=None): + """ + Returns a branching obtained through a greedy algorithm. + + This algorithm is wrong, and cannot give a proper optimal branching. + However, we include it for pedagogical reasons, as it can be helpful to + see what its outputs are. + + The output is a branching, and possibly, a spanning arborescence. However, + it is not guaranteed to be optimal in either case. + + Parameters + ---------- + G : DiGraph + The directed graph to scan. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + kind : str + The type of optimum to search for: 'min' or 'max' greedy branching. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + B : directed graph + The greedily obtained branching. + + """ + if kind not in KINDS: + raise nx.NetworkXException("Unknown value for `kind`.") + + if kind == "min": + reverse = False + else: + reverse = True + + if attr is None: + # Generate a random string the graph probably won't have. + attr = random_string(seed=seed) + + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + + # We sort by weight, but also by nodes to normalize behavior across runs. + try: + edges.sort(key=itemgetter(2, 0, 1), reverse=reverse) + except TypeError: + # This will fail in Python 3.x if the nodes are of varying types. + # In that case, we use the arbitrary order. 
+ edges.sort(key=itemgetter(2), reverse=reverse) + + # The branching begins with a forest of no edges. + B = nx.DiGraph() + B.add_nodes_from(G) + + # Now we add edges greedily so long we maintain the branching. + uf = nx.utils.UnionFind() + for i, (u, v, w) in enumerate(edges): + if uf[u] == uf[v]: + # Adding this edge would form a directed cycle. + continue + elif B.in_degree(v) == 1: + # The edge would increase the degree to be greater than one. + continue + else: + # If attr was None, then don't insert weights... + data = {} + if attr is not None: + data[attr] = w + B.add_edge(u, v, **data) + uf.union(u, v) + + return B + + +class MultiDiGraph_EdgeKey(nx.MultiDiGraph): + """ + MultiDiGraph which assigns unique keys to every edge. + + Adds a dictionary edge_index which maps edge keys to (u, v, data) tuples. + + This is not a complete implementation. For Edmonds algorithm, we only use + add_node and add_edge, so that is all that is implemented here. During + additions, any specified keys are ignored---this means that you also + cannot update edge attributes through add_node and add_edge. + + Why do we need this? Edmonds algorithm requires that we track edges, even + as we change the head and tail of an edge, and even changing the weight + of edges. We must reliably track edges across graph mutations. + + """ + + def __init__(self, incoming_graph_data=None, **attr): + cls = super() + cls.__init__(incoming_graph_data=incoming_graph_data, **attr) + + self._cls = cls + self.edge_index = {} + + def remove_node(self, n): + keys = set() + for keydict in self.pred[n].values(): + keys.update(keydict) + for keydict in self.succ[n].values(): + keys.update(keydict) + + for key in keys: + del self.edge_index[key] + + self._cls.remove_node(n) + + def remove_nodes_from(self, nbunch): + for n in nbunch: + self.remove_node(n) + + def add_edge(self, u_for_edge, v_for_edge, key_for_edge, **attr): + """ + Key is now required. + + """ + u, v, key = u_for_edge, v_for_edge, key_for_edge + if key in self.edge_index: + uu, vv, _ = self.edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + self._cls.add_edge(u, v, key, **attr) + self.edge_index[key] = (u, v, self.succ[u][v][key]) + + def add_edges_from(self, ebunch_to_add, **attr): + for u, v, k, d in ebunch_to_add: + self.add_edge(u, v, k, **d) + + def remove_edge_with_key(self, key): + try: + u, v, _ = self.edge_index[key] + except KeyError as err: + raise KeyError(f"Invalid edge key {key!r}") from err + else: + del self.edge_index[key] + self._cls.remove_edge(u, v, key) + + def remove_edges_from(self, ebunch): + raise NotImplementedError + + +def get_path(G, u, v): + """ + Returns the edge keys of the unique path between u and v. + + This is not a generic function. G must be a branching and an instance of + MultiDiGraph_EdgeKey. + + """ + nodes = nx.shortest_path(G, u, v) + + # We are guaranteed that there is only one edge connected every node + # in the shortest path. + + def first_key(i, vv): + # Needed for 2.x/3.x compatibilitity + keys = G[nodes[i]][vv].keys() + # Normalize behavior + keys = list(keys) + return keys[0] + + edges = [first_key(i, vv) for i, vv in enumerate(nodes[1:])] + return nodes, edges + + +class Edmonds: + """ + Edmonds algorithm [1]_ for finding optimal branchings and spanning + arborescences. + + This algorithm can find both minimum and maximum spanning arborescences and + branchings. 
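    As a quick usage illustration (an editorial doctest-style sketch, not taken
    from the upstream docstring), the public wrappers defined later in this file
    on top of this class can be called as:

    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_weighted_edges_from([(0, 1, 5), (0, 2, 1), (2, 1, 2), (1, 3, 4)])
    >>> T = nx.maximum_spanning_arborescence(G)
    >>> sorted(T.edges(data="weight"))
    [(0, 1, 5), (0, 2, 1), (1, 3, 4)]

    Node 1 keeps its heaviest incoming edge (0, 1) rather than (2, 1), which is
    the per-node choice the algorithm makes before checking for circuits.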
+ + Notes + ----- + While this algorithm can find a minimum branching, since it isn't required + to be spanning, the minimum branching is always from the set of negative + weight edges which is most likely the empty set for most graphs. + + References + ---------- + .. [1] J. Edmonds, Optimum Branchings, Journal of Research of the National + Bureau of Standards, 1967, Vol. 71B, p.233-240, + https://archive.org/details/jresv71Bn4p233 + + """ + + def __init__(self, G, seed=None): + self.G_original = G + + # Need to fix this. We need the whole tree. + self.store = True + + # The final answer. + self.edges = [] + + # Since we will be creating graphs with new nodes, we need to make + # sure that our node names do not conflict with the real node names. + self.template = random_string(seed=seed) + "_{0}" + + def _init(self, attr, default, kind, style, preserve_attrs, seed, partition): + if kind not in KINDS: + raise nx.NetworkXException("Unknown value for `kind`.") + + # Store inputs. + self.attr = attr + self.default = default + self.kind = kind + self.style = style + + # Determine how we are going to transform the weights. + if kind == "min": + self.trans = trans = _min_weight + else: + self.trans = trans = _max_weight + + if attr is None: + # Generate a random attr the graph probably won't have. + attr = random_string(seed=seed) + + # This is the actual attribute used by the algorithm. + self._attr = attr + + # This attribute is used to store whether a particular edge is still + # a candidate. We generate a random attr to remove clashes with + # preserved edges + self.candidate_attr = "candidate_" + random_string(seed=seed) + + # The object we manipulate at each step is a multidigraph. + self.G = G = MultiDiGraph_EdgeKey() + for key, (u, v, data) in enumerate(self.G_original.edges(data=True)): + d = {attr: trans(data.get(attr, default))} + + if data.get(partition) is not None: + d[partition] = data.get(partition) + + if preserve_attrs: + for (d_k, d_v) in data.items(): + if d_k != attr: + d[d_k] = d_v + + G.add_edge(u, v, key, **d) + + self.level = 0 + + # These are the "buckets" from the paper. + # + # As in the paper, G^i are modified versions of the original graph. + # D^i and E^i are nodes and edges of the maximal edges that are + # consistent with G^i. These are dashed edges in figures A-F of the + # paper. In this implementation, we store D^i and E^i together as a + # graph B^i. So we will have strictly more B^i than the paper does. + self.B = MultiDiGraph_EdgeKey() + self.B.edge_index = {} + self.graphs = [] # G^i + self.branchings = [] # B^i + self.uf = nx.utils.UnionFind() + + # A list of lists of edge indexes. Each list is a circuit for graph G^i. + # Note the edge list will not, in general, be a circuit in graph G^0. + self.circuits = [] + # Stores the index of the minimum edge in the circuit found in G^i + # and B^i. The ordering of the edges seems to preserve the weight + # ordering from G^0. So even if the circuit does not form a circuit + # in G^0, it is still true that the minimum edge of the circuit in + # G^i is still the minimum edge in circuit G^0 (despite their weights + # being different). + self.minedge_circuit = [] + + def find_optimum( + self, + attr="weight", + default=1, + kind="max", + style="branching", + preserve_attrs=False, + partition=None, + seed=None, + ): + """ + Returns a branching from G. + + Parameters + ---------- + attr : str + The edge attribute used to in determining optimality. 
+ default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. + kind : {'min', 'max'} + The type of optimum to search for, either 'min' or 'max'. + style : {'branching', 'arborescence'} + If 'branching', then an optimal branching is found. If `style` is + 'arborescence', then a branching is found, such that if the + branching is also an arborescence, then the branching is an + optimal spanning arborescences. A given graph G need not have + an optimal spanning arborescence. + preserve_attrs : bool + If True, preserve the other edge attributes of the original + graph (that are not the one passed to `attr`) + partition : str + The edge attribute holding edge partition data. Used in the + spanning arborescence iterator. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + H : (multi)digraph + The branching. + + """ + self._init(attr, default, kind, style, preserve_attrs, seed, partition) + uf = self.uf + + # This enormous while loop could use some refactoring... + + G, B = self.G, self.B + D = set() + nodes = iter(list(G.nodes())) + attr = self._attr + G_pred = G.pred + + def desired_edge(v): + """ + Find the edge directed toward v with maximal weight. + + If an edge partition exists in this graph, return the included edge + if it exists and no not return any excluded edges. There can only + be one included edge for each vertex otherwise the edge partition is + empty. + """ + edge = None + weight = -INF + for u, _, key, data in G.in_edges(v, data=True, keys=True): + # Skip excluded edges + if data.get(partition) == nx.EdgePartition.EXCLUDED: + continue + new_weight = data[attr] + # Return the included edge + if data.get(partition) == nx.EdgePartition.INCLUDED: + weight = new_weight + edge = (u, v, key, new_weight, data) + return edge, weight + # Find the best open edge + if new_weight > weight: + weight = new_weight + edge = (u, v, key, new_weight, data) + + return edge, weight + + while True: + # (I1): Choose a node v in G^i not in D^i. + try: + v = next(nodes) + except StopIteration: + # If there are no more new nodes to consider, then we *should* + # meet the break condition (b) from the paper: + # (b) every node of G^i is in D^i and E^i is a branching + # Construction guarantees that it's a branching. + assert len(G) == len(B) + if len(B): + assert is_branching(B) + + if self.store: + self.graphs.append(G.copy()) + self.branchings.append(B.copy()) + + # Add these to keep the lengths equal. Element i is the + # circuit at level i that was merged to form branching i+1. + # There is no circuit for the last level. + self.circuits.append([]) + self.minedge_circuit.append(None) + break + else: + if v in D: + # print("v in D", v) + continue + + # Put v into bucket D^i. + # print(f"Adding node {v}") + D.add(v) + B.add_node(v) + + edge, weight = desired_edge(v) + # print(f"Max edge is {edge!r}") + if edge is None: + # If there is no edge, continue with a new node at (I1). + continue + else: + # Determine if adding the edge to E^i would mean its no longer + # a branching. Presently, v has indegree 0 in B---it is a root. + u = edge[0] + + if uf[u] == uf[v]: + # Then adding the edge will create a circuit. Then B + # contains a unique path P from v to u. So condition (a) + # from the paper does hold. We need to store the circuit + # for future reference. 
+ Q_nodes, Q_edges = get_path(B, v, u) + Q_edges.append(edge[2]) # Edge key + else: + # Then B with the edge is still a branching and condition + # (a) from the paper does not hold. + Q_nodes, Q_edges = None, None + + # Conditions for adding the edge. + # If weight < 0, then it cannot help in finding a maximum branching. + if self.style == "branching" and weight <= 0: + acceptable = False + else: + acceptable = True + + # print(f"Edge is acceptable: {acceptable}") + if acceptable: + dd = {attr: weight} + if edge[4].get(partition) is not None: + dd[partition] = edge[4].get(partition) + B.add_edge(u, v, edge[2], **dd) + G[u][v][edge[2]][self.candidate_attr] = True + uf.union(u, v) + if Q_edges is not None: + # print("Edge introduced a simple cycle:") + # print(Q_nodes, Q_edges) + + # Move to method + # Previous meaning of u and v is no longer important. + + # Apply (I2). + # Get the edge in the cycle with the minimum weight. + # Also, save the incoming weights for each node. + minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B.edge_index[edge_key] + # We cannot remove an included edges, even if it is + # the minimum edge in the circuit + w = data[attr] + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + self.circuits.append(Q_edges) + self.minedge_circuit.append(minedge) + + if self.store: + self.graphs.append(G.copy()) + # Always need the branching with circuits. + self.branchings.append(B.copy()) + + # Now we mutate it. + new_node = self.template.format(self.level) + + # print(minweight, minedge, Q_incoming_weight) + + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge, do nothing for now. + # Eventually delete it. + continue + else: + # Outgoing edge. Make it from new node + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge. Change its weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification necessary. + continue + + G.remove_nodes_from(Q_nodes) + B.remove_nodes_from(Q_nodes) + D.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + G.add_edge(u, v, key, **data) + if self.candidate_attr in data: + del data[self.candidate_attr] + B.add_edge(u, v, key, **data) + uf.union(u, v) + + nodes = iter(list(G.nodes())) + self.level += 1 + + # (I3) Branch construction. + # print(self.level) + H = self.G_original.__class__() + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. + + Node `u` will be a root node if its in-degree, restricted to the + specified edges, is equal to 0. + + """ + if u not in G: + # print(G.nodes(), u) + raise Exception(f"{u!r} not in G") + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + # Start with the branching edges in the last level. + edges = set(self.branchings[self.level].edge_index) + while self.level > 0: + self.level -= 1 + + # The current level is i, and we start counting from 0. + + # We need the node at level i+1 that results from merging a circuit + # at level i. randomname_0 is the first merged node and this + # happens at level 1. 
That is, randomname_0 is a node at level 1 + # that results from merging a circuit at level 0. + merged_node = self.template.format(self.level) + + # The circuit at level i that was merged as a node the graph + # at level i+1. + circuit = self.circuits[self.level] + # print + # print(merged_node, self.level, circuit) + # print("before", edges) + # Note, we ask if it is a root in the full graph, not the branching. + # The branching alone doesn't have all the edges. + + isroot, edgekey = is_root(self.graphs[self.level + 1], merged_node, edges) + edges.update(circuit) + if isroot: + minedge = self.minedge_circuit[self.level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight. + edges.remove(minedge) + else: + # We have identified an edge at next higher level that + # transitions into the merged node at the level. That edge + # transitions to some corresponding node at the current level. + # We want to remove an edge from the cycle that transitions + # into the corresponding node. + # print("edgekey is: ", edgekey) + # print("circuit is: ", circuit) + # The branching at level i + G = self.graphs[self.level] + # print(G.edge_index) + target = G.edge_index[edgekey][1] + for edgekey in circuit: + u, v, data = G.edge_index[edgekey] + if v == target: + break + else: + raise Exception("Couldn't find edge incoming to merged node.") + # print(f"not a root. removing {edgekey}") + + edges.remove(edgekey) + + self.edges = edges + + H.add_nodes_from(self.G_original) + for edgekey in edges: + u, v, d = self.graphs[0].edge_index[edgekey] + dd = {self.attr: self.trans(d[self.attr])} + + # Optionally, preserve the other edge attributes of the original + # graph + if preserve_attrs: + for (key, value) in d.items(): + if key not in [self.attr, self.candidate_attr]: + dd[key] = value + + # TODO: make this preserve the key. + H.add_edge(u, v, **dd) + + return H + + +def maximum_branching( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + ed = Edmonds(G) + B = ed.find_optimum( + attr, + default, + kind="max", + style="branching", + preserve_attrs=preserve_attrs, + partition=partition, + ) + return B + + +def minimum_branching( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + ed = Edmonds(G) + B = ed.find_optimum( + attr, + default, + kind="min", + style="branching", + preserve_attrs=preserve_attrs, + partition=partition, + ) + return B + + +def maximum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + ed = Edmonds(G) + B = ed.find_optimum( + attr, + default, + kind="max", + style="arborescence", + preserve_attrs=preserve_attrs, + partition=partition, + ) + if not is_arborescence(B): + msg = "No maximum spanning arborescence in G." + raise nx.exception.NetworkXException(msg) + return B + + +def minimum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + ed = Edmonds(G) + B = ed.find_optimum( + attr, + default, + kind="min", + style="arborescence", + preserve_attrs=preserve_attrs, + partition=partition, + ) + if not is_arborescence(B): + msg = "No minimum spanning arborescence in G." + raise nx.exception.NetworkXException(msg) + return B + + +docstring_branching = """ +Returns a {kind} {style} from G. + +Parameters +---------- +G : (multi)digraph-like + The graph to be searched. +attr : str + The edge attribute used to in determining optimality. 
+default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. +preserve_attrs : bool + If True, preserve the other attributes of the original graph (that are not + passed to `attr`) +partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + +Returns +------- +B : (multi)digraph-like + A {kind} {style}. +""" + +docstring_arborescence = ( + docstring_branching + + """ +Raises +------ +NetworkXException + If the graph does not contain a {kind} {style}. + +""" +) + +maximum_branching.__doc__ = docstring_branching.format( + kind="maximum", style="branching" +) + +minimum_branching.__doc__ = docstring_branching.format( + kind="minimum", style="branching" +) + +maximum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="maximum", style="spanning arborescence" +) + +minimum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="minimum", style="spanning arborescence" +) + + +class ArborescenceIterator: + """ + Iterate over all spanning arborescences of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges). It generates minimum spanning + arborescences using a modified Edmonds' Algorithm which respects the + partition of edges. For arborescences with the same weight, ties are + broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning arborescence of the + partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return ArborescenceIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, init_partition=None): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.DiGraph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + init_partition : tuple, default = None + In the case that certain edges have to be included or excluded from + the arborescences, `init_partition` should be in the form + `(included_edges, excluded_edges)` where each edges is a + `(u, v)`-tuple inside an iterable such as a list or set. 
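+
+        Examples
+        --------
+        A minimal usage sketch (this small graph is an illustrative
+        assumption, not part of the documented API):
+
+        >>> G = nx.DiGraph()
+        >>> G.add_weighted_edges_from([(0, 1, 2), (0, 2, 5), (1, 2, 1)])
+        >>> arbs = list(ArborescenceIterator(G))  # cheapest arborescence first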
+ + """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.method = ( + minimum_spanning_arborescence if minimum else maximum_spanning_arborescence + ) + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "ArborescenceIterators super secret partition attribute name" + ) + if init_partition is not None: + partition_dict = {} + for e in init_partition[0]: + partition_dict[e] = nx.EdgePartition.INCLUDED + for e in init_partition[1]: + partition_dict[e] = nx.EdgePartition.EXCLUDED + self.init_partition = ArborescenceIterator.Partition(0, partition_dict) + else: + self.init_partition = None + + def __iter__(self): + """ + Returns + ------- + ArborescenceIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + + # Write the initial partition if it exists. + if self.init_partition is not None: + self._write_partition(self.init_partition) + + mst_weight = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition( + mst_weight if self.minimum else -mst_weight, + dict() + if self.init_partition is None + else self.init_partition.partition_dict, + ) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_arborescence = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + self._partition(partition, next_arborescence) + + self._clear_partition(next_arborescence) + return next_arborescence + + def _partition(self, partition, partition_arborescence): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_arborescence : nx.Graph + The minimum spanning arborescence of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_arborescence.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = nx.EdgePartition.EXCLUDED + p2.partition_dict[e] = nx.EdgePartition.INCLUDED + + self._write_partition(p1) + try: + p1_mst = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + + p1_mst_weight = p1_mst.size(weight=self.weight) + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + except nx.NetworkXException: + pass + + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. Also, if one incoming edge is included, mark all others + as excluded so that if that vertex is merged during Edmonds' algorithm + we cannot still pick another of that vertex's included edges. 
+ + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. + """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = nx.EdgePartition.OPEN + + for n in self.G: + included_count = 0 + excluded_count = 0 + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) == nx.EdgePartition.INCLUDED: + included_count += 1 + elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED: + excluded_count += 1 + # Check that if there is an included edges, all other incoming ones + # are excluded. If not fix it! + if included_count == 1 and excluded_count != self.G.in_degree(n) - 1: + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) != nx.EdgePartition.INCLUDED: + d[self.partition_key] = nx.EdgePartition.EXCLUDED + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/coding.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/coding.py new file mode 100644 index 0000000..0147e7e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/coding.py @@ -0,0 +1,398 @@ +"""Functions for encoding and decoding trees. + +Since a tree is a highly restricted form of graph, it can be represented +concisely in several ways. This module includes functions for encoding +and decoding trees in the form of nested tuples and Prüfer +sequences. The former requires a rooted tree, whereas the latter can be +applied to unrooted trees. Furthermore, there is a bijection from Prüfer +sequences to labeled trees. + +""" +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_nested_tuple", + "from_prufer_sequence", + "NotATree", + "to_nested_tuple", + "to_prufer_sequence", +] + + +class NotATree(nx.NetworkXException): + """Raised when a function expects a tree (that is, a connected + undirected graph with no cycles) but gets a non-tree graph as input + instead. + + """ + + +@not_implemented_for("directed") +def to_nested_tuple(T, root, canonical_form=False): + """Returns a nested tuple representation of the given tree. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + root : node + The node in ``T`` to interpret as the root of the tree. + + canonical_form : bool + If ``True``, each tuple is sorted so that the function returns + a canonical form for rooted trees. This means "lighter" subtrees + will appear as nested tuples before "heavier" subtrees. In this + way, each isomorphic rooted tree has the same nested tuple + representation. + + Returns + ------- + tuple + A nested tuple representation of the tree. + + Notes + ----- + This function is *not* the inverse of :func:`from_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. 
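+    For example, ``from_nested_tuple(to_nested_tuple(T, root))`` need not
+    equal `T` node-for-node, but it is guaranteed to be isomorphic to `T`.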
+ + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + The tree need not be a balanced binary tree:: + + >>> T = nx.Graph() + >>> T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + >>> T.add_edges_from([(1, 4), (1, 5)]) + >>> T.add_edges_from([(3, 6), (3, 7)]) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + (((), ()), (), ((), ())) + + Continuing the above example, if ``canonical_form`` is ``True``, the + nested tuples will be sorted:: + + >>> nx.to_nested_tuple(T, root, canonical_form=True) + ((), ((), ()), ((), ())) + + Even the path graph can be interpreted as a tree:: + + >>> T = nx.path_graph(4) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + ((((),),),) + + """ + + def _make_tuple(T, root, _parent): + """Recursively compute the nested tuple representation of the + given rooted tree. + + ``_parent`` is the parent node of ``root`` in the supertree in + which ``T`` is a subtree, or ``None`` if ``root`` is the root of + the supertree. This argument is used to determine which + neighbors of ``root`` are children and which is the parent. + + """ + # Get the neighbors of `root` that are not the parent node. We + # are guaranteed that `root` is always in `T` by construction. + children = set(T[root]) - {_parent} + if len(children) == 0: + return () + nested = (_make_tuple(T, v, root) for v in children) + if canonical_form: + nested = sorted(nested) + return tuple(nested) + + # Do some sanity checks on the input. + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if root not in T: + raise nx.NodeNotFound(f"Graph {T} contains no node {root}") + + return _make_tuple(T, root, None) + + +def from_nested_tuple(sequence, sensible_relabeling=False): + """Returns the rooted tree corresponding to the given nested tuple. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + sequence : tuple + A nested tuple representing a rooted tree. + + sensible_relabeling : bool + Whether to relabel the nodes of the tree so that nodes are + labeled in increasing order according to their breadth-first + search order from the root node. + + Returns + ------- + NetworkX graph + The tree corresponding to the given nested tuple, whose root + node is node 0. If ``sensible_labeling`` is ``True``, nodes will + be labeled in breadth-first search order starting from the root + node. + + Notes + ----- + This function is *not* the inverse of :func:`to_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + Examples + -------- + Sensible relabeling ensures that the nodes are labeled from the root + starting at 0:: + + >>> balanced = (((), ()), ((), ())) + >>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + >>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + >>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges) + True + + """ + + def _make_tree(sequence): + """Recursively creates a tree from the given sequence of nested + tuples. + + This function employs the :func:`~networkx.tree.join` function + to recursively join subtrees into a larger tree. + + """ + # The empty sequence represents the empty tree, which is the + # (unique) graph with a single node. 
We mark the single node + # with an attribute that indicates that it is the root of the + # graph. + if len(sequence) == 0: + return nx.empty_graph(1) + # For a nonempty sequence, get the subtrees for each child + # sequence and join all the subtrees at their roots. After + # joining the subtrees, the root is node 0. + return nx.tree.join([(_make_tree(child), 0) for child in sequence]) + + # Make the tree and remove the `is_root` node attribute added by the + # helper function. + T = _make_tree(sequence) + if sensible_relabeling: + # Relabel the nodes according to their breadth-first search + # order, starting from the root node (that is, the node 0). + bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0))) + labels = {v: i for i, v in enumerate(bfs_nodes)} + # We would like to use `copy=False`, but `relabel_nodes` doesn't + # allow a relabel mapping that can't be topologically sorted. + T = nx.relabel_nodes(T, labels) + return T + + +@not_implemented_for("directed") +def to_prufer_sequence(T): + r"""Returns the Prüfer sequence of the given tree. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + Returns + ------- + list + The Prüfer sequence of the given tree. + + Raises + ------ + NetworkXPointlessConcept + If the number of nodes in `T` is less than two. + + NotATree + If `T` is not a tree. + + KeyError + If the set of nodes in `T` is not {0, …, *n* - 1}. + + Notes + ----- + There is a bijection from labeled trees to Prüfer sequences. This + function is the inverse of the :func:`from_prufer_sequence` + function. + + Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead + of from 0 to *n* - 1. This function requires nodes to be labeled in + the latter form. You can use :func:`~networkx.relabel_nodes` to + relabel the nodes of your tree to the appropriate format. + + This implementation is from [1]_ and has a running time of + $O(n)$. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + References + ---------- + .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu. + "An optimal algorithm for Prufer codes." + *Journal of Software Engineering and Applications* 2.02 (2009): 111. + + + Examples + -------- + There is a bijection between Prüfer sequences and labeled trees, so + this function is the inverse of the :func:`from_prufer_sequence` + function: + + >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + >>> tree = nx.Graph(edges) + >>> sequence = nx.to_prufer_sequence(tree) + >>> sequence + [3, 3, 3, 4] + >>> tree2 = nx.from_prufer_sequence(sequence) + >>> list(tree2.edges()) == edges + True + + """ + # Perform some sanity checks on the input. 
+ n = len(T) + if n < 2: + msg = "Prüfer sequence undefined for trees with fewer than two nodes" + raise nx.NetworkXPointlessConcept(msg) + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if set(T) != set(range(n)): + raise KeyError("tree must have node labels {0, ..., n - 1}") + + degree = dict(T.degree()) + + def parents(u): + return next(v for v in T[u] if degree[v] > 1) + + index = u = next(k for k in range(n) if degree[k] == 1) + result = [] + for i in range(n - 2): + v = parents(u) + result.append(v) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + return result + + +def from_prufer_sequence(sequence): + r"""Returns the tree corresponding to the given Prüfer sequence. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + sequence : list + A Prüfer sequence, which is a list of *n* - 2 integers between + zero and *n* - 1, inclusive. + + Returns + ------- + NetworkX graph + The tree corresponding to the given Prüfer sequence. + + Notes + ----- + There is a bijection from labeled trees to Prüfer sequences. This + function is the inverse of the :func:`from_prufer_sequence` function. + + Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead + of from 0 to *n* - 1. This function requires nodes to be labeled in + the latter form. You can use :func:`networkx.relabel_nodes` to + relabel the nodes of your tree to the appropriate format. + + This implementation is from [1]_ and has a running time of + $O(n)$. + + References + ---------- + .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu. + "An optimal algorithm for Prufer codes." + *Journal of Software Engineering and Applications* 2.02 (2009): 111. + + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + There is a bijection between Prüfer sequences and labeled trees, so + this function is the inverse of the :func:`to_prufer_sequence` + function: + + >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + >>> tree = nx.Graph(edges) + >>> sequence = nx.to_prufer_sequence(tree) + >>> sequence + [3, 3, 3, 4] + >>> tree2 = nx.from_prufer_sequence(sequence) + >>> list(tree2.edges()) == edges + True + + """ + n = len(sequence) + 2 + # `degree` stores the remaining degree (plus one) for each node. The + # degree of a node in the decoded tree is one more than the number + # of times it appears in the code. + degree = Counter(chain(sequence, range(n))) + T = nx.empty_graph(n) + # `not_orphaned` is the set of nodes that have a parent in the + # tree. After the loop, there should be exactly two nodes that are + # not in this set. + not_orphaned = set() + index = u = next(k for k in range(n) if degree[k] == 1) + for v in sequence: + T.add_edge(u, v) + not_orphaned.add(u) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + # At this point, there must be exactly two orphaned nodes; join them. 
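+    # (Added illustration: for the docstring sequence [3, 3, 3, 4] with
+    # n = 6, the loop above attaches parents to nodes 0, 1, 2 and 3, so
+    # nodes 4 and 5 remain orphaned and the final edge added is (4, 5).)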
+    orphans = set(T) - not_orphaned
+    u, v = orphans
+    T.add_edge(u, v)
+    return T
diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/decomposition.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/decomposition.py
new file mode 100644
index 0000000..54a2118
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/decomposition.py
@@ -0,0 +1,87 @@
+r"""Function for computing a junction tree of a graph."""
+
+from itertools import combinations
+
+import networkx as nx
+from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral
+from networkx.utils import not_implemented_for
+
+__all__ = ["junction_tree"]
+
+
+@not_implemented_for("multigraph")
+def junction_tree(G):
+    r"""Returns a junction tree of a given graph.
+
+    A junction tree (or clique tree) is constructed from an (un)directed graph G.
+    The tree is constructed based on a moralized and triangulated version of G.
+    The tree's nodes consist of maximal cliques and sepsets of the revised graph.
+    The sepset of two cliques is the intersection of the nodes of these cliques,
+    e.g. the sepset of (A,B,C) and (A,C,E,F) is (A,C). These nodes are often
+    called "variables" in the literature. The tree is bipartite, with each
+    sepset connected to its two cliques.
+
+    Junction trees are not unique, as the order of clique consideration
+    determines which sepsets are included.
+
+    The junction tree algorithm consists of five steps [1]_:
+
+    1. Moralize the graph
+    2. Triangulate the graph
+    3. Find maximal cliques
+    4. Build the tree from cliques, connecting cliques with shared
+       nodes, setting the edge weight to the number of shared variables
+    5. Find the maximum spanning tree
+
+    Parameters
+    ----------
+    G : networkx.Graph
+        Directed or undirected graph.
+
+    Returns
+    -------
+    junction_tree : networkx.Graph
+        The corresponding junction tree of `G`.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        Raised if `G` is an instance of `MultiGraph` or `MultiDiGraph`.
+
+    References
+    ----------
+    .. [1] Junction tree algorithm:
+       https://en.wikipedia.org/wiki/Junction_tree_algorithm
+
+    .. [2] Finn V. Jensen and Frank Jensen. 1994. Optimal
+       junction trees. In Proceedings of the Tenth international
+       conference on Uncertainty in artificial intelligence (UAI’94).
+       Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366.
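+
+    Examples
+    --------
+    A small usage sketch (the node names are illustrative):
+
+    >>> G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
+    >>> jt = junction_tree(G)
+
+    Here ``jt`` contains the clique nodes ``("A", "B", "C")`` and
+    ``("C", "D")``, joined through the sepset node ``("C",)``.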
+ """ + + clique_graph = nx.Graph() + + if G.is_directed(): + G = moral.moral_graph(G) + chordal_graph, _ = complete_to_chordal_graph(G) + + cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)] + clique_graph.add_nodes_from(cliques, type="clique") + + for edge in combinations(cliques, 2): + set_edge_0 = set(edge[0]) + set_edge_1 = set(edge[1]) + if not set_edge_0.isdisjoint(set_edge_1): + sepset = tuple(sorted(set_edge_0.intersection(set_edge_1))) + clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset) + + junction_tree = nx.maximum_spanning_tree(clique_graph) + + for edge in list(junction_tree.edges(data=True)): + junction_tree.add_node(edge[2]["sepset"], type="sepset") + junction_tree.add_edge(edge[0], edge[2]["sepset"]) + junction_tree.add_edge(edge[1], edge[2]["sepset"]) + junction_tree.remove_edge(edge[0], edge[1]) + + return junction_tree diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/mst.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/mst.py new file mode 100644 index 0000000..e2ff7c6 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/mst.py @@ -0,0 +1,1112 @@ +""" +Algorithms for calculating min/max spanning trees/forests. + +""" +from dataclasses import dataclass, field +from enum import Enum +from heapq import heappop, heappush +from itertools import count +from math import isnan +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for, py_random_state + +__all__ = [ + "minimum_spanning_edges", + "maximum_spanning_edges", + "minimum_spanning_tree", + "maximum_spanning_tree", + "random_spanning_tree", + "partition_spanning_tree", + "EdgePartition", + "SpanningTreeIterator", +] + + +class EdgePartition(Enum): + """ + An enum to store the state of an edge partition. The enum is written to the + edges of a graph before being pasted to `kruskal_mst_edges`. Options are: + + - EdgePartition.OPEN + - EdgePartition.INCLUDED + - EdgePartition.EXCLUDED + """ + + OPEN = 0 + INCLUDED = 1 + EXCLUDED = 2 + + +@not_implemented_for("multigraph") +def boruvka_mst_edges( + G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False +): + """Iterate over edges of a Borůvka's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The edges of `G` must have distinct weights, + otherwise the edges may not form a tree. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + This argument is ignored since this function is not + implemented for multigraphs; it exists only for consistency + with the other minimum spanning tree functions. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + """ + # Initialize a forest, assuming initially that it is the discrete + # partition of the nodes of the graph. + forest = UnionFind(G) + + def best_edge(component): + """Returns the optimum (minimum or maximum) edge on the edge + boundary of the given set of nodes. 
+ + A return value of ``None`` indicates an empty boundary. + + """ + sign = 1 if minimum else -1 + minwt = float("inf") + boundary = None + for e in nx.edge_boundary(G, component, data=True): + wt = e[-1].get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {e}" + raise ValueError(msg) + if wt < minwt: + minwt = wt + boundary = e + return boundary + + # Determine the optimum edge in the edge boundary of each component + # in the forest. + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # If each entry was ``None``, that means the graph was disconnected, + # so we are done generating the forest. + while best_edges: + # Determine the optimum edge in the edge boundary of each + # component in the forest. + # + # This must be a sequence, not an iterator. In this list, the + # same edge may appear twice, in different orientations (but + # that's okay, since a union operation will be called on the + # endpoints the first time it is seen, but not the second time). + # + # Any ``None`` indicates that the edge boundary for that + # component was empty, so that part of the forest has been + # completed. + # + # TODO This can be parallelized, both in the outer loop over + # each component in the forest and in the computation of the + # minimum. (Same goes for the identical lines outside the loop.) + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # Join trees in the forest using the best edges, and yield that + # edge, since it is part of the spanning tree. + # + # TODO This loop can be parallelized, to an extent (the union + # operation must be atomic). + for u, v, d in best_edges: + if forest[u] != forest[v]: + if data: + yield u, v, d + else: + yield u, v + forest.union(u, v) + + +def kruskal_mst_edges( + G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None +): + """ + Iterate over edge of a Kruskal's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys ar yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + partition : string (default: None) + The name of the edge attribute holding the partition data, if it exists. + Partition data is written to the edges using the `EdgePartition` enum. + If a partition exists, all included edges and none of the excluded edges + will appear in the final tree. Open edges may or may not be used. + + Yields + ------ + edge tuple + The edges as discovered by Kruskal's method. 
Each edge can + take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)` + depending on the `key` and `data` parameters + """ + subtrees = UnionFind() + if G.is_multigraph(): + edges = G.edges(keys=True, data=True) + else: + edges = G.edges(data=True) + + """ + Sort the edges of the graph with respect to the partition data. + Edges are returned in the following order: + + * Included edges + * Open edges from smallest to largest weight + * Excluded edges + """ + included_edges = [] + open_edges = [] + for e in edges: + d = e[-1] + wt = d.get(weight, 1) + if isnan(wt): + if ignore_nan: + continue + raise ValueError(f"NaN found as an edge weight. Edge {e}") + + edge = (wt,) + e + if d.get(partition) == EdgePartition.INCLUDED: + included_edges.append(edge) + elif d.get(partition) == EdgePartition.EXCLUDED: + continue + else: + open_edges.append(edge) + + if minimum: + sorted_open_edges = sorted(open_edges, key=itemgetter(0)) + else: + sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True) + + # Condense the lists into one + included_edges.extend(sorted_open_edges) + sorted_edges = included_edges + del open_edges, sorted_open_edges, included_edges + + # Multigraphs need to handle edge keys in addition to edge data. + if G.is_multigraph(): + for wt, u, v, k, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + else: + for wt, u, v, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + + +def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False): + """Iterate over edges of Prim's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys ar yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + """ + is_multigraph = G.is_multigraph() + push = heappush + pop = heappop + + nodes = set(G) + c = count() + + sign = 1 if minimum else -1 + + while nodes: + u = nodes.pop() + frontier = [] + visited = {u} + if is_multigraph: + for v, keydict in G.adj[u].items(): + for k, d in keydict.items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, k, d)) + else: + for v, d in G.adj[u].items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. 
Edge {(u, v, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, d)) + while nodes and frontier: + if is_multigraph: + W, _, u, v, k, d = pop(frontier) + else: + W, _, u, v, d = pop(frontier) + if v in visited or v not in nodes: + continue + # Multigraphs need to handle edge keys in addition to edge data. + if is_multigraph and keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + # update frontier + visited.add(v) + nodes.discard(v) + if is_multigraph: + for w, keydict in G.adj[v].items(): + if w in visited: + continue + for k2, d2 in keydict.items(): + new_weight = d2.get(weight, 1) * sign + push(frontier, (new_weight, next(c), v, w, k2, d2)) + else: + for w, d2 in G.adj[v].items(): + if w in visited: + continue + new_weight = d2.get(weight, 1) * sign + push(frontier, (new_weight, next(c), v, w, d2)) + + +ALGORITHMS = { + "boruvka": boruvka_mst_edges, + "borůvka": boruvka_mst_edges, + "kruskal": kruskal_mst_edges, + "prim": prim_mst_edges, +} + + +@not_implemented_for("directed") +def minimum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a minimum spanning forest of an undirected + weighted graph. + + A minimum spanning tree is a subgraph of the graph (a tree) + with the minimum sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. 
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find minimum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Find minimum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@not_implemented_for("directed") +def maximum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a maximum spanning forest of an undirected + weighted graph. + + A maximum spanning tree is a subgraph of the graph (a tree) + with the maximum possible sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. 
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find maximum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.maximum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [1, 2]] + + Find maximum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3 + >>> mst = tree.maximum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a minimum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree or forest. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.minimum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = minimum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +def partition_spanning_tree( + G, minimum=True, weight="weight", partition="partition", ignore_nan=False +): + """ + Find a spanning tree while respecting a partition of edges. + + Edges can be flagged as either `INLCUDED` which are required to be in the + returned tree, `EXCLUDED`, which cannot be in the returned tree and `OPEN`. + + This is used in the SpanningTreeIterator to create new partitions following + the algorithm of Sörensen and Janssens [1]_. + + Parameters + ---------- + G : undirected graph + An undirected graph. 
+ + minimum : bool (default: True) + Determines whether the returned tree is the minimum spanning tree of + the partition of the maximum one. + + weight : str + Data key to use for edge weights. + + partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree using all of the included edges in the graph and + none of the excluded edges. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + edges = kruskal_mst_edges( + G, + minimum, + weight, + keys=True, + data=True, + ignore_nan=ignore_nan, + partition=partition, + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a maximum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + + Returns + ------- + G : NetworkX Graph + A maximum spanning tree or forest. + + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.maximum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = maximum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + edges = list(edges) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@py_random_state(3) +def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None): + """ + Sample a random spanning tree using the edges weights of `G`. + + This function supports two different methods for determining the + probability of the graph. If ``multiplicative=True``, the probability + is based on the product of edge weights, and if ``multiplicative=False`` + it is based on the sum of the edge weight. 
However, since it is + easier to determine the total weight of all spanning trees for the + multiplicative verison, that is significantly faster and should be used if + possible. Additionally, setting `weight` to `None` will cause a spanning tree + to be selected with uniform probability. + + The function uses algorithm A8 in [1]_ . + + Parameters + ---------- + G : nx.Graph + An undirected version of the original graph. + + weight : string + The edge key for the edge attribute holding edge weight. + + multiplicative : bool, default=True + If `True`, the probability of each tree is the product of its edge weight + over the sum of the product of all the spanning trees in the graph. If + `False`, the probability is the sum of its edge weight over the sum of + the sum of weights for all spanning trees in the graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nx.Graph + A spanning tree using the distribution defined by the weight of the tree. + + References + ---------- + .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of + Algorithms, 11 (1990), pp. 185–207 + """ + + def find_node(merged_nodes, node): + """ + We can think of clusters of contracted nodes as having one + representative in the graph. Each node which is not in merged_nodes + is still its own representative. Since a representative can be later + contracted, we need to recursively search though the dict to find + the final representative, but once we know it we can use path + compression to speed up the access of the representative for next time. + + This cannot be replaced by the standard NetworkX union_find since that + data structure will merge nodes with less representing nodes into the + one with more representing nodes but this function requires we merge + them using the order that contract_edges contracts using. + + Parameters + ---------- + merged_nodes : dict + The dict storing the mapping from node to representative + node + The node whose representative we seek + + Returns + ------- + The representative of the `node` + """ + if node not in merged_nodes: + return node + else: + rep = find_node(merged_nodes, merged_nodes[node]) + merged_nodes[node] = rep + return rep + + def prepare_graph(): + """ + For the graph `G`, remove all edges not in the set `V` and then + contract all edges in the set `U`. + + Returns + ------- + A copy of `G` which has had all edges not in `V` removed and all edges + in `U` contracted. + """ + + # The result is a MultiGraph version of G so that parallel edges are + # allowed during edge contraction + result = nx.MultiGraph(incoming_graph_data=G) + + # Remove all edges not in V + edges_to_remove = set(result.edges()).difference(V) + result.remove_edges_from(edges_to_remove) + + # Contract all edges in U + # + # Imagine that you have two edges to contract and they share an + # endpoint like this: + # [0] ----- [1] ----- [2] + # If we contract (0, 1) first, the contraction function will always + # delete the second node it is passed so the resulting graph would be + # [0] ----- [2] + # and edge (1, 2) no longer exists but (0, 2) would need to be contracted + # in its place now. That is why I use the below dict as a merge-find + # data structure with path compression to track how the nodes are merged. 
+        merged_nodes = {}
+
+        for u, v in U:
+            u_rep = find_node(merged_nodes, u)
+            v_rep = find_node(merged_nodes, v)
+            # We cannot contract a node with itself
+            if u_rep == v_rep:
+                continue
+            nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False)
+            merged_nodes[v_rep] = u_rep
+
+        return merged_nodes, result
+
+    def spanning_tree_total_weight(G, weight):
+        """
+        Find the sum of weights of the spanning trees of `G` using the
+        appropriate `method`.
+
+        This is easy if the chosen method is 'multiplicative', since we can
+        use Kirchhoff's Tree Matrix Theorem directly. However, with the
+        'additive' method, this process is slightly more complex and less
+        computationally efficient as we have to find the number of spanning
+        trees which contain each possible edge in the graph.
+
+        Parameters
+        ----------
+        G : NetworkX Graph
+            The graph to find the total weight of all spanning trees on.
+
+        weight : string
+            The key for the weight edge attribute of the graph.
+
+        Returns
+        -------
+        float
+            The sum of either the multiplicative or additive weight for all
+            spanning trees in the graph.
+        """
+        if multiplicative:
+            return nx.total_spanning_tree_weight(G, weight)
+        else:
+            # There are two cases for the total spanning tree additive weight.
+            # 1. There is one edge in the graph. Then the only spanning tree is
+            #    that edge itself, which will have a total weight of that edge
+            #    itself.
+            if G.number_of_edges() == 1:
+                return G.edges(data=weight).__iter__().__next__()[2]
+            # 2. There is more than one edge in the graph. Then, we can find the
+            #    total weight of the spanning trees using the formula in the
+            #    reference paper: take the weight of that edge and multiply it by
+            #    the number of spanning trees which have to include that edge. This
+            #    can be accomplished by contracting the edge and finding the
+            #    multiplicative total spanning tree weight if the weight of each
+            #    edge is assumed to be 1, which is conveniently built into networkx
+            #    already, by calling total_spanning_tree_weight with weight=None.
+            else:
+                total = 0
+                for u, v, w in G.edges(data=weight):
+                    total += w * nx.total_spanning_tree_weight(
+                        nx.contracted_edge(G, edge=(u, v), self_loops=False), None
+                    )
+                return total
+
+    U = set()
+    st_cached_value = 0
+    V = set(G.edges())
+    shuffled_edges = list(G.edges())
+    seed.shuffle(shuffled_edges)
+
+    for u, v in shuffled_edges:
+        e_weight = G[u][v][weight] if weight is not None else 1
+        node_map, prepared_G = prepare_graph()
+        G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight)
+        # Add the edge to U so that we can compute the total tree weight
+        # assuming we include that edge
+        # Now, if (u, v) cannot exist in G because it is fully contracted out
+        # of existence, then it by definition cannot influence G_e's Kirchhoff
+        # value. But, we also cannot pick it.
+        rep_edge = (find_node(node_map, u), find_node(node_map, v))
+        # Check to see if the 'representative edge' for the current edge is
+        # in prepared_G. If so, then we can pick it.
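+        # (Added note: in the multiplicative case, the `threshold` computed
+        # below is the fraction of the total spanning tree weight carried by
+        # trees that contain this edge, so drawing z uniformly from [0, 1]
+        # keeps the edge with exactly that probability.)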
+ if rep_edge in prepared_G.edges: + prepared_G_e = nx.contracted_edge( + prepared_G, edge=rep_edge, self_loops=False + ) + G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight) + if multiplicative: + threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight + else: + numerator = ( + st_cached_value + e_weight + ) * nx.total_spanning_tree_weight(prepared_G_e) + G_e_total_tree_weight + denominator = ( + st_cached_value * nx.total_spanning_tree_weight(prepared_G) + + G_total_tree_weight + ) + threshold = numerator / denominator + else: + threshold = 0.0 + z = seed.uniform(0.0, 1.0) + if z > threshold: + # Remove the edge from V since we did not pick it. + V.remove((u, v)) + else: + # Add the edge to U since we picked it. + st_cached_value += e_weight + U.add((u, v)) + # If we decide to keep an edge, it may complete the spanning tree. + if len(U) == G.number_of_nodes() - 1: + spanning_tree = nx.Graph() + spanning_tree.add_edges_from(U) + return spanning_tree + raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!") + + +class SpanningTreeIterator: + """ + Iterate over all spanning trees of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges) as well as a modified Kruskal's Algorithm + to generate minimum spanning trees which respect the partition of edges. + For spanning trees with the same weight, ties are broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning tree of the partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return SpanningTreeIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, ignore_nan=False): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.Graph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + ignore_nan : bool, default = False + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. 
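+
+        Examples
+        --------
+        A minimal usage sketch (the 4-cycle is an illustrative choice); the
+        4-cycle has exactly four spanning trees, and since all weights are
+        equal here, ties are broken arbitrarily:
+
+        >>> trees = list(SpanningTreeIterator(nx.cycle_graph(4)))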
+ """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.ignore_nan = ignore_nan + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "SpanningTreeIterators super secret partition attribute name" + ) + + def __iter__(self): + """ + Returns + ------- + SpanningTreeIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + mst_weight = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition(mst_weight if self.minimum else -mst_weight, dict()) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_tree = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ) + self._partition(partition, next_tree) + + self._clear_partition(next_tree) + return next_tree + + def _partition(self, partition, partition_tree): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_tree : nx.Graph + The minimum spanning tree of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_tree.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = EdgePartition.EXCLUDED + p2.partition_dict[e] = EdgePartition.INCLUDED + + self._write_partition(p1) + p1_mst = partition_spanning_tree( + self.G, + self.minimum, + self.weight, + self.partition_key, + self.ignore_nan, + ) + p1_mst_weight = p1_mst.size(weight=self.weight) + if nx.is_connected(p1_mst): + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. 
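+
+        Edges marked ``EdgePartition.INCLUDED`` are forced into the spanning
+        tree, ``EdgePartition.EXCLUDED`` edges are forbidden, and edges left
+        ``EdgePartition.OPEN`` may be chosen freely by the partition-aware
+        Kruskal routine.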
+ """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = EdgePartition.OPEN + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/operations.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/operations.py new file mode 100644 index 0000000..da3adda --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/operations.py @@ -0,0 +1,106 @@ +"""Operations on trees.""" +from functools import partial +from itertools import accumulate, chain + +import networkx as nx + +__all__ = ["join"] + + +def join(rooted_trees, label_attribute=None): + """Returns a new rooted tree with a root node joined with the roots + of each of the given rooted trees. + + Parameters + ---------- + rooted_trees : list + A list of pairs in which each left element is a NetworkX graph + object representing a tree and each right element is the root + node of that tree. The nodes of these trees will be relabeled to + integers. + + label_attribute : str + If provided, the old node labels will be stored in the new tree + under this node attribute. If not provided, the node attribute + ``'_old'`` will store the original label of the node in the + rooted trees given in the input. + + Returns + ------- + NetworkX graph + The rooted tree whose subtrees are the given rooted trees. The + new root node is labeled 0. Each non-root node has an attribute, + as described under the keyword argument ``label_attribute``, + that indicates the label of the original node in the input tree. + + Notes + ----- + Graph, edge, and node attributes are propagated from the given + rooted trees to the created tree. If there are any overlapping graph + attributes, those from later trees will overwrite those from earlier + trees in the tuple of positional arguments. + + Examples + -------- + Join two full balanced binary trees of height *h* to get a full + balanced binary tree of depth *h* + 1:: + + >>> h = 4 + >>> left = nx.balanced_tree(2, h) + >>> right = nx.balanced_tree(2, h) + >>> joined_tree = nx.join([(left, 0), (right, 0)]) + >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1)) + True + + """ + if len(rooted_trees) == 0: + return nx.empty_graph(1) + + # Unzip the zipped list of (tree, root) pairs. + trees, roots = zip(*rooted_trees) + + # The join of the trees has the same type as the type of the first + # tree. + R = type(trees[0])() + + # Relabel the nodes so that their union is the integers starting at 1. + if label_attribute is None: + label_attribute = "_old" + relabel = partial( + nx.convert_node_labels_to_integers, label_attribute=label_attribute + ) + lengths = (len(tree) for tree in trees[:-1]) + first_labels = chain([0], accumulate(lengths)) + trees = [ + relabel(tree, first_label=first_label + 1) + for tree, first_label in zip(trees, first_labels) + ] + + # Get the relabeled roots. + roots = [ + next(v for v, d in tree.nodes(data=True) if d.get("_old") == root) + for tree, root in zip(trees, roots) + ] + + # Remove the old node labels. + for tree in trees: + for v in tree: + tree.nodes[v].pop("_old") + + # Add all sets of nodes and edges, with data. 
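+    # Because each subtree was relabeled to start at 1 plus the combined size
+    # of the earlier subtrees, the node sets gathered below are pairwise
+    # disjoint and the label 0 stays free for the new root added at the end.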
+ nodes = (tree.nodes(data=True) for tree in trees) + edges = (tree.edges(data=True) for tree in trees) + R.add_nodes_from(chain.from_iterable(nodes)) + R.add_edges_from(chain.from_iterable(edges)) + + # Add graph attributes; later attributes take precedent over earlier + # attributes. + for tree in trees: + R.graph.update(tree.graph) + + # Finally, join the subtrees at the root. We know 0 is unused by the + # way we relabeled the subtrees. + R.add_node(0) + R.add_edges_from((0, root) for root in roots) + + return R diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/recognition.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/recognition.py new file mode 100644 index 0000000..8fdeb76 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/recognition.py @@ -0,0 +1,269 @@ +""" +Recognition Tests +================= + +A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest. +Depending on the subfield, there are various conventions for generalizing these +definitions to directed graphs. + +In one convention, directed variants of forest and tree are defined in an +identical manner, except that the direction of the edges is ignored. In effect, +each directed edge is treated as a single undirected edge. Then, additional +restrictions are imposed to define *branchings* and *arborescences*. + +In another convention, directed variants of forest and tree correspond to +the previous convention's branchings and arborescences, respectively. Then two +new terms, *polyforest* and *polytree*, are defined to correspond to the other +convention's forest and tree. + +Summarizing:: + + +-----------------------------+ + | Convention A | Convention B | + +=============================+ + | forest | polyforest | + | tree | polytree | + | branching | forest | + | arborescence | tree | + +-----------------------------+ + +Each convention has its reasons. The first convention emphasizes definitional +similarity in that directed forests and trees are only concerned with +acyclicity and do not have an in-degree constraint, just as their undirected +counterparts do not. The second convention emphasizes functional similarity +in the sense that the directed analog of a spanning tree is a spanning +arborescence. That is, take any spanning tree and choose one node as the root. +Then every edge is assigned a direction such there is a directed path from the +root to every other node. The result is a spanning arborescence. + +NetworkX follows convention "A". Explicitly, these are: + +undirected forest + An undirected graph with no undirected cycles. + +undirected tree + A connected, undirected forest. + +directed forest + A directed graph with no undirected cycles. Equivalently, the underlying + graph structure (which ignores edge orientations) is an undirected forest. + In convention B, this is known as a polyforest. + +directed tree + A weakly connected, directed forest. Equivalently, the underlying graph + structure (which ignores edge orientations) is an undirected tree. In + convention B, this is known as a polytree. + +branching + A directed forest with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a forest. + +arborescence + A directed tree with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a tree. 
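+
+A minimal illustration of the convention "A" terms, using the functions
+defined in this module::
+
+    >>> import networkx as nx
+    >>> G = nx.DiGraph([(0, 1), (1, 2)])  # a directed path
+    >>> nx.is_tree(G), nx.is_arborescence(G)
+    (True, True)
+    >>> G.add_edge(3, 2)  # node 2 now has two parents
+    >>> nx.is_tree(G), nx.is_branching(G)
+    (True, False)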
+ +For trees and arborescences, the adjective "spanning" may be added to designate +that the graph, when considered as a forest/branching, consists of a single +tree/arborescence that includes all nodes in the graph. It is true, by +definition, that every tree/arborescence is spanning with respect to the nodes +that define the tree/arborescence and so, it might seem redundant to introduce +the notion of "spanning". However, the nodes may represent a subset of +nodes from a larger graph, and it is in this context that the term "spanning" +becomes a useful notion. + +""" + +import networkx as nx + +__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"] + + +@nx.utils.not_implemented_for("undirected") +def is_arborescence(G): + """ + Returns True if `G` is an arborescence. + + An arborescence is a directed tree with maximum in-degree equal to 1. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is an arborescence. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)]) + >>> nx.is_arborescence(G) + True + >>> G.remove_edge(0, 1) + >>> G.add_edge(1, 2) # maximum in-degree is 2 + >>> nx.is_arborescence(G) + False + + Notes + ----- + In another convention, an arborescence is known as a *tree*. + + See Also + -------- + is_tree + + """ + return is_tree(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx.utils.not_implemented_for("undirected") +def is_branching(G): + """ + Returns True if `G` is a branching. + + A branching is a directed forest with maximum in-degree equal to 1. + + Parameters + ---------- + G : directed graph + The directed graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a branching. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> nx.is_branching(G) + True + >>> G.remove_edge(2, 3) + >>> G.add_edge(3, 1) # maximum in-degree is 2 + >>> nx.is_branching(G) + False + + Notes + ----- + In another convention, a branching is also known as a *forest*. + + See Also + -------- + is_forest + + """ + return is_forest(G) and max(d for n, d in G.in_degree()) <= 1 + + +def is_forest(G): + """ + Returns True if `G` is a forest. + + A forest is a graph with no undirected cycles. + + For directed graphs, `G` is a forest if the underlying graph is a forest. + The underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a forest. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_forest(G) + True + >>> G.add_edge(4, 1) + >>> nx.is_forest(G) + False + + Notes + ----- + In another convention, a directed forest is known as a *polyforest* and + then *forest* corresponds to a *branching*. + + See Also + -------- + is_branching + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + components = (G.subgraph(c) for c in nx.weakly_connected_components(G)) + else: + components = (G.subgraph(c) for c in nx.connected_components(G)) + + return all(len(c) - 1 == c.number_of_edges() for c in components) + + +def is_tree(G): + """ + Returns True if `G` is a tree. + + A tree is a connected graph with no undirected cycles. 
+ + For directed graphs, `G` is a tree if the underlying graph is a tree. The + underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a tree. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_tree(G) # n-1 edges + True + >>> G.add_edge(3, 4) + >>> nx.is_tree(G) # n edges + False + + Notes + ----- + In another convention, a directed tree is known as a *polytree* and then + *tree* corresponds to an *arborescence*. + + See Also + -------- + is_arborescence + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + + # A connected graph with no cycles has n-1 edges. + return len(G) - 1 == G.number_of_edges() and is_connected(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_branchings.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_branchings.py new file mode 100644 index 0000000..3417446 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_branchings.py @@ -0,0 +1,556 @@ +import math + +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx +from networkx.algorithms.tree import branchings, recognition + +# +# Explicitly discussed examples from Edmonds paper. +# + +# Used in Figures A-F. +# +# fmt: off +G_array = np.array([ + # 0 1 2 3 4 5 6 7 8 + [0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 + [4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 + [0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 + [5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 + [0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 + [0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 + [0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 + [0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 + [0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 +], dtype=int) + + +# fmt: on + + +def G1(): + G = nx.from_numpy_array(G_array, create_using=nx.MultiDiGraph) + return G + + +def G2(): + # Now we shift all the weights by -10. + # Should not affect optimal arborescence, but does affect optimal branching. + Garr = G_array.copy() + Garr[np.nonzero(Garr)] -= 10 + G = nx.from_numpy_array(Garr, create_using=nx.MultiDiGraph) + return G + + +# An optimal branching for G1 that is also a spanning arborescence. So it is +# also an optimal spanning arborescence. +# +optimal_arborescence_1 = [ + (0, 2, 12), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 4, 17), + (3, 6, 18), + (6, 8, 15), + (8, 7, 18), +] + +# For G2, the optimal branching of G1 (with shifted weights) is no longer +# an optimal branching, but it is still an optimal spanning arborescence +# (just with shifted weights). An optimal branching for G2 is similar to what +# appears in figure G (this is greedy_subopt_branching_1a below), but with the +# edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching +# is not a spanning arborescence. The code finds optimal_branching_2a. +# An alternative and equivalent branching is optimal_branching_2b. We would +# need to modify the code to iterate through all equivalent optimal branchings. 
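+# (A constant shift leaves the optimal spanning arborescence unchanged because
+# every spanning arborescence uses exactly n - 1 edges, so all totals move by
+# the same amount; a branching may use any number of edges, so edges whose
+# shifted weight becomes negative are simply dropped from the optimal
+# branching.)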
+# +# These are maximal branchings or arborescences. +optimal_branching_2a = [ + (5, 6, 4), + (6, 2, 11), + (6, 8, 5), + (8, 7, 8), + (2, 1, 7), + (2, 3, 11), + (3, 4, 7), +] +optimal_branching_2b = [ + (8, 7, 8), + (7, 3, 9), + (3, 4, 7), + (3, 6, 8), + (6, 2, 11), + (2, 1, 7), + (1, 5, 3), +] +optimal_arborescence_2 = [ + (0, 2, 2), + (2, 1, 7), + (2, 3, 11), + (1, 5, 3), + (3, 4, 7), + (3, 6, 8), + (6, 8, 5), + (8, 7, 8), +] + +# Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. +# 1a matches what is shown in Figure G in Edmonds's paper. +greedy_subopt_branching_1a = [ + (5, 6, 14), + (6, 2, 21), + (6, 8, 15), + (8, 7, 18), + (2, 1, 17), + (2, 3, 21), + (3, 0, 5), + (3, 4, 17), +] +greedy_subopt_branching_1b = [ + (8, 7, 18), + (7, 6, 15), + (6, 2, 21), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 0, 5), + (3, 4, 17), +] + + +def build_branching(edges): + G = nx.DiGraph() + for u, v, weight in edges: + G.add_edge(u, v, weight=weight) + return G + + +def sorted_edges(G, attr="weight", default=1): + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + edges = sorted(edges, key=lambda x: (x[2], x[1], x[0])) + return edges + + +def assert_equal_branchings(G1, G2, attr="weight", default=1): + edges1 = list(G1.edges(data=True)) + edges2 = list(G2.edges(data=True)) + assert len(edges1) == len(edges2) + + # Grab the weights only. + e1 = sorted_edges(G1, attr, default) + e2 = sorted_edges(G2, attr, default) + + for a, b in zip(e1, e2): + assert a[:2] == b[:2] + np.testing.assert_almost_equal(a[2], b[2]) + + +################ + + +def test_optimal_branching1(): + G = build_branching(optimal_arborescence_1) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 131 + + +def test_optimal_branching2a(): + G = build_branching(optimal_branching_2a) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 53 + + +def test_optimal_branching2b(): + G = build_branching(optimal_branching_2b) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 53 + + +def test_optimal_arborescence2(): + G = build_branching(optimal_arborescence_2) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 51 + + +def test_greedy_suboptimal_branching1a(): + G = build_branching(greedy_subopt_branching_1a) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 128 + + +def test_greedy_suboptimal_branching1b(): + G = build_branching(greedy_subopt_branching_1b) + assert recognition.is_arborescence(G), True + assert branchings.branching_weight(G) == 127 + + +def test_greedy_max1(): + # Standard test. + # + G = G1() + B = branchings.greedy_branching(G) + # There are only two possible greedy branchings. The sorting is such + # that it should equal the second suboptimal branching: 1b. + B_ = build_branching(greedy_subopt_branching_1b) + assert_equal_branchings(B, B_) + + +def test_greedy_max2(): + # Different default weight. + # + G = G1() + del G[1][0][0]["weight"] + B = branchings.greedy_branching(G, default=6) + # Chosen so that edge (3,0,5) is not selected and (1,0,6) is instead. + + edges = [ + (1, 0, 6), + (1, 5, 13), + (7, 6, 15), + (2, 1, 17), + (3, 4, 17), + (8, 7, 18), + (2, 3, 21), + (6, 2, 21), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_greedy_max3(): + # All equal weights. 
+ # + G = G1() + B = branchings.greedy_branching(G, attr=None) + + # This is mostly arbitrary...the output was generated by running the algo. + edges = [ + (2, 1, 1), + (3, 0, 1), + (3, 4, 1), + (5, 8, 1), + (6, 2, 1), + (7, 3, 1), + (7, 6, 1), + (8, 7, 1), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_, default=1) + + +def test_greedy_min(): + G = G1() + B = branchings.greedy_branching(G, kind="min") + + edges = [ + (1, 0, 4), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (7, 3, 19), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_edmonds1_maxbranch(): + G = G1() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_maxarbor(): + G = G1() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxbranch(): + G = G2() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_branching_2a) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxarbor(): + G = G2() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_2) + assert_equal_branchings(x, x_) + + +def test_edmonds2_minarbor(): + G = G1() + x = branchings.minimum_spanning_arborescence(G) + # This was obtained from algorithm. Need to verify it independently. + # Branch weight is: 96 + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch1(): + G = G1() + x = branchings.minimum_branching(G) + edges = [] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch2(): + G = G1() + G.add_edge(8, 9, weight=-10) + x = branchings.minimum_branching(G) + edges = [(8, 9, -10)] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +# Need more tests + + +def test_mst(): + # Make sure we get the same results for undirected graphs. + # Example from: https://en.wikipedia.org/wiki/Kruskal's_algorithm + G = nx.Graph() + edgelist = [ + (0, 3, [("weight", 5)]), + (0, 1, [("weight", 7)]), + (1, 3, [("weight", 9)]), + (1, 2, [("weight", 8)]), + (1, 4, [("weight", 7)]), + (3, 4, [("weight", 15)]), + (3, 5, [("weight", 6)]), + (2, 4, [("weight", 5)]), + (4, 5, [("weight", 8)]), + (4, 6, [("weight", 9)]), + (5, 6, [("weight", 11)]), + ] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + edges = [ + ({0, 1}, 7), + ({0, 3}, 5), + ({3, 5}, 6), + ({1, 4}, 7), + ({4, 2}, 5), + ({4, 6}, 9), + ] + + assert x.number_of_edges() == len(edges) + for u, v, d in x.edges(data=True): + assert ({u, v}, d["weight"]) in edges + + +def test_mixed_nodetypes(): + # Smoke test to make sure no TypeError is raised for mixed node types. + G = nx.Graph() + edgelist = [(0, 3, [("weight", 5)]), (0, "1", [("weight", 5)])] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + +def test_edmonds1_minbranch(): + # Using -G_array and min should give the same as optimal_arborescence_1, + # but with all edges negative. + edges = [(u, v, -w) for (u, v, w) in optimal_arborescence_1] + + G = nx.from_numpy_array(-G_array, create_using=nx.DiGraph) + + # Quickly make sure max branching is empty. 
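+    # (With every edge weight negative, the empty branching has weight 0 and
+    # beats any branching that uses an edge, so the maximum branching should
+    # contain no edges at all.)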
+ x = branchings.maximum_branching(G) + x_ = build_branching([]) + assert_equal_branchings(x, x_) + + # Now test the min branching. + x = branchings.minimum_branching(G) + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edge_attribute_preservation_normal_graph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for normal graphs. + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + ed = branchings.Edmonds(G) + B = ed.find_optimum("weight", preserve_attrs=True, seed=1) + + assert B[0][1]["otherattr"] == 1 + assert B[0][1]["otherattr2"] == 3 + + +def test_edge_attribute_preservation_multigraph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for multigraphs. + G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + + ed = branchings.Edmonds(G) + B = ed.find_optimum("weight", preserve_attrs=True) + + assert B[0][1][0]["otherattr"] == 1 + assert B[0][1][0]["otherattr2"] == 3 + + +def test_edge_attribute_discard(): + # Test that edge attributes are discarded if we do not specify to keep them + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + ed = branchings.Edmonds(G) + B = ed.find_optimum("weight", preserve_attrs=False) + + edge_dict = B[0][1] + with pytest.raises(KeyError): + _ = edge_dict["otherattr"] + + +def test_partition_spanning_arborescence(): + """ + Test that we can generate minimum spanning arborescences which respect the + given partition. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + G[3][0]["partition"] = nx.EdgePartition.EXCLUDED + G[2][3]["partition"] = nx.EdgePartition.INCLUDED + G[7][3]["partition"] = nx.EdgePartition.EXCLUDED + G[0][2]["partition"] = nx.EdgePartition.EXCLUDED + G[6][2]["partition"] = nx.EdgePartition.INCLUDED + + actual_edges = [ + (0, 4, 12), + (1, 0, 4), + (1, 5, 13), + (2, 3, 21), + (4, 7, 12), + (5, 6, 14), + (5, 8, 12), + (6, 2, 21), + ] + + B = branchings.minimum_spanning_arborescence(G, partition="partition") + assert_equal_branchings(build_branching(actual_edges), B) + + +def test_arborescence_iterator_min(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arboresecences in this graph. 
+ This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arboresecences + * The weight of the arborescences is non-strictly increasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator(G): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_max(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arboresecences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arboresecences + * The weight of the arborescences is non-strictly decreasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = math.inf + for B in branchings.ArborescenceIterator(G, minimum=False): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight <= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_initial_partition(): + """ + Tests the arborescence iterator with three included edges and three excluded + in the initial partition. + + A brute force method similar to the one used in the above tests found that + there are 16 arborescences which contain the included edges and not the + excluded edges. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + included_edges = [(1, 0), (5, 6), (8, 7)] + excluded_edges = [(0, 2), (3, 6), (1, 5)] + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator( + G, init_partition=(included_edges, excluded_edges) + ): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + for e in included_edges: + assert e in B.edges + for e in excluded_edges: + assert e not in B.edges + assert arborescence_count == 16 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_coding.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_coding.py new file mode 100644 index 0000000..c695fea --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_coding.py @@ -0,0 +1,113 @@ +"""Unit tests for the :mod:`~networkx.algorithms.tree.coding` module.""" +from itertools import product + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPruferSequence: + """Unit tests for the Prüfer sequence encoding and decoding + functions. 
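+
+    A labeled tree on ``n`` nodes corresponds to a Prüfer sequence of length
+    ``n - 2`` over its node labels, which is why the 6-node Wikipedia tree
+    used below encodes to the length-4 sequence ``[3, 3, 3, 4]``.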
+ + """ + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_prufer_sequence(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.null_graph()) + + def test_trivial_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.trivial_graph()) + + def test_bad_integer_labels(self): + with pytest.raises(KeyError): + T = nx.Graph(nx.utils.pairwise("abc")) + nx.to_prufer_sequence(T) + + def test_encoding(self): + """Tests for encoding a tree as a Prüfer sequence using the + iterative strategy. + + """ + # Example from Wikipedia. + tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]) + sequence = nx.to_prufer_sequence(tree) + assert sequence == [3, 3, 3, 4] + + def test_decoding(self): + """Tests for decoding a tree from a Prüfer sequence.""" + # Example from Wikipedia. + sequence = [3, 3, 3, 4] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(6))) + edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + assert edges_equal(list(tree.edges()), edges) + + def test_decoding2(self): + # Example from "An Optimal Algorithm for Prufer Codes". + sequence = [2, 4, 0, 1, 3, 3] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(8))) + edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)] + assert edges_equal(list(tree.edges()), edges) + + def test_inverse(self): + """Tests that the encoding and decoding functions are inverses.""" + for T in nx.nonisomorphic_trees(4): + T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T)) + assert nodes_equal(list(T), list(T2)) + assert edges_equal(list(T.edges()), list(T2.edges())) + + for seq in product(range(4), repeat=2): + seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq)) + assert list(seq) == seq2 + + +class TestNestedTuple: + """Unit tests for the nested tuple encoding and decoding functions.""" + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_nested_tuple(G, 0) + + def test_unknown_root(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.to_nested_tuple(G, "bogus") + + def test_encoding(self): + T = nx.full_rary_tree(2, 2**3 - 1) + expected = (((), ()), ((), ())) + actual = nx.to_nested_tuple(T, 0) + assert nodes_equal(expected, actual) + + def test_canonical_form(self): + T = nx.Graph() + T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + T.add_edges_from([(1, 4), (1, 5)]) + T.add_edges_from([(3, 6), (3, 7)]) + root = 0 + actual = nx.to_nested_tuple(T, root, canonical_form=True) + expected = ((), ((), ()), ((), ())) + assert actual == expected + + def test_decoding(self): + balanced = (((), ()), ((), ())) + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.from_nested_tuple(balanced) + assert nx.is_isomorphic(expected, actual) + + def test_sensible_relabeling(self): + balanced = (((), ()), ((), ())) + T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + assert nodes_equal(list(T), list(range(2**3 - 1))) + assert edges_equal(list(T.edges()), edges) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_decomposition.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_decomposition.py new file mode 100644 index 0000000..8c37605 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_decomposition.py @@ -0,0 +1,79 @@ +import 
networkx as nx +from networkx.algorithms.tree.decomposition import junction_tree + + +def test_junction_tree_directed_confounders(): + B = nx.DiGraph() + B.add_edges_from([("A", "C"), ("B", "C"), ("C", "D"), ("C", "E")]) + + G = junction_tree(B) + J = nx.Graph() + J.add_edges_from( + [ + (("C", "E"), ("C",)), + (("C",), ("A", "B", "C")), + (("A", "B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_nodes(): + B = nx.DiGraph() + B.add_nodes_from([("A", "B", "C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B", "C", "D")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_cascade(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("B", "C"), ("C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "B"), ("B",)), + (("B",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_edges(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("C", "D"), ("E", "F")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B"), ("C", "D"), ("E", "F")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_undirected(): + B = nx.Graph() + B.add_edges_from([("A", "C"), ("A", "D"), ("B", "C"), ("C", "E")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "D"), ("A",)), + (("A",), ("A", "C")), + (("A", "C"), ("C",)), + (("C",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "E")), + ] + ) + + assert nx.is_isomorphic(G, J) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_mst.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_mst.py new file mode 100644 index 0000000..7daf0fc --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_mst.py @@ -0,0 +1,622 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.mst` module.""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_unknown_algorithm(): + with pytest.raises(ValueError): + nx.minimum_spanning_tree(nx.Graph(), algorithm="random") + + +class MinimumSpanningTreeTestBase: + """Base class for test classes for minimum spanning tree algorithms. + This class contains some common tests that will be inherited by + subclasses. Each subclass must have a class attribute + :data:`algorithm` that is a string representing the algorithm to + run, as described under the ``algorithm`` keyword argument for the + :func:`networkx.minimum_spanning_edges` function. Subclasses can + then implement any algorithm-specific tests. + """ + + def setup_method(self, method): + """Creates an example graph and stores the expected minimum and + maximum spanning tree edges. + """ + # This stores the class attribute `algorithm` in an instance attribute. 
+ self.algo = self.algorithm + # This example graph comes from Wikipedia: + # https://en.wikipedia.org/wiki/Kruskal's_algorithm + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + self.minimum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (0, 3, {"weight": 5}), + (1, 4, {"weight": 7}), + (2, 4, {"weight": 5}), + (3, 5, {"weight": 6}), + (4, 6, {"weight": 9}), + ] + self.maximum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (1, 2, {"weight": 8}), + (1, 3, {"weight": 9}), + (3, 4, {"weight": 15}), + (4, 6, {"weight": 9}), + (5, 6, {"weight": 11}), + ] + + def test_minimum_edges(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_edges(self): + edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_without_data(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + + def test_nan_weights(self): + # Edge weights NaN never appear in the spanning tree. 
see #2164 + G = self.G + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + # Now test for raising exception + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_order(self): + # now try again with a nan edge at the beginning of G.nodes + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_edge(0, 7, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_isolated_node(self): + # now try again with an isolated node + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_node(0) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_minimum_tree(self): + T = nx.minimum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_tree(self): + T = nx.maximum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_disconnected(self): + G = nx.Graph([(0, 1, dict(weight=1)), (2, 3, dict(weight=2))]) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(list(T), list(range(4))) + assert edges_equal(list(T.edges()), [(0, 1), (2, 3)]) + + def test_empty_graph(self): + G = nx.empty_graph(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(sorted(T), list(range(3))) + assert T.number_of_edges() == 0 + + def test_attributes(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1, color="red", distance=7) + G.add_edge(2, 3, weight=1, color="green", distance=2) + G.add_edge(1, 3, weight=10, color="blue", distance=1) + G.graph["foo"] = "bar" + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert T.graph == G.graph + assert nodes_equal(T, G) + for u, v in T.edges(): + assert T.adj[u][v] == G.adj[u][v] + + def test_weight_attribute(self): + G = nx.Graph() + G.add_edge(0, 1, weight=1, distance=7) + G.add_edge(0, 2, weight=30, distance=1) + G.add_edge(1, 2, weight=1, distance=1) + G.add_node(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)]) + T = 
nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)]) + + +class TestBoruvka(MinimumSpanningTreeTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Borůvka's algorithm. + """ + + algorithm = "boruvka" + + def test_unicode_name(self): + """Tests that using a Unicode string can correctly indicate + Borůvka's algorithm. + """ + edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka") + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + +class MultigraphMSTTestBase(MinimumSpanningTreeTestBase): + # Abstract class + + def test_multigraph_keys_min(self): + """Tests that the minimum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + min_edges = nx.minimum_spanning_edges + mst_edges = min_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "b")], list(mst_edges)) + + def test_multigraph_keys_max(self): + """Tests that the maximum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + max_edges = nx.maximum_spanning_edges + mst_edges = max_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "a")], list(mst_edges)) + + +class TestKruskal(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Kruskal's algorithm. + """ + + algorithm = "kruskal" + + +class TestPrim(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Prim's algorithm. 
+ """ + + algorithm = "prim" + + def test_multigraph_keys_tree(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 1)], list(T.edges(data="weight"))) + + def test_multigraph_keys_tree_max(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.maximum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 2)], list(T.edges(data="weight"))) + + +class TestSpanningTreeIterator: + """ + Tests the spanning tree iterator on the example graph in the 2005 Sörensen + and Janssens paper An Algorithm to Generate all Spanning Trees of a Graph in + Order of Increasing Cost + """ + + def setup(self): + # Original Graph + edges = [(0, 1, 5), (1, 2, 4), (1, 4, 6), (2, 3, 5), (2, 4, 7), (3, 4, 3)] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + # List of lists of spanning trees in increasing order + self.spanning_trees = [ + # 1, MST, cost = 17 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 2, cost = 18 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (3, 4, {"weight": 3}), + ], + # 3, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 4, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 5, cost = 20 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + ], + # 6, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 7, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + # 8, cost = 23 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + ] + + def test_minimum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index += 1 + + def test_maximum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in decreasing order + """ + tree_index = 7 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index -= 1 + + +def test_random_spanning_tree_multiplicative_small(): + """ + Using a fixed seed, sample one tree for repeatability. 
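+
+    Each edge carries the weight ``exp(gamma[e])``, so a tree's multiplicative
+    weight is ``exp`` of the sum of ``gamma`` over its edges, and trees are
+    intended to be sampled with probability proportional to that quantity.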
+ """ + from math import exp + + pytest.importorskip("scipy") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_multiplicative_large(): + """ + Sample many trees from the distribution created in the last test + """ + from math import exp + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + # Find the multiplicative weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 1 + for u, v, d in t.edges(data="lambda_key"): + weight *= d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.15. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 1200 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. 
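+    # scipy.stats.chisquare compares the 75 observed counts against the
+    # expected counts (74 degrees of freedom with the default ddof); a large
+    # p-value means the observed sample is consistent with H_0.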
+ _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_additive_small(): + """ + Sample a single spanning tree from the additive method. + """ + pytest.importorskip("scipy") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree( + G, weight="weight", multiplicative=False, seed=37 + ) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_additive_large(): + """ + Sample many spanning trees from the additive method. + """ + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + # Find the additive weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 0 + for u, v, d in t.edges(data="weight"): + weight += d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.07. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 500 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree( + G, "weight", multiplicative=False, seed=rng + ) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. 
+ # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. + _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_operations.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_operations.py new file mode 100644 index 0000000..a2c5827 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_operations.py @@ -0,0 +1,37 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.operations` module. + +""" + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestJoin: + """Unit tests for the :func:`networkx.tree.join` function.""" + + def test_empty_sequence(self): + """Tests that joining the empty sequence results in the tree + with one node. + + """ + T = nx.join([]) + assert len(T) == 1 + assert T.number_of_edges() == 0 + + def test_single(self): + """Tests that joining just one tree yields a tree with one more + node. + + """ + T = nx.empty_graph(1) + actual = nx.join([(T, 0)]) + expected = nx.path_graph(2) + assert nodes_equal(list(expected), list(actual)) + assert edges_equal(list(expected.edges()), list(actual.edges())) + + def test_basic(self): + """Tests for joining multiple subtrees at a root node.""" + trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] + actual = nx.join(trees) + expected = nx.full_rary_tree(2, 2**3 - 1) + assert nx.is_isomorphic(actual, expected) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_recognition.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_recognition.py new file mode 100644 index 0000000..d9c4943 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/tree/tests/test_recognition.py @@ -0,0 +1,164 @@ +import pytest + +import networkx as nx + + +class TestTreeRecognition: + + graph = nx.Graph + multigraph = nx.MultiGraph + + @classmethod + def setup_class(cls): + + cls.T1 = cls.graph() + + cls.T2 = cls.graph() + cls.T2.add_node(1) + + cls.T3 = cls.graph() + cls.T3.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T3.add_edges_from(edges) + + cls.T5 = cls.multigraph() + cls.T5.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T5.add_edges_from(edges) + + cls.T6 = cls.graph() + cls.T6.add_nodes_from([6, 7]) + cls.T6.add_edge(6, 7) + + cls.F1 = nx.compose(cls.T6, cls.T3) + + cls.N4 = cls.graph() + cls.N4.add_node(1) + cls.N4.add_edge(1, 1) + + cls.N5 = cls.graph() + cls.N5.add_nodes_from(range(5)) + + cls.N6 = cls.graph() + cls.N6.add_nodes_from(range(3)) + cls.N6.add_edges_from([(0, 1), (1, 2), (2, 0)]) + + cls.NF1 = nx.compose(cls.T6, cls.N6) + + def test_null_tree(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.graph()) + + def test_null_tree2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.multigraph()) + + def test_null_forest(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.graph()) + + def test_null_forest2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.multigraph()) + + def test_is_tree(self): + assert nx.is_tree(self.T2) + assert nx.is_tree(self.T3) + assert nx.is_tree(self.T5) + + def test_is_not_tree(self): + assert not 
nx.is_tree(self.N4) + assert not nx.is_tree(self.N5) + assert not nx.is_tree(self.N6) + + def test_is_forest(self): + assert nx.is_forest(self.T2) + assert nx.is_forest(self.T3) + assert nx.is_forest(self.T5) + assert nx.is_forest(self.F1) + assert nx.is_forest(self.N5) + + def test_is_not_forest(self): + assert not nx.is_forest(self.N4) + assert not nx.is_forest(self.N6) + assert not nx.is_forest(self.NF1) + + +class TestDirectedTreeRecognition(TestTreeRecognition): + graph = nx.DiGraph + multigraph = nx.MultiDiGraph + + +def test_disconnected_graph(): + # https://github.com/networkx/networkx/issues/1144 + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + +def test_dag_nontree(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (0, 2), (1, 2)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_multicycle(): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_emptybranch(): + G = nx.DiGraph() + G.add_nodes_from(range(10)) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_path(): + G = nx.DiGraph() + nx.add_path(G, range(5)) + assert nx.is_branching(G) + assert nx.is_arborescence(G) + + +def test_notbranching1(): + # Acyclic violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (1, 0)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notbranching2(): + # In-degree violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (3, 2)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence1(): + # Not an arborescence due to not spanning. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (5, 6)]) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence2(): + # Not an arborescence due to in-degree violation. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + G.add_edge(6, 4) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/triads.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/triads.py new file mode 100644 index 0000000..1c107a1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/triads.py @@ -0,0 +1,498 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 Diederik van Liere +"""Functions for analyzing triads of a graph.""" + +from collections import defaultdict +from itertools import combinations, permutations +from random import sample + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "triadic_census", + "is_triad", + "all_triplets", + "all_triads", + "triads_by_type", + "triad_type", + "random_triad", +] + +#: The integer codes representing each type of triad. +#: +#: Triads that are the same up to symmetry have the same code. 
+TRICODES = ( + 1, + 2, + 2, + 3, + 2, + 4, + 6, + 8, + 2, + 6, + 5, + 7, + 3, + 8, + 7, + 11, + 2, + 6, + 4, + 8, + 5, + 9, + 9, + 13, + 6, + 10, + 9, + 14, + 7, + 14, + 12, + 15, + 2, + 5, + 6, + 7, + 6, + 9, + 10, + 14, + 4, + 9, + 9, + 12, + 8, + 13, + 14, + 15, + 3, + 7, + 8, + 11, + 7, + 12, + 14, + 15, + 8, + 14, + 13, + 15, + 11, + 15, + 15, + 16, +) + +#: The names of each type of triad. The order of the elements is +#: important: it corresponds to the tricodes given in :data:`TRICODES`. +TRIAD_NAMES = ( + "003", + "012", + "102", + "021D", + "021U", + "021C", + "111D", + "111U", + "030T", + "030C", + "201", + "120D", + "120U", + "120C", + "210", + "300", +) + + +#: A dictionary mapping triad code to triad name. +TRICODE_TO_NAME = {i: TRIAD_NAMES[code - 1] for i, code in enumerate(TRICODES)} + + +def _tricode(G, v, u, w): + """Returns the integer code of the given triad. + + This is some fancy magic that comes from Batagelj and Mrvar's paper. It + treats each edge joining a pair of `v`, `u`, and `w` as a bit in + the binary representation of an integer. + + """ + combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32)) + return sum(x for u, v, x in combos if v in G[u]) + + +@not_implemented_for("undirected") +def triadic_census(G, nodelist=None): + """Determines the triadic census of a directed graph. + + The triadic census is a count of how many of the 16 possible types of + triads are present in a directed graph. If a list of nodes is passed, then + only those triads are taken into account which have elements of nodelist in them. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + nodelist : list + List of nodes for which you want to calculate triadic census + + Returns + ------- + census : dict + Dictionary with triad type as keys and number of occurrences as values. + + Notes + ----- + This algorithm has complexity $O(m)$ where $m$ is the number of edges in + the graph. + + Raises + ------ + ValueError + If `nodelist` contains duplicate nodes or nodes not in `G`. + If you want to ignore this you can preprocess with `set(nodelist) & G.nodes` + + See also + -------- + triad_graph + + References + ---------- + .. [1] Vladimir Batagelj and Andrej Mrvar, A subquadratic triad census + algorithm for large sparse networks with small maximum degree, + University of Ljubljana, + http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf + + """ + nodeset = set(G.nbunch_iter(nodelist)) + if nodelist is not None and len(nodelist) != len(nodeset): + raise ValueError("nodelist includes duplicate nodes or nodes not in G") + + N = len(G) + Nnot = N - len(nodeset) # can signal special counting for subset of nodes + + # create an ordering of nodes with nodeset nodes first + m = {n: i for i, n in enumerate(nodeset)} + if Nnot: + # add non-nodeset nodes later in the ordering + not_nodeset = G.nodes - nodeset + m.update((n, i + N) for i, n in enumerate(not_nodeset)) + + # build all_neighbor dicts for easy counting + # After Python 3.8 can leave off these keys(). 
Speedup also using G._pred + # nbrs = {n: G._pred[n].keys() | G._succ[n].keys() for n in G} + nbrs = {n: G.pred[n].keys() | G.succ[n].keys() for n in G} + dbl_nbrs = {n: G.pred[n].keys() & G.succ[n].keys() for n in G} + + if Nnot: + sgl_nbrs = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in not_nodeset} + # find number of edges not incident to nodes in nodeset + sgl = sum(1 for n in not_nodeset for nbr in sgl_nbrs[n] if nbr not in nodeset) + sgl_edges_outside = sgl // 2 + dbl = sum(1 for n in not_nodeset for nbr in dbl_nbrs[n] if nbr not in nodeset) + dbl_edges_outside = dbl // 2 + + # Initialize the count for each triad to be zero. + census = {name: 0 for name in TRIAD_NAMES} + # Main loop over nodes + for v in nodeset: + vnbrs = nbrs[v] + dbl_vnbrs = dbl_nbrs[v] + if Nnot: + # set up counts of edges attached to v. + sgl_unbrs_bdy = sgl_unbrs_out = dbl_unbrs_bdy = dbl_unbrs_out = 0 + for u in vnbrs: + if m[u] <= m[v]: + continue + unbrs = nbrs[u] + neighbors = (vnbrs | unbrs) - {u, v} + # Count connected triads. + for w in neighbors: + if m[u] < m[w] or (m[v] < m[w] < m[u] and v not in nbrs[w]): + code = _tricode(G, v, u, w) + census[TRICODE_TO_NAME[code]] += 1 + + # Use a formula for dyadic triads with edge incident to v + if u in dbl_vnbrs: + census["102"] += N - len(neighbors) - 2 + else: + census["012"] += N - len(neighbors) - 2 + + # Count edges attached to v. Subtract later to get triads with v isolated + # _out are (u,unbr) for unbrs outside boundary of nodeset + # _bdy are (u,unbr) for unbrs on boundary of nodeset (get double counted) + if Nnot and u not in nodeset: + sgl_unbrs = sgl_nbrs[u] + sgl_unbrs_bdy += len(sgl_unbrs & vnbrs - nodeset) + sgl_unbrs_out += len(sgl_unbrs - vnbrs - nodeset) + dbl_unbrs = dbl_nbrs[u] + dbl_unbrs_bdy += len(dbl_unbrs & vnbrs - nodeset) + dbl_unbrs_out += len(dbl_unbrs - vnbrs - nodeset) + # if nodeset == G.nodes, skip this b/c we will find the edge later. + if Nnot: + # Count edges outside nodeset not connected with v (v isolated triads) + census["012"] += sgl_edges_outside - (sgl_unbrs_out + sgl_unbrs_bdy // 2) + census["102"] += dbl_edges_outside - (dbl_unbrs_out + dbl_unbrs_bdy // 2) + + # calculate null triads: "003" + # null triads = total number of possible triads - all found triads + total_triangles = (N * (N - 1) * (N - 2)) // 6 + triangles_without_nodeset = (Nnot * (Nnot - 1) * (Nnot - 2)) // 6 + total_census = total_triangles - triangles_without_nodeset + census["003"] = total_census - sum(census.values()) + + return census + + +def is_triad(G): + """Returns True if the graph G is a triad, else False. + + Parameters + ---------- + G : graph + A NetworkX Graph + + Returns + ------- + istriad : boolean + Whether G is a valid triad + """ + if isinstance(G, nx.Graph): + if G.order() == 3 and nx.is_directed(G): + if not any((n, n) in G.edges() for n in G.nodes()): + return True + return False + + +@not_implemented_for("undirected") +def all_triplets(G): + """Returns a generator of all possible sets of 3 nodes in a DiGraph. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + triplets : generator of 3-tuples + Generator of tuples of 3 nodes + """ + triplets = combinations(G.nodes(), 3) + return triplets + + +@not_implemented_for("undirected") +def all_triads(G): + """A generator of all possible triads in G. 
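The `triadic_census` docstring above ships without an Examples section; a minimal, illustrative call (assuming a NetworkX 2.x environment where `nx.triadic_census` is exported at the top level) looks like this:

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
census = nx.triadic_census(G)

# Every unordered node triple is classified, so the counts sum to C(n, 3).
assert sum(census.values()) == 4   # C(4, 3) triads in a 4-node graph
assert census["030C"] == 1         # the 1 -> 2 -> 3 -> 1 cycle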
+ + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + all_triads : generator of DiGraphs + Generator of triads (order-3 DiGraphs) + """ + triplets = combinations(G.nodes(), 3) + for triplet in triplets: + yield G.subgraph(triplet).copy() + + +@not_implemented_for("undirected") +def triads_by_type(G): + """Returns a list of all triads for each triad type in a directed graph. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + tri_by_type : dict + Dictionary with triad types as keys and lists of triads as values. + """ + # num_triads = o * (o - 1) * (o - 2) // 6 + # if num_triads > TRIAD_LIMIT: print(WARNING) + all_tri = all_triads(G) + tri_by_type = defaultdict(list) + for triad in all_tri: + name = triad_type(triad) + tri_by_type[name].append(triad) + return tri_by_type + + +@not_implemented_for("undirected") +def triad_type(G): + """Returns the sociological triad type for a triad. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph with 3 nodes + + Returns + ------- + triad_type : str + A string identifying the triad type + + Notes + ----- + There can be 6 unique edges in a triad (order-3 DiGraph) (so 2^^6=64 unique + triads given 3 nodes). These 64 triads each display exactly 1 of 16 + topologies of triads (topologies can be permuted). These topologies are + identified by the following notation: + + {m}{a}{n}{type} (for example: 111D, 210, 102) + + Here: + + {m} = number of mutual ties (takes 0, 1, 2, 3); a mutual tie is (0,1) + AND (1,0) + {a} = number of assymmetric ties (takes 0, 1, 2, 3); an assymmetric tie + is (0,1) BUT NOT (1,0) or vice versa + {n} = number of null ties (takes 0, 1, 2, 3); a null tie is NEITHER + (0,1) NOR (1,0) + {type} = a letter (takes U, D, C, T) corresponding to up, down, cyclical + and transitive. This is only used for topologies that can have + more than one form (eg: 021D and 021U). + + References + ---------- + .. [1] Snijders, T. (2012). "Transitivity and triads." University of + Oxford. + https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf + """ + if not is_triad(G): + raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)") + num_edges = len(G.edges()) + if num_edges == 0: + return "003" + elif num_edges == 1: + return "012" + elif num_edges == 2: + e1, e2 = G.edges() + if set(e1) == set(e2): + return "102" + elif e1[0] == e2[0]: + return "021D" + elif e1[1] == e2[1]: + return "021U" + elif e1[1] == e2[0] or e2[1] == e1[0]: + return "021C" + elif num_edges == 3: + for (e1, e2, e3) in permutations(G.edges(), 3): + if set(e1) == set(e2): + if e3[0] in e1: + return "111U" + # e3[1] in e1: + return "111D" + elif set(e1).symmetric_difference(set(e2)) == set(e3): + if {e1[0], e2[0], e3[0]} == {e1[0], e2[0], e3[0]} == set(G.nodes()): + return "030C" + # e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]): + return "030T" + elif num_edges == 4: + for (e1, e2, e3, e4) in permutations(G.edges(), 4): + if set(e1) == set(e2): + # identify pair of symmetric edges (which necessarily exists) + if set(e3) == set(e4): + return "201" + if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)): + return "120D" + if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)): + return "120U" + if e3[1] == e4[0]: + return "120C" + elif num_edges == 5: + return "210" + elif num_edges == 6: + return "300" + + +@not_implemented_for("undirected") +def random_triad(G): + """Returns a random triad from a directed graph. 
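Neither `triad_type` nor `triads_by_type` carries an Examples section either; the following sketch (illustrative only, a NetworkX release of at least 2.5 assumed) shows the MAN-style labels they produce:

import networkx as nx

T = nx.DiGraph([(1, 2), (1, 3), (2, 3)])   # 0 mutual, 3 asymmetric, 0 null ties
assert nx.triad_type(T) == "030T"          # transitive triad

G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
assert sorted(nx.triads_by_type(G)) == ["012", "021C", "021D", "030C"]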
+ + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + G2 : subgraph + A randomly selected triad (order-3 NetworkX DiGraph) + """ + nodes = sample(list(G.nodes()), 3) + G2 = G.subgraph(nodes) + return G2 + + +""" +@not_implemented_for('undirected') +def triadic_closures(G): + '''Returns a list of order-3 subgraphs of G that are triadic closures. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + closures : list + List of triads of G that are triadic closures + ''' + pass + + +@not_implemented_for('undirected') +def focal_closures(G, attr_name): + '''Returns a list of order-3 subgraphs of G that are focally closed. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + attr_name : str + An attribute name + + + Returns + ------- + closures : list + List of triads of G that are focally closed on attr_name + ''' + pass + + +@not_implemented_for('undirected') +def balanced_triads(G, crit_func): + '''Returns a list of order-3 subgraphs of G that are stable. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + crit_func : function + A function that determines if a triad (order-3 digraph) is stable + + Returns + ------- + triads : list + List of triads in G that are stable + ''' + pass +""" diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/vitality.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/vitality.py new file mode 100644 index 0000000..88e174d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/vitality.py @@ -0,0 +1,75 @@ +""" +Vitality measures. +""" +from functools import partial + +import networkx as nx + +__all__ = ["closeness_vitality"] + + +def closeness_vitality(G, node=None, weight=None, wiener_index=None): + """Returns the closeness vitality for nodes in the graph. + + The *closeness vitality* of a node, defined in Section 3.6.2 of [1], + is the change in the sum of distances between all node pairs when + excluding that node. + + Parameters + ---------- + G : NetworkX graph + A strongly-connected graph. + + weight : string + The name of the edge attribute used as weight. This is passed + directly to the :func:`~networkx.wiener_index` function. + + node : object + If specified, only the closeness vitality for this node will be + returned. Otherwise, a dictionary mapping each node to its + closeness vitality will be returned. + + Other parameters + ---------------- + wiener_index : number + If you have already computed the Wiener index of the graph + `G`, you can provide that value here. Otherwise, it will be + computed for you. + + Returns + ------- + dictionary or float + If `node` is None, this function returns a dictionary + with nodes as keys and closeness vitality as the + value. Otherwise, it returns only the closeness vitality for the + specified `node`. + + The closeness vitality of a node may be negative infinity if + removing that node would disconnect the graph. + + Examples + -------- + >>> G = nx.cycle_graph(3) + >>> nx.closeness_vitality(G) + {0: 2.0, 1: 2.0, 2: 2.0} + + See Also + -------- + closeness_centrality + + References + ---------- + .. [1] Ulrik Brandes, Thomas Erlebach (eds.). + *Network Analysis: Methodological Foundations*. + Springer, 2005. 
+ + + """ + if wiener_index is None: + wiener_index = nx.wiener_index(G, weight=weight) + if node is not None: + after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight) + return wiener_index - after + vitality = partial(closeness_vitality, G, weight=weight, wiener_index=wiener_index) + # TODO This can be trivially parallelized. + return {v: vitality(node=v) for v in G} diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/voronoi.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/voronoi.py new file mode 100644 index 0000000..184afa2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/voronoi.py @@ -0,0 +1,85 @@ +"""Functions for computing the Voronoi cells of a graph.""" +import networkx as nx +from networkx.utils import groups + +__all__ = ["voronoi_cells"] + + +def voronoi_cells(G, center_nodes, weight="weight"): + """Returns the Voronoi cells centered at `center_nodes` with respect + to the shortest-path distance metric. + + If *C* is a set of nodes in the graph and *c* is an element of *C*, + the *Voronoi cell* centered at a node *c* is the set of all nodes + *v* that are closer to *c* than to any other center node in *C* with + respect to the shortest-path distance metric. [1]_ + + For directed graphs, this will compute the "outward" Voronoi cells, + as defined in [1]_, in which distance is measured from the center + nodes to the target node. For the "inward" Voronoi cells, use the + :meth:`DiGraph.reverse` method to reverse the orientation of the + edges before invoking this function on the directed graph. + + Parameters + ---------- + G : NetworkX graph + + center_nodes : set + A nonempty set of nodes in the graph `G` that represent the + center of the Voronoi cells. + + weight : string or function + The edge attribute (or an arbitrary function) representing the + weight of an edge. This keyword argument is as described in the + documentation for :func:`~networkx.multi_source_dijkstra_path`, + for example. + + Returns + ------- + dictionary + A mapping from center node to set of all nodes in the graph + closer to that center node than to any other center node. The + keys of the dictionary are the element of `center_nodes`, and + the values of the dictionary form a partition of the nodes of + `G`. + + Examples + -------- + To get only the partition of the graph induced by the Voronoi cells, + take the collection of all values in the returned dictionary:: + + >>> G = nx.path_graph(6) + >>> center_nodes = {0, 3} + >>> cells = nx.voronoi_cells(G, center_nodes) + >>> partition = set(map(frozenset, cells.values())) + >>> sorted(map(sorted, partition)) + [[0, 1], [2, 3, 4, 5]] + + Raises + ------ + ValueError + If `center_nodes` is empty. + + References + ---------- + .. [1] Erwig, Martin. (2000), + "The graph Voronoi diagram with applications." + *Networks*, 36: 156--163. + 3.0.CO;2-L> + + """ + # Determine the shortest paths from any one of the center nodes to + # every node in the graph. + # + # This raises `ValueError` if `center_nodes` is an empty set. + paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight) + # Determine the center node from which the shortest path originates. + nearest = {v: p[0] for v, p in paths.items()} + # Get the mapping from center node to all nodes closer to it than to + # any other center node. + cells = groups(nearest) + # We collect all unreachable nodes under a special key, if there are any. 
+ unreachable = set(G) - set(nearest) + if unreachable: + cells["unreachable"] = unreachable + return cells diff --git a/myenv/lib/python3.9/site-packages/networkx/algorithms/wiener.py b/myenv/lib/python3.9/site-packages/networkx/algorithms/wiener.py new file mode 100644 index 0000000..e60fb9e --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/algorithms/wiener.py @@ -0,0 +1,76 @@ +"""Functions related to the Wiener index of a graph.""" + +from itertools import chain + +from .components import is_connected, is_strongly_connected +from .shortest_paths import shortest_path_length as spl + +__all__ = ["wiener_index"] + +#: Rename the :func:`chain.from_iterable` function for the sake of +#: brevity. +chaini = chain.from_iterable + + +def wiener_index(G, weight=None): + """Returns the Wiener index of the given graph. + + The *Wiener index* of a graph is the sum of the shortest-path + distances between each pair of reachable nodes. For pairs of nodes + in undirected graphs, only one orientation of the pair is counted. + + Parameters + ---------- + G : NetworkX graph + + weight : object + The edge attribute to use as distance when computing + shortest-path distances. This is passed directly to the + :func:`networkx.shortest_path_length` function. + + Returns + ------- + float + The Wiener index of the graph `G`. + + Raises + ------ + NetworkXError + If the graph `G` is not connected. + + Notes + ----- + If a pair of nodes is not reachable, the distance is assumed to be + infinity. This means that for graphs that are not + strongly-connected, this function returns ``inf``. + + The Wiener index is not usually defined for directed graphs, however + this function uses the natural generalization of the Wiener index to + directed graphs. + + Examples + -------- + The Wiener index of the (unweighted) complete graph on *n* nodes + equals the number of pairs of the *n* nodes, since each pair of + nodes is at distance one:: + + >>> n = 10 + >>> G = nx.complete_graph(n) + >>> nx.wiener_index(G) == n * (n - 1) / 2 + True + + Graphs that are not strongly-connected have infinite Wiener index:: + + >>> G = nx.empty_graph(2) + >>> nx.wiener_index(G) + inf + + """ + is_directed = G.is_directed() + if (is_directed and not is_strongly_connected(G)) or ( + not is_directed and not is_connected(G) + ): + return float("inf") + total = sum(chaini(p.values() for v, p in spl(G, weight=weight))) + # Need to account for double counting pairs of nodes in undirected graphs. + return total if is_directed else total / 2 diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/__init__.py b/myenv/lib/python3.9/site-packages/networkx/classes/__init__.py new file mode 100644 index 0000000..d5bb1d7 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/__init__.py @@ -0,0 +1,13 @@ +from .graph import Graph +from .digraph import DiGraph +from .multigraph import MultiGraph +from .multidigraph import MultiDiGraph +from .ordered import * + +from .function import * + +from networkx.classes import filters + +from networkx.classes import coreviews +from networkx.classes import graphviews +from networkx.classes import reportviews diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/coreviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/coreviews.py new file mode 100644 index 0000000..6c5b8a4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/coreviews.py @@ -0,0 +1,482 @@ +"""Views of core data structures such as nested Mappings (e.g. dict-of-dicts). 
+These ``Views`` often restrict element access, with either the entire view or +layers of nested mappings being read-only. +""" +import warnings +from collections.abc import Mapping + +__all__ = [ + "AtlasView", + "AdjacencyView", + "MultiAdjacencyView", + "UnionAtlas", + "UnionAdjacency", + "UnionMultiInner", + "UnionMultiAdjacency", + "FilterAtlas", + "FilterAdjacency", + "FilterMultiInner", + "FilterMultiAdjacency", +] + + +class AtlasView(Mapping): + """An AtlasView is a Read-only Mapping of Mappings. + + It is a View into a dict-of-dict data structure. + The inner level of dict is read-write. But the + outer level is read-only. + + See Also + ======== + AdjacencyView: View into dict-of-dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_atlas",) + + def __getstate__(self): + return {"_atlas": self._atlas} + + def __setstate__(self, state): + self._atlas = state["_atlas"] + + def __init__(self, d): + self._atlas = d + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._atlas) + + def __getitem__(self, key): + return self._atlas[key] + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + def __str__(self): + return str(self._atlas) # {nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r})" + + +class AdjacencyView(AtlasView): + """An AdjacencyView is a Read-only Map of Maps of Maps. + + It is a View into a dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AtlasView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class MultiAdjacencyView(AdjacencyView): + """An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps. + + It is a View into a dict-of-dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + AdjacencyView: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AdjacencyView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class UnionAtlas(Mapping): + """A read-only union of two atlases (dict-of-dict). + + The two dict-of-dicts represent the inner dict of + an Adjacency: `G.succ[node]` and `G.pred[node]`. + The inner level of dict of both hold attribute key:value + pairs and is read-write. But the outer level is read-only. 
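The practical effect of these atlas/adjacency views is easiest to see through `G.adj`; a short, illustrative sketch using only the public Graph API:

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=3)

view = G.adj                    # AdjacencyView over the dict-of-dict-of-dict
view["a"]["b"]["weight"] = 7    # the innermost attribute dict stays writable
assert G["a"]["b"]["weight"] == 7

# The outer layers are read-only Mappings, so item assignment is rejected.
try:
    view["a"] = {}
except TypeError:
    pass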
+ + See Also + ======== + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ) + len(self._pred) + + def __iter__(self): + return iter(set(self._succ.keys()) | set(self._pred.keys())) + + def __getitem__(self, key): + try: + return self._succ[key] + except KeyError: + return self._pred[key] + + def copy(self): + result = {nbr: dd.copy() for nbr, dd in self._succ.items()} + for nbr, dd in self._pred.items(): + if nbr in result: + result[nbr].update(dd) + else: + result[nbr] = dd.copy() + return result + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionAdjacency(Mapping): + """A read-only union of dict Adjacencies as a Map of Maps of Maps. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred`. Return values are UnionAtlas + The inner level of dict is read-write. But the + middle and outer levels are read-only. + + succ : a dict-of-dict-of-dict {node: nbrdict} + pred : a dict-of-dict-of-dict {node: nbrdict} + The keys for the two dicts should be the same + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + # keys must be the same for two input dicts + assert len(set(succ.keys()) ^ set(pred.keys())) == 0 + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ) # length of each dict should be the same + + def __iter__(self): + return iter(self._succ) + + def __getitem__(self, nbr): + return UnionAtlas(self._succ[nbr], self._pred[nbr]) + + def copy(self): + return {n: self[n].copy() for n in self._succ} + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionMultiInner(UnionAtlas): + """A read-only union of two inner dicts of MultiAdjacencies. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ[node]` and `G.pred[node]` for MultiDiGraphs. + Return values are UnionAtlas. + The inner level of dict is read-write. But the outer levels are read-only. 
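Because `UnionAtlas` takes two plain inner dicts, it can be exercised on its own; a self-contained sketch of how it merges the successor and predecessor sides (illustrative only):

from networkx.classes.coreviews import UnionAtlas

succ = {"b": {"weight": 1}}    # out-neighbors of some node n
pred = {"c": {"weight": 2}}    # in-neighbors of the same node

atlas = UnionAtlas(succ, pred)
assert sorted(atlas) == ["b", "c"]    # iteration covers the union of both key sets
assert atlas["c"] == {"weight": 2}    # lookup falls back to the pred side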
+ + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAtlas slots names _succ, _pred + + def __getitem__(self, node): + in_succ = node in self._succ + in_pred = node in self._pred + if in_succ: + if in_pred: + return UnionAtlas(self._succ[node], self._pred[node]) + return UnionAtlas(self._succ[node], {}) + return UnionAtlas({}, self._pred[node]) + + def copy(self): + nodes = set(self._succ.keys()) | set(self._pred.keys()) + return {n: self[n].copy() for n in nodes} + + +class UnionMultiAdjacency(UnionAdjacency): + """A read-only union of two dict MultiAdjacencies. + + The two input dict-of-dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency. + The inner level of dict is read-write. But the outer levels are read-only. + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiInner: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAdjacency slots names _succ, _pred + + def __getitem__(self, node): + return UnionMultiInner(self._succ[node], self._pred[node]) + + +class FilterAtlas(Mapping): # nodedict, nbrdict, keydict + def __init__(self, d, NODE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + + def __len__(self): + return sum(1 for n in self) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, key): + if key in self._atlas and self.NODE_OK(key): + return self._atlas[key] + raise KeyError(f"Key {key} not found") + + # FIXME should this just be removed? we don't use it, but someone might + def copy(self): + warnings.warn( + ( + "FilterAtlas.copy is deprecated.\n" + "It will be removed in NetworkX 3.0.\n" + "Please open an Issue on https://github.com/networkx/networkx/issues\n" + "if you use this feature. We think that no one does use it." 
+ ), + DeprecationWarning, + ) + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return {u: self._atlas[u] for u in self.NODE_OK.nodes if u in self._atlas} + return {u: d for u, d in self._atlas.items() if self.NODE_OK(u)} + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})" + + +class FilterAdjacency(Mapping): # edgedict + def __init__(self, d, NODE_OK, EDGE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + self.EDGE_OK = EDGE_OK + + def __len__(self): + return sum(1 for n in self) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def new_node_ok(nbr): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr) + + return FilterAtlas(self._atlas[node], new_node_ok) + raise KeyError(f"Key {node} not found") + + # FIXME should this just be removed? we don't use it, but someone might + def copy(self): + warnings.warn( + ( + "FilterAdjacency.copy is deprecated.\n" + "It will be removed in NetworkX 3.0.\n" + "Please open an Issue on https://github.com/networkx/networkx/issues\n" + "if you use this feature. We think that no one does use it." + ), + DeprecationWarning, + ) + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return { + u: { + v: d + for v, d in self._atlas[u].items() + if self.NODE_OK(v) + if self.EDGE_OK(u, v) + } + for u in self.NODE_OK.nodes + if u in self._atlas + } + return { + u: {v: d for v, d in nbrs.items() if self.NODE_OK(v) if self.EDGE_OK(u, v)} + for u, nbrs in self._atlas.items() + if self.NODE_OK(u) + } + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + name = self.__class__.__name__ + return f"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})" + + +class FilterMultiInner(FilterAdjacency): # muliedge_seconddict + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas) + else: + my_nodes = (n for n in self._atlas if self.NODE_OK(n)) + for n in my_nodes: + some_keys_ok = False + for key in self._atlas[n]: + if self.EDGE_OK(n, key): + some_keys_ok = True + break + if some_keys_ok is True: + yield n + + def __getitem__(self, nbr): + if nbr in self._atlas and self.NODE_OK(nbr): + + def new_node_ok(key): + return self.EDGE_OK(nbr, key) + + return FilterAtlas(self._atlas[nbr], new_node_ok) + raise KeyError(f"Key {nbr} not found") + + # FIXME should this just be removed? we don't use it, but someone might + def copy(self): + warnings.warn( + ( + "FilterMultiInner.copy is deprecated.\n" + "It will be removed in NetworkX 3.0.\n" + "Please open an Issue on https://github.com/networkx/networkx/issues\n" + "if you use this feature. We think that no one does use it." 
+ ), + DeprecationWarning, + ) + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return { + v: {k: d for k, d in self._atlas[v].items() if self.EDGE_OK(v, k)} + for v in self.NODE_OK.nodes + if v in self._atlas + } + return { + v: {k: d for k, d in nbrs.items() if self.EDGE_OK(v, k)} + for v, nbrs in self._atlas.items() + if self.NODE_OK(v) + } + + +class FilterMultiAdjacency(FilterAdjacency): # multiedgedict + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def edge_ok(nbr, key): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key) + + return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok) + raise KeyError(f"Key {node} not found") + + # FIXME should this just be removed? we don't use it, but someone might + def copy(self): + warnings.warn( + ( + "FilterMultiAdjacency.copy is deprecated.\n" + "It will be removed in NetworkX 3.0.\n" + "Please open an Issue on https://github.com/networkx/networkx/issues\n" + "if you use this feature. We think that no one does use it." + ), + DeprecationWarning, + ) + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + my_nodes = self.NODE_OK.nodes + return { + u: { + v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)} + for v, kd in self._atlas[u].items() + if v in my_nodes + } + for u in my_nodes + if u in self._atlas + } + return { + u: { + v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)} + for v, kd in nbrs.items() + if self.NODE_OK(v) + } + for u, nbrs in self._atlas.items() + if self.NODE_OK(u) + } diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/digraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/digraph.py new file mode 100644 index 0000000..9528a15 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/digraph.py @@ -0,0 +1,1269 @@ +"""Base class for directed graphs.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +import networkx.convert as convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import ( + DiDegreeView, + InDegreeView, + InEdgeView, + OutDegreeView, + OutEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["DiGraph"] + + +class _CachedPropertyResetterAdjAndSucc: + """Data Descriptor class that syncs and resets cached properties adj and succ + + The cached properties `adj` and `succ` are reset whenever `_adj` or `_succ` + are set to new objects. In addition, the attributes `_succ` and `_adj` + are synced so these two names point to the same object. + + This object sits on a class and ensures that any instance of that + class clears its cached properties "succ" and "adj" whenever the + underlying instance attributes "_succ" or "_adj" are set to a new object. + It only affects the set process of the obj._adj and obj._succ attribute. + All get/del operations act as they normally would. 
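The descriptor classes above pair an instance attribute with a `cached_property` so that rebinding the attribute invalidates the cache. A minimal standalone sketch of the same pattern, with hypothetical names that are not part of the vendored file:

from functools import cached_property

class _ResetterForData:
    """Data descriptor: rebinding obj._data also drops the cached 'view'."""
    def __set__(self, obj, value):
        obj.__dict__["_data"] = value
        obj.__dict__.pop("view", None)   # invalidate the cached_property

class Box:
    _data = _ResetterForData()

    def __init__(self, data):
        self._data = data

    @cached_property
    def view(self):
        return tuple(self._data)         # recomputed only after _data changes

b = Box([1, 2])
assert b.view == (1, 2)
b._data = [3, 4]                         # descriptor fires, stale cache cleared
assert b.view == (3, 4)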
+ + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + od["_succ"] = value + # reset cached properties + if "adj" in od: + del od["adj"] + if "succ" in od: + del od["succ"] + + +class _CachedPropertyResetterPred: + """Data Descriptor class for _pred that resets ``pred`` cached_property when needed + + This assumes that the ``cached_property`` ``G.pred`` should be reset whenever + ``G._pred`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "pred" whenever the underlying + instance attribute "_pred" is set to a new object. It only affects + the set process of the obj._pred attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_pred"] = value + if "pred" in od: + del od["pred"] + + +class DiGraph(Graph): + """ + Base class for directed graphs. + + A DiGraph stores nodes and edges with optional data, or attributes. + + DiGraphs hold directed edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + MultiGraph + MultiDiGraph + OrderedDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.DiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. 
+ + >>> G = nx.DiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, 2]` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges reporting object is often more convenient: + + >>> for u, v, weight in G.edges(data="weight"): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. 
+ It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, optional (default: dict) + Factory function to be used to create the adjacency list + dict which holds edge data keyed by neighbor. + It should require no arguments and return a dict-like object + + edge_attr_dict_factory : function, optional (default: dict) + Factory function to be used to create the edge attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + graph_attr_dict_factory : function, (default: dict) + Factory function to be used to create the graph attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + Typically, if your extension doesn't impact the data structure all + methods will inherited without issue except: `to_directed/to_undirected`. + By default these methods create a DiGraph/Graph class and you probably + want them to create your extension of a DiGraph/Graph. To facilitate + this we define two class variables that you can set in your subclass. + + to_directed_class : callable, (default: DiGraph or MultiDiGraph) + Class to create a new graph structure in the `to_directed` method. + If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used. + + to_undirected_class : callable, (default: Graph or MultiGraph) + Class to create a new graph structure in the `to_undirected` method. + If `None`, a NetworkX class (Graph or MultiGraph) is used. + + **Subclassing Example** + + Create a low memory graph class that effectively disallows edge + attributes by using a single attribute dict for all edges. + This reduces the memory used, but you lose edge attributes. + + >>> class ThinGraph(nx.Graph): + ... all_edge_dict = {"weight": 1} + ... + ... def single_edge_dict(self): + ... return self.all_edge_dict + ... + ... edge_attr_dict_factory = single_edge_dict + >>> G = ThinGraph() + >>> G.add_edge(2, 1) + >>> G[2][1] + {'weight': 1} + >>> G.add_edge(2, 2) + >>> G[2][1] is G[2][2] + True + + + Please see :mod:`~networkx.classes.ordered` for more examples of + creating graph subclasses by overwriting the base class `dict` with + a dictionary-like object. + """ + + _adj = _CachedPropertyResetterAdjAndSucc() # type: ignore + _succ = _adj # type: ignore + _pred = _CachedPropertyResetterPred() + + def __init__(self, incoming_graph_data=None, **attr): + """Initialize a graph with edges, name, or graph attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a 2D NumPy array, a + SciPy sparse matrix, or a PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. 
+ + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes + self._node = self.node_dict_factory() # dictionary for node attr + # We store two adjacency lists: + # the predecessors of node n are stored in the dict self._pred + # the successors of node n are stored in the dict self._succ=self._adj + self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict successor + self._pred = self.adjlist_outer_dict_factory() # predecessor + # Note: self._succ = self._adj # successor + + # attempt to load graph with data + if incoming_graph_data is not None: + convert.to_networkx_graph(incoming_graph_data, create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return AdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.succ[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.succ behaves like a dict. Useful idioms include + `for nbr, datadict in G.succ[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.succ[node].data('foo'):` + and a default can be set via a `default` argument to the `data` method. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` is identical to `G.succ`. + """ + return AdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.pred[2][3]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.pred behaves like a dict. Useful idioms include + `for nbr, datadict in G.pred[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):` + A default can be set via a `default` argument to the `data` method. + """ + return AdjacencyView(self._pred) + + def add_node(self, node_for_adding, **attr): + """Add a single node `node_for_adding` and update node attributes. 
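A quick, illustrative round-trip through the three adjacency views documented above (standard DiGraph behaviour, nothing specific to this vendored copy):

import networkx as nx

G = nx.DiGraph()
G.add_edge(1, 2, color="blue")

assert list(G.adj[1]) == [2]    # outgoing (successor) info
assert list(G.succ[1]) == [2]   # G.adj and G.succ expose the same data
assert list(G.pred[2]) == [1]   # incoming (predecessor) info
assert list(G.pred[1]) == []    # node 1 has no incoming edges

# Both directions point at the same underlying edge-data dict.
assert G.succ[1][2] is G.pred[2][1]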
+ + Parameters + ---------- + node_for_adding : node + A node can be any hashable Python object except None. + attr : keyword arguments, optional + Set or change node attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1, size=10) + >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + if node_for_adding not in self._succ: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._succ[node_for_adding] = self.adjlist_inner_dict_factory() + self._pred[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else: # update attr even if node already exists + self._node[node_for_adding].update(attr) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._succ[n] = self.adjlist_inner_dict_factory() + self._pred[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a non-existent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. 
+ + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + try: + nbrs = self._succ[n] + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the digraph.") from err + for u in nbrs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # remove node from succ + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # remove node from pred + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently ignored. + + See Also + -------- + remove_node + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + """ + for n in nodes: + try: + succs = self._succ[n] + del self._node[n] + for u in succs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # now remove node + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # now remove node + except KeyError: + pass # silent failure on remove + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. 
+ + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. 
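A small sketch of the update-on-duplicate behaviour implemented above, where adding an existing edge merges new attributes into the same edge data dict (assuming `networkx` is imported as `nx`):

    >>> D = nx.DiGraph()
    >>> D.add_edge(1, 2, weight=3)
    >>> D.add_edges_from([(1, 2, {"color": "blue"})])
    >>> D.edges[1, 2]
    {'weight': 3, 'color': 'blue'}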
+ + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, etc + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + >>> e = (2, 3, {"weight": 7}) # an edge with attribute data + >>> G.remove_edge(*e[:2]) # select first part of edge tuple + """ + try: + del self._succ[u][v] + del self._pred[v][u] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} not in graph.") from err + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) edge between u and v. + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + for e in ebunch: + u, v = e[:2] # ignore edge data + if u in self._succ and v in self._succ[u]: + del self._succ[u][v] + del self._pred[v][u] + + def has_successor(self, u, v): + """Returns True if node u has successor v. + + This is true if graph has the edge u->v. + """ + return u in self._succ and v in self._succ[u] + + def has_predecessor(self, u, v): + """Returns True if node u has predecessor v. + + This is true if graph has the edge u<-v. + """ + return u in self._pred and v in self._pred[u] + + def successors(self, n): + """Returns an iterator over successor nodes of n. + + A successor of n is a node m such that there exists a directed + edge from n to m. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + predecessors + + Notes + ----- + neighbors() and successors() are the same. + """ + try: + return iter(self._succ[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + # digraph definitions + neighbors = successors + + def predecessors(self, n): + """Returns an iterator over predecessor nodes of n. + + A predecessor of n is a node m such that there exists a directed + edge from m to n. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + successors + """ + try: + return iter(self._pred[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + @cached_property + def edges(self): + """An OutEdgeView of the DiGraph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, default=None) + + The OutEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, `G.edges[u, v]['color']` provides the value of the color + attribute for edge `(u, v)` while + `for (u, v, c) in G.edges.data('color', default='red'):` + iterates through all the edges yielding the color attribute + with default `'red'` if no color attribute exists. 
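To make the successor/predecessor terminology above concrete, a minimal sketch (assuming `networkx` is imported as `nx`):

    >>> D = nx.DiGraph([(1, 2), (3, 2)])
    >>> sorted(D.successors(1)), sorted(D.predecessors(2))
    ([2], [1, 3])
    >>> D.has_successor(1, 2), D.has_predecessor(1, 2)
    (True, False)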
+ + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutEdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + See Also + -------- + in_edges, out_edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> nx.add_path(G, [0, 1, 2]) + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + OutEdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + OutEdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 2]) # only edges originating from these nodes + OutEdgeDataView([(0, 1), (2, 3)]) + >>> G.edges(0) # only edges from node 0 + OutEdgeDataView([(0, 1)]) + + """ + return OutEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """An InEdgeView of the Graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, default=None): + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InEdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + See Also + -------- + edges + """ + return InEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiDegreeView or int + If multiple nodes are requested (the default), returns a `DiDegreeView` + mapping nodes to their degree. 
+ If a single node is requested, returns the degree of the node as an integer. + + See Also + -------- + in_degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + + """ + return DiDegreeView(self) + + @cached_property + def in_degree(self): + """An InDegreeView for (node, in_degree) or in_degree for single node. + + The node in_degree is the number of edges pointing to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iteration over (node, in_degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + In-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + + """ + return InDegreeView(self) + + @cached_property + def out_degree(self): + """An OutDegreeView for (node, out_degree) + + The node out_degree is the number of edges pointing out of the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator over (node, out_degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Out-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + + """ + return OutDegreeView(self) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear() + >>> list(G.nodes) + [] + >>> list(G.edges) + [] + + """ + self._succ.clear() + self._pred.clear() + self._node.clear() + self.graph.clear() + + def clear_edges(self): + """Remove all edges from the graph without altering nodes. 
+ + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear_edges() + >>> list(G.nodes) + [0, 1, 2, 3] + >>> list(G.edges) + [] + + """ + for predecessor_dict in self._pred.values(): + predecessor_dict.clear() + for successor_dict in self._succ.values(): + successor_dict.clear() + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return False + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. + + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : Graph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + Graph, copy, add_edge, add_edges_from + + Notes + ----- + If edges in both directions (u, v) and (v, u) exist in the + graph, attributes for the new undirected edge will be a combination of + the attributes of the directed edges. The edge data is updated + in the (arbitrary) order that the edges are encountered. For + more customized control of the edge attributes use add_edge(). + + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed DiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + Graph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + if v in self._pred[u] + ) + else: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. 
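For concreteness, a minimal sketch of `reverse` (assuming `networkx` is imported as `nx`); with `copy=False` the result is a view that reflects later changes to the original graph:

    >>> D = nx.DiGraph([(1, 2), (2, 3)])
    >>> list(D.reverse().edges)
    [(2, 1), (3, 2)]
    >>> R = D.reverse(copy=False)
    >>> D.add_edge(3, 4)
    >>> (4, 3) in R.edges
    True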
+ """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items()) + H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True)) + return H + return nx.graphviews.reverse_view(self) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/filters.py b/myenv/lib/python3.9/site-packages/networkx/classes/filters.py new file mode 100644 index 0000000..aefcbdf --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/filters.py @@ -0,0 +1,75 @@ +"""Filter factories to hide or show sets of nodes and edges. + +These filters return the function used when creating `SubGraph`. +""" +__all__ = [ + "no_filter", + "hide_nodes", + "hide_edges", + "hide_multiedges", + "hide_diedges", + "hide_multidiedges", + "show_nodes", + "show_edges", + "show_multiedges", + "show_diedges", + "show_multidiedges", +] + + +def no_filter(*items): + return True + + +def hide_nodes(nodes): + nodes = set(nodes) + return lambda node: node not in nodes + + +def hide_diedges(edges): + edges = {(u, v) for u, v in edges} + return lambda u, v: (u, v) not in edges + + +def hide_edges(edges): + alledges = set(edges) | {(v, u) for (u, v) in edges} + return lambda u, v: (u, v) not in alledges + + +def hide_multidiedges(edges): + edges = {(u, v, k) for u, v, k in edges} + return lambda u, v, k: (u, v, k) not in edges + + +def hide_multiedges(edges): + alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} + return lambda u, v, k: (u, v, k) not in alledges + + +# write show_nodes as a class to make SubGraph pickleable +class show_nodes: + def __init__(self, nodes): + self.nodes = set(nodes) + + def __call__(self, node): + return node in self.nodes + + +def show_diedges(edges): + edges = {(u, v) for u, v in edges} + return lambda u, v: (u, v) in edges + + +def show_edges(edges): + alledges = set(edges) | {(v, u) for (u, v) in edges} + return lambda u, v: (u, v) in alledges + + +def show_multidiedges(edges): + edges = {(u, v, k) for u, v, k in edges} + return lambda u, v, k: (u, v, k) in edges + + +def show_multiedges(edges): + alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} + return lambda u, v, k: (u, v, k) in alledges diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/function.py b/myenv/lib/python3.9/site-packages/networkx/classes/function.py new file mode 100644 index 0000000..7707ec4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/function.py @@ -0,0 +1,1311 @@ +"""Functional interface to graph methods and assorted utilities. 
+""" + +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.classes.graphviews import reverse_view, subgraph_view +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "nodes", + "edges", + "degree", + "degree_histogram", + "neighbors", + "number_of_nodes", + "number_of_edges", + "density", + "is_directed", + "info", + "freeze", + "is_frozen", + "subgraph", + "subgraph_view", + "induced_subgraph", + "reverse_view", + "edge_subgraph", + "restricted_view", + "to_directed", + "to_undirected", + "add_star", + "add_path", + "add_cycle", + "create_empty_copy", + "set_node_attributes", + "get_node_attributes", + "set_edge_attributes", + "get_edge_attributes", + "all_neighbors", + "non_neighbors", + "non_edges", + "common_neighbors", + "is_weighted", + "is_negatively_weighted", + "is_empty", + "selfloop_edges", + "nodes_with_selfloops", + "number_of_selfloops", + "path_weight", + "is_path", +] + + +def nodes(G): + """Returns an iterator over the graph nodes.""" + return G.nodes() + + +def edges(G, nbunch=None): + """Returns an edge view of edges incident to nodes in nbunch. + + Return all edges if nbunch is unspecified or nbunch=None. + + For digraphs, edges=out_edges + """ + return G.edges(nbunch) + + +def degree(G, nbunch=None, weight=None): + """Returns a degree view of single node or of nbunch of nodes. + If nbunch is omitted, then return degrees of *all* nodes. + """ + return G.degree(nbunch, weight) + + +def neighbors(G, n): + """Returns a list of nodes connected to node n.""" + return G.neighbors(n) + + +def number_of_nodes(G): + """Returns the number of nodes in the graph.""" + return G.number_of_nodes() + + +def number_of_edges(G): + """Returns the number of edges in the graph.""" + return G.number_of_edges() + + +def density(G): + r"""Returns the density of a graph. + + The density for undirected graphs is + + .. math:: + + d = \frac{2m}{n(n-1)}, + + and for directed graphs is + + .. math:: + + d = \frac{m}{n(n-1)}, + + where `n` is the number of nodes and `m` is the number of edges in `G`. + + Notes + ----- + The density is 0 for a graph without edges and 1 for a complete graph. + The density of multigraphs can be higher than 1. + + Self loops are counted in the total number of edges so graphs with self + loops can have density higher than 1. + """ + n = number_of_nodes(G) + m = number_of_edges(G) + if m == 0 or n <= 1: + return 0 + d = m / (n * (n - 1)) + if not G.is_directed(): + d *= 2 + return d + + +def degree_histogram(G): + """Returns a list of the frequency of each degree value. + + Parameters + ---------- + G : Networkx graph + A graph + + Returns + ------- + hist : list + A list of frequencies of degrees. + The degree values are the index in the list. + + Notes + ----- + Note: the bins are width one, hence len(list) can be large + (Order(number_of_edges)) + """ + counts = Counter(d for n, d in G.degree()) + return [counts.get(i, 0) for i in range(max(counts) + 1)] + + +def is_directed(G): + """Return True if graph is directed.""" + return G.is_directed() + + +def frozen(*args, **kwargs): + """Dummy method for raising errors when trying to modify frozen graphs""" + raise nx.NetworkXError("Frozen graph can't be modified") + + +def freeze(G): + """Modify graph to prevent further change by adding or removing + nodes or edges. + + Node and edge data can still be modified. 
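A minimal sketch of that point (assuming `networkx` is imported as `nx`): a frozen graph rejects structural changes, but node and edge attribute data stay writable.

    >>> G = nx.freeze(nx.path_graph(2))
    >>> G.nodes[0]["color"] = "red"  # attribute data is still writable
    >>> G.nodes[0]
    {'color': 'red'}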
+ + Parameters + ---------- + G : graph + A NetworkX graph + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G = nx.freeze(G) + >>> try: + ... G.add_edge(4, 5) + ... except nx.NetworkXError as err: + ... print(str(err)) + Frozen graph can't be modified + + Notes + ----- + To "unfreeze" a graph you must make a copy by creating a new graph object: + + >>> graph = nx.path_graph(4) + >>> frozen_graph = nx.freeze(graph) + >>> unfrozen_graph = nx.Graph(frozen_graph) + >>> nx.is_frozen(unfrozen_graph) + False + + See Also + -------- + is_frozen + """ + G.add_node = frozen + G.add_nodes_from = frozen + G.remove_node = frozen + G.remove_nodes_from = frozen + G.add_edge = frozen + G.add_edges_from = frozen + G.add_weighted_edges_from = frozen + G.remove_edge = frozen + G.remove_edges_from = frozen + G.clear = frozen + G.frozen = True + return G + + +def is_frozen(G): + """Returns True if graph is frozen. + + Parameters + ---------- + G : graph + A NetworkX graph + + See Also + -------- + freeze + """ + try: + return G.frozen + except AttributeError: + return False + + +def add_star(G_to_add_to, nodes_for_star, **attr): + """Add a star to Graph G_to_add_to. + + The first node in `nodes_for_star` is the middle of the star. + It is connected to all other nodes. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_star : iterable container + A container of nodes. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in star. + + See Also + -------- + add_path, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_star(G, [0, 1, 2, 3]) + >>> nx.add_star(G, [10, 11, 12], weight=2) + """ + nlist = iter(nodes_for_star) + try: + v = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(v) + edges = ((v, n) for n in nlist) + G_to_add_to.add_edges_from(edges, **attr) + + +def add_path(G_to_add_to, nodes_for_path, **attr): + """Add a path to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_path : iterable container + A container of nodes. A path will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in path. + + See Also + -------- + add_star, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.add_path(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_path) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from(pairwise(chain((first_node,), nlist)), **attr) + + +def add_cycle(G_to_add_to, nodes_for_cycle, **attr): + """Add a cycle to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_cycle: iterable container + A container of nodes. A cycle will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in cycle. 
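For concreteness, a small sketch of the star and cycle helpers described above (assuming `networkx` is imported as `nx`):

    >>> G = nx.Graph()
    >>> nx.add_star(G, [0, 1, 2])
    >>> nx.add_cycle(G, [3, 4, 5])
    >>> sorted(G.edges)
    [(0, 1), (0, 2), (3, 4), (3, 5), (4, 5)]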
+ + See Also + -------- + add_path, add_star + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_cycle) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from( + pairwise(chain((first_node,), nlist), cyclic=True), **attr + ) + + +def subgraph(G, nbunch): + """Returns the subgraph induced on nodes in nbunch. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : list, iterable + A container of nodes that will be iterated through once (thus + it should be an iterator or be iterable). Each element of the + container should be a valid node type: any hashable type except + None. If nbunch is None, return all edges data in the graph. + Nodes in nbunch that are not in the graph will be (quietly) + ignored. + + Notes + ----- + subgraph(G) calls G.subgraph() + """ + return G.subgraph(nbunch) + + +def induced_subgraph(G, nbunch): + """Returns a SubGraph view of `G` showing only nodes in nbunch. + + The induced subgraph of a graph on a set of nodes N is the + graph with nodes N and edges from G which have both ends in N. + + Parameters + ---------- + G : NetworkX Graph + nbunch : node, container of nodes or None (for all nodes) + + Returns + ------- + subgraph : SubGraph View + A read-only view of the subgraph in `G` induced by the nodes. + Changes to the graph `G` will be reflected in the view. + + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + For an inplace reduction of a graph to a subgraph you can remove nodes: + `G.remove_nodes_from(n in G if n not in set(nbunch))` + + If you are going to compute subgraphs of your subgraphs you could + end up with a chain of views that can be very slow once the chain + has about 15 views in it. If they are all induced subgraphs, you + can short-cut the chain by making them all subgraphs of the original + graph. The graph class method `G.subgraph` does this when `G` is + a subgraph. In contrast, this function allows you to choose to build + chains or not, as you wish. The returned subgraph is a view on `G`. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = nx.induced_subgraph(G, [0, 1, 3]) + >>> list(H.edges) + [(0, 1)] + >>> list(H.nodes) + [0, 1, 3] + """ + induced_nodes = nx.filters.show_nodes(G.nbunch_iter(nbunch)) + return nx.graphviews.subgraph_view(G, induced_nodes) + + +def edge_subgraph(G, edges): + """Returns a view of the subgraph induced by the specified edges. + + The induced subgraph contains each edge in `edges` and each + node incident to any of those edges. + + Parameters + ---------- + G : NetworkX Graph + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only edge-induced subgraph of `G`. + Changes to `G` are reflected in the view. + + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you can end up + with a chain of subgraphs that becomes very slow with about 15 + nested subgraph views. Luckily the edge_subgraph filter nests + nicely so you can use the original graph as G in this function + to avoid chains. 
We do not rule out chains programmatically so + that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + """ + nxf = nx.filters + edges = set(edges) + nodes = set() + for e in edges: + nodes.update(e[:2]) + induced_nodes = nxf.show_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + induced_edges = nxf.show_multidiedges(edges) + else: + induced_edges = nxf.show_multiedges(edges) + else: + if G.is_directed(): + induced_edges = nxf.show_diedges(edges) + else: + induced_edges = nxf.show_edges(edges) + return nx.graphviews.subgraph_view(G, induced_nodes, induced_edges) + + +def restricted_view(G, nodes, edges): + """Returns a view of `G` with hidden nodes and edges. + + The resulting subgraph filters out node `nodes` and edges `edges`. + Filtered out nodes also filter out any of their edges. + + Parameters + ---------- + G : NetworkX Graph + nodes : iterable + An iterable of nodes. Nodes not present in `G` are ignored. + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only restricted view of `G` filtering out nodes and edges. + Changes to `G` are reflected in the view. + + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you may end up + with a chain of subgraph views. Such chains can get quite slow + for lengths near 15. To avoid long chains, try to make your subgraph + based on the original graph. We do not rule out chains programmatically + so that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = nx.restricted_view(G, [0], [(1, 2), (3, 4)]) + >>> list(H.nodes) + [1, 2, 3, 4] + >>> list(H.edges) + [(2, 3)] + """ + nxf = nx.filters + hide_nodes = nxf.hide_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + hide_edges = nxf.hide_multidiedges(edges) + else: + hide_edges = nxf.hide_multiedges(edges) + else: + if G.is_directed(): + hide_edges = nxf.hide_diedges(edges) + else: + hide_edges = nxf.hide_edges(edges) + return nx.graphviews.subgraph_view(G, hide_nodes, hide_edges) + + +def to_directed(graph): + """Returns a directed view of the graph `graph`. + + Identical to graph.to_directed(as_view=True) + Note that graph.to_directed defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_directed(as_view=True) + + +def to_undirected(graph): + """Returns an undirected view of the graph `graph`. + + Identical to graph.to_undirected(as_view=True) + Note that graph.to_undirected defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_undirected(as_view=True) + + +def create_empty_copy(G, with_data=True): + """Returns a copy of the graph G with all of the edges removed. + + Parameters + ---------- + G : graph + A NetworkX graph + + with_data : bool (default=True) + Propagate Graph and Nodes data to the new graph. + + See Also + -------- + empty_graph + + """ + H = G.__class__() + H.add_nodes_from(G.nodes(data=with_data)) + if with_data: + H.graph.update(G.graph) + return H + + +def info(G, n=None): + """Return a summary of information for the graph G or a single node n. 
+ + The summary includes the number of nodes and edges, or neighbours for a single + node. + + Parameters + ---------- + G : Networkx graph + A graph + n : node (any hashable) + A node in the graph G + + Returns + ------- + info : str + A string containing the short summary + + Raises + ------ + NetworkXError + If n is not in the graph G + + .. deprecated:: 2.7 + ``info`` is deprecated and will be removed in NetworkX 3.0. + """ + import warnings + + warnings.warn( + ("info is deprecated and will be removed in version 3.0.\n"), + DeprecationWarning, + stacklevel=2, + ) + if n is None: + return str(G) + if n not in G: + raise nx.NetworkXError(f"node {n} not in graph") + info = "" # append this all to a string + info += f"Node {n} has the following properties:\n" + info += f"Degree: {G.degree(n)}\n" + info += "Neighbors: " + info += " ".join(str(nbr) for nbr in G.neighbors(n)) + return info + + +def set_node_attributes(G, values, name=None): + """Sets node attributes from a given value or dictionary of values. + + .. Warning:: The call order of arguments `values` and `name` + switched between v1.x & v2.x. + + Parameters + ---------- + G : NetworkX Graph + + values : scalar value, dict-like + What the node attribute should be set to. If `values` is + not a dictionary, then it is treated as a single attribute value + that is then applied to every node in `G`. This means that if + you provide a mutable object, like a list, updates to that object + will be reflected in the node attribute for every node. + The attribute name will be `name`. + + If `values` is a dict or a dict of dict, it should be keyed + by node to either an attribute value or a dict of attribute key/value + pairs used to update the node's attributes. + + name : string (optional, default=None) + Name of the node attribute to set if values is a scalar. 
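Before the fuller examples below, a one-line sketch of the scalar form just described, which applies a single value to every node (assuming `networkx` is imported as `nx`):

    >>> G = nx.path_graph(2)
    >>> nx.set_node_attributes(G, 1.0, name="weight")
    >>> G.nodes[0]["weight"]
    1.0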
+ + Examples + -------- + After computing some property of the nodes of a graph, you may want + to assign a node attribute to store the value of that property for + each node:: + + >>> G = nx.path_graph(3) + >>> bb = nx.betweenness_centrality(G) + >>> isinstance(bb, dict) + True + >>> nx.set_node_attributes(G, bb, "betweenness") + >>> G.nodes[1]["betweenness"] + 1.0 + + If you provide a list as the second argument, updates to the list + will be reflected in the node attribute for each node:: + + >>> G = nx.path_graph(3) + >>> labels = [] + >>> nx.set_node_attributes(G, labels, "labels") + >>> labels.append("foo") + >>> G.nodes[0]["labels"] + ['foo'] + >>> G.nodes[1]["labels"] + ['foo'] + >>> G.nodes[2]["labels"] + ['foo'] + + If you provide a dictionary of dictionaries as the second argument, + the outer dictionary is assumed to be keyed by node to an inner + dictionary of node attributes for that node:: + + >>> G = nx.path_graph(3) + >>> attrs = {0: {"attr1": 20, "attr2": "nothing"}, 1: {"attr2": 3}} + >>> nx.set_node_attributes(G, attrs) + >>> G.nodes[0]["attr1"] + 20 + >>> G.nodes[0]["attr2"] + 'nothing' + >>> G.nodes[1]["attr2"] + 3 + >>> G.nodes[2] + {} + + Note that if the dictionary contains nodes that are not in `G`, the + values are silently ignored:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> nx.set_node_attributes(G, {0: "red", 1: "blue"}, name="color") + >>> G.nodes[0]["color"] + 'red' + >>> 1 in G.nodes + False + + """ + # Set node attributes based on type of `values` + if name is not None: # `values` must not be a dict of dict + try: # `values` is a dict + for n, v in values.items(): + try: + G.nodes[n][name] = values[n] + except KeyError: + pass + except AttributeError: # `values` is a constant + for n in G: + G.nodes[n][name] = values + else: # `values` must be dict of dict + for n, d in values.items(): + try: + G.nodes[n].update(d) + except KeyError: + pass + + +def get_node_attributes(G, name): + """Get node attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + Returns + ------- + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([1, 2, 3], color="red") + >>> color = nx.get_node_attributes(G, "color") + >>> color[1] + 'red' + """ + return {n: d[name] for n, d in G.nodes.items() if name in d} + + +def set_edge_attributes(G, values, name=None): + """Sets edge attributes from a given value or dictionary of values. + + .. Warning:: The call order of arguments `values` and `name` + switched between v1.x & v2.x. + + Parameters + ---------- + G : NetworkX Graph + + values : scalar value, dict-like + What the edge attribute should be set to. If `values` is + not a dictionary, then it is treated as a single attribute value + that is then applied to every edge in `G`. This means that if + you provide a mutable object, like a list, updates to that object + will be reflected in the edge attribute for each edge. The attribute + name will be `name`. + + If `values` is a dict or a dict of dict, it should be keyed + by edge tuple to either an attribute value or a dict of attribute + key/value pairs used to update the edge's attributes. + For multigraphs, the edge tuples must be of the form ``(u, v, key)``, + where `u` and `v` are nodes and `key` is the edge key. + For non-multigraphs, the keys must be tuples of the form ``(u, v)``. + + name : string (optional, default=None) + Name of the edge attribute to set if values is a scalar. 
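To make the multigraph key requirement just described concrete, a small sketch (assuming `networkx` is imported as `nx`; `add_edge` on a multigraph returns the new edge's key):

    >>> MG = nx.MultiGraph()
    >>> key = MG.add_edge(1, 2)
    >>> nx.set_edge_attributes(MG, {(1, 2, key): {"weight": 7}})
    >>> MG[1][2][key]["weight"]
    7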
+ + Examples + -------- + After computing some property of the edges of a graph, you may want + to assign a edge attribute to store the value of that property for + each edge:: + + >>> G = nx.path_graph(3) + >>> bb = nx.edge_betweenness_centrality(G, normalized=False) + >>> nx.set_edge_attributes(G, bb, "betweenness") + >>> G.edges[1, 2]["betweenness"] + 2.0 + + If you provide a list as the second argument, updates to the list + will be reflected in the edge attribute for each edge:: + + >>> labels = [] + >>> nx.set_edge_attributes(G, labels, "labels") + >>> labels.append("foo") + >>> G.edges[0, 1]["labels"] + ['foo'] + >>> G.edges[1, 2]["labels"] + ['foo'] + + If you provide a dictionary of dictionaries as the second argument, + the entire dictionary will be used to update edge attributes:: + + >>> G = nx.path_graph(3) + >>> attrs = {(0, 1): {"attr1": 20, "attr2": "nothing"}, (1, 2): {"attr2": 3}} + >>> nx.set_edge_attributes(G, attrs) + >>> G[0][1]["attr1"] + 20 + >>> G[0][1]["attr2"] + 'nothing' + >>> G[1][2]["attr2"] + 3 + + Note that if the dict contains edges that are not in `G`, they are + silently ignored:: + + >>> G = nx.Graph([(0, 1)]) + >>> nx.set_edge_attributes(G, {(1, 2): {"weight": 2.0}}) + >>> (1, 2) in G.edges() + False + + """ + if name is not None: + # `values` does not contain attribute names + try: + # if `values` is a dict using `.items()` => {edge: value} + if G.is_multigraph(): + for (u, v, key), value in values.items(): + try: + G[u][v][key][name] = value + except KeyError: + pass + else: + for (u, v), value in values.items(): + try: + G[u][v][name] = value + except KeyError: + pass + except AttributeError: + # treat `values` as a constant + for u, v, data in G.edges(data=True): + data[name] = values + else: + # `values` consists of doct-of-dict {edge: {attr: value}} shape + if G.is_multigraph(): + for (u, v, key), d in values.items(): + try: + G[u][v][key].update(d) + except KeyError: + pass + else: + for (u, v), d in values.items(): + try: + G[u][v].update(d) + except KeyError: + pass + + +def get_edge_attributes(G, name): + """Get edge attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + Returns + ------- + Dictionary of attributes keyed by edge. For (di)graphs, the keys are + 2-tuples of the form: (u, v). For multi(di)graphs, the keys are 3-tuples of + the form: (u, v, key). + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [1, 2, 3], color="red") + >>> color = nx.get_edge_attributes(G, "color") + >>> color[(1, 2)] + 'red' + """ + if G.is_multigraph(): + edges = G.edges(keys=True, data=True) + else: + edges = G.edges(data=True) + return {x[:-1]: x[-1][name] for x in edges if name in x[-1]} + + +def all_neighbors(graph, node): + """Returns all of the neighbors of a node in the graph. + + If the graph is directed returns predecessors as well as successors. + + Parameters + ---------- + graph : NetworkX graph + Graph to find neighbors. + + node : node + The node whose neighbors will be returned. + + Returns + ------- + neighbors : iterator + Iterator of neighbors + """ + if graph.is_directed(): + values = chain(graph.predecessors(node), graph.successors(node)) + else: + values = graph.neighbors(node) + return values + + +def non_neighbors(graph, node): + """Returns the non-neighbors of the node in the graph. + + Parameters + ---------- + graph : NetworkX graph + Graph to find neighbors. + + node : node + The node whose neighbors will be returned. 
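A small sketch of the directed case described above, where `all_neighbors` chains predecessors and successors while `neighbors` reports successors only (assuming `networkx` is imported as `nx`):

    >>> D = nx.DiGraph([(1, 2), (3, 2)])
    >>> sorted(nx.all_neighbors(D, 2))
    [1, 3]
    >>> sorted(D.neighbors(2))  # successors only
    []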
+ + Returns + ------- + non_neighbors : iterator + Iterator of nodes in the graph that are not neighbors of the node. + """ + nbors = set(neighbors(graph, node)) | {node} + return (nnode for nnode in graph if nnode not in nbors) + + +def non_edges(graph): + """Returns the non-existent edges in the graph. + + Parameters + ---------- + graph : NetworkX graph. + Graph to find non-existent edges. + + Returns + ------- + non_edges : iterator + Iterator of edges that are not in the graph. + """ + if graph.is_directed(): + for u in graph: + for v in non_neighbors(graph, u): + yield (u, v) + else: + nodes = set(graph) + while nodes: + u = nodes.pop() + for v in nodes - set(graph[u]): + yield (u, v) + + +@not_implemented_for("directed") +def common_neighbors(G, u, v): + """Returns the common neighbors of two nodes in a graph. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + u, v : nodes + Nodes in the graph. + + Returns + ------- + cnbors : iterator + Iterator of common neighbors of u and v in the graph. + + Raises + ------ + NetworkXError + If u or v is not a node in the graph. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> sorted(nx.common_neighbors(G, 0, 1)) + [2, 3, 4] + """ + if u not in G: + raise nx.NetworkXError("u is not in the graph.") + if v not in G: + raise nx.NetworkXError("v is not in the graph.") + + # Return a generator explicitly instead of yielding so that the above + # checks are executed eagerly. + return (w for w in G[u] if w in G[v] and w not in (u, v)) + + +def is_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.is_weighted(G) + False + >>> nx.is_weighted(G, (2, 3)) + False + + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2, weight=1) + >>> nx.is_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data + + if is_empty(G): + # Special handling required since: all([]) == True + return False + + return all(weight in data for u, v, data in G.edges(data=True)) + + +def is_negatively_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has negatively weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is negatively + weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 3), (2, 4), (2, 6)]) + >>> G.add_edge(1, 2, weight=4) + >>> nx.is_negatively_weighted(G, (1, 2)) + False + >>> G[2][4]["weight"] = -2 + >>> nx.is_negatively_weighted(G) + True + >>> G = nx.DiGraph() + >>> edges = [("0", "3", 3), ("0", "1", -5), ("1", "0", -2)] + >>> G.add_weighted_edges_from(edges) + >>> nx.is_negatively_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data and data[weight] < 0 + + return any(weight in data and data[weight] < 0 for u, v, data in G.edges(data=True)) + + +def is_empty(G): + """Returns True if `G` has no edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + bool + True if `G` has no edges, and False otherwise. + + Notes + ----- + An empty graph can have nodes but not edges. The empty graph with zero + nodes is known as the null graph. This is an $O(n)$ operation where n + is the number of nodes in the graph. + + """ + return not any(G.adj.values()) + + +def nodes_with_selfloops(G): + """Returns an iterator over nodes with self loops. + + A node with a self loop has an edge with both ends adjacent + to that node. + + Returns + ------- + nodelist : iterator + A iterator over nodes with self loops. + + See Also + -------- + selfloop_edges, number_of_selfloops + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> list(nx.nodes_with_selfloops(G)) + [1] + + """ + return (n for n, nbrs in G.adj.items() if n in nbrs) + + +def selfloop_edges(G, data=False, keys=False, default=None): + """Returns an iterator over selfloop edges. + + A selfloop edge has the same node at both ends. + + Parameters + ---------- + G : graph + A NetworkX graph. + data : string or bool, optional (default=False) + Return selfloop edges as two tuples (u, v) (data=False) + or three-tuples (u, v, datadict) (data=True) + or three-tuples (u, v, datavalue) (data='attrname') + keys : bool, optional (default=False) + If True, return edge keys with each edge. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edgeiter : iterator over edge tuples + An iterator over all selfloop edges. 
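One more small sketch of the `data`/`default` combination described above, returning a chosen attribute per self-loop (assuming `networkx` is imported as `nx`):

    >>> G = nx.Graph()
    >>> G.add_edge(1, 1, weight=2)
    >>> G.add_edge(2, 2)
    >>> sorted(nx.selfloop_edges(G, data="weight", default=0))
    [(1, 1, 2), (2, 2, 0)]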
+ + See Also + -------- + nodes_with_selfloops, number_of_selfloops + + Examples + -------- + >>> G = nx.MultiGraph() # or Graph, DiGraph, MultiDiGraph, etc + >>> ekey = G.add_edge(1, 1) + >>> ekey = G.add_edge(1, 2) + >>> list(nx.selfloop_edges(G)) + [(1, 1)] + >>> list(nx.selfloop_edges(G, data=True)) + [(1, 1, {})] + >>> list(nx.selfloop_edges(G, keys=True)) + [(1, 1, 0)] + >>> list(nx.selfloop_edges(G, keys=True, data=True)) + [(1, 1, 0, {})] + """ + if data is True: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d) + for n, nbrs in G.adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d) + for n, nbrs in G.adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ((n, n, nbrs[n]) for n, nbrs in G.adj.items() if n in nbrs) + elif data is not False: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d.get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d.get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ( + (n, n, nbrs[n].get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + ) + else: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k) for n, nbrs in G.adj.items() if n in nbrs for k in nbrs[n] + ) + else: + return ( + (n, n) + for n, nbrs in G.adj.items() + if n in nbrs + for i in range(len(nbrs[n])) # for easy edge removal (#4068) + ) + else: + return ((n, n) for n, nbrs in G.adj.items() if n in nbrs) + + +def number_of_selfloops(G): + """Returns the number of selfloop edges. + + A selfloop edge has the same node at both ends. + + Returns + ------- + nloops : int + The number of selfloops. + + See Also + -------- + nodes_with_selfloops, selfloop_edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> nx.number_of_selfloops(G) + 1 + """ + return sum(1 for _ in nx.selfloop_edges(G)) + + +def is_path(G, path): + """Returns whether or not the specified path exists + + Parameters + ---------- + G : graph + A NetworkX graph. + + path: list + A list of node labels which defines the path to traverse + + Returns + ------- + isPath: bool + A boolean representing whether or not the path exists + + """ + for node, nbr in nx.utils.pairwise(path): + if nbr not in G[node]: + return False + return True + + +def path_weight(G, path, weight): + """Returns total cost associated with specified path and weight + + Parameters + ---------- + G : graph + A NetworkX graph. + + path: list + A list of node labels which defines the path to traverse + + weight: string + A string indicating which edge attribute to use for path cost + + Returns + ------- + cost: int or float + An integer or a float representing the total cost with respect to the + specified weight of the specified path + + Raises + ------ + NetworkXNoPath + If the specified edge does not exist. 
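For concreteness, a minimal sketch of `is_path` and `path_weight` together (assuming `networkx` is imported as `nx`):

    >>> G = nx.Graph()
    >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 3)])
    >>> nx.is_path(G, [0, 1, 2])
    True
    >>> nx.path_weight(G, [0, 1, 2], weight="weight")
    5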
+ """ + multigraph = G.is_multigraph() + cost = 0 + + if not nx.is_path(G, path): + raise nx.NetworkXNoPath("path does not exist") + for node, nbr in nx.utils.pairwise(path): + if multigraph: + cost += min(v[weight] for v in G[node][nbr].values()) + else: + cost += G[node][nbr][weight] + return cost diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/graph.py b/myenv/lib/python3.9/site-packages/networkx/classes/graph.py new file mode 100644 index 0000000..b5c3b79 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/graph.py @@ -0,0 +1,1940 @@ +"""Base class for undirected graphs. + +The Graph class allows any hashable object as a node +and can associate key/value attribute pairs with each undirected edge. + +Self-loops are allowed but multiple edges are not (see MultiGraph). + +For directed graphs see DiGraph and MultiDiGraph. +""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +import networkx.convert as convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.reportviews import DegreeView, EdgeView, NodeView +from networkx.exception import NetworkXError + +__all__ = ["Graph"] + + +class _CachedPropertyResetterAdj: + """Data Descriptor class for _adj that resets ``adj`` cached_property when needed + + This assumes that the ``cached_property`` ``G.adj`` should be reset whenever + ``G._adj`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "adj" whenever the underlying + instance attribute "_adj" is set to a new object. It only affects + the set process of the obj._adj attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + if "adj" in od: + del od["adj"] + + +class Graph: + """ + Base class for undirected graphs. + + A Graph stores nodes and edges with optional data, or attributes. + + Graphs hold undirected edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes, except that `None` is not allowed as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + DiGraph + MultiGraph + MultiDiGraph + OrderedGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.Graph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). 
+ + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.Graph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 # node must exist already to use G.nodes + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, weight in G.edges.data("weight"): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting typically provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. 
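A small sketch of the view-based reporting just described, using `nodes.data` with a default value (assuming `networkx` is imported as `nx`):

    >>> G = nx.Graph()
    >>> G.add_node(1, color="red")
    >>> G.add_node(2)
    >>> list(G.nodes.data("color", default="blue"))
    [(1, 'red'), (2, 'blue')]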
+ + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, (default: dict) + Factory function to be used to create the adjacency list + dict which holds edge data keyed by neighbor. + It should require no arguments and return a dict-like object + + edge_attr_dict_factory : function, (default: dict) + Factory function to be used to create the edge attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + graph_attr_dict_factory : function, (default: dict) + Factory function to be used to create the graph attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + Typically, if your extension doesn't impact the data structure all + methods will inherit without issue except: `to_directed/to_undirected`. + By default these methods create a DiGraph/Graph class and you probably + want them to create your extension of a DiGraph/Graph. To facilitate + this we define two class variables that you can set in your subclass. + + to_directed_class : callable, (default: DiGraph or MultiDiGraph) + Class to create a new graph structure in the `to_directed` method. + If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used. + + to_undirected_class : callable, (default: Graph or MultiGraph) + Class to create a new graph structure in the `to_undirected` method. + If `None`, a NetworkX class (Graph or MultiGraph) is used. + + **Subclassing Example** + + Create a low memory graph class that effectively disallows edge + attributes by using a single attribute dict for all edges. + This reduces the memory used, but you lose edge attributes. + + >>> class ThinGraph(nx.Graph): + ... all_edge_dict = {"weight": 1} + ... + ... def single_edge_dict(self): + ... return self.all_edge_dict + ... + ... 
edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+
+    Please see :mod:`~networkx.classes.ordered` for more examples of
+    creating graph subclasses by overwriting the base class `dict` with
+    a dictionary-like object.
+    """
+
+    _adj = _CachedPropertyResetterAdj()
+
+    node_dict_factory = dict
+    node_attr_dict_factory = dict
+    adjlist_outer_dict_factory = dict
+    adjlist_inner_dict_factory = dict
+    edge_attr_dict_factory = dict
+    graph_attr_dict_factory = dict
+
+    def to_directed_class(self):
+        """Returns the class to use for empty directed copies.
+
+        If you subclass the base classes, use this to designate
+        what directed class to use for `to_directed()` copies.
+        """
+        return nx.DiGraph
+
+    def to_undirected_class(self):
+        """Returns the class to use for empty undirected copies.
+
+        If you subclass the base classes, use this to designate
+        what undirected class to use for `to_undirected()` copies.
+        """
+        return Graph
+
+    def __init__(self, incoming_graph_data=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph (optional, default: None)
+            Data to initialize graph. If None (default) an empty
+            graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse matrix, or a PyGraphviz graph.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
+
+        See Also
+        --------
+        convert
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G = nx.Graph(name="my graph")
+        >>> e = [(1, 2), (2, 3), (3, 4)]  # list of edges
+        >>> G = nx.Graph(e)
+
+        Arbitrary graph attribute pairs (key=value) may be assigned
+
+        >>> G = nx.Graph(e, day="Friday")
+        >>> G.graph
+        {'day': 'Friday'}
+
+        """
+        self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
+        self._node = self.node_dict_factory()  # empty node attribute dict
+        self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict
+        # attempt to load graph with data
+        if incoming_graph_data is not None:
+            convert.to_networkx_graph(incoming_graph_data, create_using=self)
+        # load graph attributes (must be after convert)
+        self.graph.update(attr)
+
+    @cached_property
+    def adj(self):
+        """Graph adjacency object holding the neighbors of each node.
+
+        This object is a read-only dict-like structure with node keys
+        and neighbor-dict values. The neighbor-dict is keyed by neighbor
+        to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets
+        the color of the edge `(3, 2)` to `"blue"`.
+
+        Iterating over G.adj behaves like a dict. Useful idioms include
+        `for nbr, datadict in G.adj[n].items():`.
+
+        The neighbor information is also provided by subscripting the graph.
+        So `for nbr, foovalue in G[node].data('foo', default=1):` works.
+
+        For directed graphs, `G.adj` holds outgoing (successor) info.
+        """
+        return AdjacencyView(self._adj)
+
+    @property
+    def name(self):
+        """String identifier of the graph.
+
+        This graph attribute appears in the attribute dict G.graph
+        keyed by the string `"name"` as well as an attribute (technically
+        a property) `G.name`. This is entirely user controlled.
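+
+        A short usage sketch:
+
+        >>> G = nx.Graph(name="example")
+        >>> G.name
+        'example'
+        >>> G.name = "renamed"
+        >>> G.graph["name"]
+        'renamed'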
+ """ + return self.graph.get("name", "") + + @name.setter + def name(self, s): + self.graph["name"] = s + + def __str__(self): + """Returns a short summary of the graph. + + Returns + ------- + info : string + Graph information as provided by `nx.info` + + Examples + -------- + >>> G = nx.Graph(name="foo") + >>> str(G) + "Graph named 'foo' with 0 nodes and 0 edges" + + >>> G = nx.path_graph(3) + >>> str(G) + 'Graph with 3 nodes and 2 edges' + + """ + return "".join( + [ + type(self).__name__, + f" named {self.name!r}" if self.name else "", + f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges", + ] + ) + + def __iter__(self): + """Iterate over the nodes. Use: 'for n in G'. + + Returns + ------- + niter : iterator + An iterator over all nodes in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [n for n in G] + [0, 1, 2, 3] + >>> list(G) + [0, 1, 2, 3] + """ + return iter(self._node) + + def __contains__(self, n): + """Returns True if n is a node, False otherwise. Use: 'n in G'. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> 1 in G + True + """ + try: + return n in self._node + except TypeError: + return False + + def __len__(self): + """Returns the number of nodes in the graph. Use: 'len(G)'. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes: identical method + order: identical method + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> len(G) + 4 + + """ + return len(self._node) + + def __getitem__(self, n): + """Returns a dict of neighbors of node n. Use: 'G[n]'. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + Notes + ----- + G[n] is the same as G.adj[n] and similar to G.neighbors(n) + (which is an iterator over G.adj[n]) + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G[0] + AtlasView({1: {}}) + """ + return self.adj[n] + + def add_node(self, node_for_adding, **attr): + """Add a single node `node_for_adding` and update node attributes. + + Parameters + ---------- + node_for_adding : node + A node can be any hashable Python object except None. + attr : keyword arguments, optional + Set or change node attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1, size=10) + >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. 
+ """ + if node_for_adding not in self._node: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._adj[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else: # update attr even if node already exists + self._node[node_for_adding].update(attr) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._adj[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a non-existent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + adj = self._adj + try: + nbrs = list(adj[n]) # list handles self-loops (allows mutation) + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the graph.") from err + for u in nbrs: + del adj[u][n] # remove all edges n-u in graph + del adj[n] # now remove node + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently + ignored. 
+ + See Also + -------- + remove_node + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + """ + adj = self._adj + for n in nodes: + try: + del self._node[n] + for u in list(adj[n]): # list handles self-loops + del adj[u][n] # (allows mutation of dict in loop) + del adj[n] + except KeyError: + pass + + @cached_property + def nodes(self): + """A NodeView of the Graph as G.nodes or G.nodes(). + + Can be used as `G.nodes` for data lookup and for set-like operations. + Can also be used as `G.nodes(data='color', default=None)` to return a + NodeDataView which reports specific node data but no set operations. + It presents a dict-like interface as well with `G.nodes.items()` + iterating over `(node, nodedata)` 2-tuples and `G.nodes[3]['foo']` + providing the value of the `foo` attribute for node `3`. In addition, + a view `G.nodes.data('foo')` provides a dict-like interface to the + `foo` attribute of each node. `G.nodes.data('foo', default=1)` + provides a default for nodes that do not have attribute `foo`. + + Parameters + ---------- + data : string or bool, optional (default=False) + The node attribute returned in 2-tuple (n, ddict[data]). + If True, return entire node attribute dict as (n, ddict). + If False, return just the nodes n. + + default : value, optional (default=None) + Value used for nodes that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + NodeView + Allows set-like operations over the nodes as well as node + attribute dict lookup and calling to get a NodeDataView. + A NodeDataView iterates over `(n, data)` and has no set operations. + A NodeView iterates over `n` and includes set operations. + + When called, if data is False, an iterator over nodes. + Otherwise an iterator of 2-tuples (node, attribute value) + where the attribute is specified in `data`. + If data is True then the attribute becomes the + entire data dictionary. + + Notes + ----- + If your node data is not needed, it is simpler and equivalent + to use the expression ``for n in G``, or ``list(G)``. 
+ + Examples + -------- + There are two simple ways of getting a list of all nodes in the graph: + + >>> G = nx.path_graph(3) + >>> list(G.nodes) + [0, 1, 2] + >>> list(G) + [0, 1, 2] + + To get the node data along with the nodes: + + >>> G.add_node(1, time="5pm") + >>> G.nodes[0]["foo"] = "bar" + >>> list(G.nodes(data=True)) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + >>> list(G.nodes.data()) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + + >>> list(G.nodes(data="foo")) + [(0, 'bar'), (1, None), (2, None)] + >>> list(G.nodes.data("foo")) + [(0, 'bar'), (1, None), (2, None)] + + >>> list(G.nodes(data="time")) + [(0, None), (1, '5pm'), (2, None)] + >>> list(G.nodes.data("time")) + [(0, None), (1, '5pm'), (2, None)] + + >>> list(G.nodes(data="time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + >>> list(G.nodes.data("time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + + If some of your nodes have an attribute and the rest are assumed + to have a default attribute value you can create a dictionary + from node/attribute pairs using the `default` keyword argument + to guarantee the value is never None:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> G.add_node(1, weight=2) + >>> G.add_node(2, weight=3) + >>> dict(G.nodes(data="weight", default=1)) + {0: 1, 1: 2, 2: 3} + + """ + return NodeView(self) + + def number_of_nodes(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + order: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.number_of_nodes() + 3 + """ + return len(self._node) + + def order(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.order() + 3 + """ + return len(self._node) + + def has_node(self, n): + """Returns True if the graph contains the node n. + + Identical to `n in G` + + Parameters + ---------- + n : node + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.has_node(0) + True + + It is more readable and simpler to use + + >>> 0 in G + True + + """ + try: + return n in self._node + except TypeError: + return False + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. 
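+
+        A small sketch of the `weight` convention (illustrative values):
+
+        >>> G = nx.Graph()
+        >>> G.add_edge("a", "b", weight=2.5)
+        >>> nx.shortest_path_length(G, "a", "b", weight="weight")
+        2.5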
+ + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} # doesn't need edge_attr_dict_factory + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + + def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr): + """Add weighted edges in `ebunch_to_add` with specified weight attr + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the list or container will be added + to the graph. The edges must be given as 3-tuples (u, v, w) + where w is a number. 
+ weight : string, optional (default= 'weight') + The attribute name for the edge weights to be added. + attr : keyword arguments, optional (default= no attributes) + Edge attributes to add/update for all edges. + + See Also + -------- + add_edge : add a single edge + add_edges_from : add multiple edges + + Notes + ----- + Adding the same edge twice for Graph/DiGraph simply updates + the edge data. For MultiGraph/MultiDiGraph, duplicate edges + are stored. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)]) + """ + self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch_to_add), **attr) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, etc + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + >>> e = (2, 3, {"weight": 7}) # an edge with attribute data + >>> G.remove_edge(*e[:2]) # select first part of edge tuple + """ + try: + del self._adj[u][v] + if u != v: # self-loop needs only one entry removed + del self._adj[v][u] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph") from err + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) edge between u and v. + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + adj = self._adj + for e in ebunch: + u, v = e[:2] # ignore edge data if present + if u in adj and v in adj[u]: + del adj[u][v] + if u != v: # self loop needs only one entry removed + del adj[v][u] + + def update(self, edges=None, nodes=None): + """Update the graph using nodes/edges/graphs as input. + + Like dict.update, this method takes a graph as input, adding the + graph's nodes and edges to this graph. It can also take two inputs: + edges and nodes. Finally it can take either edges or nodes. + To specify only nodes the keyword `nodes` must be used. + + The collections of edges and nodes are treated similarly to + the add_edges_from/add_nodes_from methods. When iterated, they + should yield 2-tuples (u, v) or 3-tuples (u, v, datadict). + + Parameters + ---------- + edges : Graph object, collection of edges, or None + The first parameter can be a graph or some edges. If it has + attributes `nodes` and `edges`, then it is taken to be a + Graph-like object and those attributes are used as collections + of nodes and edges to be added to the graph. + If the first parameter does not have those attributes, it is + treated as a collection of edges and added to the graph. + If the first argument is None, no edges are added. + nodes : collection of nodes, or None + The second parameter is treated as a collection of nodes + to be added to the graph unless it is None. 
+        If `edges is None` and `nodes is None` an exception is raised.
+        If the first parameter is a Graph, then `nodes` is ignored.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(5)
+        >>> G.update(nx.complete_graph(range(4, 10)))
+        >>> from itertools import combinations
+        >>> edges = (
+        ...     (u, v, {"power": u * v})
+        ...     for u, v in combinations(range(10, 20), 2)
+        ...     if u * v < 225
+        ... )
+        >>> nodes = [1000]  # for singleton, use a container
+        >>> G.update(edges, nodes)
+
+        Notes
+        -----
+        If you want to update the graph using an adjacency structure
+        it is straightforward to obtain the edges/nodes from adjacency.
+        The following examples provide common cases; your adjacency may
+        be slightly different and require tweaks of these examples::
+
+        >>> # dict-of-set/list/tuple
+        >>> adj = {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}
+        >>> e = [(u, v) for u, nbrs in adj.items() for v in nbrs]
+        >>> G.update(edges=e, nodes=adj)
+
+        >>> DG = nx.DiGraph()
+        >>> # dict-of-dict-of-attribute
+        >>> adj = {1: {2: 1.3, 3: 0.7}, 2: {1: 1.4}, 3: {1: 0.7}}
+        >>> e = [
+        ...     (u, v, {"weight": d})
+        ...     for u, nbrs in adj.items()
+        ...     for v, d in nbrs.items()
+        ... ]
+        >>> DG.update(edges=e, nodes=adj)
+
+        >>> # dict-of-dict-of-dict
+        >>> adj = {1: {2: {"weight": 1.3}, 3: {"color": 0.7, "weight": 1.2}}}
+        >>> e = [
+        ...     (u, v, {"weight": d})
+        ...     for u, nbrs in adj.items()
+        ...     for v, d in nbrs.items()
+        ... ]
+        >>> DG.update(edges=e, nodes=adj)
+
+        >>> # predecessor adjacency (dict-of-set)
+        >>> pred = {1: {2, 3}, 2: {3}, 3: {3}}
+        >>> e = [(v, u) for u, nbrs in pred.items() for v in nbrs]
+
+        >>> # MultiGraph dict-of-dict-of-dict-of-attribute
+        >>> MDG = nx.MultiDiGraph()
+        >>> adj = {
+        ...     1: {2: {0: {"weight": 1.3}, 1: {"weight": 1.2}}},
+        ...     3: {2: {0: {"weight": 0.7}}},
+        ... }
+        >>> e = [
+        ...     (u, v, ekey, d)
+        ...     for u, nbrs in adj.items()
+        ...     for v, keydict in nbrs.items()
+        ...     for ekey, d in keydict.items()
+        ... ]
+        >>> MDG.update(edges=e)
+
+        See Also
+        --------
+        add_edges_from: add multiple edges to a graph
+        add_nodes_from: add multiple nodes to a graph
+        """
+        if edges is not None:
+            if nodes is not None:
+                self.add_nodes_from(nodes)
+                self.add_edges_from(edges)
+            else:
+                # check if edges is a Graph object
+                try:
+                    graph_nodes = edges.nodes
+                    graph_edges = edges.edges
+                except AttributeError:
+                    # edge not Graph-like
+                    self.add_edges_from(edges)
+                else:  # edges is Graph-like
+                    self.add_nodes_from(graph_nodes.data())
+                    self.add_edges_from(graph_edges.data())
+                    self.graph.update(edges.graph)
+        elif nodes is not None:
+            self.add_nodes_from(nodes)
+        else:
+            raise NetworkXError("update needs nodes or edges input")
+
+    def has_edge(self, u, v):
+        """Returns True if the edge (u, v) is in the graph.
+
+        This is the same as `v in G[u]` without KeyError exceptions.
+
+        Parameters
+        ----------
+        u, v : nodes
+            Nodes can be, for example, strings or numbers.
+            Nodes must be hashable (and not None) Python objects.
+
+        Returns
+        -------
+        edge_ind : bool
+            True if edge is in the graph, False otherwise.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.has_edge(0, 1)  # using two nodes
+        True
+        >>> e = (0, 1)
+        >>> G.has_edge(*e)  # e is a 2-tuple (u, v)
+        True
+        >>> e = (0, 1, {"weight": 7})
+        >>> G.has_edge(*e[:2])  # e is a 3-tuple (u, v, data_dictionary)
+        True
+
+        The following syntaxes are equivalent:
+
+        >>> G.has_edge(0, 1)
+        True
+        >>> 1 in G[0]  # though this gives KeyError if 0 not in G
+        True
+
+        """
+        try:
+            return v in self._adj[u]
+        except KeyError:
+            return False
+
+    def neighbors(self, n):
+        """Returns an iterator over all neighbors of node n.
+
+        This is identical to `iter(G[n])`
+
+        Parameters
+        ----------
+        n : node
+            A node in the graph
+
+        Returns
+        -------
+        neighbors : iterator
+            An iterator over all neighbors of node n
+
+        Raises
+        ------
+        NetworkXError
+            If the node n is not in the graph.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> [n for n in G.neighbors(0)]
+        [1]
+
+        Notes
+        -----
+        Alternate ways to access the neighbors are ``G.adj[n]`` or ``G[n]``:
+
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edge("a", "b", weight=7)
+        >>> G["a"]
+        AtlasView({'b': {'weight': 7}})
+        >>> G = nx.path_graph(4)
+        >>> [n for n in G[0]]
+        [1]
+        """
+        try:
+            return iter(self._adj[n])
+        except KeyError as err:
+            raise NetworkXError(f"The node {n} is not in the graph.") from err
+
+    @cached_property
+    def edges(self):
+        """An EdgeView of the Graph as G.edges or G.edges().
+
+        edges(self, nbunch=None, data=False, default=None)
+
+        The EdgeView provides set-like operations on the edge-tuples
+        as well as edge attribute lookup. When called, it also provides
+        an EdgeDataView object which allows control of access to edge
+        attributes (but does not provide set-like operations).
+        Hence, `G.edges[u, v]['color']` provides the value of the color
+        attribute for edge `(u, v)` while
+        `for (u, v, c) in G.edges.data('color', default='red'):`
+        iterates through all the edges yielding the color attribute
+        with default `'red'` if no color attribute exists.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges from these nodes.
+        data : string or bool, optional (default=False)
+            The edge attribute returned in 3-tuple (u, v, ddict[data]).
+            If True, return edge attribute dict in 3-tuple (u, v, ddict).
+            If False, return 2-tuple (u, v).
+        default : value, optional (default=None)
+            Value used for edges that don't have the requested attribute.
+            Only relevant if data is not True or False.
+
+        Returns
+        -------
+        edges : EdgeView
+            A view of edge attributes; usually it iterates over (u, v)
+            or (u, v, d) tuples of edges, but can also be used for
+            attribute lookup as `edges[u, v]['foo']`.
+
+        Notes
+        -----
+        Nodes in nbunch that are not in the graph will be (quietly) ignored.
+        For directed graphs this returns the out-edges.
+ + Examples + -------- + >>> G = nx.path_graph(3) # or MultiGraph, etc + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + EdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + EdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 3]) # only edges from these nodes + EdgeDataView([(0, 1), (3, 2)]) + >>> G.edges(0) # only edges from node 0 + EdgeDataView([(0, 1)]) + """ + return EdgeView(self) + + def get_edge_data(self, u, v, default=None): + """Returns the attribute dictionary associated with edge (u, v). + + This is identical to `G[u][v]` except the default is returned + instead of an exception if the edge doesn't exist. + + Parameters + ---------- + u, v : nodes + default: any Python object (default=None) + Value to return if the edge (u, v) is not found. + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G[0][1] + {} + + Warning: Assigning to `G[u][v]` is not permitted. + But it is safe to assign attributes `G[u][v]['foo']` + + >>> G[0][1]["weight"] = 7 + >>> G[0][1]["weight"] + 7 + >>> G[1][0]["weight"] + 7 + + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.get_edge_data(0, 1) # default edge data is {} + {} + >>> e = (0, 1) + >>> G.get_edge_data(*e) # tuple form + {} + >>> G.get_edge_data("a", "b", default=0) # edge not in graph, return 0 + 0 + """ + try: + return self._adj[u][v] + except KeyError: + return default + + def adjacency(self): + """Returns an iterator over (node, adjacency dict) tuples for all nodes. + + For directed graphs, only outgoing neighbors/adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator over (node, adjacency dictionary) for all nodes in + the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [(n, nbrdict) for n, nbrdict in G.adjacency()] + [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})] + + """ + return iter(self._adj.items()) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DegreeView or int + If multiple nodes are requested (the default), returns a `DegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.degree[0] # node 0 has degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + """ + return DegreeView(self) + + def clear(self): + """Remove all nodes and edges from the graph. 
+
+        This also removes the name, and all graph, node, and edge attributes.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.clear()
+        >>> list(G.nodes)
+        []
+        >>> list(G.edges)
+        []
+
+        """
+        self._adj.clear()
+        self._node.clear()
+        self.graph.clear()
+
+    def clear_edges(self):
+        """Remove all edges from the graph without altering nodes.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.clear_edges()
+        >>> list(G.nodes)
+        [0, 1, 2, 3]
+        >>> list(G.edges)
+        []
+        """
+        for neighbours_dict in self._adj.values():
+            neighbours_dict.clear()
+
+    def is_multigraph(self):
+        """Returns True if graph is a multigraph, False otherwise."""
+        return False
+
+    def is_directed(self):
+        """Returns True if graph is directed, False otherwise."""
+        return False
+
+    def copy(self, as_view=False):
+        """Returns a copy of the graph.
+
+        The copy method by default returns an independent shallow copy
+        of the graph and attributes. That is, if an attribute is a
+        container, that container is shared by the original and the copy.
+        Use Python's `copy.deepcopy` for new containers.
+
+        If `as_view` is True then a view is returned instead of a copy.
+
+        Notes
+        -----
+        All copies reproduce the graph structure, but data attributes
+        may be handled in different ways. There are four types of copies
+        of a graph that people might want.
+
+        Deepcopy -- A "deepcopy" copies the graph structure as well as
+        all data attributes and any objects they might contain.
+        The entire graph object is new so that changes in the copy
+        do not affect the original object. (see Python's copy.deepcopy)
+
+        Data Reference (Shallow) -- For a shallow copy the graph structure
+        is copied but the edge, node and graph attribute dicts are
+        references to those in the original graph. This saves
+        time and memory but could cause confusion if you change an attribute
+        in one graph and it changes the attribute in the other.
+        NetworkX does not provide this level of shallow copy.
+
+        Independent Shallow -- This copy creates new independent attribute
+        dicts and then does a shallow copy of the attributes. That is, any
+        attributes that are containers are shared between the new graph
+        and the original. This is exactly what `dict.copy()` provides.
+        You can obtain this style copy using:
+
+        >>> G = nx.path_graph(5)
+        >>> H = G.copy()
+        >>> H = G.copy(as_view=False)
+        >>> H = nx.Graph(G)
+        >>> H = G.__class__(G)
+
+        Fresh Data -- For fresh data, the graph structure is copied while
+        new empty data attribute dicts are created. The resulting graph
+        is independent of the original and it has no edge, node or graph
+        attributes. Fresh copies are not enabled. Instead use:
+
+        >>> H = G.__class__()
+        >>> H.add_nodes_from(G)
+        >>> H.add_edges_from(G.edges)
+
+        View -- Inspired by dict-views, graph-views act like read-only
+        versions of the original graph, providing a copy of the original
+        structure without requiring any memory for copying the information.
+
+        See the Python copy module for more information on shallow
+        and deep copies, https://docs.python.org/3/library/copy.html.
+
+        Parameters
+        ----------
+        as_view : bool, optional (default=False)
+            If True, the returned graph-view provides a read-only view
+            of the original graph without actually copying any data.
+
+        Returns
+        -------
+        G : Graph
+            A copy of the graph.
+
+        See Also
+        --------
+        to_directed: return a directed copy of the graph.
+ + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, datadict.copy()) + for u, nbrs in self._adj.items() + for v, datadict in nbrs.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : DiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, data) replaced by two directed edges + (u, v, data) and (v, u, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed Graph to use dict-like objects + in the data structure, those changes do not transfer to the + DiGraph created by this method. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_edge(0, 1) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_edge(0, 1) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1)] + """ + graph_class = self.to_directed_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, data in nbrs.items() + ) + return G + + def to_undirected(self, as_view=False): + """Returns an undirected copy of the graph. + + Parameters + ---------- + as_view : bool (optional, default=False) + If True return a view of the original undirected graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + Graph, copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar `G = nx.DiGraph(D)` which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed DiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + Graph created by this method. 
+
+        Examples
+        --------
+        >>> G = nx.path_graph(2)  # or MultiGraph, etc
+        >>> H = G.to_directed()
+        >>> list(H.edges)
+        [(0, 1), (1, 0)]
+        >>> G2 = H.to_undirected()
+        >>> list(G2.edges)
+        [(0, 1)]
+        """
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, deepcopy(d))
+            for u, nbrs in self._adj.items()
+            for v, d in nbrs.items()
+        )
+        return G
+
+    def subgraph(self, nodes):
+        """Returns a SubGraph view of the subgraph induced on `nodes`.
+
+        The induced subgraph of the graph contains the nodes in `nodes`
+        and the edges between those nodes.
+
+        Parameters
+        ----------
+        nodes : list, iterable
+            A container of nodes which will be iterated through once.
+
+        Returns
+        -------
+        G : SubGraph View
+            A subgraph view of the graph. The graph structure cannot be
+            changed but node/edge attributes can and are shared with the
+            original graph.
+
+        Notes
+        -----
+        The graph, edge and node attributes are shared with the original graph.
+        Changes to the graph structure are ruled out by the view, but changes
+        to attributes are reflected in the original graph.
+
+        To create a subgraph with its own copy of the edge/node attributes use:
+        G.subgraph(nodes).copy()
+
+        For an inplace reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nodes)])
+
+        Subgraph views are sometimes NOT what you want. In most cases where
+        you want to do more than simply look at the induced edges, it makes
+        more sense to just create the subgraph as its own graph with code like:
+
+        ::
+
+            # Create a subgraph SG based on a (possibly multigraph) G
+            SG = G.__class__()
+            SG.add_nodes_from((n, G.nodes[n]) for n in largest_wcc)
+            if SG.is_multigraph():
+                SG.add_edges_from((n, nbr, key, d)
+                    for n, nbrs in G.adj.items() if n in largest_wcc
+                    for nbr, keydict in nbrs.items() if nbr in largest_wcc
+                    for key, d in keydict.items())
+            else:
+                SG.add_edges_from((n, nbr, d)
+                    for n, nbrs in G.adj.items() if n in largest_wcc
+                    for nbr, d in nbrs.items() if nbr in largest_wcc)
+            SG.graph.update(G.graph)
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> H = G.subgraph([0, 1, 2])
+        >>> list(H.edges)
+        [(0, 1), (1, 2)]
+        """
+        induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes))
+        # if already a subgraph, don't make a chain
+        subgraph = nx.graphviews.subgraph_view
+        if hasattr(self, "_NODE_OK"):
+            return subgraph(self._graph, induced_nodes, self._EDGE_OK)
+        return subgraph(self, induced_nodes)
+
+    def edge_subgraph(self, edges):
+        """Returns the subgraph induced by the specified edges.
+
+        The induced subgraph contains each edge in `edges` and each
+        node incident to any one of those edges.
+
+        Parameters
+        ----------
+        edges : iterable
+            An iterable of edges in this graph.
+
+        Returns
+        -------
+        G : Graph
+            An edge-induced subgraph of this graph with the same edge
+            attributes.
+
+        Notes
+        -----
+        The graph, edge, and node attributes in the returned subgraph
+        view are references to the corresponding attributes in the original
+        graph. The view is read-only.
+ + To create a full graph version of the subgraph with its own copy + of the edge or node attributes, use:: + + G.edge_subgraph(edges).copy() + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + + """ + return nx.edge_subgraph(self, edges) + + def size(self, weight=None): + """Returns the number of edges or total of all edge weights. + + Parameters + ---------- + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + + Returns + ------- + size : numeric + The number of edges or + (if weight keyword is provided) the total weight sum. + + If weight is None, returns an int. Otherwise a float + (or more general numeric if the weights are more general). + + See Also + -------- + number_of_edges + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.size() + 3 + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge("a", "b", weight=2) + >>> G.add_edge("b", "c", weight=4) + >>> G.size() + 2 + >>> G.size(weight="weight") + 6.0 + """ + s = sum(d for v, d in self.degree(weight=weight)) + # If `weight` is None, the sum of the degrees is guaranteed to be + # even, so we can perform integer division and hence return an + # integer. Otherwise, the sum of the weighted degrees is not + # guaranteed to be an integer, so we perform "real" division. + return s // 2 if weight is None else s / 2 + + def number_of_edges(self, u=None, v=None): + """Returns the number of edges between two nodes. + + Parameters + ---------- + u, v : nodes, optional (default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes `u` and `v` are + specified return the number of edges between those nodes. If + the graph is directed, this only returns the number of edges + from `u` to `v`. + + See Also + -------- + size + + Examples + -------- + For undirected graphs, this method counts the total number of + edges in the graph: + + >>> G = nx.path_graph(4) + >>> G.number_of_edges() + 3 + + If you specify two nodes, this counts the total number of edges + joining the two nodes: + + >>> G.number_of_edges(0, 1) + 1 + + For directed graphs, this method can count the total number of + directed edges from `u` to `v`: + + >>> G = nx.DiGraph() + >>> G.add_edge(0, 1) + >>> G.add_edge(1, 0) + >>> G.number_of_edges(0, 1) + 1 + + """ + if u is None: + return int(self.size()) + if v in self._adj[u]: + return 1 + return 0 + + def nbunch_iter(self, nbunch=None): + """Returns an iterator over nodes contained in nbunch that are + also in the graph. + + The nodes in nbunch are checked for membership in the graph + and if not are silently ignored. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + Returns + ------- + niter : iterator + An iterator over nodes in nbunch that are also in the graph. + If nbunch is None, iterate over all nodes in the graph. + + Raises + ------ + NetworkXError + If nbunch is not a node or sequence of nodes. + If a node in nbunch is not hashable. 
+
+        See Also
+        --------
+        Graph.__iter__
+
+        Notes
+        -----
+        When nbunch is an iterator, the returned iterator yields values
+        directly from nbunch, becoming exhausted when nbunch is exhausted.
+
+        To test whether nbunch is a single node, one can use
+        "if nbunch in self:", even after processing with this routine.
+
+        If nbunch is not a node or a (possibly empty) sequence/iterator
+        or None, a :exc:`NetworkXError` is raised. Also, if any object in
+        nbunch is not hashable, a :exc:`NetworkXError` is raised.
+        """
+        if nbunch is None:  # include all nodes via iterator
+            bunch = iter(self._adj)
+        elif nbunch in self:  # if nbunch is a single node
+            bunch = iter([nbunch])
+        else:  # if nbunch is a sequence of nodes
+
+            def bunch_iter(nlist, adj):
+                try:
+                    for n in nlist:
+                        if n in adj:
+                            yield n
+                except TypeError as err:
+                    exc, message = err, err.args[0]
+                    # capture error for non-sequence/iterator nbunch.
+                    if "iter" in message:
+                        exc = NetworkXError(
+                            "nbunch is not a node or a sequence of nodes."
+                        )
+                    # capture error for unhashable node.
+                    if "hashable" in message:
+                        exc = NetworkXError(
+                            f"Node {n} in sequence nbunch is not a valid node."
+                        )
+                    raise exc
+
+            bunch = bunch_iter(nbunch, self._adj)
+        return bunch
diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/graphviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/graphviews.py
new file mode 100644
index 0000000..dcb7836
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/classes/graphviews.py
@@ -0,0 +1,205 @@
+"""View of Graphs as SubGraph, Reverse, Directed, Undirected.
+
+In some algorithms it is convenient to temporarily morph
+a graph to exclude some nodes or edges. It is often better
+to do that via a view than to remove and then re-add.
+In other algorithms it is convenient to temporarily morph
+a graph to reverse directed edges, or treat a directed graph
+as undirected, etc. This module provides those graph views.
+
+The resulting views are essentially read-only graphs that
+report data from the original graph object. We provide an
+attribute G._graph which points to the underlying graph object.
+
+Note: Since graphviews look like graphs, one can end up with
+view-of-view-of-view chains. Be careful with chains because
+they become very slow with about 15 nested views.
+For the common simple case of node induced subgraphs created
+from the graph class, we short-cut the chain by returning a
+subgraph of the original graph directly rather than a subgraph
+of a subgraph. We are careful not to disrupt any edge filter in
+the middle subgraph. In general, determining how to short-cut
+the chain is tricky and much harder with restricted_views than
+with induced subgraphs.
+Often it is easiest to use .copy() to avoid chains.
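+
+A short sketch of the chain-avoidance advice above (names are illustrative):
+
+>>> import networkx as nx
+>>> G = nx.path_graph(5)
+>>> H = G.subgraph([0, 1, 2])   # a view of G
+>>> H2 = H.subgraph([0, 1])     # short-cut: still a single view of G
+>>> H3 = H2.copy()              # an independent copy, no view chain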
+""" +import networkx as nx +from networkx.classes.coreviews import ( + FilterAdjacency, + FilterAtlas, + FilterMultiAdjacency, + UnionAdjacency, + UnionMultiAdjacency, +) +from networkx.classes.filters import no_filter +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = ["generic_graph_view", "subgraph_view", "reverse_view"] + + +def generic_graph_view(G, create_using=None): + if create_using is None: + newG = G.__class__() + else: + newG = nx.empty_graph(0, create_using) + if G.is_multigraph() != newG.is_multigraph(): + raise NetworkXError("Multigraph for G must agree with create_using") + newG = nx.freeze(newG) + + # create view by assigning attributes from G + newG._graph = G + newG.graph = G.graph + + newG._node = G._node + if newG.is_directed(): + if G.is_directed(): + newG._succ = G._succ + newG._pred = G._pred + # newG._adj is synced with _succ + else: + newG._succ = G._adj + newG._pred = G._adj + # newG._adj is synced with _succ + elif G.is_directed(): + if G.is_multigraph(): + newG._adj = UnionMultiAdjacency(G._succ, G._pred) + else: + newG._adj = UnionAdjacency(G._succ, G._pred) + else: + newG._adj = G._adj + return newG + + +def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter): + """View of `G` applying a filter on nodes and edges. + + `subgraph_view` provides a read-only view of the input graph that excludes + nodes and edges based on the outcome of two filter functions `filter_node` + and `filter_edge`. + + The `filter_node` function takes one argument --- the node --- and returns + `True` if the node should be included in the subgraph, and `False` if it + should not be included. + + The `filter_edge` function takes two (or three arguments if `G` is a + multi-graph) --- the nodes describing an edge, plus the edge-key if + parallel edges are possible --- and returns `True` if the edge should be + included in the subgraph, and `False` if it should not be included. + + Both node and edge filter functions are called on graph elements as they + are queried, meaning there is no up-front cost to creating the view. + + Parameters + ---------- + G : networkx.Graph + A directed/undirected graph/multigraph + + filter_node : callable, optional + A function taking a node as input, which returns `True` if the node + should appear in the view. + + filter_edge : callable, optional + A function taking as input the two nodes describing an edge (plus the + edge-key if `G` is a multi-graph), which returns `True` if the edge + should appear in the view. + + Returns + ------- + graph : networkx.Graph + A read-only graph view of the input graph. + + Examples + -------- + >>> G = nx.path_graph(6) + + Filter functions operate on the node, and return `True` if the node should + appear in the view: + + >>> def filter_node(n1): + ... return n1 != 5 + ... + >>> view = nx.subgraph_view(G, filter_node=filter_node) + >>> view.nodes() + NodeView((0, 1, 2, 3, 4)) + + We can use a closure pattern to filter graph elements based on additional + data --- for example, filtering on edge data attached to the graph: + + >>> G[3][4]["cross_me"] = False + >>> def filter_edge(n1, n2): + ... return G[n1][n2].get("cross_me", True) + ... 
+ >>> view = nx.subgraph_view(G, filter_edge=filter_edge) + >>> view.edges() + EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)]) + + >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,) + >>> view.nodes() + NodeView((0, 1, 2, 3, 4)) + >>> view.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + """ + newG = nx.freeze(G.__class__()) + newG._NODE_OK = filter_node + newG._EDGE_OK = filter_edge + + # create view by assigning attributes from G + newG._graph = G + newG.graph = G.graph + + newG._node = FilterAtlas(G._node, filter_node) + if G.is_multigraph(): + Adj = FilterMultiAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u, k) + + else: + Adj = FilterAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u) + + if G.is_directed(): + newG._succ = Adj(G._succ, filter_node, filter_edge) + newG._pred = Adj(G._pred, filter_node, reverse_edge) + # newG._adj is synced with _succ + else: + newG._adj = Adj(G._adj, filter_node, filter_edge) + return newG + + +@not_implemented_for("undirected") +def reverse_view(G): + """View of `G` with edge directions reversed + + `reverse_view` returns a read-only view of the input graph where + edge directions are reversed. + + Identical to digraph.reverse(copy=False) + + Parameters + ---------- + G : networkx.DiGraph + + Returns + ------- + graph : networkx.DiGraph + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(2, 3) + >>> G.edges() + OutEdgeView([(1, 2), (2, 3)]) + + >>> view = nx.reverse_view(G) + >>> view.edges() + OutEdgeView([(2, 1), (3, 2)]) + """ + newG = generic_graph_view(G) + newG._succ, newG._pred = G._pred, G._succ + # newG._adj is synced with _succ + return newG diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/multidigraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/multidigraph.py new file mode 100644 index 0000000..e118dc2 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/multidigraph.py @@ -0,0 +1,947 @@ +"""Base class for MultiDiGraph.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +import networkx.convert as convert +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.digraph import DiGraph +from networkx.classes.multigraph import MultiGraph +from networkx.classes.reportviews import ( + DiMultiDegreeView, + InMultiDegreeView, + InMultiEdgeView, + OutMultiDegreeView, + OutMultiEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["MultiDiGraph"] + + +class MultiDiGraph(MultiGraph, DiGraph): + """A directed graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiDiGraph holds directed edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. 
+ If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiGraph + OrderedMultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiDiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, dict(route=282)), (4, 5, dict(route=37))]) + >>> G[4] + AdjacencyView({5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiDiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4` + (for multigraphs the edge key is required: `MG.edges[u, v, + key][name] = value`). 
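+
+    As a small, illustrative sketch of those key semantics (a fresh graph
+    ``H`` is used here so the examples above are unaffected):
+
+    >>> H = nx.MultiDiGraph()
+    >>> H.add_edge(1, 2, weight=1)
+    0
+    >>> H.add_edge(1, 2, weight=2)  # a parallel edge gets the next key
+    1
+    >>> sorted(H[1][2])  # edge keys between 1 and 2
+    [0, 1]
+    >>> H.edges[1, 2, 1]["weight"]
+    2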
+ + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are available as an adjacency-view `G.adj` object or via + the method `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The MultiDiGraph class uses a dict-of-dict-of-dict-of-dict structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information + and holds edge_key dicts keyed by neighbor. The edge_key dict holds + each edge_attr dict keyed by edge key. The inner dict + (edge_attr_dict) represents the edge data and holds edge attribute + values keyed by attribute names. + + Each of these four dicts in the dict-of-dict-of-dict-of-dict + structure can be replaced by a user defined dict-like object. + In general, the dict-like features should be maintained but + extra features can be added. To replace one of the dicts create + a new graph class by changing the class(!) variable holding the + factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory + and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, (default: dict) + Factory function to be used to create the adjacency list + dict which holds multiedge key dicts keyed by neighbor. 
+        It should require no arguments and return a dict-like object.
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure, all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+    Please see :mod:`~networkx.classes.ordered` for examples of
+    creating graph subclasses by overwriting the base class `dict` with
+    a dictionary-like object.
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse matrix, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+            If None, the treatment for True is tried, but if it fails,
+            the treatment for False is tried.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
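+
+        For illustration, a dict-of-dict-of-dict-of-dict input that is
+        picked up as multigraph data (the variable name ``d`` is arbitrary):
+
+        >>> d = {0: {1: {"a": {"weight": 3}}}}  # node -> nbr -> key -> data
+        >>> G = nx.MultiDiGraph(d)
+        >>> list(G.edges(keys=True, data=True))
+        [(0, 1, 'a', {'weight': 3})]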
+ + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + # multigraph_input can be None/True/False. So check "is not False" + if isinstance(incoming_graph_data, dict) and multigraph_input is not False: + DiGraph.__init__(self) + try: + convert.from_dict_of_dicts( + incoming_graph_data, create_using=self, multigraph_input=True + ) + self.graph.update(attr) + except Exception as err: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err)}: {err}" + ) + DiGraph.__init__(self, incoming_graph_data, **attr) + else: + DiGraph.__init__(self, incoming_graph_data, **attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.succ` is identical to `G.adj`. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + """ + return MultiAdjacencyView(self._pred) + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. 
+ attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.MultiDiGraph() + >>> e = (1, 2) + >>> key = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> key = G.add_edge(1, 2, weight=3) + >>> key = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> key = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> ekey = G.add_edge(1, 2) + >>> G[1][2][0].update({0: 5}) + >>> G.edges[1, 2, 0].update({0: 5}) + """ + u, v = u_for_edge, v_for_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + if key is None: + key = self.new_edge_key(u, v) + if v in self._succ[u]: + keydict = self._adj[u][v] + datadict = keydict.get(key, self.edge_attr_dict_factory()) + datadict.update(attr) + keydict[key] = datadict + else: + # selfloops work this way without special treatment + datadict = self.edge_attr_dict_factory() + datadict.update(attr) + keydict = self.edge_key_dict_factory() + keydict[key] = datadict + self._succ[u][v] = keydict + self._pred[v][u] = keydict + return key + + def remove_edge(self, u, v, key=None): + """Remove an edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove an edge between nodes u and v. + key : hashable identifier, optional (default=None) + Used to distinguish multiple edges between a pair of nodes. + If None, remove a single edge between u and v. If there are + multiple edges, removes the last edge added in terms of + insertion order. + + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. 
+ + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiDiGraph() + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 0), (1, 2, 1)]) + + For edges with keys + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._succ[u][v] + del self._pred[v][u] + + @cached_property + def edges(self): + """An OutMultiEdgeView of the Graph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, keys=False, default=None) + + The OutMultiEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, ``G.edges[u, v, k]['color']`` provides the value of the color + attribute for the edge from ``u`` to ``v`` with key ``k`` while + ``for (u, v, k, c) in G.edges(data='color', default='red', keys=True):`` + iterates through all the edges yielding the color attribute with + default `'red'` if no color attribute exists. + + Edges are returned as tuples with optional data and keys + in the order (node, neighbor, key, data). If ``keys=True`` is not + provided, the tuples will just be (node, neighbor, data), but + multiple tuples with the same node and neighbor will be + generated when multiple edges between two nodes exist. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating (u, v, k, + d) tuples when data is also requested (the default) and (u, + v, k) tuples when data is not requested. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutMultiEdgeView + A view of edge attributes, usually it iterates over (u, v) + (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as ``edges[u, v, k]['foo']``. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. 
+ + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2]) + >>> key = G.add_edge(2, 3, weight=5) + >>> key2 = G.add_edge(1, 2) # second edge between these nodes + >>> [e for e in G.edges()] + [(0, 1), (1, 2), (1, 2), (2, 3)] + >>> list(G.edges(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (1, 2, {}), (2, 3, {'weight': 5})] + >>> list(G.edges(data="weight", default=1)) + [(0, 1, 1), (1, 2, 1), (1, 2, 1), (2, 3, 5)] + >>> list(G.edges(keys=True)) # default keys are integers + [(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)] + >>> list(G.edges(data=True, keys=True)) + [(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {}), (2, 3, 0, {'weight': 5})] + >>> list(G.edges(data="weight", default=1, keys=True)) + [(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 1), (2, 3, 0, 5)] + >>> list(G.edges([0, 2])) + [(0, 1), (2, 3)] + >>> list(G.edges(0)) + [(0, 1)] + >>> list(G.edges(1)) + [(1, 2), (1, 2)] + + See Also + -------- + in_edges, out_edges + """ + return OutMultiEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutMultiEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """An InMultiEdgeView of the Graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, keys=False, default=None) + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating 3-tuples + (u, v, k) or with data, 4-tuples (u, v, k, d). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InMultiEdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as `edges[u, v, k]['foo']`. + + See Also + -------- + edges + """ + return InMultiEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiMultiDegreeView or int + If multiple nodes are requested (the default), returns a `DiMultiDegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. 
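+
+        For illustration, the ``weight`` keyword sums edge weights over all
+        parallel edges (a hypothetical graph ``H``):
+
+        >>> H = nx.MultiDiGraph()
+        >>> k1 = H.add_edge(0, 1, weight=2)
+        >>> k2 = H.add_edge(0, 1, weight=3)  # parallel edge
+        >>> H.degree(0, weight="weight")
+        5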
+ + See Also + -------- + out_degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.degree([0, 1, 2])) # parallel edges are counted + [(0, 2), (1, 3), (2, 2)] + + """ + return DiMultiDegreeView(self) + + @cached_property + def in_degree(self): + """A DegreeView for (node, in_degree) or in_degree for single node. + + The node in-degree is the number of edges pointing in to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.in_degree([0, 1, 2])) # parallel edges counted + [(0, 0), (1, 2), (2, 1)] + + """ + return InMultiDegreeView(self) + + @cached_property + def out_degree(self): + """Returns an iterator for (node, out-degree) or out-degree for single node. + + out_degree(self, nbunch=None, weight=None) + + The node out-degree is the number of edges pointing out of the node. + This function returns the out-degree for a single node or an iterator + for a bunch of nodes or if nothing is passed as argument. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.out_degree([0, 1, 2])) # counts parallel edges + [(0, 2), (1, 1), (2, 1)] + + """ + return OutMultiDegreeView(self) + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return True + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. 
+ + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : MultiGraph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + MultiGraph, copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiDiGraph to use dict-like + objects in the data structure, those changes do not transfer + to the MultiGraph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + if v in self._pred[u] and key in self._pred[u][v] + ) + else: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. 
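+
+        Examples
+        --------
+        A small sketch (the graph ``H`` here is illustrative):
+
+        >>> H = nx.MultiDiGraph([(0, 1), (1, 2)])
+        >>> list(H.reverse().edges())
+        [(1, 0), (2, 1)]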
+ """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + H.add_edges_from( + (v, u, k, deepcopy(d)) + for u, v, k, d in self.edges(keys=True, data=True) + ) + return H + return nx.graphviews.reverse_view(self) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/multigraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/multigraph.py new file mode 100644 index 0000000..3332201 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/multigraph.py @@ -0,0 +1,1246 @@ +"""Base class for MultiGraph.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +import networkx.convert as convert +from networkx import NetworkXError +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import MultiDegreeView, MultiEdgeView + +__all__ = ["MultiGraph"] + + +class MultiGraph(Graph): + """ + An undirected graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiGraph holds undirected edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes, in a MultiGraph each edge has a key to + distinguish between multiple edges that have the same source and + destination nodes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, + SciPy sparse matrix, or PyGraphviz graph. + + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. + If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiDiGraph + OrderedMultiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. 
+ + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, {"route": 28}), (4, 5, {"route": 37})]) + >>> G[4] + AdjacencyView({3: {0: {}}, 5: {0: {}, 1: {'route': 28}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4`. + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. 
+
+    For details on these and other miscellaneous methods, see below.
+
+    **Subclasses (Advanced):**
+
+    The MultiGraph class uses a dict-of-dict-of-dict-of-dict data structure.
+    The outer dict (node_dict) holds adjacency information keyed by node.
+    The next dict (adjlist_dict) represents the adjacency information
+    and holds edge_key dicts keyed by neighbor. The edge_key dict holds
+    each edge_attr dict keyed by edge key. The inner dict
+    (edge_attr_dict) represents the edge data and holds edge attribute
+    values keyed by attribute names.
+
+    Each of these four dicts in the dict-of-dict-of-dict-of-dict
+    structure can be replaced by a user defined dict-like object.
+    In general, the dict-like features should be maintained but
+    extra features can be added. To replace one of the dicts create
+    a new graph class by changing the class(!) variable holding the
+    factory for that dict-like structure. The variable names are
+    node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
+    adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory
+    and graph_attr_dict_factory.
+
+    node_dict_factory : function, (default: dict)
+        Factory function to be used to create the dict containing node
+        attributes, keyed by node id.
+        It should require no arguments and return a dict-like object
+
+    node_attr_dict_factory: function, (default: dict)
+        Factory function to be used to create the node attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object
+
+    adjlist_outer_dict_factory : function, (default: dict)
+        Factory function to be used to create the outer-most dict
+        in the data structure that holds adjacency info keyed by node.
+        It should require no arguments and return a dict-like object.
+
+    adjlist_inner_dict_factory : function, (default: dict)
+        Factory function to be used to create the adjacency list
+        dict which holds multiedge key dicts keyed by neighbor.
+        It should require no arguments and return a dict-like object.
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure, all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
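+
+    For illustration, a minimal subclass sketch that overrides one of these
+    hooks (the class name ``ThinMultiGraph`` is made up for this example):
+
+    >>> class ThinMultiGraph(nx.MultiGraph):
+    ...     def to_directed_class(self):
+    ...         return nx.MultiDiGraph
+    >>> D = ThinMultiGraph([(0, 1)]).to_directed()
+    >>> sorted(D.edges())
+    [(0, 1), (1, 0)]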
+
+    Please see :mod:`~networkx.classes.ordered` for examples of
+    creating graph subclasses by overwriting the base class `dict` with
+    a dictionary-like object.
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def to_directed_class(self):
+        """Returns the class to use for empty directed copies.
+
+        If you subclass the base classes, use this to designate
+        what directed class to use for `to_directed()` copies.
+        """
+        return nx.MultiDiGraph
+
+    def to_undirected_class(self):
+        """Returns the class to use for empty undirected copies.
+
+        If you subclass the base classes, use this to designate
+        what undirected class to use for `to_undirected()` copies.
+        """
+        return MultiGraph
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse matrix, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+            If None, the treatment for True is tried, but if it fails,
+            the treatment for False is tried.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
+
+        See Also
+        --------
+        convert
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()
+        >>> G = nx.MultiGraph(name="my graph")
+        >>> e = [(1, 2), (1, 2), (2, 3), (3, 4)]  # list of edges
+        >>> G = nx.MultiGraph(e)
+
+        Arbitrary graph attribute pairs (key=value) may be assigned
+
+        >>> G = nx.MultiGraph(e, day="Friday")
+        >>> G.graph
+        {'day': 'Friday'}
+
+        """
+        # multigraph_input can be None/True/False. So check "is not False"
+        if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
+            Graph.__init__(self)
+            try:
+                convert.from_dict_of_dicts(
+                    incoming_graph_data, create_using=self, multigraph_input=True
+                )
+                self.graph.update(attr)
+            except Exception as err:
+                if multigraph_input is True:
+                    raise nx.NetworkXError(
+                        f"converting multigraph_input raised:\n{type(err)}: {err}"
+                    )
+                Graph.__init__(self, incoming_graph_data, **attr)
+        else:
+            Graph.__init__(self, incoming_graph_data, **attr)
+
+    @cached_property
+    def adj(self):
+        """Graph adjacency object holding the neighbors of each node.
+
+        This object is a read-only dict-like structure with node keys
+        and neighbor-dict values. The neighbor-dict is keyed by neighbor
+        to the edgekey-data-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets
+        the color of the edge `(3, 2, 0)` to `"blue"`.
+
+        Iterating over G.adj behaves like a dict. Useful idioms include
+        `for nbr, edgesdict in G.adj[n].items():`.
+ + The neighbor information is also provided by subscripting the graph. + + Examples + -------- + >>> e = [(1, 2), (1, 2), (1, 3), (3, 4)] # list of edges + >>> G = nx.MultiGraph(e) + >>> G.edges[1, 2, 0]["weight"] = 3 + >>> result = set() + >>> for edgekey, data in G[1][2].items(): + ... result.add(data.get('weight', 1)) + >>> result + {1, 3} + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._adj) + + def new_edge_key(self, u, v): + """Returns an unused key for edges between nodes `u` and `v`. + + The nodes `u` and `v` do not need to be already in the graph. + + Notes + ----- + In the standard MultiGraph class the new key is the number of existing + edges between `u` and `v` (increased if necessary to ensure unused). + The first edge will have key 0, then 1, etc. If an edge is removed + further new_edge_keys may not be in this order. + + Parameters + ---------- + u, v : nodes + + Returns + ------- + key : int + """ + try: + keydict = self._adj[u][v] + except KeyError: + return 0 + key = len(keydict) + while key in keydict: + key += 1 + return key + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following each add an additional edge e=(1, 2) to graph G: + + >>> G = nx.MultiGraph() + >>> e = (1, 2) + >>> ekey = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> ekey = G.add_edge(1, 2, weight=3) + >>> ekey = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> ekey = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. 
+
+        >>> ekey = G.add_edge(1, 2)
+        >>> G[1][2][0].update({0: 5})
+        >>> G.edges[1, 2, 0].update({0: 5})
+        """
+        u, v = u_for_edge, v_for_edge
+        # add nodes
+        if u not in self._adj:
+            if u is None:
+                raise ValueError("None cannot be a node")
+            self._adj[u] = self.adjlist_inner_dict_factory()
+            self._node[u] = self.node_attr_dict_factory()
+        if v not in self._adj:
+            if v is None:
+                raise ValueError("None cannot be a node")
+            self._adj[v] = self.adjlist_inner_dict_factory()
+            self._node[v] = self.node_attr_dict_factory()
+        if key is None:
+            key = self.new_edge_key(u, v)
+        if v in self._adj[u]:
+            keydict = self._adj[u][v]
+            datadict = keydict.get(key, self.edge_attr_dict_factory())
+            datadict.update(attr)
+            keydict[key] = datadict
+        else:
+            # selfloops work this way without special treatment
+            datadict = self.edge_attr_dict_factory()
+            datadict.update(attr)
+            keydict = self.edge_key_dict_factory()
+            keydict[key] = datadict
+            self._adj[u][v] = keydict
+            self._adj[v][u] = keydict
+        return key
+
+    def add_edges_from(self, ebunch_to_add, **attr):
+        """Add all the edges in ebunch_to_add.
+
+        Parameters
+        ----------
+        ebunch_to_add : container of edges
+            Each edge given in the container will be added to the
+            graph. The edges can be:
+
+                - 2-tuples (u, v) or
+                - 3-tuples (u, v, d) for an edge data dict d, or
+                - 3-tuples (u, v, k) for a non-iterable key k, or
+                - 4-tuples (u, v, k, d) for an edge with data and key k
+
+        attr : keyword arguments, optional
+            Edge data (or labels or objects) can be assigned using
+            keyword arguments.
+
+        Returns
+        -------
+        A list of edge keys assigned to the edges in `ebunch_to_add`.
+
+        See Also
+        --------
+        add_edge : add a single edge
+        add_weighted_edges_from : convenient way to add weighted edges
+
+        Notes
+        -----
+        Adding the same 2-tuple edge twice stores an additional parallel
+        edge under a new key; only a duplicate with an explicit, existing
+        key updates that edge's data instead.
+
+        Edge attributes specified in an ebunch take precedence over
+        attributes specified via keyword arguments.
+
+        Default keys are generated using the method ``new_edge_key()``.
+        This method can be overridden by subclassing the base class and
+        providing a custom ``new_edge_key()`` method.
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edges_from([(0, 1), (1, 2)])  # using a list of edge tuples
+        >>> e = zip(range(0, 3), range(1, 4))
+        >>> G.add_edges_from(e)  # Add the path graph 0-1-2-3
+
+        Associate data to edges
+
+        >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
+        >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898")
+        """
+        keylist = []
+        for e in ebunch_to_add:
+            ne = len(e)
+            if ne == 4:
+                u, v, key, dd = e
+            elif ne == 3:
+                u, v, dd = e
+                key = None
+            elif ne == 2:
+                u, v = e
+                dd = {}
+                key = None
+            else:
+                msg = f"Edge tuple {e} must be a 2-tuple, 3-tuple or 4-tuple."
+                raise NetworkXError(msg)
+            ddd = {}
+            ddd.update(attr)
+            try:
+                ddd.update(dd)
+            except (TypeError, ValueError):
+                if ne != 3:
+                    raise
+                key = dd  # ne == 3 with 3rd value not dict, must be a key
+            key = self.add_edge(u, v, key)
+            self[u][v][key].update(ddd)
+            keylist.append(key)
+        return keylist
+
+    def remove_edge(self, u, v, key=None):
+        """Remove an edge between u and v.
+
+        Parameters
+        ----------
+        u, v : nodes
+            Remove an edge between nodes u and v.
+        key : hashable identifier, optional (default=None)
+            Used to distinguish multiple edges between a pair of nodes.
+            If None, remove a single edge between u and v. If there are
+            multiple edges, removes the last edge added in terms of
+            insertion order.
+ + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiGraph() # or MultiDiGraph, etc + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0), (1, 2, 1)]) + >>> G.remove_edge(2, 1) # edges are not directed + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0)]) + + For edges with keys + + >>> G = nx.MultiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._adj[u][v] + if u != v: # check for selfloop + del self._adj[v][u] + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) A single edge between u and v is removed. + - 3-tuples (u, v, key) The edge identified by key is removed. + - 4-tuples (u, v, key, data) where data is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + + Removing multiple copies of edges + + >>> G = nx.MultiGraph() + >>> keys = G.add_edges_from([(1, 2), (1, 2), (1, 2)]) + >>> G.remove_edges_from([(1, 2), (2, 1)]) # edges aren't directed + >>> list(G.edges()) + [(1, 2)] + >>> G.remove_edges_from([(1, 2), (1, 2)]) # silently ignore extra copy + >>> list(G.edges) # now empty graph + [] + + When the edge is a 2-tuple ``(u, v)`` but there are multiple edges between + u and v in the graph, the most recent edge (in terms of insertion + order) is removed. + + >>> G = nx.MultiGraph() + >>> for key in ("x", "y", "a"): + ... k = G.add_edge(0, 1, key=key) + >>> G.edges(keys=True) + MultiEdgeView([(0, 1, 'x'), (0, 1, 'y'), (0, 1, 'a')]) + >>> G.remove_edges_from([(0, 1)]) + >>> G.edges(keys=True) + MultiEdgeView([(0, 1, 'x'), (0, 1, 'y')]) + + """ + for e in ebunch: + try: + self.remove_edge(*e[:3]) + except NetworkXError: + pass + + def has_edge(self, u, v, key=None): + """Returns True if the graph has an edge between nodes u and v. + + This is the same as `v in G[u] or key in G[u][v]` + without KeyError exceptions. + + Parameters + ---------- + u, v : nodes + Nodes can be, for example, strings or numbers. 
+
+        key : hashable identifier, optional (default=None)
+            If specified, return True only if the edge with
+            key is found.
+
+        Returns
+        -------
+        edge_ind : bool
+            True if edge is in the graph, False otherwise.
+
+        Examples
+        --------
+        Can be called either using two nodes u, v, an edge tuple (u, v),
+        or an edge tuple (u, v, key).
+
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.has_edge(0, 1)  # using two nodes
+        True
+        >>> e = (0, 1)
+        >>> G.has_edge(*e)  # e is a 2-tuple (u, v)
+        True
+        >>> G.add_edge(0, 1, key="a")
+        'a'
+        >>> G.has_edge(0, 1, key="a")  # specify key
+        True
+        >>> G.has_edge(1, 0, key="a")  # edges aren't directed
+        True
+        >>> e = (0, 1, "a")
+        >>> G.has_edge(*e)  # e is a 3-tuple (u, v, 'a')
+        True
+
+        The following expressions are equivalent:
+
+        >>> G.has_edge(0, 1)
+        True
+        >>> 1 in G[0]  # though this gives :exc:`KeyError` if 0 not in G
+        True
+        >>> 0 in G[1]  # other order; also gives :exc:`KeyError` if 0 not in G
+        True
+
+        """
+        try:
+            if key is None:
+                return v in self._adj[u]
+            else:
+                return key in self._adj[u][v]
+        except KeyError:
+            return False
+
+    @cached_property
+    def edges(self):
+        """Returns an iterator over the edges.
+
+        edges(self, nbunch=None, data=False, keys=False, default=None)
+
+        The MultiEdgeView provides set-like operations on the edge-tuples
+        as well as edge attribute lookup. When called, it also provides
+        an EdgeDataView object which allows control of access to edge
+        attributes (but does not provide set-like operations).
+        Hence, ``G.edges[u, v, k]['color']`` provides the value of the color
+        attribute for the edge from ``u`` to ``v`` with key ``k`` while
+        ``for (u, v, k, c) in G.edges(data='color', keys=True, default="red"):``
+        iterates through all the edges yielding the color attribute with
+        default `'red'` if no color attribute exists.
+
+        Edges are returned as tuples with optional data and keys
+        in the order (node, neighbor, key, data). If ``keys=True`` is not
+        provided, the tuples will just be (node, neighbor, data), but
+        multiple tuples with the same node and neighbor will be generated
+        when multiple edges exist between two nodes.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges from these nodes.
+        data : string or bool, optional (default=False)
+            The edge attribute returned in 3-tuple (u, v, ddict[data]).
+            If True, return edge attribute dict in 3-tuple (u, v, ddict).
+            If False, return 2-tuple (u, v).
+        keys : bool, optional (default=False)
+            If True, return edge keys with each edge, creating (u, v, k)
+            tuples or (u, v, k, d) tuples if data is also requested.
+        default : value, optional (default=None)
+            Value used for edges that don't have the requested attribute.
+            Only relevant if data is not True or False.
+
+        Returns
+        -------
+        edges : MultiEdgeView
+            A view of edge attributes, usually it iterates over (u, v)
+            (u, v, k) or (u, v, k, d) tuples of edges, but can also be
+            used for attribute lookup as ``edges[u, v, k]['foo']``.
+
+        Notes
+        -----
+        Nodes in nbunch that are not in the graph will be (quietly) ignored.
+        For directed graphs this returns the out-edges.
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()
+        >>> nx.add_path(G, [0, 1, 2])
+        >>> key = G.add_edge(2, 3, weight=5)
+        >>> key2 = G.add_edge(2, 1, weight=2)  # multi-edge
+        >>> [e for e in G.edges()]
+        [(0, 1), (1, 2), (1, 2), (2, 3)]
+        >>> G.edges.data()  # default data is {} (empty dict)
+        MultiEdgeDataView([(0, 1, {}), (1, 2, {}), (1, 2, {'weight': 2}), (2, 3, {'weight': 5})])
+        >>> G.edges.data("weight", default=1)
+        MultiEdgeDataView([(0, 1, 1), (1, 2, 1), (1, 2, 2), (2, 3, 5)])
+        >>> G.edges(keys=True)  # default keys are integers
+        MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)])
+        >>> G.edges.data(keys=True)
+        MultiEdgeDataView([(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {'weight': 2}), (2, 3, 0, {'weight': 5})])
+        >>> G.edges.data("weight", default=1, keys=True)
+        MultiEdgeDataView([(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 2), (2, 3, 0, 5)])
+        >>> G.edges([0, 3])  # Note ordering of tuples from listed sources
+        MultiEdgeDataView([(0, 1), (3, 2)])
+        >>> G.edges([0, 3, 2, 1])  # Note ordering of tuples
+        MultiEdgeDataView([(0, 1), (3, 2), (2, 1), (2, 1)])
+        >>> G.edges(0)
+        MultiEdgeDataView([(0, 1)])
+        """
+        return MultiEdgeView(self)
+
+    def get_edge_data(self, u, v, key=None, default=None):
+        """Returns the attribute dictionary associated with edge (u, v,
+        key).
+
+        If a key is not provided, returns a dictionary mapping edge keys
+        to attribute dictionaries for each edge between u and v.
+
+        This is identical to `G[u][v][key]` except the default is returned
+        instead of an exception if the edge doesn't exist.
+
+        Parameters
+        ----------
+        u, v : nodes
+
+        default : any Python object (default=None)
+            Value to return if the specific edge (u, v, key) is not
+            found, OR if there are no edges between u and v and no key
+            is specified.
+
+        key : hashable identifier, optional (default=None)
+            Return data only for the edge with specified key, as an
+            attribute dictionary (rather than a dictionary mapping keys
+            to attribute dictionaries).
+
+        Returns
+        -------
+        edge_dict : dictionary
+            The edge attribute dictionary, OR a dictionary mapping edge
+            keys to attribute dictionaries for each of those edges if no
+            specific key is provided (even if there's only one edge
+            between u and v).
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> key = G.add_edge(0, 1, key="a", weight=7)
+        >>> G[0][1]["a"]  # key='a'
+        {'weight': 7}
+        >>> G.edges[0, 1, "a"]  # key='a'
+        {'weight': 7}
+
+        Warning: we protect the graph data structure by making
+        `G.edges` and `G[1][2]` read-only dict-like structures.
+        However, you can assign values to attributes in e.g.
+        `G.edges[1, 2, 'a']` or `G[1][2]['a']` using an additional
+        bracket as shown next. You need to specify all edge info
+        to assign to the edge data associated with an edge.
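# An illustrative sketch of get_edge_data on a multigraph, assuming networkx
# is importable as nx: without a key you get a dict keyed by edge keys, with
# a key you get that edge's attribute dict, and absent edges yield `default`.
import networkx as nx

G = nx.MultiGraph()
k = G.add_edge(0, 1, weight=7)                        # k == 0 (auto key)
assert G.get_edge_data(0, 1) == {k: {"weight": 7}}    # all parallel edges
assert G.get_edge_data(0, 1, key=k) == {"weight": 7}  # one keyed edge
assert G.get_edge_data(0, 9, default=0) == 0          # missing edge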
+
+        >>> G[0][1]["a"]["weight"] = 10
+        >>> G.edges[0, 1, "a"]["weight"] = 10
+        >>> G[0][1]["a"]["weight"]
+        10
+        >>> G.edges[1, 0, "a"]["weight"]
+        10
+
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.edges[0, 1, 0]["weight"] = 5
+        >>> G.get_edge_data(0, 1)
+        {0: {'weight': 5}}
+        >>> e = (0, 1)
+        >>> G.get_edge_data(*e)  # tuple form
+        {0: {'weight': 5}}
+        >>> G.get_edge_data(3, 0)  # edge not in graph, returns None
+        >>> G.get_edge_data(3, 0, default=0)  # edge not in graph, return default
+        0
+        >>> G.get_edge_data(1, 0, 0)  # specific key gives back
+        {'weight': 5}
+        """
+        try:
+            if key is None:
+                return self._adj[u][v]
+            else:
+                return self._adj[u][v][key]
+        except KeyError:
+            return default
+
+    @cached_property
+    def degree(self):
+        """A DegreeView for the Graph as G.degree or G.degree().
+
+        The node degree is the number of edges adjacent to the node.
+        The weighted node degree is the sum of the edge weights for
+        edges incident to that node.
+
+        This object provides an iterator for (node, degree) as well as
+        lookup for the degree for a single node.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges incident to these nodes.
+
+        weight : string or None, optional (default=None)
+            The name of an edge attribute that holds the numerical value used
+            as a weight.  If None, then each edge has weight 1.
+            The degree is the sum of the edge weights adjacent to the node.
+
+        Returns
+        -------
+        MultiDegreeView or int
+            If multiple nodes are requested (the default), returns a `MultiDegreeView`
+            mapping nodes to their degree.
+            If a single node is requested, returns the degree of the node as an integer.
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.degree(0)  # node 0 with degree 1
+        1
+        >>> list(G.degree([0, 1]))
+        [(0, 1), (1, 2)]
+
+        """
+        return MultiDegreeView(self)
+
+    def is_multigraph(self):
+        """Returns True if graph is a multigraph, False otherwise."""
+        return True
+
+    def is_directed(self):
+        """Returns True if graph is directed, False otherwise."""
+        return False
+
+    def copy(self, as_view=False):
+        """Returns a copy of the graph.
+
+        The copy method by default returns an independent shallow copy
+        of the graph and attributes. That is, if an attribute is a
+        container, that container is shared by the original and the copy.
+        Use Python's `copy.deepcopy` for new containers.
+
+        If `as_view` is True then a view is returned instead of a copy.
+
+        Notes
+        -----
+        All copies reproduce the graph structure, but data attributes
+        may be handled in different ways. There are four types of copies
+        of a graph that people might want.
+
+        Deepcopy -- A "deepcopy" copies the graph structure as well as
+        all data attributes and any objects they might contain.
+        The entire graph object is new so that changes in the copy
+        do not affect the original object. (see Python's copy.deepcopy)
+
+        Data Reference (Shallow) -- For a shallow copy the graph structure
+        is copied but the edge, node and graph attribute dicts are
+        references to those in the original graph. This saves
+        time and memory but could cause confusion if you change an attribute
+        in one graph and it changes the attribute in the other.
+        NetworkX does not provide this level of shallow copy.
+
+        Independent Shallow -- This copy creates new independent attribute
+        dicts and then does a shallow copy of the attributes.
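# A minimal sketch of MultiGraph degree counting, assuming networkx is
# importable as nx: parallel edges each contribute 1, and a self-loop
# contributes 2.
import networkx as nx

G = nx.MultiGraph()
G.add_edges_from([(0, 1), (0, 1), (1, 1)])  # two parallel edges + a self-loop
assert G.degree(0) == 2
assert G.degree(1) == 4                     # 2 parallel + 2 for the self-loop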
That is, any + attributes that are containers are shared between the new graph + and the original. This is exactly what `dict.copy()` provides. + You can obtain this style copy using: + + >>> G = nx.path_graph(5) + >>> H = G.copy() + >>> H = G.copy(as_view=False) + >>> H = nx.Graph(G) + >>> H = G.__class__(G) + + Fresh Data -- For fresh data, the graph structure is copied while + new empty data attribute dicts are created. The resulting graph + is independent of the original and it has no edge, node or graph + attributes. Fresh copies are not enabled. Instead use: + + >>> H = G.__class__() + >>> H.add_nodes_from(G) + >>> H.add_edges_from(G.edges) + + View -- Inspired by dict-views, graph-views act like read-only + versions of the original graph, providing a copy of the original + structure without requiring any memory for copying the information. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Parameters + ---------- + as_view : bool, optional (default=False) + If True, the returned graph-view provides a read-only view + of the original graph without actually copying any data. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, datadict.copy()) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : MultiDiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, k, data) replaced by two directed edges + (u, v, k, data) and (v, u, k, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + MultiDiGraph created by this method. + + Examples + -------- + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1) + 0 + >>> G.add_edge(0, 1) + 1 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1)] + + If already directed, return a (deep) copy + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1) + 0 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0)] + """ + graph_class = self.to_directed_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, deepcopy(datadict)) + for u, nbrs in self.adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_undirected(self, as_view=False): + """Returns an undirected copy of the graph. 
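# A short sketch of the "independent shallow" copy semantics described above,
# assuming networkx is importable as nx: the structure is independent, but
# attribute containers are shared with the original.
import networkx as nx

G = nx.MultiGraph()
G.add_node(0, tags=["a"])
H = G.copy()
H.add_edge(0, 1)                   # structural change stays in the copy
assert not G.has_edge(0, 1)
H.nodes[0]["tags"].append("b")     # shared container: visible in both
assert G.nodes[0]["tags"] == ["a", "b"]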
+
+        Returns
+        -------
+        G : Graph/MultiGraph
+            A deepcopy of the graph.
+
+        See Also
+        --------
+        copy, add_edge, add_edges_from
+
+        Notes
+        -----
+        This returns a "deepcopy" of the edge, node, and
+        graph attributes which attempts to completely copy
+        all of the data and references.
+
+        This is in contrast to the similar `G = nx.MultiGraph(D)`
+        which returns a shallow copy of the data.
+
+        See the Python copy module for more information on shallow
+        and deep copies, https://docs.python.org/3/library/copy.html.
+
+        Warning: If you have subclassed MultiGraph to use dict-like
+        objects in the data structure, those changes do not transfer
+        to the MultiGraph created by this method.
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph([(0, 1), (0, 1), (1, 2)])
+        >>> H = G.to_directed()
+        >>> list(H.edges)
+        [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 2, 0), (2, 1, 0)]
+        >>> G2 = H.to_undirected()
+        >>> list(G2.edges)
+        [(0, 1, 0), (0, 1, 1), (1, 2, 0)]
+        """
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, key, deepcopy(datadict))
+            for u, nbrs in self._adj.items()
+            for v, keydict in nbrs.items()
+            for key, datadict in keydict.items()
+        )
+        return G
+
+    def number_of_edges(self, u=None, v=None):
+        """Returns the number of edges between two nodes.
+
+        Parameters
+        ----------
+        u, v : nodes, optional (default=all edges)
+            If u and v are specified, return the number of edges between
+            u and v. Otherwise return the total number of all edges.
+
+        Returns
+        -------
+        nedges : int
+            The number of edges in the graph.  If nodes `u` and `v` are
+            specified return the number of edges between those nodes. If
+            the graph is directed, this only returns the number of edges
+            from `u` to `v`.
+
+        See Also
+        --------
+        size
+
+        Examples
+        --------
+        For undirected multigraphs, this method counts the total number
+        of edges in the graph::
+
+            >>> G = nx.MultiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 2)])
+            [0, 1, 0]
+            >>> G.number_of_edges()
+            3
+
+        If you specify two nodes, this counts the total number of edges
+        joining the two nodes::
+
+            >>> G.number_of_edges(0, 1)
+            2
+
+        For directed multigraphs, this method can count the total number
+        of directed edges from `u` to `v`::
+
+            >>> G = nx.MultiDiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 0)])
+            [0, 1, 0]
+            >>> G.number_of_edges(0, 1)
+            2
+            >>> G.number_of_edges(1, 0)
+            1
+
+        """
+        if u is None:
+            return self.size()
+        try:
+            edgedata = self._adj[u][v]
+        except KeyError:
+            return 0  # no such edge
+        return len(edgedata)
diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/ordered.py b/myenv/lib/python3.9/site-packages/networkx/classes/ordered.py
new file mode 100644
index 0000000..ca82d12
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/classes/ordered.py
@@ -0,0 +1,162 @@
+"""
+
+.. deprecated:: 2.6
+
+    The ordered variants of graph classes in this module are deprecated and
+    will be removed in version 3.0.
+
+Consistently ordered variants of the default base classes.
+Note that if you are using Python 3.6+, you shouldn't need these classes
+because the dicts in Python 3.6+ are ordered.
+Note also that there are many differing expectations for the word "ordered"
+and that these classes may not provide the order you expect.
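# A brief sketch of number_of_edges on multigraphs, assuming networkx is
# importable as nx: parallel edges are counted individually, and a missing
# pair is simply 0.
import networkx as nx

G = nx.MultiGraph()
G.add_edges_from([(0, 1), (0, 1), (1, 2)])
assert G.number_of_edges() == 3
assert G.number_of_edges(0, 1) == 2   # edges between a specific pair
assert G.number_of_edges(0, 9) == 0   # absent pair, no exception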
+The intent here is to give a consistent order not a particular order. + +The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of +nodes and edges. The order of node reporting agrees with node adding, but for +edges, the order is not necessarily the order that the edges were added. + +In general, you should use the default (i.e., unordered) graph classes. +However, there are times (e.g., when testing) when you may need the +order preserved. + +Special care is required when using subgraphs of the Ordered classes. +The order of nodes in the subclass is not necessarily the same order +as the original class. In general it is probably better to avoid using +subgraphs and replace with code similar to: + +.. code-block:: python + + # instead of SG = G.subgraph(ordered_nodes) + SG = nx.OrderedGraph() + SG.add_nodes_from(ordered_nodes) + SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG) + +""" +import warnings +from collections import OrderedDict + +from .digraph import DiGraph +from .graph import Graph +from .multidigraph import MultiDiGraph +from .multigraph import MultiGraph + +__all__ = [] + +__all__.extend( + ["OrderedGraph", "OrderedDiGraph", "OrderedMultiGraph", "OrderedMultiDiGraph"] +) + + +class OrderedGraph(Graph): + """Consistently ordered variant of :class:`~networkx.Graph`. + + .. deprecated:: 2.6 + + OrderedGraph is deprecated and will be removed in version 3.0. + Use `Graph` instead, which guarantees order is preserved for + Python >= 3.7 + """ + + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + def __init__(self, incoming_graph_data=None, **attr): + warnings.warn( + ( + "OrderedGraph is deprecated and will be removed in version 3.0.\n" + "Use `Graph` instead, which guarantees order is preserved for\n" + "Python >= 3.7\n" + ), + DeprecationWarning, + stacklevel=2, + ) + super().__init__(incoming_graph_data, **attr) + + +class OrderedDiGraph(DiGraph): + """Consistently ordered variant of :class:`~networkx.DiGraph`. + + .. deprecated:: 2.6 + + OrderedDiGraph is deprecated and will be removed in version 3.0. + Use `DiGraph` instead, which guarantees order is preserved for + Python >= 3.7 + """ + + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + def __init__(self, incoming_graph_data=None, **attr): + warnings.warn( + ( + "OrderedDiGraph is deprecated and will be removed in version 3.0.\n" + "Use `DiGraph` instead, which guarantees order is preserved for\n" + "Python >= 3.7\n" + ), + DeprecationWarning, + stacklevel=2, + ) + super().__init__(incoming_graph_data, **attr) + + +class OrderedMultiGraph(MultiGraph): + """Consistently ordered variant of :class:`~networkx.MultiGraph`. + + .. deprecated:: 2.6 + + OrderedMultiGraph is deprecated and will be removed in version 3.0. 
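# Why these Ordered classes are deprecated, in one sketch (assuming Python
# >= 3.7 and networkx importable as nx): plain dicts preserve insertion
# order, so the default classes already report nodes in the order added.
import networkx as nx

G = nx.MultiGraph()
G.add_nodes_from(["c", "a", "b"])
assert list(G.nodes) == ["c", "a", "b"]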
+ Use `MultiGraph` instead, which guarantees order is preserved for + Python >= 3.7 + """ + + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_key_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + def __init__(self, incoming_graph_data=None, **attr): + warnings.warn( + ( + "OrderedMultiGraph is deprecated and will be removed in version 3.0.\n" + "Use `MultiGraph` instead, which guarantees order is preserved for\n" + "Python >= 3.7\n" + ), + DeprecationWarning, + stacklevel=2, + ) + super().__init__(incoming_graph_data, **attr) + + +class OrderedMultiDiGraph(MultiDiGraph): + """Consistently ordered variant of :class:`~networkx.MultiDiGraph`. + + .. deprecated:: 2.6 + + OrderedMultiDiGraph is deprecated and will be removed in version 3.0. + Use `MultiDiGraph` instead, which guarantees order is preserved for + Python >= 3.7 + """ + + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_key_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + def __init__(self, incoming_graph_data=None, **attr): + warnings.warn( + ( + "OrderedMultiDiGraph is deprecated and will be removed in version 3.0.\n" + "Use `MultiDiGraph` instead, which guarantees order is preserved for\n" + "Python >= 3.7\n" + ), + DeprecationWarning, + stacklevel=2, + ) + super().__init__(incoming_graph_data, **attr) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/reportviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/reportviews.py new file mode 100644 index 0000000..de5ff04 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/reportviews.py @@ -0,0 +1,1440 @@ +""" +View Classes provide node, edge and degree "views" of a graph. + +Views for nodes, edges and degree are provided for all base graph classes. +A view means a read-only object that is quick to create, automatically +updated when the graph changes, and provides basic access like `n in V`, +`for n in V`, `V[n]` and sometimes set operations. + +The views are read-only iterable containers that are updated as the +graph is updated. As with dicts, the graph should not be updated +while iterating through the view. Views can be iterated multiple times. + +Edge and Node views also allow data attribute lookup. +The resulting attribute dict is writable as `G.edges[3, 4]['color']='red'` +Degree views allow lookup of degree values for single nodes. +Weighted degree is supported with the `weight` argument. + +NodeView +======== + + `V = G.nodes` (or `V = G.nodes()`) allows `len(V)`, `n in V`, set + operations e.g. "G.nodes & H.nodes", and `dd = G.nodes[n]`, where + `dd` is the node data dict. Iteration is over the nodes by default. + +NodeDataView +============ + + To iterate over (node, data) pairs, use arguments to `G.nodes()` + to create a DataView e.g. `DV = G.nodes(data='color', default='red')`. + The DataView iterates as `for n, color in DV` and allows + `(n, 'red') in DV`. Using `DV = G.nodes(data=True)`, the DataViews + use the full datadict in writeable form also allowing contain testing as + `(n, {'color': 'red'}) in VD`. DataViews allow set operations when + data attributes are hashable. + +DegreeView +========== + + `V = G.degree` allows iteration over (node, degree) pairs as well + as lookup: `deg=V[n]`. There are many flavors of DegreeView + for In/Out/Directed/Multi. For Directed Graphs, `G.degree` + counts both in and out going edges. 
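# A compact sketch of the directed-degree behavior just described, assuming
# networkx is importable as nx: G.degree counts in- and out-edges together.
import networkx as nx

G = nx.DiGraph([(0, 1), (2, 1)])
assert G.degree(1) == 2        # in + out
assert G.in_degree(1) == 2     # both edges point at node 1
assert G.out_degree(1) == 0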
`G.out_degree` and
+    `G.in_degree` count only specific directions.
+    Weighted degree using edge data attributes is provided via
+    `V = G.degree(weight='attr_name')` where any string with the
+    attribute name can be used. `weight=None` is the default.
+    No set operations are implemented for degrees, use NodeView.
+
+    The argument `nbunch` restricts iteration to nodes in nbunch.
+    The DegreeView can still lookup any node even if nbunch is specified.
+
+EdgeView
+========
+
+    `V = G.edges` or `V = G.edges()` allows iteration over edges as well as
+    `e in V`, set operations and edge data lookup `dd = G.edges[2, 3]`.
+    Iteration is over 2-tuples `(u, v)` for Graph/DiGraph. For multigraphs
+    edges 3-tuples `(u, v, key)` are the default but 2-tuples can be obtained
+    via `V = G.edges(keys=False)`.
+
+    Set operations for directed graphs treat the edges as a set of 2-tuples.
+    For undirected graphs, 2-tuples are not a unique representation of edges.
+    So long as the set being compared to contains unique representations
+    of its edges, the set operations will act as expected.  If the other
+    set contains both `(0, 1)` and `(1, 0)` however, the result of set
+    operations may contain both representations of the same edge.
+
+EdgeDataView
+============
+
+    Edge data can be reported using an EdgeDataView typically created
+    by calling an EdgeView: `DV = G.edges(data='weight', default=1)`.
+    The EdgeDataView allows iteration over edge tuples, membership checking
+    but no set operations.
+
+    Iteration depends on `data` and `default` and for multigraph `keys`
+    If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
+    If `data is True` iterate over 3-tuples `(u, v, datadict)`.
+    Otherwise iterate over `(u, v, datadict.get(data, default))`.
+    For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key`
+    to create 3-tuples and 4-tuples.
+
+    The argument `nbunch` restricts edges to those incident to nodes in nbunch.
+"""
+from collections.abc import Mapping, Set
+
+import networkx as nx
+
+__all__ = [
+    "NodeView",
+    "NodeDataView",
+    "EdgeView",
+    "OutEdgeView",
+    "InEdgeView",
+    "EdgeDataView",
+    "OutEdgeDataView",
+    "InEdgeDataView",
+    "MultiEdgeView",
+    "OutMultiEdgeView",
+    "InMultiEdgeView",
+    "MultiEdgeDataView",
+    "OutMultiEdgeDataView",
+    "InMultiEdgeDataView",
+    "DegreeView",
+    "DiDegreeView",
+    "InDegreeView",
+    "OutDegreeView",
+    "MultiDegreeView",
+    "DiMultiDegreeView",
+    "InMultiDegreeView",
+    "OutMultiDegreeView",
+]
+
+
+# NodeViews
+class NodeView(Mapping, Set):
+    """A NodeView class to act as G.nodes for a NetworkX Graph
+
+    Set operations act on the nodes without considering data.
+    Iteration is over nodes. Node data can be looked up like a dict.
+    Use NodeDataView to iterate over node data or to specify a data
+    attribute for lookup. NodeDataView is created by calling the NodeView.
+
+    Parameters
+    ----------
+    graph : NetworkX graph-like class
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> NV = G.nodes()
+    >>> 2 in NV
+    True
+    >>> for n in NV:
+    ...     print(n)
+    0
+    1
+    2
+    >>> assert NV & {1, 2, 3} == {1, 2}
+
+    >>> G.add_node(2, color="blue")
+    >>> NV[2]
+    {'color': 'blue'}
+    >>> G.add_node(8, color="red")
+    >>> NDV = G.nodes(data=True)
+    >>> (2, NV[2]) in NDV
+    True
+    >>> for n, dd in NDV:
+    ...     print((n, dd.get("color", "aqua")))
+    (0, 'aqua')
+    (1, 'aqua')
+    (2, 'blue')
+    (8, 'red')
+    >>> NDV[2] == NV[2]
+    True
+
+    >>> NVdata = G.nodes(data="color", default="aqua")
+    >>> (2, NVdata[2]) in NVdata
+    True
+    >>> for n, dd in NVdata:
+    ...
print((n, dd)) + (0, 'aqua') + (1, 'aqua') + (2, 'blue') + (8, 'red') + >>> NVdata[2] == NV[2] # NVdata gets 'color', NV gets datadict + False + """ + + __slots__ = ("_nodes",) + + def __getstate__(self): + return {"_nodes": self._nodes} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + + def __init__(self, graph): + self._nodes = graph._node + + # Mapping methods + def __len__(self): + return len(self._nodes) + + def __iter__(self): + return iter(self._nodes) + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]" + ) + return self._nodes[n] + + # Set methods + def __contains__(self, n): + return n in self._nodes + + @classmethod + def _from_iterable(cls, it): + return set(it) + + # DataView method + def __call__(self, data=False, default=None): + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def data(self, data=True, default=None): + """ + Return a read-only view of node data. + + Parameters + ---------- + data : bool or node data key, default=True + If ``data=True`` (the default), return a `NodeDataView` object that + maps each node to *all* of its attributes. `data` may also be an + arbitrary key, in which case the `NodeDataView` maps each node to + the value for the keyed attribute. In this case, if a node does + not have the `data` attribute, the `default` value is used. + default : object, default=None + The value used when a node does not have a specific attribute. + + Returns + ------- + NodeDataView + The layout of the returned NodeDataView depends on the value of the + `data` parameter. + + Notes + ----- + If ``data=False``, returns a `NodeView` object without data. + + See Also + -------- + NodeDataView + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([ + ... (0, {"color": "red", "weight": 10}), + ... (1, {"color": "blue"}), + ... (2, {"color": "yellow", "weight": 2}) + ... ]) + + Accessing node data with ``data=True`` (the default) returns a + NodeDataView mapping each node to all of its attributes: + + >>> G.nodes.data() + NodeDataView({0: {'color': 'red', 'weight': 10}, 1: {'color': 'blue'}, 2: {'color': 'yellow', 'weight': 2}}) + + If `data` represents a key in the node attribute dict, a NodeDataView mapping + the nodes to the value for that specific key is returned: + + >>> G.nodes.data("color") + NodeDataView({0: 'red', 1: 'blue', 2: 'yellow'}, data='color') + + If a specific key is not found in an attribute dict, the value specified + by `default` is returned: + + >>> G.nodes.data("weight", default=-999) + NodeDataView({0: 10, 1: -999, 2: 2}, data='weight') + + Note that there is no check that the `data` key is in any of the + node attribute dictionaries: + + >>> G.nodes.data("height") + NodeDataView({0: None, 1: None, 2: None}, data='height') + """ + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def __str__(self): + return str(list(self)) + + def __repr__(self): + return f"{self.__class__.__name__}({tuple(self)})" + + +class NodeDataView(Set): + """A DataView class for nodes of a NetworkX Graph + + The main use for this class is to iterate through node-data pairs. + The data can be the entire data-dictionary for each node, or it + can be a specific attribute (with default) for each node. + Set operations are enabled with NodeDataView, but don't work in + cases where the data is not hashable. Use with caution. 
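# The "use with caution" warning above, made concrete (assuming networkx is
# importable as nx): set operations work when the reported data values are
# hashable, but data=True reports whole attribute dicts, which are not.
import networkx as nx

G = nx.Graph()
G.add_node(1, color="blue")
assert set(G.nodes(data="color")) == {(1, "blue")}  # hashable values: fine
try:
    set(G.nodes(data=True))                         # dicts are unhashable
except TypeError:
    pass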
+ Typically, set operations on nodes use NodeView, not NodeDataView. + That is, they use `G.nodes` instead of `G.nodes(data='foo')`. + + Parameters + ========== + graph : NetworkX graph-like class + data : bool or string (default=False) + default : object (default=None) + """ + + __slots__ = ("_nodes", "_data", "_default") + + def __getstate__(self): + return {"_nodes": self._nodes, "_data": self._data, "_default": self._default} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + self._data = state["_data"] + self._default = state["_default"] + + def __init__(self, nodedict, data=False, default=None): + self._nodes = nodedict + self._data = data + self._default = default + + @classmethod + def _from_iterable(cls, it): + try: + return set(it) + except TypeError as err: + if "unhashable" in str(err): + msg = " : Could be b/c data=True or your values are unhashable" + raise TypeError(str(err) + msg) from err + raise + + def __len__(self): + return len(self._nodes) + + def __iter__(self): + data = self._data + if data is False: + return iter(self._nodes) + if data is True: + return iter(self._nodes.items()) + return ( + (n, dd[data] if data in dd else self._default) + for n, dd in self._nodes.items() + ) + + def __contains__(self, n): + try: + node_in = n in self._nodes + except TypeError: + n, d = n + return n in self._nodes and self[n] == d + if node_in is True: + return node_in + try: + n, d = n + except (TypeError, ValueError): + return False + return n in self._nodes and self[n] == d + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes.data())[{n.start}:{n.stop}:{n.step}]" + ) + ddict = self._nodes[n] + data = self._data + if data is False or data is True: + return ddict + return ddict[data] if data in ddict else self._default + + def __str__(self): + return str(list(self)) + + def __repr__(self): + name = self.__class__.__name__ + if self._data is False: + return f"{name}({tuple(self)})" + if self._data is True: + return f"{name}({dict(self)})" + return f"{name}({dict(self)}, data={self._data!r})" + + +# DegreeViews +class DiDegreeView: + """A View class for degree of nodes in a NetworkX Graph + + The functionality is like dict.items() with (node, degree) pairs. + Additional functionality includes read-only lookup of node degree, + and calling with optional features nbunch (for only a subset of nodes) + and weight (use edge weights to compute degree). + + Parameters + ========== + graph : NetworkX graph-like class + nbunch : node, container of nodes, or None meaning all nodes (default=None) + weight : bool or string (default=None) + + Notes + ----- + DegreeView can still lookup any node even if nbunch is specified. 
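# A sketch of the nbunch note above, assuming networkx is importable as nx:
# iteration is restricted to nbunch, but item lookup still works for any node.
import networkx as nx

G = nx.path_graph(4)
DV = G.degree(nbunch=[0, 1])
assert sorted(DV) == [(0, 1), (1, 2)]  # iteration covers nbunch only
assert DV[3] == 1                      # lookup is not restricted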
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> DV = G.degree()
+    >>> assert DV[2] == 1
+    >>> assert sum(deg for n, deg in DV) == 4
+
+    >>> DVweight = G.degree(weight="span")
+    >>> G.add_edge(1, 2, span=34)
+    >>> DVweight[2]
+    34
+    >>> DVweight[0]  # default edge weight is 1
+    1
+    >>> sum(span for n, span in DVweight)  # sum weighted degrees
+    70
+
+    >>> DVnbunch = G.degree(nbunch=(1, 2))
+    >>> assert len(list(DVnbunch)) == 2  # iteration over nbunch only
+    """
+
+    def __init__(self, G, nbunch=None, weight=None):
+        self._graph = G
+        self._succ = G._succ if hasattr(G, "_succ") else G._adj
+        self._pred = G._pred if hasattr(G, "_pred") else G._adj
+        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(nbunch))
+        self._weight = weight
+
+    def __call__(self, nbunch=None, weight=None):
+        if nbunch is None:
+            if weight == self._weight:
+                return self
+            return self.__class__(self._graph, None, weight)
+        try:
+            if nbunch in self._nodes:
+                if weight == self._weight:
+                    return self[nbunch]
+                return self.__class__(self._graph, None, weight)[nbunch]
+        except TypeError:
+            pass
+        return self.__class__(self._graph, nbunch, weight)
+
+    def __getitem__(self, n):
+        weight = self._weight
+        succs = self._succ[n]
+        preds = self._pred[n]
+        if weight is None:
+            return len(succs) + len(preds)
+        return sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+            dd.get(weight, 1) for dd in preds.values()
+        )
+
+    def __iter__(self):
+        weight = self._weight
+        if weight is None:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                yield (n, len(succs) + len(preds))
+        else:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                deg = sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+                    dd.get(weight, 1) for dd in preds.values()
+                )
+                yield (n, deg)
+
+    def __len__(self):
+        return len(self._nodes)
+
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({dict(self)})"
+
+
+class DegreeView(DiDegreeView):
+    """A DegreeView class to act as G.degree for a NetworkX Graph
+
+    Typical usage focuses on iteration over `(node, degree)` pairs.
+    The degree is by default the number of edges incident to the node.
+    Optional argument `weight` enables weighted degree using the edge
+    attribute named in the `weight` argument.  Reporting and iteration
+    can also be restricted to a subset of nodes using `nbunch`.
+
+    Additional functionality includes node lookup so that `G.degree[n]`
+    reports the (possibly weighted) degree of node `n`. Calling the
+    view creates a view with different arguments `nbunch` or `weight`.
+
+    Parameters
+    ==========
+    graph : NetworkX graph-like class
+    nbunch : node, container of nodes, or None meaning all nodes (default=None)
+    weight : string or None (default=None)
+
+    Notes
+    -----
+    DegreeView can still lookup any node even if nbunch is specified.
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> DV = G.degree() + >>> assert DV[2] == 1 + >>> assert G.degree[2] == 1 + >>> assert sum(deg for n, deg in DV) == 4 + + >>> DVweight = G.degree(weight="span") + >>> G.add_edge(1, 2, span=34) + >>> DVweight[2] + 34 + >>> DVweight[0] # default edge weight is 1 + 1 + >>> sum(span for n, span in DVweight) # sum weighted degrees + 70 + + >>> DVnbunch = G.degree(nbunch=(1, 2)) + >>> assert len(list(DVnbunch)) == 2 # iteration over nbunch only + """ + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return len(nbrs) + (n in nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + yield (n, len(nbrs) + (n in nbrs)) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + yield (n, deg) + + +class OutDegreeView(DiDegreeView): + """A DegreeView class to report out_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if self._weight is None: + return len(nbrs) + return sum(dd.get(self._weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + yield (n, len(succs)) + else: + for n in self._nodes: + succs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in succs.values()) + yield (n, deg) + + +class InDegreeView(DiDegreeView): + """A DegreeView class to report in_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return len(nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + preds = self._pred[n] + yield (n, len(preds)) + else: + for n in self._nodes: + preds = self._pred[n] + deg = sum(dd.get(weight, 1) for dd in preds.values()) + yield (n, deg) + + +class MultiDegreeView(DiDegreeView): + """A DegreeView class for undirected multigraphs; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + # edge weighted graph - degree is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + yield (n, deg) + + +class DiMultiDegreeView(DiDegreeView): + """A DegreeView class for MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + succs = self._succ[n] + preds = self._pred[n] + if weight is None: + return sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + # edge weighted graph - degree 
is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in succs.values() for d in key_dict.values() + ) + sum( + d.get(weight, 1) for key_dict in preds.values() for d in key_dict.values() + ) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + yield (n, deg) + else: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in succs.values() + for d in key_dict.values() + ) + sum( + d.get(weight, 1) + for key_dict in preds.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class InMultiDegreeView(DiDegreeView): + """A DegreeView class for inward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class OutMultiDegreeView(DiDegreeView): + """A DegreeView class for outward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +# EdgeDataViews +class OutEdgeDataView: + """EdgeDataView for outward edges of DiGraph; See EdgeDataView""" + + __slots__ = ( + "_viewer", + "_nbunch", + "_data", + "_default", + "_adjdict", + "_nodes_nbrs", + "_report", + ) + + def __getstate__(self): + return { + "viewer": self._viewer, + "nbunch": self._nbunch, + "data": self._data, + "default": self._default, + } + + def __setstate__(self, state): + self.__init__(**state) + + def __init__(self, viewer, nbunch=None, data=False, default=None): + self._viewer = viewer + adjdict = self._adjdict = viewer._adjdict + if nbunch is None: + self._nodes_nbrs = adjdict.items + else: + # dict retains order of nodes but acts like a set + nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch)) + self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch] + self._nbunch = nbunch + self._data = data + self._default = default + # Set _report based on data and default + if data is True: + self._report = lambda n, nbr, dd: (n, nbr, dd) + elif data is False: + self._report = lambda n, nbr, dd: (n, nbr) + else: # data is attribute name + self._report = ( + lambda n, nbr, dd: (n, nbr, dd[data]) + if data in dd + else (n, nbr, default) + ) + + def __len__(self): + return 
sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) + + def __iter__(self): + return ( + self._report(n, nbr, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, dd in nbrs.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch: + return False # this edge doesn't start in nbunch + try: + ddict = self._adjdict[u][v] + except KeyError: + return False + return e == self._report(u, v, ddict) + + def __str__(self): + return str(list(self)) + + def __repr__(self): + return f"{self.__class__.__name__}({list(self)})" + + +class EdgeDataView(OutEdgeDataView): + """A EdgeDataView class for edges of Graph + + This view is primarily used to iterate over the edges reporting + edges as node-tuples with edge data optionally reported. The + argument `nbunch` allows restriction to edges incident to nodes + in that container/singleton. The default (nbunch=None) + reports all edges. The arguments `data` and `default` control + what edge data is reported. The default `data is False` reports + only node-tuples for each edge. If `data is True` the entire edge + data dict is returned. Otherwise `data` is assumed to hold the name + of the edge attribute to report with default `default` if that + edge attribute is not present. + + Parameters + ---------- + nbunch : container of nodes, node or None (default None) + data : False, True or string (default False) + default : default value (default None) + + Examples + -------- + >>> G = nx.path_graph(3) + >>> G.add_edge(1, 2, foo="bar") + >>> list(G.edges(data="foo", default="biz")) + [(0, 1, 'biz'), (1, 2, 'bar')] + >>> assert (0, 1, "biz") in G.edges(data="foo", default="biz") + """ + + __slots__ = () + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, dd in nbrs.items(): + if nbr not in seen: + yield self._report(n, nbr, dd) + seen[n] = 1 + del seen + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch: + return False # this edge doesn't start and it doesn't end in nbunch + try: + ddict = self._adjdict[u][v] + except KeyError: + return False + return e == self._report(u, v, ddict) + + +class InEdgeDataView(OutEdgeDataView): + """An EdgeDataView class for outward edges of DiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + return ( + self._report(nbr, n, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, dd in nbrs.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and v not in self._nbunch: + return False # this edge doesn't end in nbunch + try: + ddict = self._adjdict[v][u] + except KeyError: + return False + return e == self._report(u, v, ddict) + + +class OutMultiEdgeDataView(OutEdgeDataView): + """An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView""" + + __slots__ = ("keys",) + + def __getstate__(self): + return { + "viewer": self._viewer, + "nbunch": self._nbunch, + "keys": self.keys, + "data": self._data, + "default": self._default, + } + + def __setstate__(self, state): + self.__init__(**state) + + def __init__(self, viewer, nbunch=None, data=False, keys=False, default=None): + self._viewer = viewer + adjdict = self._adjdict = viewer._adjdict + self.keys = keys + if nbunch is None: + self._nodes_nbrs = adjdict.items + else: + # dict retains order of nodes but acts like a set + nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch)) + self._nodes_nbrs = lambda: [(n, 
adjdict[n]) for n in nbunch] + self._nbunch = nbunch + self._data = data + self._default = default + # Set _report based on data and default + if data is True: + if keys is True: + self._report = lambda n, nbr, k, dd: (n, nbr, k, dd) + else: + self._report = lambda n, nbr, k, dd: (n, nbr, dd) + elif data is False: + if keys is True: + self._report = lambda n, nbr, k, dd: (n, nbr, k) + else: + self._report = lambda n, nbr, k, dd: (n, nbr) + else: # data is attribute name + if keys is True: + self._report = ( + lambda n, nbr, k, dd: (n, nbr, k, dd[data]) + if data in dd + else (n, nbr, k, default) + ) + else: + self._report = ( + lambda n, nbr, k, dd: (n, nbr, dd[data]) + if data in dd + else (n, nbr, default) + ) + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + return ( + self._report(n, nbr, k, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, kd in nbrs.items() + for k, dd in kd.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch: + return False # this edge doesn't start in nbunch + try: + kdict = self._adjdict[u][v] + except KeyError: + return False + if self.keys is True: + k = e[2] + try: + dd = kdict[k] + except KeyError: + return False + return e == self._report(u, v, k, dd) + for k, dd in kdict.items(): + if e == self._report(u, v, k, dd): + return True + return False + + +class MultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView class for edges of MultiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield self._report(n, nbr, k, dd) + seen[n] = 1 + del seen + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch: + return False # this edge doesn't start and doesn't end in nbunch + try: + kdict = self._adjdict[u][v] + except KeyError: + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + try: + dd = kdict[k] + except KeyError: + return False + return e == self._report(u, v, k, dd) + for k, dd in kdict.items(): + if e == self._report(u, v, k, dd): + return True + return False + + +class InMultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + return ( + self._report(nbr, n, k, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, kd in nbrs.items() + for k, dd in kd.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and v not in self._nbunch: + return False # this edge doesn't end in nbunch + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + dd = kdict[k] + return e == self._report(u, v, k, dd) + for k, dd in kdict.items(): + if e == self._report(u, v, k, dd): + return True + return False + + +# EdgeViews have set operations and no data reported +class OutEdgeView(Set, Mapping): + """A EdgeView class for outward edges of a DiGraph""" + + __slots__ = ("_adjdict", "_graph", "_nodes_nbrs") + + def __getstate__(self): + return {"_graph": self._graph, "_adjdict": self._adjdict} + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + @classmethod + def _from_iterable(cls, it): + return set(it) + + dataview = OutEdgeDataView + + def 
__init__(self, G): + self._graph = G + self._adjdict = G._succ if hasattr(G, "succ") else G._adj + self._nodes_nbrs = self._adjdict.items + + # Set methods + def __len__(self): + return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (n, nbr) + + def __contains__(self, e): + try: + u, v = e + return v in self._adjdict[u] + except KeyError: + return False + + # Mapping Methods + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + return self._adjdict[u][v] + + # EdgeDataView methods + def __call__(self, nbunch=None, data=False, default=None): + if nbunch is None and data is False: + return self + return self.dataview(self, nbunch, data, default) + + def data(self, data=True, default=None, nbunch=None): + """ + Return a read-only view of edge data. + + Parameters + ---------- + data : bool or edge attribute key + If ``data=True``, then the data view maps each edge to a dictionary + containing all of its attributes. If `data` is a key in the edge + dictionary, then the data view maps each edge to its value for + the keyed attribute. In this case, if the edge doesn't have the + attribute, the `default` value is returned. + default : object, default=None + The value used when an edge does not have a specific attribute + nbunch : container of nodes, optional (default=None) + Allows restriction to edges only involving certain nodes. All edges + are considered by default. + + Returns + ------- + dataview + Returns an `EdgeDataView` for undirected Graphs, `OutEdgeDataView` + for DiGraphs, `MultiEdgeDataView` for MultiGraphs and + `OutMultiEdgeDataView` for MultiDiGraphs. + + Notes + ----- + If ``data=False``, returns an `EdgeView` without any edge data. + + See Also + -------- + EdgeDataView + OutEdgeDataView + MultiEdgeDataView + OutMultiEdgeDataView + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([ + ... (0, 1, {"dist": 3, "capacity": 20}), + ... (1, 2, {"dist": 4}), + ... (2, 0, {"dist": 5}) + ... 
])
+
+        Accessing edge data with ``data=True`` (the default) returns an
+        edge data view object listing each edge with all of its attributes:
+
+        >>> G.edges.data()
+        EdgeDataView([(0, 1, {'dist': 3, 'capacity': 20}), (0, 2, {'dist': 5}), (1, 2, {'dist': 4})])
+
+        If `data` represents a key in the edge attribute dict, a dataview listing
+        each edge with its value for that specific key is returned:
+
+        >>> G.edges.data("dist")
+        EdgeDataView([(0, 1, 3), (0, 2, 5), (1, 2, 4)])
+
+        `nbunch` can be used to limit the edges:
+
+        >>> G.edges.data("dist", nbunch=[0])
+        EdgeDataView([(0, 1, 3), (0, 2, 5)])
+
+        If a specific key is not found in an edge attribute dict, the value
+        specified by `default` is used:
+
+        >>> G.edges.data("capacity")
+        EdgeDataView([(0, 1, 20), (0, 2, None), (1, 2, None)])
+
+        Note that there is no check that the `data` key is present in any of
+        the edge attribute dictionaries:
+
+        >>> G.edges.data("speed")
+        EdgeDataView([(0, 1, None), (0, 2, None), (1, 2, None)])
+        """
+        if nbunch is None and data is False:
+            return self
+        return self.dataview(self, nbunch, data, default)
+
+    # String Methods
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({list(self)})"
+
+
+class EdgeView(OutEdgeView):
+    """A EdgeView class for edges of a Graph
+
+    This densely packed View allows iteration over edges, data lookup
+    like a dict and set operations on edges represented by node-tuples.
+    In addition, edge data can be controlled by calling this object
+    possibly creating an EdgeDataView. Typically edges are iterated over
+    and reported as `(u, v)` node tuples or `(u, v, key)` node/key tuples
+    for multigraphs. Those edge representations can also be used to
+    look up the data dict for any edge. Set operations also are available
+    where those tuples are the elements of the set.
+    Calling this object with optional arguments `data`, `default` and `keys`
+    controls the form of the tuple (see EdgeDataView). Optional argument
+    `nbunch` allows restriction to edges only involving certain nodes.
+
+    If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
+    If `data is True` iterate over 3-tuples `(u, v, datadict)`.
+    Otherwise iterate over `(u, v, datadict.get(data, default))`.
+    For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` above.
+
+    Parameters
+    ==========
+    graph : NetworkX graph-like class
+    nbunch : (default= all nodes in graph) only report edges with these nodes
+    keys : (only for MultiGraph. default=False) report edge key in tuple
+    data : bool or string (default=False) see above
+    default : object (default=None)
+
+    Examples
+    ========
+    >>> G = nx.path_graph(4)
+    >>> EV = G.edges()
+    >>> (2, 3) in EV
+    True
+    >>> for u, v in EV:
+    ...     print((u, v))
+    (0, 1)
+    (1, 2)
+    (2, 3)
+    >>> assert EV & {(1, 2), (3, 4)} == {(1, 2)}
+
+    >>> EVdata = G.edges(data="color", default="aqua")
+    >>> G.add_edge(2, 3, color="blue")
+    >>> assert (2, 3, "blue") in EVdata
+    >>> for u, v, c in EVdata:
+    ...     print(f"({u}, {v}) has color: {c}")
+    (0, 1) has color: aqua
+    (1, 2) has color: aqua
+    (2, 3) has color: blue
+
+    >>> EVnbunch = G.edges(nbunch=2)
+    >>> assert (2, 3) in EVnbunch
+    >>> assert (0, 1) not in EVnbunch
+    >>> for u, v in EVnbunch:
+    ...
assert u == 2 or v == 2 + + >>> MG = nx.path_graph(4, create_using=nx.MultiGraph) + >>> EVmulti = MG.edges(keys=True) + >>> (2, 3, 0) in EVmulti + True + >>> (2, 3) in EVmulti # 2-tuples work even when keys is True + True + >>> key = MG.add_edge(2, 3) + >>> for u, v, k in EVmulti: + ... print((u, v, k)) + (0, 1, 0) + (1, 2, 0) + (2, 3, 0) + (2, 3, 1) + """ + + __slots__ = () + + dataview = EdgeDataView + + def __len__(self): + num_nbrs = (len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs()) + return sum(num_nbrs) // 2 + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr in list(nbrs): + if nbr not in seen: + yield (n, nbr) + seen[n] = 1 + del seen + + def __contains__(self, e): + try: + u, v = e[:2] + return v in self._adjdict[u] or u in self._adjdict[v] + except (KeyError, ValueError): + return False + + +class InEdgeView(OutEdgeView): + """A EdgeView class for inward edges of a DiGraph""" + + __slots__ = () + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (nbr, n) + + def __contains__(self, e): + try: + u, v = e + return u in self._adjdict[v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + return self._adjdict[v][u] + + +class OutMultiEdgeView(OutEdgeView): + """A EdgeView class for outward edges of a MultiDiGraph""" + + __slots__ = () + + dataview = OutMultiEdgeDataView + + def __len__(self): + return sum( + len(kdict) for n, nbrs in self._nodes_nbrs() for nbr, kdict in nbrs.items() + ) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (n, nbr, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[u][v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[u][v][k] + + def __call__(self, nbunch=None, data=False, keys=False, default=None): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, keys, default) + + def data(self, data=True, keys=False, default=None, nbunch=None): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, keys, default) + + +class MultiEdgeView(OutMultiEdgeView): + """A EdgeView class for edges of a MultiGraph""" + + __slots__ = () + + dataview = MultiEdgeDataView + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield (n, nbr, k) + seen[n] = 1 + del seen + + +class InMultiEdgeView(OutMultiEdgeView): + """A EdgeView class for inward edges of a MultiDiGraph""" + + __slots__ = () + + def 
__setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InMultiEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (nbr, n, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[v][u] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[v][u][k] diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/__init__.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/historical_tests.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/historical_tests.py new file mode 100644 index 0000000..44b72fd --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/historical_tests.py @@ -0,0 +1,474 @@ +"""Original NetworkX graph tests""" +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.utils import edges_equal, nodes_equal + + +class HistoricalTests: + @classmethod + def setup_class(cls): + cls.null = nx.null_graph() + cls.P1 = cnlti(nx.path_graph(1), first_label=1) + cls.P3 = cnlti(nx.path_graph(3), first_label=1) + cls.P10 = cnlti(nx.path_graph(10), first_label=1) + cls.K1 = cnlti(nx.complete_graph(1), first_label=1) + cls.K3 = cnlti(nx.complete_graph(3), first_label=1) + cls.K4 = cnlti(nx.complete_graph(4), first_label=1) + cls.K5 = cnlti(nx.complete_graph(5), first_label=1) + cls.K10 = cnlti(nx.complete_graph(10), first_label=1) + cls.G = nx.Graph + + def test_name(self): + G = self.G(name="test") + assert G.name == "test" + H = self.G() + assert H.name == "" + + # Nodes + + def test_add_remove_node(self): + G = self.G() + G.add_node("A") + assert G.has_node("A") + G.remove_node("A") + assert not G.has_node("A") + + def test_nonhashable_node(self): + # Test if a non-hashable object is in the Graph. A python dict will + # raise a TypeError, but for a Graph class a simple False should be + # returned (see Graph __contains__). If it cannot be a node then it is + # not a node. 
+ G = self.G() + assert not G.has_node(["A"]) + assert not G.has_node({"A": 1}) + + def test_add_nodes_from(self): + G = self.G() + G.add_nodes_from(list("ABCDEFGHIJKL")) + assert G.has_node("L") + G.remove_nodes_from(["H", "I", "J", "K", "L"]) + G.add_nodes_from([1, 2, 3, 4]) + assert sorted(G.nodes(), key=str) == [ + 1, + 2, + 3, + 4, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + ] + # test __iter__ + assert sorted(G, key=str) == [1, 2, 3, 4, "A", "B", "C", "D", "E", "F", "G"] + + def test_contains(self): + G = self.G() + G.add_node("A") + assert "A" in G + assert not [] in G # never raise a Key or TypeError in this test + assert not {1: 1} in G + + def test_add_remove(self): + # Test add_node and remove_node acting for various nbunch + G = self.G() + G.add_node("m") + assert G.has_node("m") + G.add_node("m") # no complaints + pytest.raises(nx.NetworkXError, G.remove_node, "j") + G.remove_node("m") + assert list(G) == [] + + def test_nbunch_is_list(self): + G = self.G() + G.add_nodes_from(list("ABCD")) + G.add_nodes_from(self.P3) # add nbunch of nodes (nbunch=Graph) + assert sorted(G.nodes(), key=str) == [1, 2, 3, "A", "B", "C", "D"] + G.remove_nodes_from(self.P3) # remove nbunch of nodes (nbunch=Graph) + assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D"] + + def test_nbunch_is_set(self): + G = self.G() + nbunch = set("ABCDEFGHIJKL") + G.add_nodes_from(nbunch) + assert G.has_node("L") + + def test_nbunch_dict(self): + # nbunch is a dict with nodes as keys + G = self.G() + nbunch = set("ABCDEFGHIJKL") + G.add_nodes_from(nbunch) + nbunch = {"I": "foo", "J": 2, "K": True, "L": "spam"} + G.remove_nodes_from(nbunch) + assert sorted(G.nodes(), key=str), ["A", "B", "C", "D", "E", "F", "G", "H"] + + def test_nbunch_iterator(self): + G = self.G() + G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"]) + n_iter = self.P3.nodes() + G.add_nodes_from(n_iter) + assert sorted(G.nodes(), key=str) == [ + 1, + 2, + 3, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + ] + n_iter = self.P3.nodes() # rebuild same iterator + G.remove_nodes_from(n_iter) # remove nbunch of nodes (nbunch=iterator) + assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D", "E", "F", "G", "H"] + + def test_nbunch_graph(self): + G = self.G() + G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"]) + nbunch = self.K3 + G.add_nodes_from(nbunch) + assert sorted(G.nodes(), key=str), [ + 1, + 2, + 3, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + ] + + # Edges + + def test_add_edge(self): + G = self.G() + pytest.raises(TypeError, G.add_edge, "A") + + G.add_edge("A", "B") # testing add_edge() + G.add_edge("A", "B") # should fail silently + assert G.has_edge("A", "B") + assert not G.has_edge("A", "C") + assert G.has_edge(*("A", "B")) + if G.is_directed(): + assert not G.has_edge("B", "A") + else: + # G is undirected, so B->A is an edge + assert G.has_edge("B", "A") + + G.add_edge("A", "C") # test directedness + G.add_edge("C", "A") + G.remove_edge("C", "A") + if G.is_directed(): + assert G.has_edge("A", "C") + else: + assert not G.has_edge("A", "C") + assert not G.has_edge("C", "A") + + def test_self_loop(self): + G = self.G() + G.add_edge("A", "A") # test self loops + assert G.has_edge("A", "A") + G.remove_edge("A", "A") + G.add_edge("X", "X") + assert G.has_node("X") + G.remove_node("X") + G.add_edge("A", "Z") # should add the node silently + assert G.has_node("Z") + + def test_add_edges_from(self): + G = self.G() + G.add_edges_from([("B", "C")]) # test add_edges_from() + assert G.has_edge("B", 
"C") + if G.is_directed(): + assert not G.has_edge("C", "B") + else: + assert G.has_edge("C", "B") # undirected + + G.add_edges_from([("D", "F"), ("B", "D")]) + assert G.has_edge("D", "F") + assert G.has_edge("B", "D") + + if G.is_directed(): + assert not G.has_edge("D", "B") + else: + assert G.has_edge("D", "B") # undirected + + def test_add_edges_from2(self): + G = self.G() + # after failing silently, should add 2nd edge + G.add_edges_from([tuple("IJ"), list("KK"), tuple("JK")]) + assert G.has_edge(*("I", "J")) + assert G.has_edge(*("K", "K")) + assert G.has_edge(*("J", "K")) + if G.is_directed(): + assert not G.has_edge(*("K", "J")) + else: + assert G.has_edge(*("K", "J")) + + def test_add_edges_from3(self): + G = self.G() + G.add_edges_from(zip(list("ACD"), list("CDE"))) + assert G.has_edge("D", "E") + assert not G.has_edge("E", "C") + + def test_remove_edge(self): + G = self.G() + G.add_nodes_from([1, 2, 3, "A", "B", "C", "D", "E", "F", "G", "H"]) + + G.add_edges_from(zip(list("MNOP"), list("NOPM"))) + assert G.has_edge("O", "P") + assert G.has_edge("P", "M") + G.remove_node("P") # tests remove_node()'s handling of edges. + assert not G.has_edge("P", "M") + pytest.raises(TypeError, G.remove_edge, "M") + + G.add_edge("N", "M") + assert G.has_edge("M", "N") + G.remove_edge("M", "N") + assert not G.has_edge("M", "N") + + # self loop fails silently + G.remove_edges_from([list("HI"), list("DF"), tuple("KK"), tuple("JK")]) + assert not G.has_edge("H", "I") + assert not G.has_edge("J", "K") + G.remove_edges_from([list("IJ"), list("KK"), list("JK")]) + assert not G.has_edge("I", "J") + G.remove_nodes_from(set("ZEFHIMNO")) + G.add_edge("J", "K") + + def test_edges_nbunch(self): + # Test G.edges(nbunch) with various forms of nbunch + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + # node not in nbunch should be quietly ignored + pytest.raises(nx.NetworkXError, G.edges, 6) + assert list(G.edges("Z")) == [] # iterable non-node + # nbunch can be an empty list + assert list(G.edges([])) == [] + if G.is_directed(): + elist = [("A", "B"), ("A", "C"), ("B", "D")] + else: + elist = [("A", "B"), ("A", "C"), ("B", "C"), ("B", "D")] + # nbunch can be a list + assert edges_equal(list(G.edges(["A", "B"])), elist) + # nbunch can be a set + assert edges_equal(G.edges({"A", "B"}), elist) + # nbunch can be a graph + G1 = self.G() + G1.add_nodes_from("AB") + assert edges_equal(G.edges(G1), elist) + # nbunch can be a dict with nodes as keys + ndict = {"A": "thing1", "B": "thing2"} + assert edges_equal(G.edges(ndict), elist) + # nbunch can be a single node + assert edges_equal(list(G.edges("A")), [("A", "B"), ("A", "C")]) + assert nodes_equal(sorted(G), ["A", "B", "C", "D"]) + + # nbunch can be nothing (whole graph) + assert edges_equal( + list(G.edges()), + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")], + ) + + def test_degree(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.degree("A") == 2 + + # degree of single node in iterable container must return dict + assert list(G.degree(["A"])) == [("A", 2)] + assert sorted(d for n, d in G.degree(["A", "B"])) == [2, 3] + assert sorted(d for n, d in G.degree()) == [2, 2, 3, 3] + + def test_degree2(self): + H = self.G() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_degree_graph(self): + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + # silently ignore nodes not in P3 + assert dict(d 
for n, d in P3.degree(["A", "B"])) == {} + # nbunch can be a graph + assert sorted(d for n, d in P5.degree(P3)) == [1, 2, 2] + # nbunch can be a graph that's way too big + assert sorted(d for n, d in P3.degree(P5)) == [1, 1, 2] + assert list(P5.degree([])) == [] + assert dict(P5.degree([])) == {} + + def test_null(self): + null = nx.null_graph() + assert list(null.degree()) == [] + assert dict(null.degree()) == {} + + def test_order_size(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.order() == 4 + assert G.size() == 5 + assert G.number_of_edges() == 5 + assert G.number_of_edges("A", "B") == 1 + assert G.number_of_edges("A", "D") == 0 + + def test_copy(self): + G = self.G() + H = G.copy() # copy + assert H.adj == G.adj + assert H.name == G.name + assert H is not G + + def test_subgraph(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + SG = G.subgraph(["A", "B", "D"]) + assert nodes_equal(list(SG), ["A", "B", "D"]) + assert edges_equal(list(SG.edges()), [("A", "B"), ("B", "D")]) + + def test_to_directed(self): + G = self.G() + if not G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + DG = G.to_directed() + assert DG is not G # directed copy or copy + + assert DG.is_directed() + assert DG.name == G.name + assert DG.adj == G.adj + assert sorted(DG.out_edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "A"), + ("B", "C"), + ("B", "D"), + ] + DG.remove_edge("A", "B") + assert DG.has_edge("B", "A") # this removes B-A but not A-B + assert not DG.has_edge("A", "B") + + def test_to_undirected(self): + G = self.G() + if G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + UG = G.to_undirected() # to_undirected + assert UG is not G + assert not UG.is_directed() + assert G.is_directed() + assert UG.name == G.name + assert UG.adj != G.adj + assert sorted(UG.edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + assert sorted(UG.edges(["A", "B"])) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + UG.remove_edge("A", "B") + assert not UG.has_edge("B", "A") + assert not UG.has_edge("A", "B") + + def test_neighbors(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G["A"]) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("G")) == [] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_iterators(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G.nodes()) == ["A", "B", "C", "D", "G", "J", "K"] + assert edges_equal( + G.edges(), [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + assert sorted(v for k, v in G.degree()) == [0, 0, 0, 2, 2, 3, 3] + assert sorted(G.degree(), key=str) == [ + ("A", 2), + ("B", 3), + ("C", 3), + ("D", 2), + ("G", 0), + ("J", 0), + ("K", 0), + ] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "X") + G.clear() + assert nx.number_of_nodes(G) == 0 + assert nx.number_of_edges(G) == 0 + + def test_null_subgraph(self): + # Subgraph of a null graph is a null graph + nullgraph = nx.null_graph() + G = nx.null_graph() + H = G.subgraph([]) + assert 
nx.is_isomorphic(H, nullgraph) + + def test_empty_subgraph(self): + # Subgraph of an empty graph is an empty graph. test 1 + nullgraph = nx.null_graph() + E5 = nx.empty_graph(5) + E10 = nx.empty_graph(10) + H = E10.subgraph([]) + assert nx.is_isomorphic(H, nullgraph) + H = E10.subgraph([1, 2, 3, 4, 5]) + assert nx.is_isomorphic(H, E5) + + def test_complete_subgraph(self): + # Subgraph of a complete graph is a complete graph + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + H = K5.subgraph([1, 2, 3]) + assert nx.is_isomorphic(H, K3) + + def test_subgraph_nbunch(self): + nullgraph = nx.null_graph() + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + # Test G.subgraph(nbunch), where nbunch is a single node + H = K5.subgraph(1) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is a set + H = K5.subgraph({1}) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is an iterator + H = K5.subgraph(iter(K3)) + assert nx.is_isomorphic(H, K3) + # Test G.subgraph(nbunch), where nbunch is another graph + H = K5.subgraph(K3) + assert nx.is_isomorphic(H, K3) + H = K5.subgraph([9]) + assert nx.is_isomorphic(H, nullgraph) + + def test_node_tuple_issue(self): + H = self.G() + # Test error handling of tuple as a node + pytest.raises(nx.NetworkXError, H.remove_node, (1, 2)) + H.remove_nodes_from([(1, 2)]) # no error + pytest.raises(nx.NetworkXError, H.neighbors, (1, 2)) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_coreviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_coreviews.py new file mode 100644 index 0000000..12572b4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_coreviews.py @@ -0,0 +1,431 @@ +import pickle + +import pytest + +import networkx as nx + + +class TestAtlasView: + # node->data + def setup(self): + self.d = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.av = nx.classes.coreviews.AtlasView(self.d) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + pview = pickle.loads(pickle.dumps(view)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.d) + + def test_iter(self): + assert list(self.av) == list(self.d) + + def test_getitem(self): + assert self.av[1] is self.d[1] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av.__getitem__, 3) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy == self.av + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + assert sorted(self.av.items()) == sorted(self.d.items()) + + def test_str(self): + out = str(self.d) + assert str(self.av) == out + + def test_repr(self): + out = "AtlasView(" + str(self.d) + ")" + assert repr(self.av) == out + + +class TestAdjacencyView: + # node->nbr->data + def setup(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.adj = {3: self.nd, 0: {3: dd}, 1: {}, 2: {3: {"color": 1}}} + self.adjview = nx.classes.coreviews.AdjacencyView(self.adj) + + def 
test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.adj) + + def test_iter(self): + assert list(self.adjview) == list(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_items(self): + view_items = sorted((n, dict(d)) for n, d in self.adjview.items()) + assert view_items == sorted(self.adj.items()) + + def test_str(self): + out = str(dict(self.adj)) + assert str(self.adjview) == out + + def test_repr(self): + out = self.adjview.__class__.__name__ + "(" + str(self.adj) + ")" + assert repr(self.adjview) == out + + +class TestMultiAdjacencyView(TestAdjacencyView): + # node->nbr->key->data + def setup(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {0: dd, 1: {}, 2: {"color": 1}} + self.nd = {3: self.kd, 0: {3: dd}, 1: {0: {}}, 2: {3: {"color": 1}}} + self.adj = {3: self.nd, 0: {3: {3: dd}}, 1: {}, 2: {3: {8: {}}}} + self.adjview = nx.classes.coreviews.MultiAdjacencyView(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0][3] is self.adjview[0][3][3] + assert self.adjview[3][2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + +class TestUnionAtlas: + # node->data + def setup(self): + self.s = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.p = {3: {"color": "blue", "weight": 1.2}, 4: {}, 2: {"watch": 2}} + self.av = nx.classes.coreviews.UnionAtlas(self.s, self.p) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.s) + len(self.p) + + def test_iter(self): + assert set(self.av) == set(self.s) | set(self.p) + + def test_getitem(self): + assert self.av[0] is self.s[0] + assert self.av[4] is self.p[4] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av[2].__getitem__, "watch") + pytest.raises(KeyError, self.av.__getitem__, 8) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + expected = dict(self.p.items()) + expected.update(self.s) + assert sorted(self.av.items()) == sorted(expected.items()) + + def 
test_str(self): + out = str(dict(self.av)) + assert str(self.av) == out + + def test_repr(self): + out = f"{self.av.__class__.__name__}({self.s}, {self.p})" + assert repr(self.av) == out + + +class TestUnionAdjacency: + # node->nbr->data + def setup(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.s = {3: self.nd, 0: {}, 1: {}, 2: {3: {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {0: {}}, 2: {1: {"color": 1}}} + self.adjview = nx.classes.coreviews.UnionAdjacency(self.s, self.p) + + def test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.s) + + def test_iter(self): + assert sorted(self.adjview) == sorted(self.s) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_str(self): + out = str(dict(self.adjview)) + assert str(self.adjview) == out + + def test_repr(self): + clsname = self.adjview.__class__.__name__ + out = f"{clsname}({self.s}, {self.p})" + assert repr(self.adjview) == out + + +class TestUnionMultiInner(TestUnionAdjacency): + # nbr->key->data + def setup(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, "ekey": {}, 9: {"color": 1}} + self.s = {3: self.kd, 0: {7: dd}, 1: {}, 2: {"key": {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {}, 2: {1: {"span": 2}}} + self.adjview = nx.classes.coreviews.UnionMultiInner(self.s, self.p) + + def test_len(self): + assert len(self.adjview) == len(self.s) + len(self.p) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[0][7] is self.adjview[0][3] + assert self.adjview[2]["key"]["color"] == 1 + assert self.adjview[2][1]["span"] == 2 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + pytest.raises(KeyError, self.adjview[1].__getitem__, "key") + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][1]["width"] = 8 + assert avcopy[2] != self.adjview[2] + self.adjview[2][1]["width"] = 8 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][1]["width"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestUnionMultiAdjacency(TestUnionAdjacency): + # node->nbr->key->data + def setup(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, 8: {}, 9: {"color": 1}} + self.nd = {3: self.kd, 0: {9: dd}, 1: {8: {}}, 2: {9: {"color": 1}}} + self.s = {3: self.nd, 0: {3: {7: dd}}, 1: {}, 2: {3: {8: {}}}} + self.p = {3: {}, 0: {3: {9: dd}}, 1: {}, 2: {1: {8: {}}}} + self.adjview = nx.classes.coreviews.UnionMultiAdjacency(self.s, self.p) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0][9] is self.adjview[0][3][9] + assert self.adjview[3][2][9]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = 
self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestFilteredGraphs: + def setup(self): + self.Graphs = [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] + + def test_hide_show_nodes(self): + SubGraph = nx.graphviews.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, nx.filters.hide_nodes([0, 1])) + assert SG.nodes == RG.nodes + assert SG.edges == RG.edges + SGC = SG.copy() + RGC = RG.copy() + assert SGC.nodes == RGC.nodes + assert SGC.edges == RGC.edges + + def test_str_repr(self): + SubGraph = nx.graphviews.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, nx.filters.hide_nodes([0, 1])) + str(SG.adj) + str(RG.adj) + repr(SG.adj) + repr(RG.adj) + str(SG.adj[2]) + str(RG.adj[2]) + repr(SG.adj[2]) + repr(RG.adj[2]) + + def test_copy(self): + SubGraph = nx.graphviews.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, nx.filters.hide_nodes([0, 1])) + RsG = SubGraph(G, nx.filters.show_nodes([2, 3])) + assert G.adj.copy() == G.adj + assert G.adj[2].copy() == G.adj[2] + assert SG.adj.copy() == SG.adj + assert SG.adj[2].copy() == SG.adj[2] + assert RG.adj.copy() == RG.adj + assert RG.adj[2].copy() == RG.adj[2] + assert RsG.adj.copy() == RsG.adj + assert RsG.adj[2].copy() == RsG.adj[2] + + def test_filtered_copy(self): + # TODO: This function can be removed when filtered.copy() + # deprecation expires + SubGraph = nx.graphviews.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, nx.filters.hide_nodes([0, 1])) + RsG = SubGraph(G, nx.filters.show_nodes([2, 3])) + # test FilterAtlas & co in these subgraphs + assert SG._node.copy() == SG._node + assert SG.adj._atlas.copy() == SG.adj._atlas + assert SG.adj[2]._atlas.copy() == SG.adj[2]._atlas + assert SG.adj[2]._atlas[3].copy() == SG.adj[2]._atlas[3] + assert RG.adj._atlas.copy() == RG.adj._atlas + assert RG.adj[2]._atlas.copy() == RG.adj[2]._atlas + assert RG.adj[2]._atlas[3].copy() == RG.adj[2]._atlas[3] + assert RG._node.copy() == RG._node + assert RsG.adj._atlas.copy() == RsG.adj._atlas + assert RsG.adj[2]._atlas.copy() == RsG.adj[2]._atlas + assert RsG.adj[2]._atlas[3].copy() == RsG.adj[2]._atlas[3] + assert RsG._node.copy() == RsG._node + # test MultiFilterInner + if G.is_multigraph(): + assert SG.adj[2]._atlas[3][0].copy() == SG.adj[2]._atlas[3][0] + assert RG.adj[2]._atlas[3][0].copy() == RG.adj[2]._atlas[3][0] + assert RsG.adj[2]._atlas[3][0].copy() == RsG.adj[2]._atlas[3][0] + + # test deprecation + # FilterAtlas.copy() + pytest.deprecated_call(SG._node.copy) + # FilterAdjacency.copy() + pytest.deprecated_call(SG.adj._atlas.copy) + # FilterMultiAdjacency.copy() + if G.is_multigraph(): + pytest.deprecated_call(SG.adj._atlas.copy) + # FilterMultiInner.copy() + if G.is_multigraph(): + pytest.deprecated_call(SG.adj[2]._atlas.copy) + + SSG = SG.subgraph([2]) + assert list(SSG) == [2] + + # check case when node_ok is small + G = nx.complete_graph(9, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, nx.filters.hide_nodes([0, 1])) + RsG = SubGraph(G, 
nx.filters.show_nodes([2, 3, 4, 5, 6, 7, 8])) + assert SG.adj._atlas.copy() == SG.adj._atlas + assert SG.adj[2]._atlas.copy() == SG.adj[2]._atlas + assert SG.adj[2]._atlas[3].copy() == SG.adj[2]._atlas[3] + assert SG._node.copy() == SG._node + assert RG.adj._atlas.copy() == RG.adj._atlas + assert RG.adj[2]._atlas.copy() == RG.adj[2]._atlas + assert RG.adj[2]._atlas[3].copy() == RG.adj[2]._atlas[3] + assert RG._node.copy() == RG._node + assert RsG.adj._atlas.copy() == RsG.adj._atlas + assert RsG.adj[2]._atlas.copy() == RsG.adj[2]._atlas + assert RsG.adj[2]._atlas[3].copy() == RsG.adj[2]._atlas[3] + assert RsG._node.copy() == RsG._node + # test MultiFilterInner + if G.is_multigraph(): + assert SG.adj[2][3]._atlas.copy() == SG.adj[2][3]._atlas + assert RG.adj[2][3]._atlas.copy() == RG.adj[2][3]._atlas + assert RsG.adj[2][3]._atlas.copy() == RsG.adj[2][3]._atlas + + SSG = SG.subgraph([2]) + assert list(SSG) == [2] diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph.py new file mode 100644 index 0000000..c8e8655 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph.py @@ -0,0 +1,327 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + +from .test_graph import BaseAttrGraphTester, BaseGraphTester +from .test_graph import TestEdgeSubgraph as _TestGraphEdgeSubgraph +from .test_graph import TestGraph as _TestGraph + + +class BaseDiGraphTester(BaseGraphTester): + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.successors(-1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.predecessors(-1) + + def test_edges(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + assert sorted(G.edges([0, 1])) == [(0, 1), (0, 2), (1, 0), (1, 2)] + with pytest.raises(nx.NetworkXError): + G.edges(-1) + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + with pytest.raises(nx.NetworkXError): + G.out_edges(-1) + + def test_out_edges_dir(self): + G = self.P3 + assert sorted(G.out_edges()) == [(0, 1), (1, 2)] + assert sorted(G.out_edges(0)) == [(0, 1)] + assert sorted(G.out_edges(2)) == [] + + def test_out_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.out_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.out_edges(0, data=True)) == [(0, 1, {"data": 0})] + assert sorted(G.out_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.out_edges(0, data="data")) == [(0, 1, 0)] + + def test_in_edges_dir(self): + G = self.P3 + assert sorted(G.in_edges()) == [(0, 1), (1, 2)] + assert sorted(G.in_edges(0)) == [] + assert sorted(G.in_edges(2)) == [(1, 2)] + + def test_in_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.in_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.in_edges(1, data=True)) == [(0, 1, {"data": 
0})] + assert sorted(G.in_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.in_edges(1, data="data")) == [(0, 1, 0)] + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] # run through iterator + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] # run through iterator + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (2, 1)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(2, 1)] + assert sorted(G.edges()) == [(0, 1), (1, 2)] + + def test_reverse_nocopy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (2, 1)] + with pytest.raises(nx.NetworkXError): + R.remove_edge(1, 0) + + def test_reverse_hashable(self): + class Foo: + pass + + x = Foo() + y = Foo() + G = nx.DiGraph() + G.add_edge(x, y) + assert nodes_equal(G.nodes(), G.reverse().nodes()) + assert [(y, x)] == list(G.reverse().edges()) + + def test_di_cache_reset(self): + G = self.K3.copy() + old_succ = G.succ + assert id(G.succ) == id(old_succ) + old_adj = G.adj + assert id(G.adj) == id(old_adj) + + G._succ = {} + assert id(G.succ) != id(old_succ) + assert id(G.adj) != id(old_adj) + + old_pred = G.pred + assert id(G.pred) == id(old_pred) + G._pred = {} + assert id(G.pred) != id(old_pred) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester): + def test_edges_data(self): + G = self.K3 + all_edges = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + ] + assert sorted(G.edges(data=True)) == all_edges + assert sorted(G.edges(0, data=True)) == all_edges[:2] + assert sorted(G.edges([0, 1], data=True)) == all_edges[:4] + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_in_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.in_degree(weight="weight")) == [(0, 2), (1, 1.3), (2, 2)] + assert dict(G.in_degree(weight="weight")) == {0: 2, 1: 1.3, 2: 2} + assert G.in_degree(1, weight="weight") == 1.3 + assert sorted(G.in_degree(weight="other")) == [(0, 2), (1, 2.2), (2, 2)] + assert dict(G.in_degree(weight="other")) == {0: 2, 1: 2.2, 2: 2} + assert G.in_degree(1, weight="other") == 2.2 + assert list(G.in_degree(iter([1]), weight="other")) == [(1, 
2.2)] + + def test_out_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.out_degree(weight="weight")) == [(0, 1.3), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="weight")) == {0: 1.3, 1: 2, 2: 2} + assert G.out_degree(0, weight="weight") == 1.3 + assert sorted(G.out_degree(weight="other")) == [(0, 2.2), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="other")) == {0: 2.2, 1: 2, 2: 2} + assert G.out_degree(0, weight="other") == 2.2 + assert list(G.out_degree(iter([0]), weight="other")) == [(0, 2.2)] + + +class TestDiGraph(BaseAttrDiGraphTester, _TestGraph): + """Tests specific to dict-of-dict-of-dict digraph data structure""" + + def setup_method(self): + self.Graph = nx.DiGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = ({}, {}) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + # P3._adj is synced with P3._succ + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.succ.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.pred.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"data": 3})], data=2) + assert G.adj == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.succ == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.pred == {0: {}, 1: {0: {"data": 2}}, 2: {0: {"data": 3}}} + + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3 + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.succ == {} + assert G.pred == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3 + G.graph["name"] = "K3" + nodes = 
list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + expected = {0: {}, 1: {}, 2: {}} + assert G.succ == expected + assert G.pred == expected + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + +class TestEdgeSubgraph(_TestGraphEdgeSubgraph): + """Unit tests for the :meth:`DiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. + G = nx.DiGraph(nx.path_graph(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_pred_succ(self): + """Test that nodes are added to predecessors and successors. + + For more information, see GitHub issue #2370. + + """ + G = nx.DiGraph() + G.add_edge(0, 1) + H = G.edge_subgraph([(0, 1)]) + assert list(H.predecessors(0)) == [] + assert list(H.successors(0)) == [1] + assert list(H.predecessors(1)) == [0] + assert list(H.successors(1)) == [] diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph_historical.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph_historical.py new file mode 100644 index 0000000..6bcd1e1 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_digraph_historical.py @@ -0,0 +1,110 @@ +"""Original NetworkX graph tests""" +import pytest + +import networkx +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestDiGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.DiGraph + + def test_in_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(d for n, d in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.in_degree()) == { + "A": 0, + "C": 2, + "B": 1, + "D": 2, + "G": 0, + "K": 0, + "J": 0, + } + + def test_out_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(v for k, v in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.out_degree()) == { + "A": 2, + "C": 1, + "B": 2, + "D": 0, + "G": 0, + "K": 0, + "J": 0, + } + + def test_degree_digraph(self): + H = nx.DiGraph() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.in_degree([1, 24])) == [0, 1] + assert sorted(d for n, d in H.out_degree([1, 24])) == [0, 2] + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_neighbors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(G.neighbors("C")) == ["D"] + assert sorted(G["C"]) == ["D"] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_successors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("G")) == [] + assert sorted(G.successors("D")) == [] + assert sorted(G.successors("G")) == [] + pytest.raises(nx.NetworkXError, G.successors, "j") + 
pytest.raises(nx.NetworkXError, G.successors, "j") + + def test_predecessors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.successors("D")) == [] + + pytest.raises(nx.NetworkXError, G.predecessors, "j") + pytest.raises(nx.NetworkXError, G.predecessors, "j") + + def test_reverse(self): + G = nx.complete_graph(10) + H = G.to_directed() + HR = H.reverse() + assert nx.is_isomorphic(H, HR) + assert sorted(H.edges()) == sorted(HR.edges()) + + def test_reverse2(self): + H = nx.DiGraph() + foo = [H.add_edge(u, u + 1) for u in range(0, 5)] + HR = H.reverse() + for u in range(0, 5): + assert HR.has_edge(u + 1, u) + + def test_reverse3(self): + H = nx.DiGraph() + H.add_nodes_from([1, 2, 3, 4]) + HR = H.reverse() + assert sorted(HR.nodes()) == [1, 2, 3, 4] diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_filters.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_filters.py new file mode 100644 index 0000000..2da5911 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_filters.py @@ -0,0 +1,177 @@ +import pytest + +import networkx as nx + + +class TestFilterFactory: + def test_no_filter(self): + nf = nx.filters.no_filter + assert nf() + assert nf(1) + assert nf(2, 1) + + def test_hide_nodes(self): + f = nx.classes.filters.hide_nodes([1, 2, 3]) + assert not f(1) + assert not f(2) + assert not f(3) + assert f(4) + assert f(0) + assert f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_show_nodes(self): + f = nx.classes.filters.show_nodes([1, 2, 3]) + assert f(1) + assert f(2) + assert f(3) + assert not f(4) + assert not f(0) + assert not f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_hide_edges(self): + factory = nx.classes.filters.hide_edges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert not f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_edges(self): + factory = nx.classes.filters.show_edges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert f(4, 3) + assert not f(2, 3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_diedges(self): + factory = nx.classes.filters.hide_diedges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_diedges(self): + factory = nx.classes.filters.show_diedges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert not f(4, 3) + assert not f(2, 
3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_multiedges(self): + factory = nx.classes.filters.hide_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert not f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_show_multiedges(self): + factory = nx.classes.filters.show_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_hide_multidiedges(self): + factory = nx.classes.filters.hide_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_show_multidiedges(self): + factory = nx.classes.filters.show_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert not f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_function.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_function.py new file mode 100644 index 0000000..051d655 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_function.py @@ -0,0 +1,799 @@ +import random + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestFunction: + def setup_method(self): + self.G = nx.Graph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}, name="Test") + self.Gdegree = {0: 3, 1: 2, 2: 2, 3: 1, 4: 0} + self.Gnodes = list(range(5)) + self.Gedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + self.DG = 
nx.DiGraph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}) + self.DGin_degree = {0: 1, 1: 2, 2: 2, 3: 1, 4: 0} + self.DGout_degree = {0: 3, 1: 3, 2: 0, 3: 0, 4: 0} + self.DGnodes = list(range(5)) + self.DGedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + + def test_nodes(self): + assert nodes_equal(self.G.nodes(), list(nx.nodes(self.G))) + assert nodes_equal(self.DG.nodes(), list(nx.nodes(self.DG))) + + def test_edges(self): + assert edges_equal(self.G.edges(), list(nx.edges(self.G))) + assert sorted(self.DG.edges()) == sorted(nx.edges(self.DG)) + assert edges_equal( + self.G.edges(nbunch=[0, 1, 3]), list(nx.edges(self.G, nbunch=[0, 1, 3])) + ) + assert sorted(self.DG.edges(nbunch=[0, 1, 3])) == sorted( + nx.edges(self.DG, nbunch=[0, 1, 3]) + ) + + def test_degree(self): + assert edges_equal(self.G.degree(), list(nx.degree(self.G))) + assert sorted(self.DG.degree()) == sorted(nx.degree(self.DG)) + assert edges_equal( + self.G.degree(nbunch=[0, 1]), list(nx.degree(self.G, nbunch=[0, 1])) + ) + assert sorted(self.DG.degree(nbunch=[0, 1])) == sorted( + nx.degree(self.DG, nbunch=[0, 1]) + ) + assert edges_equal( + self.G.degree(weight="weight"), list(nx.degree(self.G, weight="weight")) + ) + assert sorted(self.DG.degree(weight="weight")) == sorted( + nx.degree(self.DG, weight="weight") + ) + + def test_neighbors(self): + assert list(self.G.neighbors(1)) == list(nx.neighbors(self.G, 1)) + assert list(self.DG.neighbors(1)) == list(nx.neighbors(self.DG, 1)) + + def test_number_of_nodes(self): + assert self.G.number_of_nodes() == nx.number_of_nodes(self.G) + assert self.DG.number_of_nodes() == nx.number_of_nodes(self.DG) + + def test_number_of_edges(self): + assert self.G.number_of_edges() == nx.number_of_edges(self.G) + assert self.DG.number_of_edges() == nx.number_of_edges(self.DG) + + def test_is_directed(self): + assert self.G.is_directed() == nx.is_directed(self.G) + assert self.DG.is_directed() == nx.is_directed(self.DG) + + def test_add_star(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_star(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (12, 14), (12, 15)]) + + G = self.G.copy() + nx.add_star(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (12, 14, {"weight": 2.0}), + (12, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = [12] + nx.add_star(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_star(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_add_path(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (13, 14), (14, 15)]) + G = self.G.copy() + nx.add_path(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (13, 14, {"weight": 2.0}), + (14, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = ["node"] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = iter(["node"]) + nx.add_path(G, nlist) + assert edges_equal(G.edges(["node"]), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = [12] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + [12]) + + G = self.G.copy() + nlist = iter([12]) + nx.add_path(G, nlist) + assert edges_equal(G.edges([12]), []) + assert nodes_equal(G, 
list(self.G) + [12]) + + G = self.G.copy() + nlist = [] + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + G = self.G.copy() + nlist = iter([]) + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + def test_add_cycle(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + oklists = [ + [(12, 13), (12, 15), (13, 14), (14, 15)], + [(12, 13), (13, 14), (14, 15), (15, 12)], + ] + nx.add_cycle(G, nlist) + assert sorted(G.edges(nlist)) in oklists + G = self.G.copy() + oklists = [ + [ + (12, 13, {"weight": 1.0}), + (12, 15, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + ], + [ + (12, 13, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + (15, 12, {"weight": 1.0}), + ], + ] + nx.add_cycle(G, nlist, weight=1.0) + assert sorted(G.edges(nlist, data=True)) in oklists + + G = self.G.copy() + nlist = [12] + nx.add_cycle(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_cycle(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_subgraph(self): + assert ( + self.G.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.DG, [0, 1, 2, 4]).adj + ) + assert ( + self.G.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.DG, [0, 1, 2, 4]).adj + ) + # subgraph-subgraph chain is allowed in function interface + H = nx.induced_subgraph(self.G.subgraph([0, 1, 2, 4]), [0, 1, 4]) + assert H._graph is not self.G + assert H.adj == self.G.subgraph([0, 1, 4]).adj + + def test_edge_subgraph(self): + assert ( + self.G.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.G, [(1, 2), (0, 3)]).adj + ) + assert ( + self.DG.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.DG, [(1, 2), (0, 3)]).adj + ) + + def test_create_empty_copy(self): + G = nx.create_empty_copy(self.G, with_data=False) + assert nodes_equal(G, list(self.G)) + assert G.graph == {} + assert G._node == {}.fromkeys(self.G.nodes(), {}) + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + G = nx.create_empty_copy(self.G) + assert nodes_equal(G, list(self.G)) + assert G.graph == self.G.graph + assert G._node == self.G._node + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + + def test_degree_histogram(self): + assert nx.degree_histogram(self.G) == [1, 1, 1, 1, 1] + + def test_density(self): + assert nx.density(self.G) == 0.5 + assert nx.density(self.DG) == 0.3 + G = nx.Graph() + G.add_node(1) + assert nx.density(G) == 0.0 + + def test_density_selfloop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.density(G) == 0.0 + G.add_edge(1, 2) + assert nx.density(G) == 2.0 + + def test_freeze(self): + G = nx.freeze(self.G) + assert G.frozen + pytest.raises(nx.NetworkXError, G.add_node, 1) + pytest.raises(nx.NetworkXError, G.add_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.remove_node, 1) + pytest.raises(nx.NetworkXError, G.remove_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.add_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.add_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.remove_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.remove_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.clear) + + def test_is_frozen(self): + assert not 
nx.is_frozen(self.G) + G = nx.freeze(self.G) + assert G.frozen == nx.is_frozen(self.G) + assert G.frozen + + def test_info(self): + G = nx.path_graph(5) + G.name = "path_graph(5)" + info = nx.info(G) + expected_graph_info = "Graph named 'path_graph(5)' with 5 nodes and 4 edges" + assert info == expected_graph_info + + info = nx.info(G, n=1) + assert type(info) == str + expected_node_info = "\n".join( + ["Node 1 has the following properties:", "Degree: 2", "Neighbors: 0 2"] + ) + assert info == expected_node_info + + # must raise an error for a non-existent node + pytest.raises(nx.NetworkXError, nx.info, G, 1248) + + def test_info_digraph(self): + G = nx.DiGraph(name="path_graph(5)") + nx.add_path(G, [0, 1, 2, 3, 4]) + info = nx.info(G) + expected_graph_info = "DiGraph named 'path_graph(5)' with 5 nodes and 4 edges" + assert info == expected_graph_info + + info = nx.info(G, n=1) + expected_node_info = "\n".join( + ["Node 1 has the following properties:", "Degree: 2", "Neighbors: 2"] + ) + assert info == expected_node_info + + pytest.raises(nx.NetworkXError, nx.info, G, n=-1) + + def test_neighbors_complete_graph(self): + graph = nx.complete_graph(100) + pop = random.sample(list(graph), 1) + nbors = list(nx.neighbors(graph, pop[0])) + # should be all the other vertices in the graph + assert len(nbors) == len(graph) - 1 + + graph = nx.path_graph(100) + node = random.sample(list(graph), 1)[0] + nbors = list(nx.neighbors(graph, node)) + # should be all the other vertices in the graph + if node != 0 and node != 99: + assert len(nbors) == 2 + else: + assert len(nbors) == 1 + + # create a star graph with 99 outer nodes + graph = nx.star_graph(99) + nbors = list(nx.neighbors(graph, 0)) + assert len(nbors) == 99 + + def test_non_neighbors(self): + graph = nx.complete_graph(100) + pop = random.sample(list(graph), 1) + nbors = list(nx.non_neighbors(graph, pop[0])) + # should be all the other vertices in the graph + assert len(nbors) == 0 + + graph = nx.path_graph(100) + node = random.sample(list(graph), 1)[0] + nbors = list(nx.non_neighbors(graph, node)) + # should be all the other vertices in the graph + if node != 0 and node != 99: + assert len(nbors) == 97 + else: + assert len(nbors) == 98 + + # create a star graph with 99 outer nodes + graph = nx.star_graph(99) + nbors = list(nx.non_neighbors(graph, 0)) + assert len(nbors) == 0 + + # disconnected graph + graph = nx.Graph() + graph.add_nodes_from(range(10)) + nbors = list(nx.non_neighbors(graph, 0)) + assert len(nbors) == 9 + + def test_non_edges(self): + # All possible edges exist + graph = nx.complete_graph(5) + nedges = list(nx.non_edges(graph)) + assert len(nedges) == 0 + + graph = nx.path_graph(4) + expected = [(0, 2), (0, 3), (1, 3)] + nedges = list(nx.non_edges(graph)) + for (u, v) in expected: + assert (u, v) in nedges or (v, u) in nedges + + graph = nx.star_graph(4) + expected = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + nedges = list(nx.non_edges(graph)) + for (u, v) in expected: + assert (u, v) in nedges or (v, u) in nedges + + # Directed graphs + graph = nx.DiGraph() + graph.add_edges_from([(0, 2), (2, 0), (2, 1)]) + expected = [(0, 1), (1, 0), (1, 2)] + nedges = list(nx.non_edges(graph)) + for e in expected: + assert e in nedges + + def test_is_weighted(self): + G = nx.Graph() + assert not nx.is_weighted(G) + + G = nx.path_graph(4) + assert not nx.is_weighted(G) + assert not nx.is_weighted(G, (2, 3)) + + G.add_node(4) + G.add_edge(3, 4, weight=4) + assert not nx.is_weighted(G) + assert nx.is_weighted(G, (3, 4)) + + G = 
nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + assert nx.is_weighted(G) + assert nx.is_weighted(G, ("1", "0")) + + G = G.to_undirected() + assert nx.is_weighted(G) + assert nx.is_weighted(G, ("1", "0")) + + pytest.raises(nx.NetworkXError, nx.is_weighted, G, (1, 2)) + + def test_is_negatively_weighted(self): + G = nx.Graph() + assert not nx.is_negatively_weighted(G) + + G.add_node(1) + G.add_nodes_from([2, 3, 4, 5]) + assert not nx.is_negatively_weighted(G) + + G.add_edge(1, 2, weight=4) + assert not nx.is_negatively_weighted(G, (1, 2)) + + G.add_edges_from([(1, 3), (2, 4), (2, 6)]) + G[1][3]["color"] = "blue" + assert not nx.is_negatively_weighted(G) + assert not nx.is_negatively_weighted(G, (1, 3)) + + G[2][4]["weight"] = -2 + assert nx.is_negatively_weighted(G, (2, 4)) + assert nx.is_negatively_weighted(G) + + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -2), + ("0", "2", 2), + ("1", "2", -3), + ("2", "3", 1), + ] + ) + assert nx.is_negatively_weighted(G) + assert not nx.is_negatively_weighted(G, ("0", "3")) + assert nx.is_negatively_weighted(G, ("1", "0")) + + pytest.raises(nx.NetworkXError, nx.is_negatively_weighted, G, (1, 4)) + + +class TestCommonNeighbors: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.common_neighbors) + + def test_func(G, u, v, expected): + result = sorted(cls.func(G, u, v)) + assert result == expected + + cls.test = staticmethod(test_func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, 0, 1, [2, 3, 4]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, 0, 2, [1]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, 1, 2, [0]) + + def test_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2)]) + self.func(G, 0, 2) + + def test_nonexistent_nodes(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6) + + def test_custom1(self): + """Case of no common neighbors.""" + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, 0, 1, []) + + def test_custom2(self): + """Case of equal nodes.""" + G = nx.complete_graph(4) + self.test(G, 0, 0, [1, 2, 3]) + + +@pytest.mark.parametrize( + "graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) +) +def test_set_node_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + vals = 100 + attr = "hello" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == vals + assert G.nodes[1][attr] == vals + assert G.nodes[2][attr] == vals + + # Test dictionary + G = nx.path_graph(3, create_using=graph_type) + vals = dict(zip(sorted(G.nodes()), range(len(G)))) + attr = "hi" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == 0 + assert G.nodes[1][attr] == 1 + assert G.nodes[2][attr] == 2 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + vals = dict.fromkeys(G.nodes(), d) + vals.pop(0) + nx.set_node_attributes(G, vals) + assert G.nodes[0] == {} + assert G.nodes[1]["hi"] == 0 + assert G.nodes[2]["hello"] == 200 + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({0: "red", 1: "blue"}, "color"), # values dictionary + ({0: {"color": "red"}, 
1: {"color": "blue"}}, None), # dict-of-dict + ), +) +def test_set_node_attributes_ignores_extra_nodes(values, name): + """ + When `values` is a dict or dict-of-dict keyed by nodes, ensure that keys + that correspond to nodes not in G are ignored. + """ + G = nx.Graph() + G.add_node(0) + nx.set_node_attributes(G, values, name) + assert G.nodes[0]["color"] == "red" + assert 1 not in G.nodes + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_set_edge_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == vals + assert G[1][2][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1), (1, 2)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == 0 + assert G[1][2][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1]["hi"] == 0 + assert G[0][1]["hello"] == 200 + assert G[1][2] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1): 1.0, (0, 2): 2.0}, "weight"), # values dict + ({(0, 1): {"weight": 1.0}, (0, 2): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. + """ + G = nx.Graph([(0, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1]["weight"] == 1.0 + assert (0, 2) not in G.edges + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_set_edge_attributes_multi(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == vals + assert G[1][2][0][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1, 0), (1, 2, 0)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == 0 + assert G[1][2][0][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1, 0)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1][0]["hi"] == 0 + assert G[0][1][0]["hello"] == 200 + assert G[1][2][0] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1, 0): 1.0, (0, 2, 0): 2.0}, "weight"), # values dict + ({(0, 1, 0): {"weight": 1.0}, (0, 2, 0): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_multi_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. 
+ """ + G = nx.MultiGraph([(0, 1, 0), (0, 1, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1][0]["weight"] == 1.0 + assert G[0][1][1] == {} + assert (0, 2) not in G.edges() + + +def test_get_node_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_node_attributes(G, vals, attr) + attrs = nx.get_node_attributes(G, attr) + assert attrs[0] == vals + assert attrs[1] == vals + assert attrs[2] == vals + + +def test_get_edge_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_edge_attributes(G, vals, attr) + attrs = nx.get_edge_attributes(G, attr) + + assert len(attrs) == 2 + if G.is_multigraph(): + keys = [(0, 1, 0), (1, 2, 0)] + for u, v, k in keys: + try: + assert attrs[(u, v, k)] == 100 + except KeyError: + assert attrs[(v, u, k)] == 100 + else: + keys = [(0, 1), (1, 2)] + for u, v in keys: + try: + assert attrs[(u, v)] == 100 + except KeyError: + assert attrs[(v, u)] == 100 + + +def test_is_empty(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + assert nx.is_empty(G) + G.add_nodes_from(range(5)) + assert nx.is_empty(G) + G.add_edges_from([(1, 2), (3, 4)]) + assert not nx.is_empty(G) + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_selfloops(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + assert nodes_equal(nx.nodes_with_selfloops(G), [0]) + assert edges_equal(nx.selfloop_edges(G), [(0, 0)]) + assert edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})]) + assert nx.number_of_selfloops(G) == 1 + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_selfloop_edges_attr(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal(nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)]) + + +def test_selfloop_edges_multi_with_data_and_keys(): + G = nx.complete_graph(3, create_using=nx.MultiGraph) + G.add_edge(0, 0, weight=10) + G.add_edge(0, 0, weight=100) + assert edges_equal( + nx.selfloop_edges(G, data="weight", keys=True), [(0, 0, 0, 10), (0, 0, 1, 100)] + ) + + +@pytest.mark.parametrize("graph_type", [nx.Graph, nx.DiGraph]) +def test_selfloops_removal(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True, data=True)) + + +@pytest.mark.parametrize("graph_type", [nx.MultiGraph, nx.MultiDiGraph]) +def test_selfloops_removal_multi(graph_type): + """test removing selfloops behavior vis-a-vis altering a dict while iterating. + cf. 
gh-4068""" + G = nx.complete_graph(3, create_using=graph_type) + # Defaults - see gh-4080 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G)) + assert (0, 0) not in G.edges() + # With keys + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + # With data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(TypeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + # With keys and data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True, keys=True)) + + +def test_pathweight(): + valid_path = [1, 2, 3] + invalid_path = [1, 3, 2] + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + edges = [ + (1, 2, dict(cost=5, dist=6)), + (2, 3, dict(cost=3, dist=4)), + (1, 2, dict(cost=1, dist=2)), + ] + for graph in graphs: + graph.add_edges_from(edges) + assert nx.path_weight(graph, valid_path, "cost") == 4 + assert nx.path_weight(graph, valid_path, "dist") == 6 + pytest.raises(nx.NetworkXNoPath, nx.path_weight, graph, invalid_path, "cost") + + +def test_ispath(): + valid_path = [1, 2, 3, 4] + invalid_path = [1, 2, 4, 3] + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + edges = [(1, 2), (2, 3), (1, 2), (3, 4)] + for graph in graphs: + graph.add_edges_from(edges) + assert nx.is_path(graph, valid_path) + assert not nx.is_path(graph, invalid_path) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_restricted_view(G): + G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2), (3, 4)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} + + +@pytest.mark.parametrize("G", (nx.MultiGraph(), nx.MultiDiGraph())) +def test_restricted_view_multi(G): + G.add_edges_from( + [(0, 1, 0), (0, 2, 0), (0, 3, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 2, 0)] + ) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2, 0), (3, 4, 0)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph.py new file mode 100644 index 0000000..ebaa04b --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph.py @@ -0,0 +1,897 @@ +import gc +import pickle +import platform + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class BaseGraphTester: + """Tests for data-structure independent graph class features.""" + + def test_contains(self): + G = self.K3 + assert 1 in G + assert 4 not in G + assert "b" not in G + assert [] not in G # no exception for nonhashable + assert {1: 1} not in G # no exception for nonhashable + + def test_order(self): + G = self.K3 + assert len(G) == 3 + assert G.order() == 3 + assert G.number_of_nodes() == 3 + + def test_nodes(self): + G = self.K3 + assert isinstance(G._node, G.node_dict_factory) + assert isinstance(G._adj, G.adjlist_outer_dict_factory) + assert all( + isinstance(adj, G.adjlist_inner_dict_factory) for adj in G._adj.values() + ) + assert sorted(G.nodes()) == self.k3nodes + assert sorted(G.nodes(data=True)) == [(0, {}), (1, {}), (2, {})] + + def test_none_node(self): + G = self.Graph() + with pytest.raises(ValueError): + G.add_node(None) + with pytest.raises(ValueError): + 
G.add_nodes_from([None]) + with pytest.raises(ValueError): + G.add_edge(0, None) + with pytest.raises(ValueError): + G.add_edges_from([(0, None)]) + + def test_has_node(self): + G = self.K3 + assert G.has_node(1) + assert not G.has_node(4) + assert not G.has_node([]) # no exception for nonhashable + assert not G.has_node({1: 1}) # no exception for nonhashable + + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + + def test_neighbors(self): + G = self.K3 + assert sorted(G.neighbors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.neighbors(-1) + + @pytest.mark.skipif( + platform.python_implementation() == "PyPy", reason="PyPy gc is different" + ) + def test_memory_leak(self): + G = self.Graph() + + def count_objects_of_type(_type): + return sum(1 for obj in gc.get_objects() if isinstance(obj, _type)) + + gc.collect() + before = count_objects_of_type(self.Graph) + G.copy() + gc.collect() + after = count_objects_of_type(self.Graph) + assert before == after + + # test a subgraph of the base class + class MyGraph(self.Graph): + pass + + gc.collect() + G = MyGraph() + before = count_objects_of_type(MyGraph) + G.copy() + gc.collect() + after = count_objects_of_type(MyGraph) + assert before == after + + def test_edges(self): + G = self.K3 + assert isinstance(G._adj, G.adjlist_outer_dict_factory) + assert edges_equal(G.edges(), [(0, 1), (0, 2), (1, 2)]) + assert edges_equal(G.edges(0), [(0, 1), (0, 2)]) + assert edges_equal(G.edges([0, 1]), [(0, 1), (0, 2), (1, 2)]) + with pytest.raises(nx.NetworkXError): + G.edges(-1) + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.degree()) == {0: 2, 1: 2, 2: 2} + assert G.degree(0) == 2 + with pytest.raises(nx.NetworkXError): + G.degree(-1) # node not in graph + + def test_size(self): + G = self.K3 + assert G.size() == 3 + assert G.number_of_edges() == 3 + + def test_nbunch_iter(self): + G = self.K3 + assert nodes_equal(G.nbunch_iter(), self.k3nodes) # all nodes + assert nodes_equal(G.nbunch_iter(0), [0]) # single node + assert nodes_equal(G.nbunch_iter([0, 1]), [0, 1]) # sequence + # sequence with none in graph + assert nodes_equal(G.nbunch_iter([-1]), []) + # string sequence with none in graph + assert nodes_equal(G.nbunch_iter("foo"), []) + # node not in graph doesn't get caught upon creation of iterator + bunch = G.nbunch_iter(-1) + # but gets caught when iterator used + with pytest.raises(nx.NetworkXError, match="is not a node or a sequence"): + list(bunch) + # unhashable doesn't get caught upon creation of iterator + bunch = G.nbunch_iter([0, 1, 2, {}]) + # but gets caught when iterator hits the unhashable + with pytest.raises( + nx.NetworkXError, match="in sequence nbunch is not a valid node" + ): + list(bunch) + + def test_nbunch_iter_node_format_raise(self): + # Tests that a node that would have failed string formatting + # doesn't cause an error when attempting to raise a + # :exc:`nx.NetworkXError`. + + # For more information, see pull request #1813. 
+ G = self.Graph() + nbunch = [("x", set())] + with pytest.raises(nx.NetworkXError): + list(G.nbunch_iter(nbunch)) + + def test_selfloop_degree(self): + G = self.Graph() + G.add_edge(1, 1) + assert sorted(G.degree()) == [(1, 2)] + assert dict(G.degree()) == {1: 2} + assert G.degree(1) == 2 + assert sorted(G.degree([1])) == [(1, 2)] + assert G.degree(1, weight="weight") == 2 + + def test_selfloops(self): + G = self.K3.copy() + G.add_edge(0, 0) + assert nodes_equal(nx.nodes_with_selfloops(G), [0]) + assert edges_equal(nx.selfloop_edges(G), [(0, 0)]) + assert nx.number_of_selfloops(G) == 1 + G.remove_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from([(0, 0)]) + G.add_edge(1, 1) + G.remove_node(1) + G.add_edge(0, 0) + G.add_edge(1, 1) + G.remove_nodes_from([0, 1]) + + def test_cache_reset(self): + G = self.K3.copy() + old_adj = G.adj + assert id(G.adj) == id(old_adj) + G._adj = {} + assert id(G.adj) != id(old_adj) + + def test_attributes_cached(self): + G = self.K3.copy() + assert id(G.nodes) == id(G.nodes) + assert id(G.edges) == id(G.edges) + assert id(G.degree) == id(G.degree) + assert id(G.adj) == id(G.adj) + + +class BaseAttrGraphTester(BaseGraphTester): + """Tests of graph class attribute features.""" + + def test_weighted_degree(self): + G = self.Graph() + G.add_edge(1, 2, weight=2, other=3) + G.add_edge(2, 3, weight=3, other=4) + assert sorted(d for n, d in G.degree(weight="weight")) == [2, 3, 5] + assert dict(G.degree(weight="weight")) == {1: 2, 2: 5, 3: 3} + assert G.degree(1, weight="weight") == 2 + assert nodes_equal((G.degree([1], weight="weight")), [(1, 2)]) + + assert nodes_equal((d for n, d in G.degree(weight="other")), [3, 7, 4]) + assert dict(G.degree(weight="other")) == {1: 3, 2: 7, 3: 4} + assert G.degree(1, weight="other") == 3 + assert edges_equal((G.degree([1], weight="other")), [(1, 3)]) + + def add_attributes(self, G): + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_name(self): + G = self.Graph(name="") + assert G.name == "" + G = self.Graph(name="test") + assert G.name == "test" + + def test_str_unnamed(self): + G = self.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} with 3 nodes and 2 edges" + + def test_str_named(self): + G = self.Graph(name="foo") + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} named 'foo' with 3 nodes and 2 edges" + + def test_graph_chain(self): + G = self.Graph([(0, 1), (1, 2)]) + DG = G.to_directed(as_view=True) + SDG = DG.subgraph([0, 1]) + RSDG = SDG.reverse(copy=False) + assert G is DG._graph + assert DG is SDG._graph + assert SDG is RSDG._graph + + def test_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict but any container attr are same + H = G.copy() + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_class_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict but any container attr are same + H = G.__class__(G) + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_fresh_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy graph structure but use fresh datadict + H = G.__class__() + H.add_nodes_from(G) + H.add_edges_from(G.edges()) + assert len(G.nodes[0]) == 1 + ddict = G.adj[1][2][0] if 
G.is_multigraph() else G.adj[1][2] + assert len(ddict) == 1 + assert len(H.nodes[0]) == 0 + ddict = H.adj[1][2][0] if H.is_multigraph() else H.adj[1][2] + assert len(ddict) == 0 + + def is_deepcopy(self, H, G): + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.deep_copy_attrdict(H, G) + + def deep_copy_attrdict(self, H, G): + self.deepcopy_graph_attr(H, G) + self.deepcopy_node_attr(H, G) + self.deepcopy_edge_attr(H, G) + + def deepcopy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + + def deepcopy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + + def deepcopy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] != H[1][2]["foo"] + + def is_shallow_copy(self, H, G): + self.graphs_equal(H, G) + self.shallow_copy_attrdict(H, G) + + def shallow_copy_attrdict(self, H, G): + self.shallow_copy_graph_attr(H, G) + self.shallow_copy_node_attr(H, G) + self.shallow_copy_edge_attr(H, G) + + def shallow_copy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + + def shallow_copy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] == H[1][2]["foo"] + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G.edges == H.edges + H.adj[1][2]["foo"] = old_foo + assert G.edges == H.edges + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def different_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + def test_graph_attr(self): + G = self.K3.copy() + G.graph["foo"] = "bar" + assert isinstance(G.graph, G.graph_attr_dict_factory) + assert G.graph["foo"] == "bar" + del G.graph["foo"] + assert G.graph == {} + H = self.Graph(foo="bar") + assert H.graph["foo"] == "bar" + + def test_node_attr(self): + G = self.K3.copy() + G.add_node(1, foo="bar") + assert all( + isinstance(d, G.node_attr_dict_factory) for u, d in G.nodes(data=True) + ) + assert nodes_equal(G.nodes(), [0, 1, 2]) + assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "bar"}), (2, {})]) + G.nodes[1]["foo"] = "baz" + assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "baz"}), (2, {})]) + assert 
nodes_equal(G.nodes(data="foo"), [(0, None), (1, "baz"), (2, None)]) + assert nodes_equal( + G.nodes(data="foo", default="bar"), [(0, "bar"), (1, "baz"), (2, "bar")] + ) + + def test_node_attr2(self): + G = self.K3.copy() + a = {"foo": "bar"} + G.add_node(3, **a) + assert nodes_equal(G.nodes(), [0, 1, 2, 3]) + assert nodes_equal( + G.nodes(data=True), [(0, {}), (1, {}), (2, {}), (3, {"foo": "bar"})] + ) + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert edges_equal(G.edges[1, 2], {"foo": "bar"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal(G.edges(data=True), [(1, 2, {"foo": "bar"})]) + assert edges_equal(G.edges(data="foo"), [(1, 2, "bar")]) + + def test_edge_attr2(self): + G = self.Graph() + G.add_edges_from([(1, 2), (3, 4)], foo="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"foo": "foo"}), (3, 4, {"foo": "foo"})] + ) + assert edges_equal(G.edges(data="foo"), [(1, 2, "foo"), (3, 4, "foo")]) + + def test_edge_attr3(self): + G = self.Graph() + G.add_edges_from([(1, 2, {"weight": 32}), (3, 4, {"weight": 64})], foo="foo") + assert edges_equal( + G.edges(data=True), + [ + (1, 2, {"foo": "foo", "weight": 32}), + (3, 4, {"foo": "foo", "weight": 64}), + ], + ) + + G.remove_edges_from([(1, 2), (3, 4)]) + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2]["listdata"] = [20, 200] + G.adj[1][2]["weight"] = 20 + dd = { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + "weight": 20, + } + assert edges_equal(G.edges(data=True), [(1, 2, dd)]) + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.Graph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 = H.to_directed(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) or H.is_directed() + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) or H.is_directed() + + def test_to_undirected_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 = H.to_undirected(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) + + def test_directed_class(self): + G = self.Graph() + + class newGraph(G.to_undirected_class()): + def to_directed_class(self): + 
return newDiGraph + + def to_undirected_class(self): + return newGraph + + class newDiGraph(G.to_directed_class()): + def to_directed_class(self): + return newDiGraph + + def to_undirected_class(self): + return newGraph + + G = newDiGraph() if G.is_directed() else newGraph() + H = G.to_directed() + assert isinstance(H, newDiGraph) + H = G.to_undirected() + assert isinstance(H, newGraph) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.DiGraph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_subgraph(self): + G = self.K3 + self.add_attributes(G) + H = G.subgraph([0, 1, 2, 5]) + self.graphs_equal(H, G) + self.same_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + H = G.subgraph(0) + assert H.adj == {0: {}} + H = G.subgraph([]) + assert H.adj == {} + assert G.adj != {} + + def test_selfloops_attr(self): + G = self.K3.copy() + G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal( + nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)] + ) + + +class TestGraph(BaseAttrGraphTester): + """Tests specific to dict-of-dict-of-dict graph data structure""" + + def setup_method(self): + self.Graph = nx.Graph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = ({}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_pickle(self): + G = self.K3 + pg = pickle.loads(pickle.dumps(G, -1)) + self.graphs_equal(pg, G) + pg = pickle.loads(pickle.dumps(G)) + self.graphs_equal(pg, G) + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {}, 2: {}}, + 1: {0: {}, 2: {}}, + 2: {0: {}, 1: {}}, + } + + def test_getitem(self): + G = self.K3 + assert G.adj[0] == {1: {}, 2: {}} + assert G[0] == {1: {}, 2: {}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_add_node(self): + G = self.Graph() + G.add_node(0) + assert G.adj == {0: {}} + # test add attributes + G.add_node(1, c="red") + G.add_node(2, c="blue") + G.add_node(3, c="red") + assert G.nodes[1]["c"] == "red" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[3]["c"] == "red" + # test updating attributes + G.add_node(1, c="blue") + G.add_node(2, c="red") + G.add_node(3, c="blue") + assert G.nodes[1]["c"] == "blue" + assert G.nodes[2]["c"] == "red" + assert G.nodes[3]["c"] == "blue" + + def test_add_nodes_from(self): + G = self.Graph() + G.add_nodes_from([0, 1, 2]) + assert G.adj == {0: {}, 1: {}, 2: {}} + # test add attributes + G.add_nodes_from([0, 1, 2], c="red") + assert G.nodes[0]["c"] == "red" + assert G.nodes[2]["c"] == "red" + # test that attribute dicts are not the same + assert G.nodes[0] is not G.nodes[1] + # test updating attributes + G.add_nodes_from([0, 1, 2], c="blue") + assert G.nodes[0]["c"] == "blue" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[0] is not G.nodes[1] + # test tuple input + H = self.Graph() + H.add_nodes_from(G.nodes(data=True)) + assert H.nodes[0]["c"] == "blue" + assert 
H.nodes[2]["c"] == "blue" + assert H.nodes[0] is not H.nodes[1] + # specific overrides general + H.add_nodes_from([0, (1, {"c": "green"}), (3, {"c": "cyan"})], c="red") + assert H.nodes[0]["c"] == "red" + assert H.nodes[1]["c"] == "green" + assert H.nodes[2]["c"] == "blue" + assert H.nodes[3]["c"] == "cyan" + + def test_remove_node(self): + G = self.K3.copy() + G.remove_node(0) + assert G.adj == {1: {2: {}}, 2: {1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + # generator here to implement list,set,string... + + def test_remove_nodes_from(self): + G = self.K3.copy() + G.remove_nodes_from([0, 1]) + assert G.adj == {2: {}} + G.remove_nodes_from([-1]) # silent fail + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3})]) + assert G.adj == { + 0: {1: {}, 2: {"weight": 3}}, + 1: {0: {}}, + 2: {0: {"weight": 3}}, + } + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3}), (1, 2, {"data": 4})], data=2) + assert G.adj == { + 0: {1: {"data": 2}, 2: {"weight": 3, "data": 2}}, + 1: {0: {"data": 2}, 2: {"data": 4}}, + 2: {0: {"weight": 3, "data": 2}, 1: {"data": 4}}, + } + + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3.copy() + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.adj == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3.copy() + G.graph["name"] = "K3" + nodes = list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + assert G.adj == {0: {}, 1: {}, 2: {}} + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + def test_edges_data(self): + G = self.K3 + all_edges = [(0, 1, {}), (0, 2, {}), (1, 2, {})] + assert edges_equal(G.edges(data=True), all_edges) + assert edges_equal(G.edges(0, data=True), [(0, 1, {}), (0, 2, {})]) + assert edges_equal(G.edges([0, 1], data=True), all_edges) + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_get_edge_data(self): + G = self.K3.copy() + assert G.get_edge_data(0, 1) == {} + assert G[0][1] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(-1, 0) is None + assert G.get_edge_data(-1, 0, default=1) == 1 + + def test_update(self): + # specify both edgees and nodes + G = self.K3.copy() + G.update(nodes=[3, (4, {"size": 2})], edges=[(4, 5), (6, 7, {"weight": 2})]) + nlist = [ + (0, {}), + (1, {}), + (2, {}), + (3, {}), + (4, {"size": 2}), + (5, {}), + (6, {}), + (7, {}), + ] + assert sorted(G.nodes.data()) == nlist + if G.is_directed(): + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + (4, 5, {}), + (6, 7, {"weight": 2}), + ] + else: + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 2, {}), + (4, 5, {}), 
+ (6, 7, {"weight": 2}), + ] + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # no keywords -- order is edges, nodes + G = self.K3.copy() + G.update([(4, 5), (6, 7, {"weight": 2})], [3, (4, {"size": 2})]) + assert sorted(G.nodes.data()) == nlist + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # update using only a graph + G = self.Graph() + G.graph["foo"] = "bar" + G.add_node(2, data=4) + G.add_edge(0, 1, weight=0.5) + GG = G.copy() + H = self.Graph() + GG.update(H) + assert graphs_equal(G, GG) + H.update(G) + assert graphs_equal(H, G) + + # update nodes only + H = self.Graph() + H.update(nodes=[3, 4]) + assert H.nodes ^ {3, 4} == set() + assert H.size() == 0 + + # update edges only + H = self.Graph() + H.update(edges=[(3, 4)]) + assert sorted(H.edges.data()) == [(3, 4, {})] + assert H.size() == 1 + + # No inputs -> exception + with pytest.raises(nx.NetworkXError): + nx.Graph().update() + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`Graph.edge_subgraph` method.""" + + def setup_method(self): + # Create a path graph on five nodes. + G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, "edge01"), (3, 4, "edge34")] == sorted(self.H.edges(data="name")) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v in self.H.edges(): + assert self.G.edges[u, v] == self.H.edges[u, v] + # Making a change to G should make a change in H and vice versa. + self.G.edges[0, 1]["name"] = "foo" + assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"] + self.H.edges[3, 4]["name"] = "bar" + assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. 
+ + """ + assert self.G.graph is self.H.graph diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph_historical.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph_historical.py new file mode 100644 index 0000000..7af081c --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graph_historical.py @@ -0,0 +1,12 @@ +"""Original NetworkX graph tests""" +import networkx +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.Graph diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graphviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graphviews.py new file mode 100644 index 0000000..96a0978 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_graphviews.py @@ -0,0 +1,352 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + +# Note: SubGraph views are not tested here. They have their own testing file + + +class TestReverseView: + def setup(self): + self.G = nx.path_graph(9, create_using=nx.DiGraph()) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) not in self.G.edges + assert (2, 3) not in self.rv.edges + assert (3, 2) in self.rv.edges + + def test_iter(self): + expected = sorted(tuple(reversed(e)) for e in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + nxg = nx.graphviews + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nxg.reverse_view, G) + + def test_subclass(self): + class MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + M = MyGraph() + M.add_edge(1, 2) + RM = nx.reverse_view(M) + print("RM class", RM.__class__) + RMC = RM.copy() + print("RMC class", RMC.__class__) + print(RMC.edges) + assert RMC.has_edge(2, 1) + assert RMC.my_method() == "me" + + +class TestMultiReverseView: + def setup(self): + self.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.G.add_edge(4, 5) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3, 0) in self.G.edges + assert (3, 2, 0) not in self.G.edges + assert (2, 3, 0) not in self.rv.edges + assert (3, 2, 0) in self.rv.edges + assert (5, 4, 1) in self.rv.edges + assert (4, 5, 1) not in self.rv.edges + + def test_iter(self): + expected = sorted((v, u, k) for u, v, k in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + nxg = nx.graphviews + MG = nx.MultiGraph(self.G) + pytest.raises(nx.NetworkXNotImplemented, nxg.reverse_view, MG) + + +def test_generic_multitype(): + nxg = nx.graphviews + G = nx.DiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.MultiGraph) + G = nx.MultiDiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.DiGraph) + + +class TestToDirected: + def setup(self): + self.G = 
nx.path_graph(9) + self.dv = nx.to_directed(self.G) + self.MG = nx.path_graph(9, create_using=nx.MultiGraph()) + self.Mdv = nx.to_directed(self.MG) + + def test_directed(self): + assert not self.G.is_directed() + assert self.dv.is_directed() + + def test_already_directed(self): + dd = nx.to_directed(self.dv) + Mdd = nx.to_directed(self.Mdv) + assert edges_equal(dd.edges, self.dv.edges) + assert edges_equal(Mdd.edges, self.Mdv.edges) + + def test_pickle(self): + import pickle + + dv = self.dv + pdv = pickle.loads(pickle.dumps(dv, -1)) + assert dv._node == pdv._node + assert dv._succ == pdv._succ + assert dv._pred == pdv._pred + assert dv.graph == pdv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) in self.G.edges + assert (2, 3) in self.dv.edges + assert (3, 2) in self.dv.edges + + def test_iter(self): + revd = [tuple(reversed(e)) for e in self.G.edges] + expected = sorted(list(self.G.edges) + revd) + assert sorted(self.dv.edges) == expected + + +class TestToUndirected: + def setup(self): + self.DG = nx.path_graph(9, create_using=nx.DiGraph()) + self.uv = nx.to_undirected(self.DG) + self.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.Muv = nx.to_undirected(self.MDG) + + def test_directed(self): + assert self.DG.is_directed() + assert not self.uv.is_directed() + + def test_already_directed(self): + uu = nx.to_undirected(self.uv) + Muu = nx.to_undirected(self.Muv) + assert edges_equal(uu.edges, self.uv.edges) + assert edges_equal(Muu.edges, self.Muv.edges) + + def test_pickle(self): + import pickle + + uv = self.uv + puv = pickle.loads(pickle.dumps(uv, -1)) + assert uv._node == puv._node + assert uv._adj == puv._adj + assert uv.graph == puv.graph + assert hasattr(uv, "_graph") + + def test_contains(self): + assert (2, 3) in self.DG.edges + assert (3, 2) not in self.DG.edges + assert (2, 3) in self.uv.edges + assert (3, 2) in self.uv.edges + + def test_iter(self): + expected = sorted(self.DG.edges) + assert sorted(self.uv.edges) == expected + + +class TestChainsOfViews: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.DG = nx.path_graph(9, create_using=nx.DiGraph()) + cls.MG = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.Gv = nx.to_undirected(cls.DG) + cls.DGv = nx.to_directed(cls.G) + cls.MGv = nx.to_undirected(cls.MDG) + cls.MDGv = nx.to_directed(cls.MG) + cls.Rv = cls.DG.reverse() + cls.MRv = cls.MDG.reverse() + cls.graphs = [ + cls.G, + cls.DG, + cls.MG, + cls.MDG, + cls.Gv, + cls.DGv, + cls.MGv, + cls.MDGv, + cls.Rv, + cls.MRv, + ] + for G in cls.graphs: + G.edges, G.nodes, G.degree + + def test_pickle(self): + import pickle + + for G in self.graphs: + H = pickle.loads(pickle.dumps(G, -1)) + assert edges_equal(H.edges, G.edges) + assert nodes_equal(H.nodes, G.nodes) + + def test_subgraph_of_subgraph(self): + SGv = nx.subgraph(self.G, range(3, 7)) + SDGv = nx.subgraph(self.DG, range(3, 7)) + SMGv = nx.subgraph(self.MG, range(3, 7)) + SMDGv = nx.subgraph(self.MDG, range(3, 7)) + for G in self.graphs + [SGv, SDGv, SMGv, SMDGv]: + SG = nx.induced_subgraph(G, [4, 5, 6]) + assert list(SG) == [4, 5, 6] + SSG = SG.subgraph([6, 7]) + assert list(SSG) == [6] + # subgraph-subgraph chain is short-cut in base class method + assert SSG._graph is G + + def test_restricted_induced_subgraph_chains(self): + """Test subgraph chains that both restrict and show nodes/edges. 
+ + A restricted_view subgraph should allow induced subgraphs using + G.subgraph that automagically without a chain (meaning the result + is a subgraph view of the original graph not a subgraph-of-subgraph. + """ + hide_nodes = [3, 4, 5] + hide_edges = [(6, 7)] + RG = nx.restricted_view(self.G, hide_nodes, hide_edges) + nodes = [4, 5, 6, 7, 8] + SG = nx.induced_subgraph(RG, nodes) + SSG = RG.subgraph(nodes) + assert RG._graph is self.G + assert SSG._graph is self.G + assert SG._graph is RG + assert edges_equal(SG.edges, SSG.edges) + # should be same as morphing the graph + CG = self.G.copy() + CG.remove_nodes_from(hide_nodes) + CG.remove_edges_from(hide_edges) + assert edges_equal(CG.edges(nodes), SSG.edges) + CG.remove_nodes_from([0, 1, 2, 3]) + assert edges_equal(CG.edges, SSG.edges) + # switch order: subgraph first, then restricted view + SSSG = self.G.subgraph(nodes) + RSG = nx.restricted_view(SSSG, hide_nodes, hide_edges) + assert RSG._graph is not self.G + assert edges_equal(RSG.edges, CG.edges) + + def test_subgraph_copy(self): + for origG in self.graphs: + G = nx.OrderedGraph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert type(G) == type(H) + + def test_subgraph_todirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_directed() + assert sorted(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 4), (5, 6), (6, 5)] + + def test_subgraph_toundirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_subgraph_toundirected(self): + G = self.DG.reverse(copy=False) + SG = G.subgraph([4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_reverse_copy(self): + G = self.DG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.DG.nodes + assert H.edges == self.DG.edges + G = self.MDG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.MDG.nodes + assert H.edges == self.MDG.edges + + def test_subgraph_edgesubgraph_toundirected(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + SSG = SG.edge_subgraph([(4, 5), (5, 4)]) + USSG = SSG.to_undirected() + assert list(USSG) == [4, 5] + assert sorted(USSG.edges) == [(4, 5)] + + def test_copy_subgraph(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_disubgraph(self): + G = self.DG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multidisubgraph(self): + G = self.MDG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multisubgraph(self): + G = self.MG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_of_view(self): + G = nx.OrderedMultiGraph(self.MGv) + assert G.__class__.__name__ == "OrderedMultiGraph" + G = G.copy(as_view=True) + assert G.__class__.__name__ == "OrderedMultiGraph" + + def 
test_subclass(self): + class MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + for origG in self.graphs: + G = MyGraph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert SG.my_method() == "me" + assert H.my_method() == "me" + assert not 3 in H or 3 in SG diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multidigraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multidigraph.py new file mode 100644 index 0000000..52c8267 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multidigraph.py @@ -0,0 +1,455 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_multigraph import BaseMultiGraphTester +from .test_multigraph import TestEdgeSubgraph as _TestMultiGraphEdgeSubgraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +class BaseMultiDiGraphTester(BaseMultiGraphTester): + def test_edges(self): + G = self.K3 + edges = [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges()) == edges + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.edges, -1) + + def test_edges_data(self): + G = self.K3 + edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})] + assert sorted(G.edges(data=True)) == edges + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + pytest.raises((KeyError, nx.NetworkXError), G.neighbors, -1) + + def test_edges_multi(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1) + assert sorted(G.edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.out_edges, -1) + assert sorted(G.out_edges(0, keys=True)) == [(0, 1, 0), (0, 2, 0)] + + def test_out_edges_multi(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1, 2) + assert sorted(G.out_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges_data(self): + G = self.K3 + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + G.remove_edge(0, 1) + G.add_edge(0, 1, data=1) + assert sorted(G.edges(0, data=True)) == [(0, 1, {"data": 1}), (0, 2, {})] + assert sorted(G.edges(0, data="data")) == [(0, 1, 1), (0, 2, None)] + assert sorted(G.edges(0, data="data", default=-1)) == [(0, 1, 1), (0, 2, -1)] + + def test_in_edges(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + pytest.raises((KeyError, nx.NetworkXError), G.in_edges, -1) + G.add_edge(0, 1, 2) + assert sorted(G.in_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + assert sorted(G.in_edges(0, keys=True)) == [(1, 0, 0), (2, 0, 0)] + + def test_in_edges_no_keys(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + G.add_edge(0, 1, 2) + assert 
sorted(G.in_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + assert sorted(G.in_edges(data=True, keys=False)) == [ + (0, 1, {}), + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + ] + + def test_in_edges_data(self): + G = self.K3 + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {}), (2, 0, {})] + G.remove_edge(1, 0) + G.add_edge(1, 0, data=1) + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {"data": 1}), (2, 0, {})] + assert sorted(G.in_edges(0, data="data")) == [(1, 0, 1), (2, 0, None)] + assert sorted(G.in_edges(0, data="data", default=-1)) == [(1, 0, 1), (2, 0, -1)] + + def is_shallow(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def is_deep(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def test_to_undirected(self): + # MultiDiGraph -> MultiGraph changes number of edges so it is + # not a copy operation... use is_shallow, not is_shallow_copy + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + # self.is_shallow(H,G) + # the result is traversal order dependent so we + # can't use the is_shallow() test here. 
+ try: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (2, 0)]) + except AssertionError: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (1, 2), (2, 0)]) + H = G.to_undirected() + self.is_deep(H, G) + + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.successors, -1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.predecessors, -1) + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.degree(weight="weight")) == [(0, 4.3), (1, 4.3), (2, 4)] + assert sorted(G.degree(weight="other")) == [(0, 5.2), (1, 5.2), (2, 4)] + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] + assert G.in_degree(0, weight="weight") == 2 + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + assert G.out_degree(0, weight="weight") == 2 + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + G.add_edge(0, 1, weight=0.3, other=1.2) + assert round(G.size(weight="weight"), 2) == 6.3 + assert round(G.size(weight="other"), 2) == 7.2 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (1, 0)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(1, 0)] + assert sorted(G.edges()) == [(0, 1), (0, 1)] + + def test_reverse_nocopy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (1, 0)] + pytest.raises(nx.NetworkXError, R.remove_edge, 1, 0) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class TestMultiDiGraph(BaseMultiDiGraphTester, _TestMultiGraph): + def setup_method(self): + self.Graph = nx.MultiDiGraph + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = {0: {}, 1: {}, 2: {}} + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] 
= {} + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G._adj == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._succ == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}, 1: {"weight": 3}}}} + + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G._succ == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {}, + } + assert G._pred == { + 0: {}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._succ == {0: {1: keydict}, 1: {}} + assert G._pred == {1: {0: keydict}, 0: {}} + + # too few in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0,)]) + # too many in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3, 4)]) + # not a tuple + pytest.raises(TypeError, G.add_edges_from, [0]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, 0, 2, key=1) + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G._adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._succ == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + + def test_remove_edges_from(self): + G = self.K3 + G.remove_edges_from([(0, 1)]) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edges_from([(0, 0)]) # silent fail + + +class TestEdgeSubgraph(_TestMultiGraphEdgeSubgraph): + """Unit tests for the :meth:`MultiDiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a quadruply-linked path graph on five nodes. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + nx.add_path(G, reversed(range(5))) + nx.add_path(G, reversed(range(5))) + # Add some node, edge, and graph attributes. 
+ for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + +class CustomDictClass(UserDict): + pass + + +class MultiDiGraphSubClass(nx.MultiDiGraph): + node_dict_factory = CustomDictClass # type: ignore + node_attr_dict_factory = CustomDictClass # type: ignore + adjlist_outer_dict_factory = CustomDictClass # type: ignore + adjlist_inner_dict_factory = CustomDictClass # type: ignore + edge_key_dict_factory = CustomDictClass # type: ignore + edge_attr_dict_factory = CustomDictClass # type: ignore + graph_attr_dict_factory = CustomDictClass # type: ignore + + +class TestMultiDiGraphSubclass(TestMultiDiGraph): + def setup_method(self): + self.Graph = MultiDiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multigraph.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multigraph.py new file mode 100644 index 0000000..0584f56 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_multigraph.py @@ -0,0 +1,525 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_graph import BaseAttrGraphTester +from .test_graph import TestGraph as _TestGraph + + +class BaseMultiGraphTester(BaseAttrGraphTester): + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + assert G.has_edge(0, 1, 0) + assert not G.has_edge(0, 1, 1) + + def test_get_edge_data(self): + G = self.K3 + assert G.get_edge_data(0, 1) == {0: {}} + assert G[0][1] == {0: {}} + assert G[0][1][0] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(0, 1, 0) == {} + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + def deepcopy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2][0] is H._adj[2][1][0] + assert G._adj[1][2][0] is G._adj[2][1][0] + else: # at least one is directed + if not 
G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2][0] is H._pred[2][1][0] + assert G._succ[1][2][0] is G._pred[2][1][0] + + def same_attrdict(self, H, G): + # same attrdict in the edgedata + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj == H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node == H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def different_attrdict(self, H, G): + # used by graph_equal_but_different + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiDiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_number_of_edges_selfloops(self): + G = self.K3 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.add_edge(0, 0, key="parallel edge") + G.remove_edge(0, 0, key="parallel edge") + assert G.number_of_edges(0, 0) == 2 + G.remove_edge(0, 0) + assert G.number_of_edges(0, 0) == 1 + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + G.add_edge(1, 2, "key", foo="biz") + assert edges_equal(G.edges[1, 2, 0], {"foo": "bar"}) + assert edges_equal(G.edges[1, 2, "key"], {"foo": "biz"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, key="k1", foo="bar") + G.add_edge(1, 2, key="k2", foo="baz") + assert isinstance(G.get_edge_data(1, 2), G.edge_key_dict_factory) + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal( + G.edges(keys=True, data=True), + [(1, 2, "k1", {"foo": "bar"}), (1, 2, "k2", {"foo": "baz"})], + ) + assert edges_equal( + G.edges(keys=True, data="foo"), [(1, 2, "k1", "bar"), (1, 2, "k2", "baz")] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, key=0, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2][0]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2][0]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2, 0]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2][0]["listdata"] = [20, 200] + G.adj[1][2][0]["weight"] = 20 + assert edges_equal( + G.edges(data=True), + [ + ( + 1, + 2, + { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + "weight": 20, + }, + ) + ], + ) + + +class TestMultiGraph(BaseMultiGraphTester, _TestGraph): + def setup_method(self): + self.Graph = nx.MultiGraph + # build K3 + ed1, ed2, ed3 = ({0: {}}, {0: {}}, {0: {}}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 
2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + expected = [(1, {2: {0: {}}}), (2, {1: {0: {}}})] + assert sorted(G.adj.items()) == expected + + def test_data_multigraph_input(self): + # standard case with edge keys and edge data + edata0 = dict(w=200, s="foo") + edata1 = dict(w=201, s="bar") + keydict = {0: edata0, 1: edata1} + dododod = {"a": {"b": keydict}} + + multiple_edge = [("a", "b", 0, edata0), ("a", "b", 1, edata1)] + single_edge = [("a", "b", 0, keydict)] + + G = self.Graph(dododod, multigraph_input=True) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=None) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=False) + assert list(G.edges(keys=True, data=True)) == single_edge + + # test round-trip to_dict_of_dict and MultiGraph constructor + G = self.Graph(dododod, multigraph_input=True) + H = self.Graph(nx.to_dict_of_dicts(G)) + assert nx.is_isomorphic(G, H) is True # test that default is True + for mgi in [True, False]: + H = self.Graph(nx.to_dict_of_dicts(G), multigraph_input=mgi) + assert nx.is_isomorphic(G, H) == mgi + + # Set up cases for when incoming_graph_data is not multigraph_input + etraits = {"w": 200, "s": "foo"} + egraphics = {"color": "blue", "shape": "box"} + edata = {"traits": etraits, "graphics": egraphics} + dodod1 = {"a": {"b": edata}} + dodod2 = {"a": {"b": etraits}} + dodod3 = {"a": {"b": {"traits": etraits, "s": "foo"}}} + dol = {"a": ["b"]} + + multiple_edge = [("a", "b", "traits", etraits), ("a", "b", "graphics", egraphics)] + single_edge = [("a", "b", 0, {})] # type: ignore + single_edge1 = [("a", "b", 0, edata)] + single_edge2 = [("a", "b", 0, etraits)] + single_edge3 = [("a", "b", 0, {"traits": etraits, "s": "foo"})] + + cases = [ # (dod, mgi, edges) + (dodod1, True, multiple_edge), + (dodod1, False, single_edge1), + (dodod2, False, single_edge2), + (dodod3, False, single_edge3), + (dol, False, single_edge), + ] + + @pytest.mark.parametrize("dod, mgi, edges", cases) + def test_non_multigraph_input(self, dod, mgi, edges): + G = self.Graph(dod, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + G = nx.to_networkx_graph(dod, create_using=self.Graph, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + + mgi_none_cases = [ + (dodod1, multiple_edge), + (dodod2, single_edge2), + (dodod3, single_edge3), + ] + + @pytest.mark.parametrize("dod, edges", mgi_none_cases) + def test_non_multigraph_input_mgi_none(self, dod, edges): + # test constructor without to_networkx_graph for mgi=None + G = self.Graph(dod) + assert list(G.edges(keys=True, data=True)) == edges + + raise_cases = [dodod2, dodod3, dol] + + @pytest.mark.parametrize("dod", raise_cases) + def test_non_multigraph_input_raise(self, dod): + # cases where NetworkXError is raised + pytest.raises(nx.NetworkXError, self.Graph, dod, multigraph_input=True) + pytest.raises( + nx.NetworkXError, + nx.to_networkx_graph, + dod, + create_using=self.Graph, + multigraph_input=True, + ) + + def test_getitem(self): + G = self.K3 + assert G[0] == {1: {0: {}}, 2: {0: {}}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_remove_node(self): + G = 
self.K3 + G.remove_node(0) + assert G.adj == {1: {2: {0: {}}}, 2: {1: {0: {}}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + + def test_add_edge_conflicting_key(self): + G = self.Graph() + G.add_edge(0, 1, key=1) + G.add_edge(0, 1) + assert G.number_of_edges() == 2 + G = self.Graph() + G.add_edges_from([(0, 1, 1, {})]) + G.add_edges_from([(0, 1)]) + assert G.number_of_edges() == 2 + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}}}, + } + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._adj == {0: {1: keydict}, 1: {0: keydict}} + + # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) + # too many in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3, 4)]) + # not a tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) + + def test_multigraph_add_edges_from_four_tuple_misordered(self): + """add_edges_from expects 4-tuples of the format (u, v, key, data_dict). + + Ensure 4-tuples of form (u, v, data_dict, key) raise exception. + """ + G = nx.MultiGraph() + with pytest.raises(TypeError): + # key/data values flipped in 4-tuple + G.add_edges_from([(0, 1, {"color": "red"}, 0)]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G.adj == {0: {2: {0: {}}}, 1: {2: {0: {}}}, 2: {0: {0: {}}, 1: {0: {}}}} + + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + with pytest.raises(nx.NetworkXError): + G.remove_edge(0, 2, key=1) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + G.remove_edges_from([(0, 0)]) # silent fail + self.K3.add_edge(0, 1) + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=True, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=False))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from([(0, 1, 0), (0, 2, 0, {}), (1, 2)]) + assert G.adj == {0: {1: {1: {}}}, 1: {0: {1: {}}}, 2: {}} + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G.adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`MultiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. 
+ G = nx.MultiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, 0, "edge010"), (3, 4, 1, "edge341")] == sorted( + self.H.edges(keys=True, data="name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v, k in self.H.edges(keys=True): + assert self.G._adj[u][v][k] == self.H._adj[u][v][k] + # Making a change to G should make a change in H and vice versa. + self.G._adj[0][1][0]["name"] = "foo" + assert self.G._adj[0][1][0]["name"] == self.H._adj[0][1][0]["name"] + self.H._adj[3][4][1]["name"] = "bar" + assert self.G._adj[3][4][1]["name"] == self.H._adj[3][4][1]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. 
+ + """ + assert self.G.graph is self.H.graph + + +class CustomDictClass(UserDict): + pass + + +class MultiGraphSubClass(nx.MultiGraph): + node_dict_factory = CustomDictClass # type: ignore + node_attr_dict_factory = CustomDictClass # type: ignore + adjlist_outer_dict_factory = CustomDictClass # type: ignore + adjlist_inner_dict_factory = CustomDictClass # type: ignore + edge_key_dict_factory = CustomDictClass # type: ignore + edge_attr_dict_factory = CustomDictClass # type: ignore + graph_attr_dict_factory = CustomDictClass # type: ignore + + +class TestMultiGraphSubclass(TestMultiGraph): + def setup_method(self): + self.Graph = MultiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u != v: + d = {0: {}} + self.K3._adj[u][v] = d + self.K3._adj[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_ordered.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_ordered.py new file mode 100644 index 0000000..f29ecb4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_ordered.py @@ -0,0 +1,40 @@ +import networkx as nx + + +class TestOrdered: + # Just test instantiation. + def test_graph(self): + G = nx.OrderedGraph() + + def test_digraph(self): + G = nx.OrderedDiGraph() + + def test_multigraph(self): + G = nx.OrderedMultiGraph() + + def test_multidigraph(self): + G = nx.OrderedMultiDiGraph() + + +class TestOrderedFeatures: + @classmethod + def setup_class(cls): + cls.G = nx.OrderedDiGraph() + cls.G.add_nodes_from([1, 2, 3]) + cls.G.add_edges_from([(2, 3), (1, 3)]) + + def test_subgraph_order(self): + G = self.G + G_sub = G.subgraph([1, 2, 3]) + assert list(G.nodes) == list(G_sub.nodes) + assert list(G.edges) == list(G_sub.edges) + assert list(G.pred[3]) == list(G_sub.pred[3]) + assert [2, 1] == list(G_sub.pred[3]) + assert [] == list(G_sub.succ[3]) + + G_sub = nx.induced_subgraph(G, [1, 2, 3]) + assert list(G.nodes) == list(G_sub.nodes) + assert list(G.edges) == list(G_sub.edges) + assert list(G.pred[3]) == list(G_sub.pred[3]) + assert [2, 1] == list(G_sub.pred[3]) + assert [] == list(G_sub.succ[3]) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_reportviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_reportviews.py new file mode 100644 index 0000000..7e8dc3d --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_reportviews.py @@ -0,0 +1,1419 @@ +import pickle +from copy import deepcopy + +import pytest + +import networkx as nx +from networkx.classes import reportviews as rv +from networkx.classes.reportviews import NodeDataView + + +# Nodes +class TestNodeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = cls.G.nodes # NodeView(G) + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 
6, 7, 8]" + + def test_repr(self): + assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + + def test_contains(self): + G = self.G.copy() + nv = G.nodes + assert 7 in nv + assert 9 not in nv + G.remove_node(7) + G.add_node(9) + assert 7 not in nv + assert 9 in nv + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[7] == {} + assert nv[3] == {"foo": "bar"} + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes[0:5] + + def test_iter(self): + nv = self.nv + for i, n in enumerate(nv): + assert i == n + inv = iter(nv) + assert next(inv) == 0 + assert iter(nv) != nv + assert iter(inv) == inv + inv2 = iter(nv) + next(inv2) + assert list(inv) == list(inv2) + # odd case where NodeView calls NodeDataView with data=False + nnv = nv(data=False) + for i, n in enumerate(nnv): + assert i == n + + def test_call(self): + nodes = self.nv + assert nodes is nodes() + assert nodes is not nodes(data=True) + assert nodes is not nodes(data="weight") + + +class TestNodeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = NodeDataView(cls.G) + cls.ndv = cls.G.nodes.data(True) + cls.nwv = cls.G.nodes.data("foo") + + def test_viewtype(self): + nv = self.G.nodes + ndvfalse = nv.data(False) + assert nv is ndvfalse + assert nv is not self.ndv + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + msg = str([(n, {}) for n in range(9)]) + assert str(self.ndv) == msg + + def test_repr(self): + expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + assert repr(self.nv) == expected + expected = ( + "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, " + + "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})" + ) + assert repr(self.ndv) == expected + expected = ( + "NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, " + + "5: None, 6: None, 7: None, 8: None}, data='foo')" + ) + assert repr(self.nwv) == expected + + def test_contains(self): + G = self.G.copy() + nv = G.nodes.data() + nwv = G.nodes.data("foo") + G.nodes[3]["foo"] = "bar" + assert (7, {}) in nv + assert (3, {"foo": "bar"}) in nv + assert (3, "bar") in nwv + assert (7, None) in nwv + # default + nwv_def = G.nodes(data="foo", default="biz") + assert (7, "biz") in nwv_def + assert (3, "bar") in nwv_def + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[3] == {"foo": "bar"} + # default + nwv_def = G.nodes(data="foo", default="biz") + assert nwv_def[7], "biz" + assert nwv_def[3] == "bar" + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes.data()[0:5] + + def test_iter(self): + G = self.G.copy() + nv = G.nodes.data() + ndv = G.nodes.data(True) + nwv = G.nodes.data("foo") + for i, (n, d) in enumerate(nv): + assert i == n + assert d == {} + inv = iter(nv) + assert next(inv) == (0, {}) + G.nodes[3]["foo"] = "bar" + # default + for n, d in nv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data=True + for n, d in ndv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data='foo' + for n, d in nwv: + if n == 3: + assert d == "bar" + else: + assert d is None + # data='foo', default=1 + for n, d in G.nodes.data("foo", default=1): + if n == 3: + assert d == "bar" + else: + assert d == 1 + + +def test_nodedataview_unhashable(): + G = nx.path_graph(9) + G.nodes[3]["foo"] = "bar" + nvs = [G.nodes.data()] + nvs.append(G.nodes.data(True)) + H = G.copy() + 
H.nodes[4]["foo"] = {1, 2, 3} + nvs.append(H.nodes.data(True)) + # raise unhashable + for nv in nvs: + pytest.raises(TypeError, set, nv) + pytest.raises(TypeError, eval, "nv | nv", locals()) + # no raise... hashable + Gn = G.nodes.data(False) + set(Gn) + Gn | Gn + Gn = G.nodes.data("foo") + set(Gn) + Gn | Gn + + +class TestNodeViewSetOps: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes + + def n_its(self, nodes): + return {node for node in nodes} + + def test_len(self): + G = self.G.copy() + nv = G.nodes + assert len(nv) == 9 + G.remove_node(7) + assert len(nv) == 8 + G.add_node(9) + assert len(nv) == 9 + + def test_and(self): + # print("G & H nodes:", gnv & hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv & some_nodes == self.n_its(range(5, 9)) + assert some_nodes & nv == self.n_its(range(5, 9)) + + def test_or(self): + # print("G | H nodes:", gnv | hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv | some_nodes == self.n_its(range(12)) + assert some_nodes | nv == self.n_its(range(12)) + + def test_xor(self): + # print("G ^ H nodes:", gnv ^ hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + nodes = {0, 1, 2, 3, 4, 9, 10, 11} + assert nv ^ some_nodes == self.n_its(nodes) + assert some_nodes ^ nv == self.n_its(nodes) + + def test_sub(self): + # print("G - H nodes:", gnv - hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv - some_nodes == self.n_its(range(5)) + assert some_nodes - nv == self.n_its(range(9, 12)) + + +class TestNodeDataViewSetOps(TestNodeViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo") + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else None) for node in nodes} + + +class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo", default=1) + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else 1) for node in nodes} + + +# Edges Data View +class TestEdgeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G)(data=True) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert list(ev) == list(pev) + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G)(data=True) + rep = str([(n, n + 1, {}) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "EdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_iterdata(self): + G = self.G.copy() + evr = self.eview(G) + ev = evr(data=True) + ev_def = evr(data="foo", default=1) + + for u, v, d in ev: + pass + assert d == {} + + for u, v, wt in ev_def: + pass + assert wt == 1 + + self.modify_edge(G, (2, 3), foo="bar") + for e in ev: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == {"foo": "bar"} + checked = True + else: + assert e[2] == {} + assert checked + + for e in ev_def: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == "bar" + checked_wt = True + else: + assert e[2] == 1 + assert checked_wt + + def 
test_iter(self): + evr = self.eview(self.G) + ev = evr() + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + evr = self.eview(self.G) + ev = evr() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + else: + assert (1, 2) in ev and (2, 1) in ev + assert not (1, 4) in ev + assert not (1, 90) in ev + assert not (90, 1) in ev + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + if self.G.is_directed(): + assert (0, 1) in ev + assert not (1, 2) in ev + assert (2, 3) in ev + else: + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 2 + assert len(evr([1, 2, 3])) == 4 + + assert len(self.G.edges(1)) == 2 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 3 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + +class TestOutEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.OutEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 1 + assert len(evr([1, 2, 3])) == 3 + + assert len(self.G.edges(1)) == 1 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 2 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert not (1, 2) in ev + assert (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + +class TestInEdgeDataView(TestOutEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.InEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert not (0, 1) in ev + assert (1, 2) in ev + assert not (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + +class TestMultiEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.eview = nx.reportviews.MultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "MultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) 
== rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + +class TestOutMultiEdgeDataView(TestOutEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert not (1, 2) in ev + assert (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + +class TestInMultiEdgeDataView(TestOutMultiEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.InMultiEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert not (0, 1) in ev + assert (1, 2) in ev + assert not (2, 3) in ev + assert not (3, 4) in ev + assert not (4, 5) in ev + assert not (5, 6) in ev + assert not (7, 8) in ev + assert not (8, 9) in ev + + +# Edge Views +class TestEdgeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert ev == pev + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G) + rep = str([(n, n + 1) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_getitem(self): + G = self.G.copy() + ev = G.edges + G.edges[0, 1]["foo"] = "bar" + assert ev[0, 1] == {"foo": "bar"} + + # slicing + with pytest.raises(nx.NetworkXError): + G.edges[0:5] + + def test_call(self): + ev = self.eview(self.G) + assert id(ev) == id(ev()) + assert id(ev) == id(ev(data=False)) + assert id(ev) != id(ev(data=True)) + assert id(ev) != id(ev(nbunch=1)) + + def test_data(self): + ev = self.eview(self.G) + assert id(ev) != id(ev.data()) + assert id(ev) == id(ev.data(data=False)) + assert id(ev) != id(ev.data(data=True)) + assert id(ev) != id(ev.data(nbunch=1)) + + def test_iter(self): + ev = self.eview(self.G) + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + ev = self.eview(self.G) + edv = ev() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + assert (1, 2) in edv and (2, 1) not in edv + else: + assert (1, 2) in ev and (2, 1) in ev + 
assert (1, 2) in edv and (2, 1) in edv + assert not (1, 4) in ev + assert not (1, 4) in edv + # edge not in graph + assert not (1, 90) in ev + assert not (90, 1) in ev + assert not (1, 90) in edv + assert not (90, 1) in edv + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) in evn + assert (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + def test_len(self): + ev = self.eview(self.G) + num_ed = 9 if self.G.is_multigraph() else 8 + assert len(ev) == num_ed + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 3 + H.is_multigraph() - H.is_directed() + assert len(H.edges()) == num_ed + 1 + assert len(H.edges) == num_ed + 1 + + def test_and(self): + # print("G & H edges:", gnv & hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + if self.G.is_directed(): + assert some_edges & ev, {(0, 1)} + assert ev & some_edges, {(0, 1)} + else: + assert ev & some_edges == {(0, 1), (1, 0)} + assert some_edges & ev == {(0, 1), (1, 0)} + return + + def test_or(self): + # print("G | H edges:", gnv | hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + result1 = {(n, n + 1) for n in range(8)} + result1.update(some_edges) + result2 = {(n + 1, n) for n in range(8)} + result2.update(some_edges) + assert (ev | some_edges) in (result1, result2) + assert (some_edges | ev) in (result1, result2) + + def test_xor(self): + # print("G ^ H edges:", gnv ^ hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + if self.G.is_directed(): + result = {(n, n + 1) for n in range(1, 8)} + result.update({(1, 0), (0, 2)}) + assert ev ^ some_edges == result + else: + result = {(n, n + 1) for n in range(1, 8)} + result.update({(0, 2)}) + assert ev ^ some_edges == result + return + + def test_sub(self): + # print("G - H edges:", gnv - hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + result = {(n, n + 1) for n in range(8)} + result.remove((0, 1)) + assert ev - some_edges, result + + +class TestOutEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.DiGraph()) + cls.eview = nx.reportviews.OutEdgeView + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert not (1, 2) in evn + assert (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + +class TestInEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.DiGraph()) + cls.eview = nx.reportviews.InEdgeView + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert not (0, 1) in evn + assert (1, 2) in evn + assert not (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + +class TestMultiEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, 
nx.MultiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.MultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_str(self): + ev = self.eview(self.G) + replist = [(n, n + 1, 0) for n in range(8)] + replist.insert(2, (1, 2, 3)) + rep = str(replist) + assert str(ev) == rep + + def test_getitem(self): + G = self.G.copy() + ev = G.edges + G.edges[0, 1, 0]["foo"] = "bar" + assert ev[0, 1, 0] == {"foo": "bar"} + + # slicing + with pytest.raises(nx.NetworkXError): + G.edges[0:5] + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_call(self): + ev = self.eview(self.G) + assert id(ev) == id(ev(keys=True)) + assert id(ev) == id(ev(data=False, keys=True)) + assert id(ev) != id(ev(keys=False)) + assert id(ev) != id(ev(data=True)) + assert id(ev) != id(ev(nbunch=1)) + + def test_data(self): + ev = self.eview(self.G) + assert id(ev) != id(ev.data()) + assert id(ev) == id(ev.data(data=False, keys=True)) + assert id(ev) != id(ev.data(keys=False)) + assert id(ev) != id(ev.data(data=True)) + assert id(ev) != id(ev.data(nbunch=1)) + + def test_iter(self): + ev = self.eview(self.G) + for u, v, k in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1, 0) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_iterkeys(self): + G = self.G + evr = self.eview(G) + ev = evr(keys=True) + for u, v, k in ev: + pass + assert k == 0 + ev = evr(keys=True, data="foo", default=1) + for u, v, k, wt in ev: + pass + assert wt == 1 + + self.modify_edge(G, (2, 3, 0), foo="bar") + ev = evr(keys=True, data=True) + for e in ev: + assert len(e) == 4 + print("edge:", e) + if set(e[:2]) == {2, 3}: + print(self.G._adj[2][3]) + assert e[2] == 0 + assert e[3] == {"foo": "bar"} + checked = True + elif set(e[:3]) == {1, 2, 3}: + assert e[2] == 3 + assert e[3] == {"foo": "bar"} + checked_multi = True + else: + assert e[2] == 0 + assert e[3] == {} + assert checked + assert checked_multi + ev = evr(keys=True, data="foo", default=1) + for e in ev: + if set(e[:2]) == {1, 2} and e[2] == 3: + assert e[3] == "bar" + if set(e[:2]) == {1, 2} and e[2] == 0: + assert e[3] == 1 + if set(e[:2]) == {2, 3}: + assert e[2] == 0 + assert e[3] == "bar" + assert len(e) == 4 + checked_wt = True + assert checked_wt + ev = evr(keys=True) + for e in ev: + assert len(e) == 3 + elist = sorted([(i, i + 1, 0) for i in range(8)] + [(1, 2, 3)]) + assert sorted(list(ev)) == elist + # test order of arguments:graph, nbunch, data, keys, default + ev = evr((1, 2), "foo", True, 1) + for e in ev: + if set(e[:2]) == {1, 2}: + assert e[2] in {0, 3} + if e[2] == 3: + assert e[3] == "bar" + else: # e[2] == 0 + assert e[3] == 1 + if G.is_directed(): + assert len(list(ev)) == 3 + else: + assert len(list(ev)) == 4 + + def test_or(self): + # print("G | H edges:", gnv | hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + result = {(n, n + 1, 0) for n in range(8)} + result.update(some_edges) + result.update({(1, 2, 3)}) + assert ev | some_edges == result + assert some_edges | ev == result + + def test_sub(self): + # print("G - H edges:", gnv - hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + result = {(n, n + 1, 0) for n in range(8)} + result.remove((0, 1, 0)) + result.update({(1, 2, 3)}) + assert ev - some_edges, result + 
assert some_edges - ev, result + + def test_xor(self): + # print("G ^ H edges:", gnv ^ hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + if self.G.is_directed(): + result = {(n, n + 1, 0) for n in range(1, 8)} + result.update({(1, 0, 0), (0, 2, 0), (1, 2, 3)}) + assert ev ^ some_edges == result + assert some_edges ^ ev == result + else: + result = {(n, n + 1, 0) for n in range(1, 8)} + result.update({(0, 2, 0), (1, 2, 3)}) + assert ev ^ some_edges == result + assert some_edges ^ ev == result + + def test_and(self): + # print("G & H edges:", gnv & hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + if self.G.is_directed(): + assert ev & some_edges == {(0, 1, 0)} + assert some_edges & ev == {(0, 1, 0)} + else: + assert ev & some_edges == {(0, 1, 0), (1, 0, 0)} + assert some_edges & ev == {(0, 1, 0), (1, 0, 0)} + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) in evn + assert (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + +class TestOutMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "OutMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0)," + + " (3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert not (1, 2) in evn + assert (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + +class TestInMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.InMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "InMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert not (0, 1) in evn + assert (1, 2) in evn + assert not (2, 3) in evn + assert not (3, 4) in evn + assert not (4, 5) in evn + assert not (5, 6) in evn + assert not (7, 8) in evn + assert not (8, 9) in evn + + +# Degrees +class TestDegreeView: + GRAPH = nx.Graph + dview = nx.reportviews.DegreeView + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(6, cls.GRAPH()) + cls.G.add_edge(1, 3, foo=2) + cls.G.add_edge(1, 3, foo=3) + + def test_pickle(self): + import pickle + + deg = self.G.degree + pdeg = pickle.loads(pickle.dumps(deg, -1)) + assert dict(deg) == dict(pdeg) + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.dview(self.G) + rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 
4: 2, 5: 1})" + assert repr(dv) == rep + + def test_iter(self): + dv = self.dview(self.G) + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert next(idv) == (1, dv[1]) + # weighted + dv = self.dview(self.G, weight="foo") + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert next(idv) == (1, dv[1]) + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 2 + assert dv[3] == 3 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 5 + assert dv[2] == 2 + assert dv[3] == 5 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 5 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 5)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 5 + assert dvd[2] == 2 + assert dvd[3] == 5 + + def test_len(self): + dv = self.dview(self.G) + assert len(dv) == 6 + + +class TestDiDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.DiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.OutDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 2 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 4 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 4 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.InDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 2)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 2 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 4 + + def test_weight(self): + dv = self.dview(self.G) + dvw = 
dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 4)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 4 + + +class TestMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiGraph + dview = nx.reportviews.MultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 4), (2, 2), (3, 4), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.degree() + rep = "MultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 4)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 2 + assert dv[3] == 4 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 7 + assert dv[2] == 2 + assert dv[3] == 7 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 7 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 7)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 7 + assert dvd[2] == 2 + assert dvd[3] == 7 + + +class TestDiMultiDegreeView(TestMultiDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.DiMultiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiMultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.OutMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutMultiDegreeView({0: 1, 1: 3, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 6 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 6 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 6 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.InMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 3), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InMultiDegreeView({0: 0, 1: 1, 2: 1, 3: 3, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + 
assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 3 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 6 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 6)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 6 + + +@pytest.mark.parametrize( + ("reportview", "err_msg_terms"), + ( + (rv.NodeView, "list(G.nodes"), + (rv.NodeDataView, "list(G.nodes.data"), + (rv.EdgeView, "list(G.edges"), + # Directed EdgeViews + (rv.InEdgeView, "list(G.in_edges"), + (rv.OutEdgeView, "list(G.edges"), + # Multi EdgeViews + (rv.MultiEdgeView, "list(G.edges"), + (rv.InMultiEdgeView, "list(G.in_edges"), + (rv.OutMultiEdgeView, "list(G.edges"), + ), +) +def test_slicing_reportviews(reportview, err_msg_terms): + G = nx.complete_graph(3) + view = reportview(G) + with pytest.raises(nx.NetworkXError) as exc: + view[0:2] + errmsg = str(exc.value) + assert type(view).__name__ in errmsg + assert err_msg_terms in errmsg + + +@pytest.mark.parametrize( + "graph", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_cache_dict_get_set_state(graph): + G = nx.path_graph(5, graph()) + G.nodes, G.edges, G.adj, G.degree + if G.is_directed(): + G.pred, G.succ, G.in_edges, G.out_edges, G.in_degree, G.out_degree + cached_dict = G.__dict__ + assert "nodes" in cached_dict + assert "edges" in cached_dict + assert "adj" in cached_dict + assert "degree" in cached_dict + if G.is_directed(): + assert "pred" in cached_dict + assert "succ" in cached_dict + assert "in_edges" in cached_dict + assert "out_edges" in cached_dict + assert "in_degree" in cached_dict + assert "out_degree" in cached_dict + + # Raises error if the cached properties and views do not work + pickle.loads(pickle.dumps(G, -1)) + deepcopy(G) diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_special.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_special.py new file mode 100644 index 0000000..fbeb5f8 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_special.py @@ -0,0 +1,187 @@ +from collections import OrderedDict + +import networkx as nx + +from .test_digraph import BaseDiGraphTester +from .test_digraph import TestDiGraph as _TestDiGraph +from .test_graph import BaseGraphTester +from .test_graph import TestGraph as _TestGraph +from .test_multidigraph import TestMultiDiGraph as _TestMultiDiGraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +def test_factories(): + class mydict1(dict): + pass + + class mydict2(dict): + pass + + class mydict3(dict): + pass + + class mydict4(dict): + pass + + class mydict5(dict): + pass + + for Graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph): + # print("testing class: ", Graph.__name__) + class MyGraph(Graph): + node_dict_factory = mydict1 + adjlist_outer_dict_factory = mydict2 + adjlist_inner_dict_factory = mydict3 + edge_key_dict_factory = mydict4 + edge_attr_dict_factory = mydict5 + + G = MyGraph() + assert isinstance(G._node, mydict1) + assert isinstance(G._adj, mydict2) + G.add_node(1) + assert isinstance(G._adj[1], mydict3) + if G.is_directed(): + assert isinstance(G._pred, mydict2) + assert isinstance(G._succ, mydict2) + assert isinstance(G._pred[1], mydict3) + G.add_edge(1, 2) + if G.is_multigraph(): + assert 
isinstance(G._adj[1][2], mydict4) + assert isinstance(G._adj[1][2][0], mydict5) + else: + assert isinstance(G._adj[1][2], mydict5) + + +class TestSpecialGraph(_TestGraph): + def setup_method(self): + _TestGraph.setup_method(self) + self.Graph = nx.Graph + + +class TestOrderedGraph(_TestGraph): + def setup_method(self): + _TestGraph.setup_method(self) + + class MyGraph(nx.Graph): + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + self.Graph = MyGraph + + +class TestThinGraph(BaseGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.Graph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + +class TestSpecialDiGraph(_TestDiGraph): + def setup_method(self): + _TestDiGraph.setup_method(self) + self.Graph = nx.DiGraph + + +class TestOrderedDiGraph(_TestDiGraph): + def setup_method(self): + _TestDiGraph.setup_method(self) + + class MyGraph(nx.DiGraph): + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + self.Graph = MyGraph + + +class TestThinDiGraph(BaseDiGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.DiGraph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + ed4, ed5, ed6 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj + # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = (all_edge_dict, all_edge_dict) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + # P3._adj is synced with P3._succ + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + +class TestSpecialMultiGraph(_TestMultiGraph): + def setup_method(self): + _TestMultiGraph.setup_method(self) + self.Graph = nx.MultiGraph + + +class TestOrderedMultiGraph(_TestMultiGraph): + def setup_method(self): + _TestMultiGraph.setup_method(self) + + class MyGraph(nx.MultiGraph): + node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_key_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + self.Graph = MyGraph + + +class TestSpecialMultiDiGraph(_TestMultiDiGraph): + def setup_method(self): + _TestMultiDiGraph.setup_method(self) + self.Graph = nx.MultiDiGraph + + +class TestOrderedMultiDiGraph(_TestMultiDiGraph): + def setup_method(self): + _TestMultiDiGraph.setup_method(self) + + class MyGraph(nx.MultiDiGraph): + 
node_dict_factory = OrderedDict + adjlist_outer_dict_factory = OrderedDict + adjlist_inner_dict_factory = OrderedDict + edge_key_dict_factory = OrderedDict + edge_attr_dict_factory = OrderedDict + + self.Graph = MyGraph diff --git a/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_subgraphviews.py b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_subgraphviews.py new file mode 100644 index 0000000..63c5136 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/classes/tests/test_subgraphviews.py @@ -0,0 +1,363 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +class TestSubGraphView: + gview = staticmethod(nx.graphviews.subgraph_view) + graph = nx.Graph + hide_edges_filter = staticmethod(nx.filters.hide_edges) + show_edges_filter = staticmethod(nx.filters.show_edges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + cls.hide_edges_w_hide_nodes = {(3, 4), (4, 5), (5, 6)} + + def test_hidden_nodes(self): + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + gview = self.gview + print(gview) + G = gview(self.G, filter_node=nodes_gone) + assert self.G.nodes - G.nodes == {4, 5} + assert self.G.edges - G.edges == self.hide_edges_w_hide_nodes + if G.is_directed(): + assert list(G[3]) == [] + assert list(G[2]) == [3] + else: + assert list(G[3]) == [2] + assert set(G[2]) == {1, 3} + pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (7 if G.is_multigraph() else 5) + + def test_hidden_edges(self): + hide_edges = [(2, 3), (8, 7), (222, 223)] + edges_gone = self.hide_edges_filter(hide_edges) + gview = self.gview + G = gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3)} + assert list(G[2]) == [] + assert list(G.pred[3]) == [] + assert list(G.pred[2]) == [1] + assert G.size() == 7 + else: + assert self.G.edges - G.edges == {(2, 3), (7, 8)} + assert list(G[2]) == [1] + assert G.size() == 6 + assert list(G[3]) == [4] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + def test_shown_node(self): + induced_subgraph = nx.filters.show_nodes([2, 3, 111]) + gview = self.gview + G = gview(self.G, filter_node=induced_subgraph) + assert set(G.nodes) == {2, 3} + if G.is_directed(): + assert list(G[3]) == [] + else: + assert list(G[3]) == [2] + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (3 if G.is_multigraph() else 1) + + def test_shown_edges(self): + show_edges = [(2, 3), (8, 7), (222, 223)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3)} + assert list(G[3]) == [] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3), (7, 8)} + assert list(G[3]) == [2] + assert list(G[2]) == [3] + assert G.size() == 2 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + +class 
TestSubDiGraphView(TestSubGraphView): + gview = staticmethod(nx.graphviews.subgraph_view) + graph = nx.DiGraph + hide_edges_filter = staticmethod(nx.filters.hide_diedges) + show_edges_filter = staticmethod(nx.filters.show_diedges) + hide_edges = [(2, 3), (8, 7), (222, 223)] + excluded = {(2, 3), (3, 4), (4, 5), (5, 6)} + + def test_inoutedges(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, nodes_gone, edges_gone) + + assert self.G.in_edges - G.in_edges == self.excluded + assert self.G.out_edges - G.out_edges == self.excluded + + def test_pred(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, nodes_gone, edges_gone) + + assert list(G.pred[2]) == [1] + assert list(G.pred[6]) == [] + + def test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, nodes_gone, edges_gone) + + assert G.degree(2) == 1 + assert G.out_degree(2) == 0 + assert G.in_degree(2) == 1 + assert G.size() == 4 + + +# multigraph +class TestMultiGraphView(TestSubGraphView): + gview = staticmethod(nx.graphviews.subgraph_view) + graph = nx.MultiGraph + hide_edges_filter = staticmethod(nx.filters.hide_multiedges) + show_edges_filter = staticmethod(nx.filters.show_multiedges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + multiedges = {(2, 3, 4), (2, 3, 5)} + cls.G.add_edges_from(multiedges) + cls.hide_edges_w_hide_nodes = {(3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def test_hidden_edges(self): + hide_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edges_gone = self.hide_edges_filter(hide_edges) + G = self.gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3, 4)} + assert list(G[3]) == [4] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] # only one 2 but two edges + assert list(G.pred[2]) == [1] + assert G.size() == 9 + else: + assert self.G.edges - G.edges == {(2, 3, 4), (7, 8, 0)} + assert list(G[3]) == [2, 4] + assert list(G[2]) == [1, 3] + assert G.size() == 8 + assert G.degree(3) == 3 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + def test_shown_edges(self): + show_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3, 4)} + assert list(G[3]) == [] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3, 4), (7, 8, 0)} + assert G.size() == 2 + assert list(G[3]) == [2] + assert G.degree(3) == 1 + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + +# multidigraph +class TestMultiDiGraphView(TestMultiGraphView, TestSubDiGraphView): + gview = staticmethod(nx.graphviews.subgraph_view) + graph = nx.MultiDiGraph + hide_edges_filter = staticmethod(nx.filters.hide_multidiedges) + show_edges_filter = staticmethod(nx.filters.show_multidiedges) + hide_edges = [(2, 3, 0), (8, 7, 0), (222, 223, 0)] + excluded = {(2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def 
test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, nodes_gone, edges_gone) + + assert G.degree(2) == 3 + assert G.out_degree(2) == 2 + assert G.in_degree(2) == 1 + assert G.size() == 6 + + +# induced_subgraph +class TestInducedSubGraph: + @classmethod + def setup_class(cls): + cls.K3 = G = nx.complete_graph(3) + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_full_graph(self): + G = self.K3 + H = nx.induced_subgraph(G, [0, 1, 2, 5]) + assert H.name == G.name + self.graphs_equal(H, G) + self.same_attrdict(H, G) + + def test_partial_subgraph(self): + G = self.K3 + H = nx.induced_subgraph(G, 0) + assert dict(H.adj) == {0: {}} + assert dict(G.adj) != {0: {}} + + H = nx.induced_subgraph(G, [0, 1]) + assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}} + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.edges[1, 2]["foo"] = "baz" + assert G.edges == H.edges + H.edges[1, 2]["foo"] = old_foo + assert G.edges == H.edges + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + +# edge_subgraph +class TestEdgeSubGraph: + @classmethod + def setup_class(cls): + # Create a path graph on five nodes. + cls.G = G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + cls.H = nx.edge_subgraph(G, [(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [(0, "node0"), (1, "node1"), (3, "node3"), (4, "node4")] == sorted( + self.H.nodes.data("name") + ) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert edges_equal( + [(0, 1, "edge01"), (3, 4, "edge34")], self.H.edges.data("name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes) + self.G.remove_node(5) + + def test_remove_node(self): + """Tests that removing a node in the original graph + removes the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes) + self.G.add_node(0, name="node0") + self.G.add_edge(0, 1, name="edge01") + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. 
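+        # (A quick aside on why this works: nx.edge_subgraph returns a view
+        # that shares the parent graph's attribute dicts instead of copying
+        # them, so attribute edits made through either graph are visible from
+        # both. The structure itself is frozen, as test_readonly below
+        # verifies, but the shared attribute dicts remain mutable.)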
+        self.G.nodes[0]["name"] = "foo"
+        assert self.G.nodes[0] == self.H.nodes[0]
+        self.H.nodes[1]["name"] = "bar"
+        assert self.G.nodes[1] == self.H.nodes[1]
+        # Revert the change, so tests pass with pytest-randomly
+        self.G.nodes[0]["name"] = "node0"
+        self.H.nodes[1]["name"] = "node1"
+
+    def test_edge_attr_dict(self):
+        """Tests that the edge attribute dictionary of the two graphs is
+        the same object.
+
+        """
+        for u, v in self.H.edges():
+            assert self.G.edges[u, v] == self.H.edges[u, v]
+        # Making a change to G should make a change in H and vice versa.
+        self.G.edges[0, 1]["name"] = "foo"
+        assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"]
+        self.H.edges[3, 4]["name"] = "bar"
+        assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"]
+        # Revert the change, so tests pass with pytest-randomly
+        self.G.edges[0, 1]["name"] = "edge01"
+        self.H.edges[3, 4]["name"] = "edge34"
+
+    def test_graph_attr_dict(self):
+        """Tests that the graph attribute dictionary of the two graphs
+        is the same object.
+
+        """
+        assert self.G.graph is self.H.graph
+
+    def test_readonly(self):
+        """Tests that the subgraph cannot change the graph structure"""
+        pytest.raises(nx.NetworkXError, self.H.add_node, 5)
+        pytest.raises(nx.NetworkXError, self.H.remove_node, 0)
+        pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6)
+        pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1)
diff --git a/myenv/lib/python3.9/site-packages/networkx/conftest.py b/myenv/lib/python3.9/site-packages/networkx/conftest.py
new file mode 100644
index 0000000..bfed321
--- /dev/null
+++ b/myenv/lib/python3.9/site-packages/networkx/conftest.py
@@ -0,0 +1,393 @@
+"""
+Testing
+=======
+
+General guidelines for writing good tests:
+
+- doctests always assume ``import networkx as nx``, so don't add that
+- prefer pytest fixtures over classes with setup methods
+- use the ``@pytest.mark.parametrize`` decorator
+- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib because
+  of PyPy, and add the module to the relevant entries below
+
+"""
+import sys
+import warnings
+
+import pytest
+
+import networkx
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--runslow", action="store_true", default=False, help="run slow tests"
+    )
+
+
+def pytest_configure(config):
+    config.addinivalue_line("markers", "slow: mark test as slow to run")
+
+
+def pytest_collection_modifyitems(config, items):
+    if config.getoption("--runslow"):
+        # --runslow given in cli: do not skip slow tests
+        return
+    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
+    for item in items:
+        if "slow" in item.keywords:
+            item.add_marker(skip_slow)
+
+
+# TODO: The warnings below need to be dealt with, but for now we silence them.
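+# (The fixture below is marked autouse=True, so pytest runs it before every
+# collected test; each filterwarnings() call installs a suite-wide filter
+# without individual tests having to opt in.)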
+@pytest.fixture(autouse=True) +def set_warnings(): + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="k_nearest_neighbors" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="numeric_mixing_matrix" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message=r"Ordered.* is deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="literal_stringizer is deprecated", + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="literal_destringizer is deprecated", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="is_string_like is deprecated" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nauthority_matrix" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nhub_matrix" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="default_opener is deprecated" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="empty_generator is deprecated" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="make_str is deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="generate_unique_node is deprecated", + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="context manager reversed is deprecated", + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="This will return a generator in 3.0*", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="betweenness_centrality_source" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="edge_betweeness" + ) + warnings.filterwarnings( + "ignore", category=PendingDeprecationWarning, message="the matrix subclass" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="to_numpy_matrix" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="from_numpy_matrix" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="networkx.pagerank_numpy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="networkx.pagerank_scipy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="write_gpickle" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="read_gpickle" + ) + warnings.filterwarnings("ignore", category=DeprecationWarning, message="write_shp") + warnings.filterwarnings("ignore", category=DeprecationWarning, message="read_shp") + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="edges_from_line" + ) + warnings.filterwarnings("ignore", category=DeprecationWarning, message="write_yaml") + warnings.filterwarnings("ignore", category=DeprecationWarning, message="read_yaml") + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="FilterAtlas.copy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="FilterAdjacency.copy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="FilterMultiAdjacency.copy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="FilterMultiInner.copy" + ) + warnings.filterwarnings("ignore", category=DeprecationWarning, message="jit_data") + warnings.filterwarnings("ignore", category=DeprecationWarning, 
message="jit_graph") + warnings.filterwarnings("ignore", category=DeprecationWarning, message="consume") + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="iterable is deprecated" + ) + warnings.filterwarnings( + "ignore", + category=FutureWarning, + message="\nThe function signature for cytoscape", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nThe `attrs` keyword" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="preserve_random_state" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="`almost_equal`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="`assert_nodes_equal`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="`assert_edges_equal`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="`assert_graphs_equal`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="networkx.hits_scipy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="networkx.hits_numpy" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="preserve_random_state" + ) + warnings.filterwarnings( + "ignore", + category=FutureWarning, + message="google_matrix will return an np.ndarray instead of a np.matrix", + ) + ### Future warnings from scipy.sparse array transition + warnings.filterwarnings( + "ignore", category=FutureWarning, message="biadjacency_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="bethe_hessian_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="incidence_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="laplacian_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="normalized_laplacian_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="directed_laplacian_matrix" + ) + warnings.filterwarnings( + "ignore", + category=FutureWarning, + message="directed_combinatorial_laplacian_matrix", + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="modularity_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="directed_modularity_matrix" + ) + warnings.filterwarnings( + "ignore", category=FutureWarning, message="adjacency_matrix" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="\n\nThe scipy.sparse array containers", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="networkx.project" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nfind_cores" + ) + warnings.filterwarnings("ignore", category=FutureWarning, message="attr_matrix") + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message=r"\n\nmake_small_.*" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="to_numpy_recarray" + ) + warnings.filterwarnings("ignore", category=DeprecationWarning, message="info") + warnings.filterwarnings("ignore", category=DeprecationWarning, message="to_tuple") + # create_using for scale_free_graph + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="The create_using argument" + ) + warnings.filterwarnings( + "ignore", category=PendingDeprecationWarning, message="nx.nx_pydot" + ) + + +@pytest.fixture(autouse=True) +def add_nx(doctest_namespace): + 
doctest_namespace["nx"] = networkx + + +# What dependencies are installed? + +try: + import numpy + + has_numpy = True +except ImportError: + has_numpy = False + +try: + import scipy + + has_scipy = True +except ImportError: + has_scipy = False + +try: + import matplotlib + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + import pandas + + has_pandas = True +except ImportError: + has_pandas = False + +try: + import pygraphviz + + has_pygraphviz = True +except ImportError: + has_pygraphviz = False + +try: + import yaml + + has_yaml = True +except ImportError: + has_yaml = False + +try: + import pydot + + has_pydot = True +except ImportError: + has_pydot = False + +try: + import ogr + + has_ogr = True +except ImportError: + has_ogr = False + +try: + import sympy + + has_sympy = True +except ImportError: + has_sympy = False + + +# List of files that pytest should ignore + +collect_ignore = [] + +needs_numpy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/centrality/current_flow_closeness.py", + "algorithms/node_classification/__init__.py", + "algorithms/non_randomness.py", + "algorithms/shortest_paths/dense.py", + "linalg/bethehessianmatrix.py", + "linalg/laplacianmatrix.py", + "utils/misc.py", +] +needs_scipy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/assortativity/correlation.py", + "algorithms/assortativity/mixing.py", + "algorithms/assortativity/pairs.py", + "algorithms/bipartite/matrix.py", + "algorithms/bipartite/spectral.py", + "algorithms/centrality/current_flow_betweenness.py", + "algorithms/centrality/current_flow_betweenness_subset.py", + "algorithms/centrality/eigenvector.py", + "algorithms/centrality/katz.py", + "algorithms/centrality/second_order.py", + "algorithms/centrality/subgraph_alg.py", + "algorithms/communicability_alg.py", + "algorithms/link_analysis/hits_alg.py", + "algorithms/link_analysis/pagerank_alg.py", + "algorithms/node_classification/__init__.py", + "algorithms/node_classification/hmn.py", + "algorithms/node_classification/lgc.py", + "algorithms/similarity.py", + "convert_matrix.py", + "drawing/layout.py", + "generators/spectral_graph_forge.py", + "linalg/algebraicconnectivity.py", + "linalg/attrmatrix.py", + "linalg/bethehessianmatrix.py", + "linalg/graphmatrix.py", + "linalg/modularitymatrix.py", + "linalg/spectrum.py", + "utils/rcm.py", +] +needs_matplotlib = ["drawing/nx_pylab.py"] +needs_pandas = ["convert_matrix.py"] +needs_yaml = ["readwrite/nx_yaml.py"] +needs_pygraphviz = ["drawing/nx_agraph.py"] +needs_pydot = ["drawing/nx_pydot.py"] +needs_ogr = ["readwrite/nx_shp.py"] +needs_sympy = ["algorithms/polynomials.py"] + +if not has_numpy: + collect_ignore += needs_numpy +if not has_scipy: + collect_ignore += needs_scipy +if not has_matplotlib: + collect_ignore += needs_matplotlib +if not has_pandas: + collect_ignore += needs_pandas +if not has_yaml: + collect_ignore += needs_yaml +if not has_pygraphviz: + collect_ignore += needs_pygraphviz +if not has_pydot: + collect_ignore += needs_pydot +if not has_ogr: + collect_ignore += needs_ogr +if not has_sympy: + collect_ignore += needs_sympy + +# FIXME: This is to avoid errors on AppVeyor +if sys.platform.startswith("win"): + collect_ignore += ["readwrite/graph6.py", "readwrite/sparse6.py"] diff --git a/myenv/lib/python3.9/site-packages/networkx/convert.py b/myenv/lib/python3.9/site-packages/networkx/convert.py new file mode 100644 index 0000000..3356dd0 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/convert.py @@ 
-0,0 +1,491 @@ +"""Functions to convert NetworkX graphs to and from other formats. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the to_networkx_graph() function +which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a graph with a single edge from a dictionary of dictionaries + +>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1) +>>> G = nx.Graph(d) + +See Also +-------- +nx_agraph, nx_pydot +""" +import warnings +from collections.abc import Collection, Generator, Iterator + +import networkx as nx + +__all__ = [ + "to_networkx_graph", + "from_dict_of_dicts", + "to_dict_of_dicts", + "from_dict_of_lists", + "to_dict_of_lists", + "from_edgelist", + "to_edgelist", +] + + +def to_networkx_graph(data, create_using=None, multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d = {0: {1: {"weight": 1}}} # dict-of-dicts single edge (0,1) + >>> G = nx.Graph(d) + + instead of the equivalent + + >>> G = nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : object to be converted + + Current known types are: + any NetworkX graph + dict-of-dicts + dict-of-lists + container (e.g. set, list, tuple) of edges + iterator (e.g. itertools.chain) that produces edges + generator of edges + Pandas DataFrame (row per edge) + 2D numpy array + scipy sparse matrix + pygraphviz agraph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. + + """ + # NX graph + if hasattr(data, "adj"): + try: + result = from_dict_of_dicts( + data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph(), + ) + # data.graph should be dict-like + result.graph.update(data.graph) + # data.nodes should be dict-like + # result.add_node_from(data.nodes.items()) possible but + # for custom node_attr_dict_factory which may be hashable + # will be unexpected behavior + for n, dd in data.nodes.items(): + result._node[n].update(dd) + return result + except Exception as err: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err + + # pygraphviz agraph + if hasattr(data, "is_strict"): + try: + return nx.nx_agraph.from_agraph(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err + + # dict of dicts/lists + if isinstance(data, dict): + try: + return from_dict_of_dicts( + data, create_using=create_using, multigraph_input=multigraph_input + ) + except Exception as err1: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err1)}: {err1}" + ) + try: + return from_dict_of_lists(data, create_using=create_using) + except Exception as err2: + raise TypeError("Input is not known type.") from err2 + + # Pandas DataFrame + try: + import pandas as pd + + if isinstance(data, pd.DataFrame): + if data.shape[0] == data.shape[1]: + try: + return nx.from_pandas_adjacency(data, create_using=create_using) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame adjacency matrix." 
+ raise nx.NetworkXError(msg) from err + else: + try: + return nx.from_pandas_edgelist( + data, edge_attr=True, create_using=create_using + ) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame edge-list." + raise nx.NetworkXError(msg) from err + except ImportError: + warnings.warn("pandas not found, skipping conversion test.", ImportWarning) + + # numpy matrix or ndarray + try: + import numpy as np + + if isinstance(data, np.ndarray): + try: + return nx.from_numpy_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + f"Failed to interpret array as an adjacency matrix." + ) from err + except ImportError: + warnings.warn("numpy not found, skipping conversion test.", ImportWarning) + + # scipy sparse matrix - any format + try: + import scipy + + if hasattr(data, "format"): + try: + return nx.from_scipy_sparse_matrix(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + "Input is not a correct scipy sparse matrix type." + ) from err + except ImportError: + warnings.warn("scipy not found, skipping conversion test.", ImportWarning) + + # Note: most general check - should remain last in order of execution + # Includes containers (e.g. list, set, dict, etc.), generators, and + # iterators (e.g. itertools.chain) of edges + + if isinstance(data, (Collection, Generator, Iterator)): + try: + return from_edgelist(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a valid edge list") from err + + raise nx.NetworkXError("Input is not a known data type for conversion.") + + +def to_dict_of_lists(G, nodelist=None): + """Returns adjacency representation of graph as a dictionary of lists. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. + + """ + if nodelist is None: + nodelist = G + + d = {} + for n in nodelist: + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + + +def from_dict_of_lists(d, create_using=None): + """Returns a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> dol = {0: [1]} # single edge (0,1) + >>> G = nx.from_dict_of_lists(dol) + + or + + >>> G = nx.Graph(dol) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen = {} + for node, nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up + else: + G.add_edges_from( + ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist) + ) + return G + + +def to_dict_of_dicts(G, nodelist=None, edge_data=None): + """Returns adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + edge_data : scalar, optional + If provided, the value of the dictionary will be set to `edge_data` for + all edges. 
Usual values could be `1` or `True`. If `edge_data` is + `None` (the default), the edgedata in `G` is used, resulting in a + dict-of-dict-of-dicts. If `G` is a MultiGraph, the result will be a + dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize + handling edge data. `edge_data` should *not* be a container. + + Returns + ------- + dod : dict + A nested dictionary representation of `G`. Note that the level of + nesting depends on the type of `G` and the value of `edge_data` + (see Examples). + + See Also + -------- + from_dict_of_dicts, to_dict_of_lists + + Notes + ----- + For a more custom approach to handling edge data, try:: + + dod = { + n: { + nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items() + } + for n, nbrdict in G.adj.items() + } + + where `custom` returns the desired edge data for each edge between `n` and + `nbr`, given existing edge data `dd`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.to_dict_of_dicts(G) + {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}} + + Edge data is preserved by default (``edge_data=None``), resulting + in dict-of-dict-of-dicts where the innermost dictionary contains the + edge data: + + >>> G = nx.Graph() + >>> G.add_edges_from( + ... [ + ... (0, 1, {'weight': 1.0}), + ... (1, 2, {'weight': 2.0}), + ... (2, 0, {'weight': 1.0}), + ... ] + ... ) + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}}, + 1: {0: {'weight': 1.0}, 2: {'weight': 2.0}}, + 2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}} + >>> d[1][2]['weight'] + 2.0 + + If `edge_data` is not `None`, edge data in the original graph (if any) is + replaced: + + >>> d = nx.to_dict_of_dicts(G, edge_data=1) + >>> d + {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}} + >>> d[1][2] + 1 + + This also applies to MultiGraphs: edge data is preserved by default: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, key='a', weight=1.0) + 'a' + >>> G.add_edge(0, 1, key='b', weight=5.0) + 'b' + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}, + 1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}} + >>> d[0][1]['b']['weight'] + 5.0 + + But multi edge data is lost if `edge_data` is not `None`: + + >>> d = nx.to_dict_of_dicts(G, edge_data=10) + >>> d + {0: {1: 10}, 1: {0: 10}} + """ + dod = {} + if nodelist is None: + if edge_data is None: + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data + return dod + + +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): + """Returns a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + When True, the dict `d` is assumed + to be a dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. 
+ Otherwise this routine assumes dict-of-dict-of-dict keyed by + node to neighbor to edge data. + + Examples + -------- + >>> dod = {0: {1: {"weight": 1}}} # single edge (0,1) + >>> G = nx.from_dict_of_dicts(dod) + + or + + >>> G = nx.Graph(dod) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + # does dict d represent a MultiGraph or MultiDiGraph? + if multigraph_input: + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( + (u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: + G.add_edges_from( + (u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, key, data) for key, data in datadict.items() + ) + seen.add((v, u)) + else: + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, data) for key, data in datadict.items() + ) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) + G[u][v][0].update(data) + seen.add((v, u)) + else: + G.add_edges_from( + ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items()) + ) + return G + + +def to_edgelist(G, nodelist=None): + """Returns a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + return G.edges(nodelist, data=True) + + +def from_edgelist(edgelist, create_using=None): + """Returns a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> edgelist = [(0, 1)] # single edge (0,1) + >>> G = nx.from_edgelist(edgelist) + + or + + >>> G = nx.Graph(edgelist) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_edges_from(edgelist) + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/convert_matrix.py b/myenv/lib/python3.9/site-packages/networkx/convert_matrix.py new file mode 100644 index 0000000..fecc8ca --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/convert_matrix.py @@ -0,0 +1,1673 @@ +"""Functions to convert NetworkX graphs to and from common data containers +like numpy arrays, scipy sparse arrays, and pandas DataFrames. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph` +function which attempts to guess the input type and convert it automatically. 
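+
+The ``*_matrix`` functions in this module (``to_numpy_matrix``,
+``from_numpy_matrix``, ``to_numpy_recarray``, ``to_scipy_sparse_matrix``)
+emit ``DeprecationWarning`` and are slated for removal in NetworkX 3.0;
+prefer the corresponding ``*_array`` functions in new code.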
+ +Examples +-------- +Create a 10 node random graph from a numpy array + +>>> import numpy as np +>>> rng = np.random.default_rng() +>>> a = rng.integers(low=0, high=2, size=(10, 10)) +>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph) + +or equivalently: + +>>> DG = nx.DiGraph(a) + +which calls `from_numpy_array` internally based on the type of ``a``. + +See Also +-------- +nx_agraph, nx_pydot +""" + +import itertools +import warnings +from collections import defaultdict + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_numpy_matrix", + "to_numpy_matrix", + "from_pandas_adjacency", + "to_pandas_adjacency", + "from_pandas_edgelist", + "to_pandas_edgelist", + "to_numpy_recarray", + "from_scipy_sparse_array", + "from_scipy_sparse_matrix", + "to_scipy_sparse_array", + "to_scipy_sparse_matrix", + "from_numpy_array", + "to_numpy_array", +] + + +def to_pandas_adjacency( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). 
If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> df = nx.to_pandas_adjacency(G, dtype=int) + >>> df + 1 + 1 1 + >>> df.values[np.diag_indices_from(df)] *= 2 + >>> df + 1 + 1 2 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + + """ + import pandas as pd + + M = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + if nodelist is None: + nodelist = list(G) + return pd.DataFrame(data=M, index=nodelist, columns=nodelist) + + +def from_pandas_adjacency(df, create_using=None): + r"""Returns a graph from Pandas DataFrame. + + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of df corresponds to an edge from i to j. + + If `df` has a single data type for each entry it will be converted to an + appropriate Python data type. + + If `df` has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_pandas_adjacency + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> df = pd.DataFrame([[1, 1], [2, 1]]) + >>> df + 0 1 + 0 1 1 + 1 2 1 + >>> G = nx.from_pandas_adjacency(df) + >>> G.name = "Graph from pandas adjacency matrix" + >>> print(nx.info(G)) + Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges + """ + + try: + df = df[df.index] + except Exception as err: + missing = list(set(df.index).difference(set(df.columns))) + msg = f"{missing} not in columns" + raise nx.NetworkXError("Columns must match Indices.", msg) from err + + A = df.values + G = from_numpy_array(A, create_using=create_using) + + nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False) + return G + + +def to_pandas_edgelist( + G, + source="source", + target="target", + nodelist=None, + dtype=None, + order=None, + edge_key=None, +): + """Returns the graph edge list as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + source : str or int, optional + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int, optional + A valid column name (string or integer) for the target nodes (for the + directed case). + + nodelist : list, optional + Use only nodes specified in nodelist + + dtype : dtype, default None + Use to create the DataFrame. Data type to force. + Only a single dtype is allowed. If None, infer. + + order : None + An unused parameter mistakenly included in the function. + + .. deprecated:: 2.6 + This is deprecated and will be removed in NetworkX v3.0. 
+ + edge_key : str or int or None, optional (default=None) + A valid column name (string or integer) for the edge keys (for the + multigraph case). If None, edge keys are not stored in the DataFrame. + + Returns + ------- + df : Pandas DataFrame + Graph edge list + + Examples + -------- + >>> G = nx.Graph( + ... [ + ... ("A", "B", {"cost": 1, "weight": 7}), + ... ("C", "E", {"cost": 9, "weight": 10}), + ... ] + ... ) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"]) + >>> df[["source", "target", "cost", "weight"]] + source target cost weight + 0 A B 1 7 + 1 C E 9 10 + + >>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})]) + >>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey') + >>> df[['source', 'target', 'cost', 'ekey']] + source target cost ekey + 0 A B 1 0 + 1 A B 9 1 + + """ + import pandas as pd + + if nodelist is None: + edgelist = G.edges(data=True) + else: + edgelist = G.edges(nodelist, data=True) + source_nodes = [s for s, _, _ in edgelist] + target_nodes = [t for _, t, _ in edgelist] + + all_attrs = set().union(*(d.keys() for _, _, d in edgelist)) + if source in all_attrs: + raise nx.NetworkXError(f"Source name {source!r} is an edge attr name") + if target in all_attrs: + raise nx.NetworkXError(f"Target name {target!r} is an edge attr name") + + nan = float("nan") + edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} + + if G.is_multigraph() and edge_key is not None: + if edge_key in all_attrs: + raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name") + edge_keys = [k for _, _, k in G.edges(keys=True)] + edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} + else: + edgelistdict = {source: source_nodes, target: target_nodes} + + edgelistdict.update(edge_attr) + return pd.DataFrame(edgelistdict, dtype=dtype) + + +def from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=None, + create_using=None, + edge_key=None, +): + """Returns a graph from Pandas DataFrame containing an edge list. + + The Pandas DataFrame should contain at least two columns of node names and + zero or more columns of edge attributes. Each row will be processed as one + edge instance. + + Note: This function iterates over DataFrame.values, which is not + guaranteed to retain the data type across columns in the row. This is only + a problem if your row is entirely numeric and a mix of ints and floats. In + that case, all values will be returned as floats. See the + DataFrame.iterrows documentation for an example. + + Parameters + ---------- + df : Pandas DataFrame + An edge list representation of a graph + + source : str or int + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int + A valid column name (string or integer) for the target nodes (for the + directed case). + + edge_attr : str or int, iterable, True, or None + A valid column name (str or int) or iterable of column names that are + used to retrieve items and add them to the graph as edge attributes. + If `True`, all of the remaining columns will be added. + If `None`, no edge attributes are added to the graph. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_key : str or None, optional (default=None) + A valid column name for the edge keys (for a MultiGraph). 
The values in + this column are used for the edge keys when adding edges if create_using + is a multigraph. + + See Also + -------- + to_pandas_edgelist + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> rng = np.random.RandomState(seed=5) + >>> ints = rng.randint(1, 11, size=(3, 2)) + >>> a = ["A", "B", "C"] + >>> b = ["D", "A", "E"] + >>> df = pd.DataFrame(ints, columns=["weight", "cost"]) + >>> df[0] = a + >>> df["b"] = b + >>> df[["weight", "cost", 0, "b"]] + weight cost 0 b + 0 4 7 A D + 1 7 1 B A + 2 10 9 C E + >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"]) + >>> G["E"]["C"]["weight"] + 10 + >>> G["E"]["C"]["cost"] + 9 + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2], + ... "target": [2, 2, 3], + ... "weight": [3, 4, 5], + ... "color": ["red", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist(edges, edge_attr=True) + >>> G[0][2]["color"] + 'red' + + Build multigraph with custom keys: + + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2, 0], + ... "target": [2, 2, 3, 2], + ... "my_edge_key": ["A", "B", "C", "D"], + ... "weight": [3, 4, 5, 6], + ... "color": ["red", "blue", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist( + ... edges, + ... edge_key="my_edge_key", + ... edge_attr=["weight", "color"], + ... create_using=nx.MultiGraph(), + ... ) + >>> G[0][2] + AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}}) + + + """ + g = nx.empty_graph(0, create_using) + + if edge_attr is None: + g.add_edges_from(zip(df[source], df[target])) + return g + + reserved_columns = [source, target] + + # Additional columns requested + attr_col_headings = [] + attribute_data = [] + if edge_attr is True: + attr_col_headings = [c for c in df.columns if c not in reserved_columns] + elif isinstance(edge_attr, (list, tuple)): + attr_col_headings = edge_attr + else: + attr_col_headings = [edge_attr] + if len(attr_col_headings) == 0: + raise nx.NetworkXError( + f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}" + ) + + try: + attribute_data = zip(*[df[col] for col in attr_col_headings]) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_attr argument: {edge_attr}" + raise nx.NetworkXError(msg) from err + + if g.is_multigraph(): + # => append the edge keys from the df to the bundled data + if edge_key is not None: + try: + multigraph_edge_keys = df[edge_key] + attribute_data = zip(attribute_data, multigraph_edge_keys) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_key argument: {edge_key}" + raise nx.NetworkXError(msg) from err + + for s, t, attrs in zip(df[source], df[target], attribute_data): + if edge_key is not None: + attrs, multigraph_edge_key = attrs + key = g.add_edge(s, t, key=multigraph_edge_key) + else: + key = g.add_edge(s, t) + + g[s][t][key].update(zip(attr_col_headings, attrs)) + else: + for s, t, attrs in zip(df[source], df[target], attribute_data): + g.add_edge(s, t) + g[s][t].update(zip(attr_col_headings, attrs)) + + return g + + +def to_numpy_matrix( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a NumPy matrix. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. 
+ If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data type, optional + A valid single NumPy data type used to initialize the array. + This must be a simple type such as int or numpy.float64 and + not a compound data type (see to_numpy_recarray) + If None, then the NumPy default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float (default = 0.0) + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + M : NumPy matrix + Graph adjacency matrix + + See Also + -------- + to_numpy_recarray + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The matrix entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the `multigraph_weight` parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Numpy matrix can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_numpy_matrix(G) + >>> A + matrix([[1.]]) + >>> A[np.diag_indices_from(A)] *= 2 + >>> A + matrix([[2.]]) + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_numpy_matrix(G, nodelist=[0, 1, 2]) + matrix([[0., 2., 0.], + [1., 0., 0.], + [0., 0., 4.]]) + + """ + warnings.warn( + ( + "to_numpy_matrix is deprecated and will be removed in NetworkX 3.0.\n" + "Use to_numpy_array instead, e.g. np.asmatrix(to_numpy_array(G, **kwargs))" + ), + DeprecationWarning, + ) + + import numpy as np + + A = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + M = np.asmatrix(A, dtype=dtype) + return M + + +def from_numpy_matrix(A, parallel_edges=False, create_using=None): + """Returns a graph from numpy matrix. + + The numpy matrix is interpreted as an adjacency matrix for the graph. 
+ + Parameters + ---------- + A : numpy matrix + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If False, then the entries in the adjacency matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + If the numpy matrix has a single data type for each matrix entry it + will be converted to an appropriate Python data type. + + If the numpy matrix has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_numpy_recarray + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_matrix(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_matrix(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_matrix(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_matrix(A) + >>> list(G.edges()) + [(0, 0)] + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + warnings.warn( + ( + "from_numpy_matrix is deprecated and will be removed in NetworkX 3.0.\n" + "Use from_numpy_array instead, e.g. from_numpy_array(A, **kwargs)" + ), + DeprecationWarning, + ) + return from_numpy_array(A, parallel_edges=parallel_edges, create_using=create_using) + + +@not_implemented_for("multigraph") +def to_numpy_recarray(G, nodelist=None, dtype=None, order=None): + """Returns the graph adjacency matrix as a NumPy recarray. + + .. deprecated:: 2.7 + + ``to_numpy_recarray`` is deprecated and will be removed in NetworkX 3.0. + Use ``nx.to_numpy_array(G, dtype=dtype, weight=None).view(np.recarray)`` + instead. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy recarray. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. 
+ If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy named dtype used to initialize the NumPy recarray. + The data type names are assumed to be keys in the graph edge attribute + dictionary. The default is ``dtype([("weight", float)])``. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + Returns + ------- + M : NumPy recarray + The graph with specified edge data as a Numpy recarray + + Notes + ----- + When `nodelist` does not contain every node in `G`, the adjacency + matrix is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7.0, cost=5) + >>> A = nx.to_numpy_recarray(G, dtype=[("weight", float), ("cost", int)]) + >>> print(A.weight) + [[0. 7.] + [7. 0.]] + >>> print(A.cost) + [[0 5] + [5 0]] + + """ + import warnings + + import numpy as np + + warnings.warn( + ( + "to_numpy_recarray is deprecated and will be removed in version 3.0.\n" + "Use to_numpy_array instead::\n\n" + " nx.to_numpy_array(G, dtype=dtype, weight=None).view(np.recarray)" + ), + DeprecationWarning, + stacklevel=2, + ) + + if dtype is None: + dtype = [("weight", float)] + + if nodelist is None: + nodelist = list(G) + nodeset = G + nlen = len(G) + else: + nlen = len(nodelist) + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + + undirected = not G.is_directed() + index = dict(zip(nodelist, range(nlen))) + M = np.zeros((nlen, nlen), dtype=dtype, order=order) + + names = M.dtype.names + for u, v, attrs in G.edges(data=True): + if (u in nodeset) and (v in nodeset): + i, j = index[u], index[v] + values = tuple(attrs[n] for n in names) + M[i, j] = values + if undirected: + M[j, i] = M[i, j] + + return M.view(np.recarray) + + +def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"): + """Returns the graph adjacency matrix as a SciPy sparse array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the sparse matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [1]_ for details. + + Returns + ------- + A : SciPy sparse array + Graph adjacency matrix. + + Notes + ----- + For directed graphs, matrix entry i,j corresponds to an edge from i to j. + + The matrix entries are populated using the edge attribute held in + parameter weight. When an edge does not have that attribute, the + value of the entry is 1. + + For multiple edges the matrix values are the sums of the edge weights. 
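+
+    For instance, two parallel edges with weights 3 and 1 between the same
+    pair of nodes collapse into a single matrix entry of 4:
+
+    >>> G = nx.MultiGraph([(0, 1, {"weight": 3}), (0, 1, {"weight": 1})])
+    >>> nx.to_scipy_sparse_array(G).toarray()[0, 1]
+    4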
+ + When `nodelist` does not contain every node in `G`, the adjacency matrix + is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Scipy sparse matrix can be modified as follows: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_scipy_sparse_array(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal() * 2) + >>> print(A.toarray()) + [[2]] + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> S = nx.to_scipy_sparse_array(G, nodelist=[0, 1, 2]) + >>> print(S.toarray()) + [[0 2 0] + [1 0 0] + [0 0 4]] + + References + ---------- + .. [1] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + import scipy.sparse # call as sp.sparse + + if len(G) == 0: + raise nx.NetworkXError("Graph has no nodes or edges") + + if nodelist is None: + nodelist = list(G) + nlen = len(G) + else: + nlen = len(nodelist) + if nlen == 0: + raise nx.NetworkXError("nodelist has no nodes") + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + if nlen < len(G): + G = G.subgraph(nodelist) + + index = dict(zip(nodelist, range(nlen))) + coefficients = zip( + *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1)) + ) + try: + row, col, data = coefficients + except ValueError: + # there is no edge in the subgraph + row, col, data = [], [], [] + + if G.is_directed(): + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype) + else: + # symmetrize matrix + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = list(nx.selfloop_edges(G, data=weight, default=1)) + if selfloops: + diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops)) + d += diag_data + r += diag_index + c += diag_index + A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err + + +def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, weight="weight", format="csr"): + """Returns the graph adjacency matrix as a SciPy sparse matrix. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the sparse matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). 
For + some algorithms different implementations of sparse matrices + can perform better. See [1]_ for details. + + Returns + ------- + A : SciPy sparse matrix + Graph adjacency matrix. + + Notes + ----- + For directed graphs, matrix entry i,j corresponds to an edge from i to j. + + The matrix entries are populated using the edge attribute held in + parameter weight. When an edge does not have that attribute, the + value of the entry is 1. + + For multiple edges the matrix values are the sums of the edge weights. + + When `nodelist` does not contain every node in `G`, the adjacency matrix + is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Scipy sparse matrix can be modified as follows: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_scipy_sparse_matrix(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal() * 2) + >>> print(A.todense()) + [[2]] + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0, 1, 2]) + >>> print(S.todense()) + [[0 2 0] + [1 0 0] + [0 0 4]] + + References + ---------- + .. [1] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + import scipy.sparse + + warnings.warn( + ( + "\n\nThe scipy.sparse array containers will be used instead of matrices\n" + "in Networkx 3.0. Use `to_scipy_sparse_array` instead." + ), + DeprecationWarning, + stacklevel=2, + ) + A = to_scipy_sparse_array( + G, nodelist=nodelist, dtype=dtype, weight=weight, format=format + ) + return sp.sparse.csr_matrix(A).asformat(format) + + +def from_scipy_sparse_matrix( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + matrix. + + Parameters + ---------- + A: scipy sparse matrix + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. 
+ + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> import scipy.sparse # call as sp.sparse + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_matrix(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_matrix([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_matrix([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_matrix( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + warnings.warn( + ( + "\n\nThe scipy.sparse array containers will be used instead of matrices\n" + "in Networkx 3.0. Use `from_scipy_sparse_array` instead." + ), + DeprecationWarning, + stacklevel=2, + ) + return from_scipy_sparse_array( + A, + parallel_edges=parallel_edges, + create_using=create_using, + edge_attribute=edge_attribute, + ) + + +def _csr_gen_triples(A): + """Converts a SciPy sparse matrix in **Compressed Sparse Row** format to + an iterable of weighted edge triples. + + """ + nrows = A.shape[0] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(nrows): + for j in range(indptr[i], indptr[i + 1]): + yield i, indices[j], data[j] + + +def _csc_gen_triples(A): + """Converts a SciPy sparse matrix in **Compressed Sparse Column** format to + an iterable of weighted edge triples. + + """ + ncols = A.shape[1] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(ncols): + for j in range(indptr[i], indptr[i + 1]): + yield indices[j], i, data[j] + + +def _coo_gen_triples(A): + """Converts a SciPy sparse matrix in **Coordinate** format to an iterable + of weighted edge triples. + + """ + row, col, data = A.row, A.col, A.data + return zip(row, col, data) + + +def _dok_gen_triples(A): + """Converts a SciPy sparse matrix in **Dictionary of Keys** format to an + iterable of weighted edge triples. + + """ + for (r, c), v in A.items(): + yield r, c, v + + +def _generate_weighted_edges(A): + """Returns an iterable over (u, v, w) triples, where u and v are adjacent + vertices and w is the weight of the edge joining u and v. + + `A` is a SciPy sparse matrix (in any format). + + """ + if A.format == "csr": + return _csr_gen_triples(A) + if A.format == "csc": + return _csc_gen_triples(A) + if A.format == "dok": + return _dok_gen_triples(A) + # If A is in any other format (including COO), convert it to COO format. + return _coo_gen_triples(A.tocoo()) + + +def from_scipy_sparse_array( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + array. 
+ + Parameters + ---------- + A: scipy.sparse array + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> import scipy.sparse # call as sp.sparse + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_array(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = _generate_weighted_edges(A) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. + if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. 
Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_weighted_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G + + +def to_numpy_array( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a NumPy array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``. + + dtype : NumPy data type, optional + A NumPy data type used to initialize the array. If None, then the NumPy + default is used. The dtype can be structured if `weight=None`, in which + case the dtype field names are used to look up edge attributes. The + result is a structured array where each named field in the dtype + corresponds to the adjaceny for that edge attribute. See examples for + details. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : callable, optional + An function that determines how weights in multigraphs are handled. + The function should accept a sequence of weights and return a single + value. The default is to sum the weights of the multiple edges. + + weight : string or None optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. `weight` must be ``None`` if a structured + dtype is used. + + nonedge : array_like (default = 0.0) + The value used to represent non-edges in the adjaceny matrix. + The array values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are array values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as ``nan``. + + Returns + ------- + A : NumPy ndarray + Graph adjacency matrix + + Raises + ------ + NetworkXError + If `dtype` is a structured dtype and `G` is a multigraph + ValueError + If `dtype` is a structured dtype and `weight` is not `None` + + See Also + -------- + from_numpy_array + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. + + Entries in the adjacency matrix are given by the `weight` edge attribute. + When an edge does not have a weight attribute, the value of the entry is + set to the number 1. For multiple (parallel) edges, the values of the + entries are determined by the `multigraph_weight` parameter. The default is + to sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the adjacency matrix is + built from the subgraph of `G` that is induced by the nodes in `nodelist`. 
+ + The convention used for self-loop edges in graphs is to assign the + diagonal array entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting NumPy array can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_numpy_array(G) + >>> A + array([[1.]]) + >>> A[np.diag_indices_from(A)] *= 2 + >>> A + array([[2.]]) + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_numpy_array(G, nodelist=[0, 1, 2]) + array([[0., 2., 0.], + [1., 0., 0.], + [0., 0., 4.]]) + + When `nodelist` argument is used, nodes of `G` which do not appear in the `nodelist` + and their edges are not included in the adjacency matrix. Here is an example: + + >>> G = nx.Graph() + >>> G.add_edge(3, 1) + >>> G.add_edge(2, 0) + >>> G.add_edge(2, 1) + >>> G.add_edge(3, 0) + >>> nx.to_numpy_array(G, nodelist=[1, 2, 3]) + array([[0., 1., 1.], + [1., 0., 0.], + [1., 0., 0.]]) + + This function can also be used to create adjacency matrices for multiple + edge attributes with structured dtypes: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, weight=10) + >>> G.add_edge(1, 2, cost=5) + >>> G.add_edge(2, 3, weight=3, cost=-4.0) + >>> dtype = np.dtype([("weight", int), ("cost", float)]) + >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None) + >>> A["weight"] + array([[ 0, 10, 0, 0], + [10, 0, 1, 0], + [ 0, 1, 0, 3], + [ 0, 0, 3, 0]]) + >>> A["cost"] + array([[ 0., 1., 0., 0.], + [ 1., 0., 5., 0.], + [ 0., 5., 0., -4.], + [ 0., 0., -4., 0.]]) + + As stated above, the argument "nonedge" is useful especially when there are + actually edges with weight 0 in the graph. Setting a nonedge value different than 0, + makes it much clearer to differentiate such 0-weighted edges and actual nonedge values. + + >>> G = nx.Graph() + >>> G.add_edge(3, 1, weight=2) + >>> G.add_edge(2, 0, weight=0) + >>> G.add_edge(2, 1, weight=0) + >>> G.add_edge(3, 0, weight=1) + >>> nx.to_numpy_array(G, nonedge=-1.) + array([[-1., 2., -1., 1.], + [ 2., -1., 0., -1.], + [-1., 0., -1., 0.], + [ 1., -1., 0., -1.]]) + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + nlen = len(nodelist) + + # Input validation + nodeset = set(nodelist) + if nodeset - set(G): + raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G") + if len(nodeset) < nlen: + raise nx.NetworkXError("nodelist contains duplicates.") + + A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order) + + # Corner cases: empty nodelist or graph without any edges + if nlen == 0 or G.number_of_edges() == 0: + return A + + # If dtype is structured and weight is None, use dtype field names as + # edge attributes + edge_attrs = None # Only single edge attribute by default + if A.dtype.names: + if weight is None: + edge_attrs = dtype.names + else: + raise ValueError( + "Specifying `weight` not supported for structured dtypes\n." + "To create adjacency matrices from structured dtypes, use `weight=None`." 
+ ) + + # Map nodes to row/col in matrix + idx = dict(zip(nodelist, range(nlen))) + if len(nodelist) < len(G): + G = G.subgraph(nodelist).copy() + + # Collect all edge weights and reduce with `multigraph_weights` + if G.is_multigraph(): + if edge_attrs: + raise nx.NetworkXError( + "Structured arrays are not supported for MultiGraphs" + ) + d = defaultdict(list) + for u, v, wt in G.edges(data=weight, default=1.0): + d[(idx[u], idx[v])].append(wt) + i, j = np.array(list(d.keys())).T # indices + wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights + else: + i, j, wts = [], [], [] + + # Special branch: multi-attr adjacency from structured dtypes + if edge_attrs: + # Extract edges with all data + for u, v, data in G.edges(data=True): + i.append(idx[u]) + j.append(idx[v]) + wts.append(data) + # Map each attribute to the appropriate named field in the + # structured dtype + for attr in edge_attrs: + attr_data = [wt.get(attr, 1.0) for wt in wts] + A[attr][i, j] = attr_data + if not G.is_directed(): + A[attr][j, i] = attr_data + return A + + for u, v, wt in G.edges(data=weight, default=1.0): + i.append(idx[u]) + j.append(idx[v]) + wts.append(wt) + + # Set array values with advanced indexing + A[i, j] = wts + if not G.is_directed(): + A[j, i] = wts + + return A + + +def from_numpy_array(A, parallel_edges=False, create_using=None): + """Returns a graph from a 2D NumPy array. + + The 2D NumPy array is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : a 2D numpy.ndarray + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer array, then entry *(i, j)* in the array is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the array are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (of the same type as `create_using`) with parallel edges. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the array `A` will be added to the + graph. + + If the NumPy array has a single data type for each array entry it + will be converted to an appropriate Python data type. + + If the NumPy array has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. 
+ + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + kind_to_python_type = { + "f": float, + "i": int, + "u": int, + "b": bool, + "c": complex, + "S": str, + "U": str, + "V": "void", + } + G = nx.empty_graph(0, create_using) + if A.ndim != 2: + raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}") + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + dt = A.dtype + try: + python_type = kind_to_python_type[dt.kind] + except Exception as err: + raise TypeError(f"Unknown numpy data type: {dt}") from err + + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Get a list of all the entries in the array with nonzero entries. These + # coordinates become edges in the graph. (convert to int from np.int64) + edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero())) + # handle numpy constructed data type + if python_type == "void": + # Sort the fields by their offset, then by dtype, then by name. + fields = sorted( + (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items() + ) + triples = ( + ( + u, + v, + { + name: kind_to_python_type[dtype.kind](val) + for (_, dtype, name), val in zip(fields, A[u, v]) + }, + ) + for u, v in edges + ) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. + elif python_type is int and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + triples = chain( + ((u, v, {"weight": 1}) for d in range(A[u, v])) for (u, v) in edges + ) + else: # basic data type + triples = ((u, v, dict(weight=python_type(A[u, v]))) for u, v in edges) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. 
This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_edges_from(triples) + return G diff --git a/myenv/lib/python3.9/site-packages/networkx/drawing/__init__.py b/myenv/lib/python3.9/site-packages/networkx/drawing/__init__.py new file mode 100644 index 0000000..1e8542f --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/drawing/__init__.py @@ -0,0 +1,6 @@ +# graph drawing and interface to graphviz + +from .layout import * +from .nx_pylab import * +from . import nx_agraph +from . import nx_pydot diff --git a/myenv/lib/python3.9/site-packages/networkx/drawing/layout.py b/myenv/lib/python3.9/site-packages/networkx/drawing/layout.py new file mode 100644 index 0000000..b6d2afe --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/drawing/layout.py @@ -0,0 +1,1189 @@ +""" +****** +Layout +****** + +Node positioning algorithms for graph drawing. + +For `random_layout()` the possible resulting shape +is a square of side [0, scale] (default: [0, 1]) +Changing `center` shifts the layout by that amount. + +For the other layout routines, the extent is +[center - scale, center + scale] (default: [-1, 1]). + +Warning: Most layout routines have only been tested in 2-dimensions. + +""" +import networkx as nx +from networkx.utils import np_random_state + +__all__ = [ + "bipartite_layout", + "circular_layout", + "kamada_kawai_layout", + "random_layout", + "rescale_layout", + "rescale_layout_dict", + "shell_layout", + "spring_layout", + "spectral_layout", + "planar_layout", + "fruchterman_reingold_layout", + "spiral_layout", + "multipartite_layout", +] + + +def _process_params(G, center, dim): + # Some boilerplate code. + import numpy as np + + if not isinstance(G, nx.Graph): + empty_graph = nx.Graph() + empty_graph.add_nodes_from(G) + G = empty_graph + + if center is None: + center = np.zeros(dim) + else: + center = np.asarray(center) + + if len(center) != dim: + msg = "length of center coordinates must match dimension of layout" + raise ValueError(msg) + + return G, center + + +@np_random_state(3) +def random_layout(G, center=None, dim=2, seed=None): + """Position nodes uniformly at random in the unit square. + + For every node, a position is generated by choosing each of dim + coordinates uniformly at random on the interval [0.0, 1.0). + + NumPy (http://scipy.org) is required for this function. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. 
+ + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> pos = nx.random_layout(G) + + """ + import numpy as np + + G, center = _process_params(G, center, dim) + pos = seed.rand(len(G), dim) + center + pos = pos.astype(np.float32) + pos = dict(zip(G, pos)) + + return pos + + +def circular_layout(G, scale=1, center=None, dim=2): + # dim=2 only + """Position nodes on a circle. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + If dim>2, the remaining dimensions are set to zero + in the returned positions. + If dim<2, a ValueError is raised. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim < 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.circular_layout(G) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + import numpy as np + + if dim < 2: + raise ValueError("cannot handle dimensions < 2") + + G, center = _process_params(G, center, dim) + + paddims = max(0, (dim - 2)) + + if len(G) == 0: + pos = {} + elif len(G) == 1: + pos = {nx.utils.arbitrary_element(G): center} + else: + # Discard the extra angle since it matches 0 radians. + theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi + theta = theta.astype(np.float32) + pos = np.column_stack( + [np.cos(theta), np.sin(theta), np.zeros((len(G), paddims))] + ) + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + + return pos + + +def shell_layout(G, nlist=None, rotate=None, scale=1, center=None, dim=2): + """Position nodes in concentric circles. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nlist : list of lists + List of node lists for each shell. + + rotate : angle in radians (default=pi/len(nlist)) + Angle by which to rotate the starting position of each shell + relative to the starting position of the previous shell. + To recreate behavior before v2.5 use rotate=0. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> shells = [[0], [1, 2, 3]] + >>> pos = nx.shell_layout(G, shells) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. 
+ + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G): center} + + if nlist is None: + # draw the whole graph in one shell + nlist = [list(G)] + + radius_bump = scale / len(nlist) + + if len(nlist[0]) == 1: + # single node at center + radius = 0.0 + else: + # else start at r=1 + radius = radius_bump + + if rotate is None: + rotate = np.pi / len(nlist) + first_theta = rotate + npos = {} + for nodes in nlist: + # Discard the last angle (endpoint=False) since 2*pi matches 0 radians + theta = ( + np.linspace(0, 2 * np.pi, len(nodes), endpoint=False, dtype=np.float32) + + first_theta + ) + pos = radius * np.column_stack([np.cos(theta), np.sin(theta)]) + center + npos.update(zip(nodes, pos)) + radius += radius_bump + first_theta += rotate + + return npos + + +def bipartite_layout( + G, nodes, align="vertical", scale=1, center=None, aspect_ratio=4 / 3 +): + """Position nodes in two straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nodes : list or container + Nodes in one node set of the bipartite graph. + This set will be placed on left or top. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + aspect_ratio : number (default=4/3): + The ratio of the width to the height of the layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.bipartite.gnmk_random_graph(3, 5, 10, seed=123) + >>> top = nx.bipartite.sets(G)[0] + >>> pos = nx.bipartite_layout(G, top) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." + raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + height = 1 + width = aspect_ratio * height + offset = (width / 2, height / 2) + + top = set(nodes) + bottom = set(G) - top + nodes = list(top) + list(bottom) + + left_xs = np.repeat(0, len(top)) + right_xs = np.repeat(width, len(bottom)) + left_ys = np.linspace(0, height, len(top)) + right_ys = np.linspace(0, height, len(bottom)) + + top_pos = np.column_stack([left_xs, left_ys]) - offset + bottom_pos = np.column_stack([right_xs, right_ys]) - offset + + pos = np.concatenate([top_pos, bottom_pos]) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + return pos + + +@np_random_state(10) +def spring_layout( + G, + k=None, + pos=None, + fixed=None, + iterations=50, + threshold=1e-4, + weight="weight", + scale=1, + center=None, + dim=2, + seed=None, +): + """Position nodes using Fruchterman-Reingold force-directed algorithm. + + The algorithm simulates a force-directed representation of the network + treating edges as springs holding nodes close, while treating nodes + as repelling objects, sometimes called an anti-gravity force. + Simulation continues until the positions are close to an equilibrium. 
+ + There are some hard-coded values: minimal distance between + nodes (0.01) and "temperature" of 0.1 to ensure nodes don't fly away. + During the simulation, `k` helps determine the distance between nodes, + though `scale` and `center` determine the size and place after + rescaling occurs at the end of the simulation. + + Fixing some nodes doesn't allow them to move in the simulation. + It also turns off the rescaling feature at the simulation's end. + In addition, setting `scale` to `None` turns off rescaling. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + k : float (default=None) + Optimal distance between nodes. If None the distance is set to + 1/sqrt(n) where n is the number of nodes. Increase this value + to move nodes farther apart. + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + random initial positions. + + fixed : list or None optional (default=None) + Nodes to keep fixed at initial position. + Nodes not in ``G.nodes`` are ignored. + ValueError raised if `fixed` specified and `pos` not. + + iterations : int optional (default=50) + Maximum number of iterations taken + + threshold: float optional (default = 1e-4) + Threshold for relative error in node position changes. + The iteration stops if the error is below this threshold. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. Larger means a stronger attractive force. + If None, then all edge weights are 1. + + scale : number or None (default: 1) + Scale factor for positions. Not used unless `fixed is None`. + If scale is None, no rescaling is performed. + + center : array-like or None + Coordinate pair around which to center the layout. + Not used unless `fixed is None`. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. 
+ + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.spring_layout(G) + + # The same using longer but equivalent function name + >>> pos = nx.fruchterman_reingold_layout(G) + """ + import numpy as np + + G, center = _process_params(G, center, dim) + + if fixed is not None: + if pos is None: + raise ValueError("nodes are fixed without positions given") + for node in fixed: + if node not in pos: + raise ValueError("nodes are fixed without positions given") + nfixed = {node: i for i, node in enumerate(G)} + fixed = np.asarray([nfixed[node] for node in fixed if node in nfixed]) + + if pos is not None: + # Determine size of existing domain to adjust initial positions + dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup) + if dom_size == 0: + dom_size = 1 + pos_arr = seed.rand(len(G), dim) * dom_size + center + + for i, n in enumerate(G): + if n in pos: + pos_arr[i] = np.asarray(pos[n]) + else: + pos_arr = None + dom_size = 1 + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G.nodes()): center} + + try: + # Sparse matrix + if len(G) < 500: # sparse solver for large graphs + raise ValueError + A = nx.to_scipy_sparse_array(G, weight=weight, dtype="f") + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _sparse_fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + except ValueError: + A = nx.to_numpy_array(G, weight=weight) + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + if fixed is None and scale is not None: + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + return pos + + +fruchterman_reingold_layout = spring_layout + + +@np_random_state(7) +def _fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + # We need to calculate this in case our fixed positions force our domain + # to be much bigger than 1x1 + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. 
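+ # (descriptive note on the line below: with the default iterations=50 this gives
+ # dt = t / 51, so the largest allowed step shrinks linearly from t on the first
+ # iteration to roughly dt on the last, matching the comment above)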
+ dt = t / (iterations + 1) + delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype) + # the inscrutable (but fast) version + # this is still O(V^2) + # could use multilevel methods to speed this up significantly + for iteration in range(iterations): + # matrix of difference between points + delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :] + # distance between points + distance = np.linalg.norm(delta, axis=-1) + # enforce minimum distance of 0.01 + np.clip(distance, 0.01, None, out=distance) + # displacement "force" + displacement = np.einsum( + "ijk,ij->ik", delta, (k * k / distance**2 - A * distance / k) + ) + # update positions + length = np.linalg.norm(displacement, axis=-1) + length = np.where(length < 0.01, 0.1, length) + delta_pos = np.einsum("ij,i->ij", displacement, t / length) + if fixed is not None: + # don't change positions of fixed nodes + delta_pos[fixed] = 0.0 + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +@np_random_state(7) +def _sparse_fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + # Sparse version + import numpy as np + import scipy as sp + import scipy.sparse # call as sp.sparse + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + # make sure we have a LIst of Lists representation + try: + A = A.tolil() + except AttributeError: + A = (sp.sparse.coo_array(A)).tolil() + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # no fixed nodes + if fixed is None: + fixed = [] + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. + dt = t / (iterations + 1) + + displacement = np.zeros((dim, nnodes)) + for iteration in range(iterations): + displacement *= 0 + # loop over rows + for i in range(A.shape[0]): + if i in fixed: + continue + # difference between this row's node position and all others + delta = (pos[i] - pos).T + # distance between points + distance = np.sqrt((delta**2).sum(axis=0)) + # enforce minimum distance of 0.01 + distance = np.where(distance < 0.01, 0.01, distance) + # the adjacency matrix row + Ai = A.getrowview(i).toarray() # TODO: revisit w/ sparse 1D container + # displacement "force" + displacement[:, i] += ( + delta * (k * k / distance**2 - Ai * distance / k) + ).sum(axis=1) + # update positions + length = np.sqrt((displacement**2).sum(axis=0)) + length = np.where(length < 0.01, 0.1, length) + delta_pos = (displacement * t / length).T + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +def kamada_kawai_layout( + G, dist=None, pos=None, weight="weight", scale=1, center=None, dim=2 +): + """Position nodes using Kamada-Kawai path-length cost-function. 
+ + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + dist : dict (default=None) + A two-level dictionary of optimal distances between nodes, + indexed by source and destination node. + If None, the distance is computed using shortest_path_length(). + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + circular_layout() for dim >= 2 and a linear layout for dim == 1. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None, then all edge weights are 1. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.kamada_kawai_layout(G) + """ + import numpy as np + + G, center = _process_params(G, center, dim) + nNodes = len(G) + if nNodes == 0: + return {} + + if dist is None: + dist = dict(nx.shortest_path_length(G, weight=weight)) + dist_mtx = 1e6 * np.ones((nNodes, nNodes)) + for row, nr in enumerate(G): + if nr not in dist: + continue + rdist = dist[nr] + for col, nc in enumerate(G): + if nc not in rdist: + continue + dist_mtx[row][col] = rdist[nc] + + if pos is None: + if dim >= 3: + pos = random_layout(G, dim=dim) + elif dim == 2: + pos = circular_layout(G, dim=dim) + else: + pos = {n: pt for n, pt in zip(G, np.linspace(0, 1, len(G)))} + pos_arr = np.array([pos[n] for n in G]) + + pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim) + + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(G, pos)) + + +def _kamada_kawai_solve(dist_mtx, pos_arr, dim): + # Anneal node locations based on the Kamada-Kawai cost-function, + # using the supplied matrix of preferred inter-node distances, + # and starting locations. + + import numpy as np + import scipy as sp + import scipy.optimize # call as sp.optimize + + meanwt = 1e-3 + costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3), meanwt, dim) + + optresult = sp.optimize.minimize( + _kamada_kawai_costfn, + pos_arr.ravel(), + method="L-BFGS-B", + args=costargs, + jac=True, + ) + + return optresult.x.reshape((-1, dim)) + + +def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim): + # Cost-function and gradient for Kamada-Kawai layout algorithm + nNodes = invdist.shape[0] + pos_arr = pos_vec.reshape((nNodes, dim)) + + delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :] + nodesep = np.linalg.norm(delta, axis=-1) + direction = np.einsum("ijk,ij->ijk", delta, 1 / (nodesep + np.eye(nNodes) * 1e-3)) + + offset = nodesep * invdist - 1.0 + offset[np.diag_indices(nNodes)] = 0 + + cost = 0.5 * np.sum(offset**2) + grad = np.einsum("ij,ij,ijk->ik", invdist, offset, direction) - np.einsum( + "ij,ij,ijk->jk", invdist, offset, direction + ) + + # Additional parabolic term to encourage mean position to be near origin: + sumpos = np.sum(pos_arr, axis=0) + cost += 0.5 * meanweight * np.sum(sumpos**2) + grad += meanweight * sumpos + + return (cost, grad.ravel()) + + +def spectral_layout(G, weight="weight", scale=1, center=None, dim=2): + """Position nodes using the eigenvectors of the graph Laplacian. 
+ + Using the unnormalized Laplacian, the layout shows possible clusters of + nodes which are an approximation of the ratio cut. If dim is the number of + dimensions then the positions are the entries of the dim eigenvectors + corresponding to the ascending eigenvalues starting from the second one. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None, then all edge weights are 1. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.spectral_layout(G) + + Notes + ----- + Directed graphs will be considered as undirected graphs when + positioning the nodes. + + For larger graphs (>500 nodes) this will use the SciPy sparse + eigenvalue solver (ARPACK). + """ + # handle some special cases that break the eigensolvers + import numpy as np + + G, center = _process_params(G, center, dim) + + if len(G) <= 2: + if len(G) == 0: + pos = np.array([]) + elif len(G) == 1: + pos = np.array([center]) + else: + pos = np.array([np.zeros(dim), np.array(center) * 2.0]) + return dict(zip(G, pos)) + try: + # Sparse matrix + if len(G) < 500: # dense solver is faster for small graphs + raise ValueError + A = nx.to_scipy_sparse_array(G, weight=weight, dtype="d") + # Symmetrize directed graphs + if G.is_directed(): + A = A + np.transpose(A) + pos = _sparse_spectral(A, dim) + except (ImportError, ValueError): + # Dense matrix + A = nx.to_numpy_array(G, weight=weight) + # Symmetrize directed graphs + if G.is_directed(): + A += A.T + pos = _spectral(A, dim) + + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + return pos + + +def _spectral(A, dim=2): + # Input adjacency matrix A + # Uses dense eigenvalue solver from numpy + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + # form Laplacian matrix where D is diagonal of degrees + D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1) + L = D - A + + eigenvalues, eigenvectors = np.linalg.eig(L) + # sort and keep smallest nonzero + index = np.argsort(eigenvalues)[1 : dim + 1] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def _sparse_spectral(A, dim=2): + # Input adjacency matrix A + # Uses sparse eigenvalue solver from scipy + # Could use multilevel methods here, see Koren "On spectral graph drawing" + import numpy as np + import scipy as sp + import scipy.sparse # call as sp.sparse + import scipy.sparse.linalg # call as sp.sparse.linalg + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "sparse_spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + # form Laplacian matrix + # TODO: Rm csr_array wrapper in favor of spdiags array constructor when available + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, nnodes, nnodes)) + L = D - A + + k = dim + 1 + # number of Lanczos vectors for ARPACK solver.What is the right scaling? 
+ ncv = max(2 * k + 1, int(np.sqrt(nnodes))) + # return smallest k eigenvalues and eigenvectors + eigenvalues, eigenvectors = sp.sparse.linalg.eigsh(L, k, which="SM", ncv=ncv) + index = np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def planar_layout(G, scale=1, center=None, dim=2): + """Position nodes without edge intersections. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. If G is of type + nx.PlanarEmbedding, the positions are selected accordingly. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + NetworkXException + If G is not planar + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.planar_layout(G) + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + + if isinstance(G, nx.PlanarEmbedding): + embedding = G + else: + is_planar, embedding = nx.check_planarity(G) + if not is_planar: + raise nx.NetworkXException("G is not planar.") + pos = nx.combinatorial_embedding_to_pos(embedding) + node_list = list(embedding) + pos = np.row_stack([pos[x] for x in node_list]) + pos = pos.astype(np.float64) + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(node_list, pos)) + + +def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35, equidistant=False): + """Position nodes in a spiral layout. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + scale : number (default: 1) + Scale factor for positions. + center : array-like or None + Coordinate pair around which to center the layout. + dim : int, default=2 + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + resolution : float, default=0.35 + The compactness of the spiral layout returned. + Lower values result in more compressed spiral layouts. + equidistant : bool, default=False + If True, nodes will be positioned equidistant from each other + by decreasing angle further from center. + If False, nodes will be positioned at equal angles + from each other by increasing separation further from center. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.spiral_layout(G) + >>> nx.draw(G, pos=pos) + + Notes + ----- + This algorithm currently only works in two dimensions. 
+ + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G): center} + + pos = [] + if equidistant: + chord = 1 + step = 0.5 + theta = resolution + theta += chord / (step * theta) + for _ in range(len(G)): + r = step * theta + theta += chord / r + pos.append([np.cos(theta) * r, np.sin(theta) * r]) + + else: + dist = np.arange(len(G), dtype=float) + angle = resolution * dist + pos = np.transpose(dist * np.array([np.cos(angle), np.sin(angle)])) + + pos = rescale_layout(np.array(pos), scale=scale) + center + + pos = dict(zip(G, pos)) + + return pos + + +def multipartite_layout(G, subset_key="subset", align="vertical", scale=1, center=None): + """Position nodes in layers of straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + subset_key : string (default='subset') + Key of node data to be used as layer subset. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_multipartite_graph(28, 16, 10) + >>> pos = nx.multipartite_layout(G) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + Network does not need to be a complete multipartite graph. As long as nodes + have subset_key data, they will be placed in the corresponding layers. + + """ + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." + raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + layers = {} + for v, data in G.nodes(data=True): + try: + layer = data[subset_key] + except KeyError: + msg = "all nodes must have subset_key (default='subset') as data" + raise ValueError(msg) + layers[layer] = [v] + layers.get(layer, []) + + # Sort by layer, if possible + try: + layers = sorted(layers.items()) + except TypeError: + layers = list(layers.items()) + + pos = None + nodes = [] + width = len(layers) + for i, (_, layer) in enumerate(layers): + height = len(layer) + xs = np.repeat(i, height) + ys = np.arange(0, height, dtype=float) + offset = ((width - 1) / 2, (height - 1) / 2) + layer_pos = np.column_stack([xs, ys]) - offset + if pos is None: + pos = layer_pos + else: + pos = np.concatenate([pos, layer_pos]) + nodes.extend(layer) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + return pos + + +def rescale_layout(pos, scale=1): + """Returns scaled position array to (-scale, scale) in all axes. + + The function acts on NumPy arrays which hold position information. + Each position is one row of the array. The dimension of the space + equals the number of columns. Each coordinate in one column. + + To rescale, the mean (center) is subtracted from each axis separately. + Then all values are scaled so that the largest magnitude value + from all axes equals `scale` (thus, the aspect ratio is preserved). + The resulting NumPy Array is returned (order of rows unchanged). 
+ + Parameters + ---------- + pos : numpy array + positions to be scaled. Each row is a position. + + scale : number (default: 1) + The size of the resulting extent in all directions. + + Returns + ------- + pos : numpy array + scaled positions. Each row is a position. + + See Also + -------- + rescale_layout_dict + """ + # Find max length over all dimensions + lim = 0 # max coordinate for all axes + for i in range(pos.shape[1]): + pos[:, i] -= pos[:, i].mean() + lim = max(abs(pos[:, i]).max(), lim) + # rescale to (-scale, scale) in all directions, preserves aspect + if lim > 0: + for i in range(pos.shape[1]): + pos[:, i] *= scale / lim + return pos + + +def rescale_layout_dict(pos, scale=1): + """Return a dictionary of scaled positions keyed by node + + Parameters + ---------- + pos : A dictionary of positions keyed by node + + scale : number (default: 1) + The size of the resulting extent in all directions. + + Returns + ------- + pos : A dictionary of positions keyed by node + + Examples + -------- + >>> import numpy as np + >>> pos = {0: np.array((0, 0)), 1: np.array((1, 1)), 2: np.array((0.5, 0.5))} + >>> nx.rescale_layout_dict(pos) + {0: array([-1., -1.]), 1: array([1., 1.]), 2: array([0., 0.])} + + >>> pos = {0: np.array((0, 0)), 1: np.array((-1, 1)), 2: np.array((-0.5, 0.5))} + >>> nx.rescale_layout_dict(pos, scale=2) + {0: array([ 2., -2.]), 1: array([-2., 2.]), 2: array([0., 0.])} + + See Also + -------- + rescale_layout + """ + import numpy as np + + if not pos: # empty_graph + return {} + pos_v = np.array(list(pos.values())) + pos_v = rescale_layout(pos_v, scale=scale) + return dict(zip(pos, pos_v)) diff --git a/myenv/lib/python3.9/site-packages/networkx/drawing/nx_agraph.py b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_agraph.py new file mode 100644 index 0000000..dc636b4 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_agraph.py @@ -0,0 +1,495 @@ +""" +*************** +Graphviz AGraph +*************** + +Interface to pygraphviz AGraph class. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> A = nx.nx_agraph.to_agraph(G) +>>> H = nx.nx_agraph.from_agraph(A) + +See Also +-------- + - Pygraphviz: http://pygraphviz.github.io/ + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" +import os +import tempfile + +import networkx as nx + +__all__ = [ + "from_agraph", + "to_agraph", + "write_dot", + "read_dot", + "graphviz_layout", + "pygraphviz_layout", + "view_pygraphviz", +] + + +def from_agraph(A, create_using=None): + """Returns a NetworkX Graph or DiGraph from a PyGraphviz graph. + + Parameters + ---------- + A : PyGraphviz AGraph + A graph created with PyGraphviz + + create_using : NetworkX graph constructor, optional (default=None) + Graph type to create. If graph instance, then cleared before populated. + If `None`, then the appropriate Graph type is inferred from `A`. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + >>> G = nx.nx_agraph.from_agraph(A) + + Notes + ----- + The Graph G will have a dictionary G.graph_attr containing + the default graphviz attributes for graphs, nodes and edges. + + Default node attributes will be in the dictionary G.node_attr + which is keyed by node. + + Edge attributes will be returned as edge data in G. With + edge_attr=False the edge data will be the Graphviz edge weight + attribute or the value 1 if no edge weight attribute is found. 
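+
+    Attribute values are copied over from pygraphviz unchanged, so numeric
+    attributes will typically come back as strings and may need to be
+    converted by the caller.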
+ + """ + if create_using is None: + if A.is_directed(): + if A.is_strict(): + create_using = nx.DiGraph + else: + create_using = nx.MultiDiGraph + else: + if A.is_strict(): + create_using = nx.Graph + else: + create_using = nx.MultiGraph + + # assign defaults + N = nx.empty_graph(0, create_using) + if A.name is not None: + N.name = A.name + + # add graph attributes + N.graph.update(A.graph_attr) + + # add nodes, attributes to N.node_attr + for n in A.nodes(): + str_attr = {str(k): v for k, v in n.attr.items()} + N.add_node(str(n), **str_attr) + + # add edges, assign edge data as dictionary of attributes + for e in A.edges(): + u, v = str(e[0]), str(e[1]) + attr = dict(e.attr) + str_attr = {str(k): v for k, v in attr.items()} + if not N.is_multigraph(): + if e.name is not None: + str_attr["key"] = e.name + N.add_edge(u, v, **str_attr) + else: + N.add_edge(u, v, key=e.name, **str_attr) + + # add default attributes for graph, nodes, and edges + # hang them on N.graph_attr + N.graph["graph"] = dict(A.graph_attr) + N.graph["node"] = dict(A.node_attr) + N.graph["edge"] = dict(A.edge_attr) + return N + + +def to_agraph(N): + """Returns a pygraphviz graph from a NetworkX graph N. + + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + + Notes + ----- + If N has an dict N.graph_attr an attempt will be made first + to copy properties attached to the graph (see from_agraph) + and then updated with the calling arguments if any. + + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + directed = N.is_directed() + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed) + + # default graph attributes + A.graph_attr.update(N.graph.get("graph", {})) + A.node_attr.update(N.graph.get("node", {})) + A.edge_attr.update(N.graph.get("edge", {})) + + A.graph_attr.update( + (k, v) for k, v in N.graph.items() if k not in ("graph", "node", "edge") + ) + + # add nodes + for n, nodedata in N.nodes(data=True): + A.add_node(n) + # Add node data + a = A.get_node(n) + a.attr.update({k: str(v) for k, v in nodedata.items()}) + + # loop over edges + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {k: str(v) for k, v in edgedata.items() if k != "key"} + A.add_edge(u, v, key=str(key)) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {k: str(v) for k, v in edgedata.items()} + A.add_edge(u, v) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + return A + + +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Parameters + ---------- + G : graph + A networkx graph + path : filename + Filename or file handle to write + """ + A = to_agraph(G) + A.write(path) + A.clear() + return + + +def read_dot(path): + """Returns a NetworkX graph from a dot file on path. + + Parameters + ---------- + path : file or string + File name or file handle to read. 
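+
+    Returns
+    -------
+    G : NetworkX graph
+        The graph described by the dot file; the concrete graph class is
+        chosen by ``from_agraph`` from the file's strict and directed flags.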
+ """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "read_dot() requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + A = pygraphviz.AGraph(file=path) + gr = from_agraph(A) + A.clear() + return gr + + +def graphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. + + Parameters + ---------- + G : NetworkX graph + A graph created with NetworkX + prog : string + Name of Graphviz layout program + root : string, optional + Root node for twopi layout + args : string, optional + Extra arguments to Graphviz layout program + + Returns + ------- + Dictionary of x, y, positions keyed by node. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> pos = nx.nx_agraph.graphviz_layout(G) + >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pygraphviz_layout. + """ + return pygraphviz_layout(G, prog=prog, root=root, args=args) + + +def pygraphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. + + Parameters + ---------- + G : NetworkX graph + A graph created with NetworkX + prog : string + Name of Graphviz layout program + root : string, optional + Root node for twopi layout + args : string, optional + Extra arguments to Graphviz layout program + + Returns + ------- + node_pos : dict + Dictionary of x, y, positions keyed by node. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> pos = nx.nx_agraph.graphviz_layout(G) + >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot") + + Notes + ----- + If you use complex node objects, they may have the same string + representation and GraphViz could treat them as the same node. + The layout may assign both nodes a single location. See Issue #1568 + If this occurs in your case, consider relabeling the nodes just + for the layout computation using something similar to:: + + >>> H = nx.convert_node_labels_to_integers(G, label_attribute="node_label") + >>> H_layout = nx.nx_agraph.pygraphviz_layout(G, prog="dot") + >>> G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()} + + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + if root is not None: + args += f"-Groot={root}" + A = to_agraph(G) + A.layout(prog=prog, args=args) + node_pos = {} + for n in G: + node = pygraphviz.Node(A, n) + try: + xs = node.attr["pos"].split(",") + node_pos[n] = tuple(float(x) for x in xs) + except: + print("no position for node", n) + node_pos[n] = (0.0, 0.0) + return node_pos + + +@nx.utils.open_file(5, "w+b") +def view_pygraphviz( + G, edgelabel=None, prog="dot", args="", suffix="", path=None, show=True +): + """Views the graph G using the specified layout algorithm. + + Parameters + ---------- + G : NetworkX graph + The machine to draw. + edgelabel : str, callable, None + If a string, then it specifes the edge attribute to be displayed + on the edge labels. If a callable, then it is called for each + edge and it should return the string to be displayed on the edges. + The function signature of `edgelabel` should be edgelabel(data), + where `data` is the edge attribute dictionary. + prog : string + Name of Graphviz layout program. + args : str + Additional arguments to pass to the Graphviz layout program. + suffix : str + If `filename` is None, we save to a temporary file. The value of + `suffix` will appear at the tail end of the temporary filename. 
+ path : str, None + The filename used to save the image. If None, save to a temporary + file. File formats are the same as those from pygraphviz.agraph.draw. + show : bool, default = True + Whether to display the graph with :mod:`PIL.Image.show`, + default is `True`. If `False`, the rendered graph is still available + at `path`. + + Returns + ------- + path : str + The filename of the generated image. + A : PyGraphviz graph + The PyGraphviz graph instance used to generate the image. + + Notes + ----- + If this function is called in succession too quickly, sometimes the + image is not displayed. So you might consider time.sleep(.5) between + calls if you experience problems. + + """ + if not len(G): + raise nx.NetworkXException("An empty graph cannot be drawn.") + + # If we are providing default values for graphviz, these must be set + # before any nodes or edges are added to the PyGraphviz graph object. + # The reason for this is that default values only affect incoming objects. + # If you change the default values after the objects have been added, + # then they inherit no value and are set only if explicitly set. + + # to_agraph() uses these values. + attrs = ["edge", "node", "graph"] + for attr in attrs: + if attr not in G.graph: + G.graph[attr] = {} + + # These are the default values. + edge_attrs = {"fontsize": "10"} + node_attrs = { + "style": "filled", + "fillcolor": "#0000FF40", + "height": "0.75", + "width": "0.75", + "shape": "circle", + } + graph_attrs = {} + + def update_attrs(which, attrs): + # Update graph attributes. Return list of those which were added. + added = [] + for k, v in attrs.items(): + if k not in G.graph[which]: + G.graph[which][k] = v + added.append(k) + + def clean_attrs(which, added): + # Remove added attributes + for attr in added: + del G.graph[which][attr] + if not G.graph[which]: + del G.graph[which] + + # Update all default values + update_attrs("edge", edge_attrs) + update_attrs("node", node_attrs) + update_attrs("graph", graph_attrs) + + # Convert to agraph, so we inherit default values + A = to_agraph(G) + + # Remove the default values we added to the original graph. + clean_attrs("edge", edge_attrs) + clean_attrs("node", node_attrs) + clean_attrs("graph", graph_attrs) + + # If the user passed in an edgelabel, we update the labels for all edges. + if edgelabel is not None: + if not callable(edgelabel): + + def func(data): + return "".join([" ", str(data[edgelabel]), " "]) + + else: + func = edgelabel + + # update all the edge labels + if G.is_multigraph(): + for u, v, key, data in G.edges(keys=True, data=True): + # PyGraphviz doesn't convert the key to a string. See #339 + edge = A.get_edge(u, v, str(key)) + edge.attr["label"] = str(func(data)) + else: + for u, v, data in G.edges(data=True): + edge = A.get_edge(u, v) + edge.attr["label"] = str(func(data)) + + if path is None: + ext = "png" + if suffix: + suffix = f"_{suffix}.{ext}" + else: + suffix = f".{ext}" + path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + else: + # Assume the decorator worked and it is a file-object. + pass + + # Write graph to file + A.draw(path=path, format=None, prog=prog, args=args) + path.close() + + # Show graph in a new window (depends on platform configuration) + if show: + from PIL import Image + + Image.open(path.name).show() + + return path.name, A + + +def display_pygraphviz(graph, path, format=None, prog=None, args=""): + """Internal function to display a graph in OS dependent manner. 
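+
+    This helper is deprecated in favour of ``nx.nx_agraph.view_pygraphviz``
+    and is scheduled for removal in NetworkX 3.0.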
+ + Parameters + ---------- + graph : PyGraphviz graph + A PyGraphviz AGraph instance. + path : file object + An already opened file object that will be closed. + format : str, None + An attempt is made to guess the output format based on the extension + of the filename. If that fails, the value of `format` is used. + prog : string + Name of Graphviz layout program. + args : str + Additional arguments to pass to the Graphviz layout program. + + Notes + ----- + If this function is called in succession too quickly, sometimes the + image is not displayed. So you might consider time.sleep(.5) between + calls if you experience problems. + + """ + import warnings + + from PIL import Image + + warnings.warn( + "display_pygraphviz is deprecated and will be removed in NetworkX 3.0. " + "To view a graph G using pygraphviz, use nx.nx_agraph.view_pygraphviz(G). " + "To view a graph from file, consider an image processing libary like " + "`Pillow`, e.g. ``PIL.Image.open(path.name).show()``", + DeprecationWarning, + ) + if format is None: + filename = path.name + format = os.path.splitext(filename)[1].lower()[1:] + if not format: + # Let the draw() function use its default + format = None + + # Save to a file and display in the default viewer. + # We must close the file before viewing it. + graph.draw(path, format, prog, args) + path.close() + Image.open(filename).show() diff --git a/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pydot.py b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pydot.py new file mode 100644 index 0000000..2055eb3 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pydot.py @@ -0,0 +1,452 @@ +""" +***** +Pydot +***** + +Import and export NetworkX graphs in Graphviz dot format using pydot. + +Either this module or nx_agraph can be used to interface with graphviz. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> PG = nx.nx_pydot.to_pydot(G) +>>> H = nx.nx_pydot.from_pydot(PG) + +See Also +-------- + - pydot: https://github.com/erocarrera/pydot + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" +import warnings +from locale import getpreferredencoding + +import networkx as nx +from networkx.utils import open_file + +__all__ = [ + "write_dot", + "read_dot", + "graphviz_layout", + "pydot_layout", + "to_pydot", + "from_pydot", +] + + +@open_file(1, mode="w") +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Path can be a string or a file handle. + """ + msg = ( + "nx.nx_pydot.write_dot depends on the pydot package, which has" + "known issues and is not actively maintained. Consider using" + "nx.nx_agraph.write_dot instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + P = to_pydot(G) + path.write(P.to_string()) + return + + +@open_file(0, mode="r") +def read_dot(path): + """Returns a NetworkX :class:`MultiGraph` or :class:`MultiDiGraph` from the + dot file with the passed path. + + If this file contains multiple graphs, only the first such graph is + returned. All graphs _except_ the first are silently ignored. + + Parameters + ---------- + path : str or file + Filename or file handle. + + Returns + ------- + G : MultiGraph or MultiDiGraph + A :class:`MultiGraph` or :class:`MultiDiGraph`. + + Notes + ----- + Use `G = nx.Graph(nx.nx_pydot.read_dot(path))` to return a :class:`Graph` instead of a + :class:`MultiGraph`. 
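+
+    Like the other ``nx_pydot`` helpers, this function emits a
+    ``PendingDeprecationWarning``; ``nx.nx_agraph.read_dot`` is the suggested
+    replacement.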
+ """ + import pydot + + msg = ( + "nx.nx_pydot.read_dot depends on the pydot package, which has" + "known issues and is not actively maintained. Consider using" + "nx.nx_agraph.read_dot instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + + data = path.read() + + # List of one or more "pydot.Dot" instances deserialized from this file. + P_list = pydot.graph_from_dot_data(data) + + # Convert only the first such instance into a NetworkX graph. + return from_pydot(P_list[0]) + + +def from_pydot(P): + """Returns a NetworkX graph from a Pydot graph. + + Parameters + ---------- + P : Pydot graph + A graph created with Pydot + + Returns + ------- + G : NetworkX multigraph + A MultiGraph or MultiDiGraph. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_pydot.to_pydot(K5) + >>> G = nx.nx_pydot.from_pydot(A) # return MultiGraph + + # make a Graph instead of MultiGraph + >>> G = nx.Graph(nx.nx_pydot.from_pydot(A)) + + """ + msg = ( + "nx.nx_pydot.from_pydot depends on the pydot package, which has" + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + + if P.get_strict(None): # pydot bug: get_strict() shouldn't take argument + multiedges = False + else: + multiedges = True + + if P.get_type() == "graph": # undirected + if multiedges: + N = nx.MultiGraph() + else: + N = nx.Graph() + else: + if multiedges: + N = nx.MultiDiGraph() + else: + N = nx.DiGraph() + + # assign defaults + name = P.get_name().strip('"') + if name != "": + N.name = name + + # add nodes, attributes to N.node_attr + for p in P.get_node_list(): + n = p.get_name().strip('"') + if n in ("node", "graph", "edge"): + continue + N.add_node(n, **p.get_attributes()) + + # add edges + for e in P.get_edge_list(): + u = e.get_source() + v = e.get_destination() + attr = e.get_attributes() + s = [] + d = [] + + if isinstance(u, str): + s.append(u.strip('"')) + else: + for unodes in u["nodes"]: + s.append(unodes.strip('"')) + + if isinstance(v, str): + d.append(v.strip('"')) + else: + for vnodes in v["nodes"]: + d.append(vnodes.strip('"')) + + for source_node in s: + for destination_node in d: + N.add_edge(source_node, destination_node, **attr) + + # add default attributes for graph, nodes, edges + pattr = P.get_attributes() + if pattr: + N.graph["graph"] = pattr + try: + N.graph["node"] = P.get_node_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['node']={} + try: + N.graph["edge"] = P.get_edge_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['edge']={} + return N + + +def _check_colon_quotes(s): + # A quick helper function to check if a string has a colon in it + # and if it is quoted properly with double quotes. + # refer https://github.com/pydot/pydot/issues/258 + return ":" in s and (s[0] != '"' or s[-1] != '"') + + +def to_pydot(N): + """Returns a pydot graph from a NetworkX graph N. 
+ + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> P = nx.nx_pydot.to_pydot(K5) + + Notes + ----- + + """ + import pydot + + msg = ( + "nx.nx_pydot.to_pydot depends on the pydot package, which has" + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + + # set Graphviz graph type + if N.is_directed(): + graph_type = "digraph" + else: + graph_type = "graph" + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + name = N.name + graph_defaults = N.graph.get("graph", {}) + if name == "": + P = pydot.Dot("", graph_type=graph_type, strict=strict, **graph_defaults) + else: + P = pydot.Dot( + f'"{name}"', graph_type=graph_type, strict=strict, **graph_defaults + ) + try: + P.set_node_defaults(**N.graph["node"]) + except KeyError: + pass + try: + P.set_edge_defaults(**N.graph["edge"]) + except KeyError: + pass + + for n, nodedata in N.nodes(data=True): + str_nodedata = {str(k): str(v) for k, v in nodedata.items()} + # Explicitly catch nodes with ":" in node names or nodedata. + n = str(n) + raise_error = _check_colon_quotes(n) or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(v)) + for k, v in str_nodedata.items() + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + p = pydot.Node(n, **str_nodedata) + P.add_node(p) + + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items() if k != "key"} + u, v = str(u), str(v) + raise_error = ( + _check_colon_quotes(u) + or _check_colon_quotes(v) + or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(val)) + for k, val in str_edgedata.items() + ) + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + edge = pydot.Edge(u, v, key=str(key), **str_edgedata) + P.add_edge(edge) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items()} + u, v = str(u), str(v) + raise_error = ( + _check_colon_quotes(u) + or _check_colon_quotes(v) + or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(val)) + for k, val in str_edgedata.items() + ) + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + edge = pydot.Edge(u, v, **str_edgedata) + P.add_edge(edge) + return P + + +def graphviz_layout(G, prog="neato", root=None): + """Create node positions using Pydot and Graphviz. + + Returns a dictionary of positions keyed by node. + + Parameters + ---------- + G : NetworkX Graph + The graph for which the layout is computed. + prog : string (default: 'neato') + The name of the GraphViz program to use for layout. 
+ Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + Dictionary of (x, y) positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.graphviz_layout(G) + >>> pos = nx.nx_pydot.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pydot_layout. + """ + msg = ( + "nx.nx_pydot.graphviz_layout depends on the pydot package, which has" + "known issues and is not actively maintained. Consider using" + "nx.nx_agraph.graphviz_layout instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + + return pydot_layout(G=G, prog=prog, root=root) + + +def pydot_layout(G, prog="neato", root=None): + """Create node positions using :mod:`pydot` and Graphviz. + + Parameters + ---------- + G : Graph + NetworkX graph to be laid out. + prog : string (default: 'neato') + Name of the GraphViz command to use for layout. + Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + dict + Dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.pydot_layout(G) + >>> pos = nx.nx_pydot.pydot_layout(G, prog="dot") + + Notes + ----- + If you use complex node objects, they may have the same string + representation and GraphViz could treat them as the same node. + The layout may assign both nodes a single location. See Issue #1568 + If this occurs in your case, consider relabeling the nodes just + for the layout computation using something similar to:: + + H = nx.convert_node_labels_to_integers(G, label_attribute='node_label') + H_layout = nx.nx_pydot.pydot_layout(G, prog='dot') + G_layout = {H.nodes[n]['node_label']: p for n, p in H_layout.items()} + + """ + import pydot + + msg = ( + "nx.nx_pydot.pydot_layout depends on the pydot package, which has" + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) + P = to_pydot(G) + if root is not None: + P.set("root", str(root)) + + # List of low-level bytes comprising a string in the dot language converted + # from the passed graph with the passed external GraphViz command. + D_bytes = P.create_dot(prog=prog) + + # Unique string decoded from these bytes with the preferred locale encoding + D = str(D_bytes, encoding=getpreferredencoding()) + + if D == "": # no data returned + print(f"Graphviz layout with {prog} failed") + print() + print("To debug what happened try:") + print("P = nx.nx_pydot.to_pydot(G)") + print('P.write_dot("file.dot")') + print(f"And then run {prog} on file.dot") + return + + # List of one or more "pydot.Dot" instances deserialized from this string. + Q_list = pydot.graph_from_dot_data(D) + assert len(Q_list) == 1 + + # The first and only such instance, as guaranteed by the above assertion. + Q = Q_list[0] + + node_pos = {} + for n in G.nodes(): + str_n = str(n) + # Explicitly catch nodes with ":" in node names or nodedata. 
+ if _check_colon_quotes(str_n): + raise ValueError( + f'Node names and node attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + pydot_node = pydot.Node(str_n).get_name() + node = Q.get_node(pydot_node) + + if isinstance(node, list): + node = node[0] + pos = node.get_pos()[1:-1] # strip leading and trailing double quotes + if pos is not None: + xx, yy = pos.split(",") + node_pos[n] = (float(xx), float(yy)) + return node_pos diff --git a/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pylab.py b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pylab.py new file mode 100644 index 0000000..18ce084 --- /dev/null +++ b/myenv/lib/python3.9/site-packages/networkx/drawing/nx_pylab.py @@ -0,0 +1,1517 @@ +""" +********** +Matplotlib +********** + +Draw networks with matplotlib. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> nx.draw(G) + +See Also +-------- + - :doc:`matplotlib ` + - :func:`matplotlib.pyplot.scatter` + - :obj:`matplotlib.patches.FancyArrowPatch` +""" +from numbers import Number + +import networkx as nx +from networkx.drawing.layout import ( + circular_layout, + kamada_kawai_layout, + planar_layout, + random_layout, + shell_layout, + spectral_layout, + spring_layout, +) + +__all__ = [ + "draw", + "draw_networkx", + "draw_networkx_nodes", + "draw_networkx_edges", + "draw_networkx_labels", + "draw_networkx_edge_labels", + "draw_circular", + "draw_kamada_kawai", + "draw_random", + "draw_spectral", + "draw_spring", + "draw_planar", + "draw_shell", +] + + +def draw(G, pos=None, ax=None, **kwds): + """Draw the graph G with Matplotlib. + + Draw the graph as a simple representation with no node + labels or edge labels and using the full Matplotlib figure area + and no axis labels by default. See draw_networkx() for more + full-featured drawing that allows title, axis labels etc. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + ax : Matplotlib Axes object, optional + Draw the graph in specified Matplotlib axes. + + kwds : optional keywords + See networkx.draw_networkx() for a description of optional keywords. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) + >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout + + See Also + -------- + draw_networkx + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + + Notes + ----- + This function has the same name as pylab.draw and pyplot.draw + so beware when using `from networkx import *` + + since you might overwrite the pylab.draw function. 
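+
+    Importing the package under an alias avoids the name clash::
+
+        import networkx as nx
+        nx.draw(G)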
+ + With pyplot use + + >>> import matplotlib.pyplot as plt + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) # networkx draw() + >>> plt.draw() # pyplot draw() + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + """ + import matplotlib.pyplot as plt + + if ax is None: + cf = plt.gcf() + else: + cf = ax.get_figure() + cf.set_facecolor("w") + if ax is None: + if cf._axstack() is None: + ax = cf.add_axes((0, 0, 1, 1)) + else: + ax = cf.gca() + + if "with_labels" not in kwds: + kwds["with_labels"] = "labels" in kwds + + draw_networkx(G, pos=pos, ax=ax, **kwds) + ax.set_axis_off() + plt.draw_if_interactive() + return + + +def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds): + r"""Draw the graph G using Matplotlib. + + Draw the graph with Matplotlib with options for node positions, + labeling, titles, and many other drawing features. + See draw() for simple drawing without labels or axes. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + arrows : bool or None, optional (default=None) + If `None`, directed graphs draw arrowheads with + `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges + via `~matplotlib.collections.LineCollection` for speed. + If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish). + If `False`, draw edges using LineCollection (linear and fast). + For directed graphs, if True draw arrowheads. + Note: Arrows will be the same color as edges. + + arrowstyle : str (default='-\|>' for directed graphs) + For directed graphs, choose the style of the arrowsheads. + For undirected graphs default to '-' + + See `matplotlib.patches.ArrowStyle` for more options. + + arrowsize : int or list (default=10) + For directed graphs, choose the size of the arrow head's length and + width. A list of values can be passed in to assign a different size for arrow head's length and width. + See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` + for more info. + + with_labels : bool (default=True) + Set to True to draw labels on the nodes. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + nodelist : list (default=list(G)) + Draw only specified nodes + + edgelist : list (default=list(G.edges())) + Draw only specified edges + + node_size : scalar or array (default=300) + Size of nodes. If an array is specified it must be the + same length as nodelist. + + node_color : color or array of colors (default='#1f78b4') + Node color. Can be a single color or a sequence of colors with the same + length as nodelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + node_shape : string (default='o') + The shape of the node. 
Specification is as matplotlib.scatter + marker, one of 'so^>v>> G = nx.dodecahedral_graph() + >>> nx.draw(G) + >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout + + >>> import matplotlib.pyplot as plt + >>> limits = plt.axis("off") # turn off axis + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + """ + from inspect import signature + + import matplotlib.pyplot as plt + + # Get all valid keywords by inspecting the signatures of draw_networkx_nodes, + # draw_networkx_edges, draw_networkx_labels + + valid_node_kwds = signature(draw_networkx_nodes).parameters.keys() + valid_edge_kwds = signature(draw_networkx_edges).parameters.keys() + valid_label_kwds = signature(draw_networkx_labels).parameters.keys() + + # Create a set with all valid keywords across the three functions and + # remove the arguments of this function (draw_networkx) + valid_kwds = (valid_node_kwds | valid_edge_kwds | valid_label_kwds) - { + "G", + "pos", + "arrows", + "with_labels", + } + + if any([k not in valid_kwds for k in kwds]): + invalid_args = ", ".join([k for k in kwds if k not in valid_kwds]) + raise ValueError(f"Received invalid argument(s): {invalid_args}") + + node_kwds = {k: v for k, v in kwds.items() if k in valid_node_kwds} + edge_kwds = {k: v for k, v in kwds.items() if k in valid_edge_kwds} + label_kwds = {k: v for k, v in kwds.items() if k in valid_label_kwds} + + if pos is None: + pos = nx.drawing.spring_layout(G) # default to spring layout + + draw_networkx_nodes(G, pos, **node_kwds) + draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds) + if with_labels: + draw_networkx_labels(G, pos, **label_kwds) + plt.draw_if_interactive() + + +def draw_networkx_nodes( + G, + pos, + nodelist=None, + node_size=300, + node_color="#1f78b4", + node_shape="o", + alpha=None, + cmap=None, + vmin=None, + vmax=None, + ax=None, + linewidths=None, + edgecolors=None, + label=None, + margins=None, +): + """Draw the nodes of the graph G. + + This draws only the nodes of the graph G. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + nodelist : list (default list(G)) + Draw only specified nodes + + node_size : scalar or array (default=300) + Size of nodes. If an array it must be the same length as nodelist. + + node_color : color or array of colors (default='#1f78b4') + Node color. Can be a single color or a sequence of colors with the same + length as nodelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + node_shape : string (default='o') + The shape of the node. 
Specification is as matplotlib.scatter + marker, one of 'so^>v>> G = nx.dodecahedral_graph() + >>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G)) + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + """ + from collections.abc import Iterable + + import matplotlib as mpl + import matplotlib.collections # call as mpl.collections + import matplotlib.pyplot as plt + import numpy as np + + if ax is None: + ax = plt.gca() + + if nodelist is None: + nodelist = list(G) + + if len(nodelist) == 0: # empty nodelist, no drawing + return mpl.collections.PathCollection(None) + + try: + xy = np.asarray([pos[v] for v in nodelist]) + except KeyError as err: + raise nx.NetworkXError(f"Node {err} has no position.") from err + + if isinstance(alpha, Iterable): + node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax) + alpha = None + + node_collection = ax.scatter( + xy[:, 0], + xy[:, 1], + s=node_size, + c=node_color, + marker=node_shape, + cmap=cmap, + vmin=vmin, + vmax=vmax, + alpha=alpha, + linewidths=linewidths, + edgecolors=edgecolors, + label=label, + ) + ax.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + if margins is not None: + if isinstance(margins, Iterable): + ax.margins(*margins) + else: + ax.margins(margins) + + node_collection.set_zorder(2) + return node_collection + + +def draw_networkx_edges( + G, + pos, + edgelist=None, + width=1.0, + edge_color="k", + style="solid", + alpha=None, + arrowstyle=None, + arrowsize=10, + edge_cmap=None, + edge_vmin=None, + edge_vmax=None, + ax=None, + arrows=None, + label=None, + node_size=300, + nodelist=None, + node_shape="o", + connectionstyle="arc3", + min_source_margin=0, + min_target_margin=0, +): + r"""Draw the edges of the graph G. + + This draws only the edges of the graph G. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + edgelist : collection of edge tuples (default=G.edges()) + Draw only specified edges + + width : float or array of floats (default=1.0) + Line width of edges + + edge_color : color or array of colors (default='k') + Edge color. Can be a single color or a sequence of colors with the same + length as edgelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters. + + style : string or array of strings (default='solid') + Edge line style e.g.: '-', '--', '-.', ':' + or words like 'solid' or 'dashed'. + Can be a single style or a sequence of styles with the same + length as the edge list. + If less styles than edges are given the styles will cycle. + If more styles than edges are given the styles will be used sequentially + and not be exhausted. + Also, `(offset, onoffseq)` tuples can be used as style instead of a strings. 
+ (See `matplotlib.patches.FancyArrowPatch`: `linestyle`) + + alpha : float or None (default=None) + The edge transparency + + edge_cmap : Matplotlib colormap, optional + Colormap for mapping intensities of edges + + edge_vmin,edge_vmax : floats, optional + Minimum and maximum for edge colormap scaling + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + arrows : bool or None, optional (default=None) + If `None`, directed graphs draw arrowheads with + `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges + via `~matplotlib.collections.LineCollection` for speed. + If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish). + If `False`, draw edges using LineCollection (linear and fast). + + Note: Arrowheads will be the same color as edges. + + arrowstyle : str (default='-\|>' for directed graphs) + For directed graphs and `arrows==True` defaults to '-\|>', + For undirected graphs default to '-'. + + See `matplotlib.patches.ArrowStyle` for more options. + + arrowsize : int (default=10) + For directed graphs, choose the size of the arrow head's length and + width. See `matplotlib.patches.FancyArrowPatch` for attribute + `mutation_scale` for more info. + + connectionstyle : string (default="arc3") + Pass the connectionstyle parameter to create curved arc of rounding + radius rad. For example, connectionstyle='arc3,rad=0.2'. + See `matplotlib.patches.ConnectionStyle` and + `matplotlib.patches.FancyArrowPatch` for more info. + + node_size : scalar or array (default=300) + Size of nodes. Though the nodes are not drawn with this function, the + node size is used in determining edge positioning. + + nodelist : list, optional (default=G.nodes()) + This provides the node order for the `node_size` array (if it is an array). + + node_shape : string (default='o') + The marker used for nodes, used in determining edge positioning. + Specification is as a `matplotlib.markers` marker, e.g. one of 'so^>v>> G = nx.dodecahedral_graph() + >>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G)) + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 3)]) + >>> arcs = nx.draw_networkx_edges(G, pos=nx.spring_layout(G)) + >>> alphas = [0.3, 0.4, 0.5] + >>> for i, arc in enumerate(arcs): # change alpha values of arcs + ... arc.set_alpha(alphas[i]) + + The FancyArrowPatches corresponding to self-loops are not always + returned, but can always be accessed via the ``patches`` attribute of the + `matplotlib.Axes` object. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> G = nx.Graph([(0, 1), (0, 0)]) # Self-loop at node 0 + >>> edge_collection = nx.draw_networkx_edges(G, pos=nx.circular_layout(G), ax=ax) + >>> self_loop_fap = ax.patches[0] + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_nodes + draw_networkx_labels + draw_networkx_edge_labels + + """ + import matplotlib as mpl + import matplotlib.collections # call as mpl.collections + import matplotlib.colors # call as mpl.colors + import matplotlib.patches # call as mpl.patches + import matplotlib.path # call as mpl.path + import matplotlib.pyplot as plt + import numpy as np + + # The default behavior is to use LineCollection to draw edges for + # undirected graphs (for performance reasons) and use FancyArrowPatches + # for directed graphs. 
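+    # (A LineCollection is a single Matplotlib artist, whereas a separate
+    # FancyArrowPatch is created for every edge, which is noticeably slower
+    # for large graphs.)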
+ # The `arrows` keyword can be used to override the default behavior + + if arrowstyle == None: + if G.is_directed(): + arrowstyle = "-|>" + else: + arrowstyle = "-" + + use_linecollection = not G.is_directed() + if arrows in (True, False): + use_linecollection = not arrows + + if ax is None: + ax = plt.gca() + + if edgelist is None: + edgelist = list(G.edges()) + + if len(edgelist) == 0: # no edges! + return [] + + if nodelist is None: + nodelist = list(G.nodes()) + + # FancyArrowPatch handles color=None different from LineCollection + if edge_color is None: + edge_color = "k" + edgelist_tuple = list(map(tuple, edgelist)) + + # set edge positions + edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist]) + + # Check if edge_color is an array of floats and map to edge_cmap. + # This is the only case handled differently from matplotlib + if ( + np.iterable(edge_color) + and (len(edge_color) == len(edge_pos)) + and np.alltrue([isinstance(c, Number) for c in edge_color]) + ): + if edge_cmap is not None: + assert isinstance(edge_cmap, mpl.colors.Colormap) + else: + edge_cmap = plt.get_cmap() + if edge_vmin is None: + edge_vmin = min(edge_color) + if edge_vmax is None: + edge_vmax = max(edge_color) + color_normal = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax) + edge_color = [edge_cmap(color_normal(e)) for e in edge_color] + + def _draw_networkx_edges_line_collection(): + edge_collection = mpl.collections.LineCollection( + edge_pos, + colors=edge_color, + linewidths=width, + antialiaseds=(1,), + linestyle=style, + alpha=alpha, + ) + edge_collection.set_cmap(edge_cmap) + edge_collection.set_clim(edge_vmin, edge_vmax) + edge_collection.set_zorder(1) # edges go behind nodes + edge_collection.set_label(label) + ax.add_collection(edge_collection) + + return edge_collection + + def _draw_networkx_edges_fancy_arrow_patch(): + # Note: Waiting for someone to implement arrow to intersection with + # marker. Meanwhile, this works well for polygons with more than 4 + # sides and circle. + + def to_marker_edge(marker_size, marker): + if marker in "s^>v